repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
NuGrid/NuGridPy | nugridpy/grain.py | get_svnpath | def get_svnpath():
'''
This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run.
'''
svnpathtmp = __file__
splitsvnpath = svnpathtmp.split('/')
if len(splitsvnpath) == 1:
svnpath = os.path.abspath('.') + '/../../'
else:
svnpath = ''
for i in range(len(splitsvnpath)-3):
svnpath += splitsvnpath[i] + '/'
return svnpath | python | def get_svnpath():
'''
This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run.
'''
svnpathtmp = __file__
splitsvnpath = svnpathtmp.split('/')
if len(splitsvnpath) == 1:
svnpath = os.path.abspath('.') + '/../../'
else:
svnpath = ''
for i in range(len(splitsvnpath)-3):
svnpath += splitsvnpath[i] + '/'
return svnpath | [
"def",
"get_svnpath",
"(",
")",
":",
"svnpathtmp",
"=",
"__file__",
"splitsvnpath",
"=",
"svnpathtmp",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"splitsvnpath",
")",
"==",
"1",
":",
"svnpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"'.'",
... | This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run. | [
"This",
"subroutine",
"gives",
"back",
"the",
"path",
"of",
"the",
"whole",
"svn",
"tree",
"installation",
"which",
"is",
"necessary",
"for",
"the",
"script",
"to",
"run",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L1156-L1170 | train | 41,800 |
NuGrid/NuGridPy | nugridpy/grain.py | gdb.reset_filter | def reset_filter(self):
'''
Resets the filter and goes back to initialized value. This
routine also resets the style if you have changed it.
'''
self.header_desc = self._header_desc
self.header_data = self._header_data
self.header_style = self._header_style
self.desc = self._desc
self.data = self._data
self.style = self._style
self.descdict = self._descdict
self.datadict = self._datadict
self.styledict = self._styledict | python | def reset_filter(self):
'''
Resets the filter and goes back to initialized value. This
routine also resets the style if you have changed it.
'''
self.header_desc = self._header_desc
self.header_data = self._header_data
self.header_style = self._header_style
self.desc = self._desc
self.data = self._data
self.style = self._style
self.descdict = self._descdict
self.datadict = self._datadict
self.styledict = self._styledict | [
"def",
"reset_filter",
"(",
"self",
")",
":",
"self",
".",
"header_desc",
"=",
"self",
".",
"_header_desc",
"self",
".",
"header_data",
"=",
"self",
".",
"_header_data",
"self",
".",
"header_style",
"=",
"self",
".",
"_header_style",
"self",
".",
"desc",
"... | Resets the filter and goes back to initialized value. This
routine also resets the style if you have changed it. | [
"Resets",
"the",
"filter",
"and",
"goes",
"back",
"to",
"initialized",
"value",
".",
"This",
"routine",
"also",
"resets",
"the",
"style",
"if",
"you",
"have",
"changed",
"it",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L133-L147 | train | 41,801 |
NuGrid/NuGridPy | nugridpy/grain.py | gdb.info | def info(self, graintype=True, group=True, reference=False,
phase=True):
'''
This routine gives you informations what kind of grains are
currently available in your filtered version. It gives you
the type of grains available. More to be implemented upon need.
Parameters
----------
graintype, group, references, phase : boolean
What do you wanna print for information. There can be a
lot of references, hence references default is False.
'''
# create a list with all graintypes
gtype_info = []
group_info = []
ref_info = []
phase_info = []
# how many grains in database
print('There are ' + str(len(self.data)) + ' grains in your database.\n')
# graintypes
if graintype:
for i in range(len(self.desc)):
gtype_tmp = self.desc[i][self.descdict['Type']]
wrtchk = True
for j in range(len(gtype_info)):
if gtype_info[j] == gtype_tmp:
wrtchk = False
break
if wrtchk:
gtype_info.append(gtype_tmp)
print('Available graintypes are:')
print('-------------------------')
print(gtype_info)
# groups
if group:
for i in range(len(self.desc)):
group_tmp = self.desc[i][self.descdict['Group']]
wrtchk = True
for j in range(len(group_info)):
if group_info[j] == group_tmp:
wrtchk = False
break
if wrtchk:
group_info.append(group_tmp)
print('\nAvailable groups of grains (for silicates and oxides) are:')
print('----------------------------------------------------------')
print(group_info)
# Phases
if phase:
for i in range(len(self.desc)):
phase_tmp = self.desc[i][self.descdict['Phase']]
wrtchk = True
for j in range(len(phase_info)):
if phase_info[j] == phase_tmp:
wrtchk = False
break
if wrtchk:
phase_info.append(phase_tmp)
print('\nAvailable Phases of grains are:')
print('----------------------------------------------------------')
print(phase_info)
# references
if reference:
for i in range(len(self.desc)):
ref_tmp = self.desc[i][self.descdict['Reference']]
wrtchk = True
for j in range(len(ref_info)):
if ref_info[j] == ref_tmp:
wrtchk = False
break
if wrtchk:
ref_info.append(ref_tmp)
print('\nReferences for grains:')
print('----------------------')
print(ref_info) | python | def info(self, graintype=True, group=True, reference=False,
phase=True):
'''
This routine gives you informations what kind of grains are
currently available in your filtered version. It gives you
the type of grains available. More to be implemented upon need.
Parameters
----------
graintype, group, references, phase : boolean
What do you wanna print for information. There can be a
lot of references, hence references default is False.
'''
# create a list with all graintypes
gtype_info = []
group_info = []
ref_info = []
phase_info = []
# how many grains in database
print('There are ' + str(len(self.data)) + ' grains in your database.\n')
# graintypes
if graintype:
for i in range(len(self.desc)):
gtype_tmp = self.desc[i][self.descdict['Type']]
wrtchk = True
for j in range(len(gtype_info)):
if gtype_info[j] == gtype_tmp:
wrtchk = False
break
if wrtchk:
gtype_info.append(gtype_tmp)
print('Available graintypes are:')
print('-------------------------')
print(gtype_info)
# groups
if group:
for i in range(len(self.desc)):
group_tmp = self.desc[i][self.descdict['Group']]
wrtchk = True
for j in range(len(group_info)):
if group_info[j] == group_tmp:
wrtchk = False
break
if wrtchk:
group_info.append(group_tmp)
print('\nAvailable groups of grains (for silicates and oxides) are:')
print('----------------------------------------------------------')
print(group_info)
# Phases
if phase:
for i in range(len(self.desc)):
phase_tmp = self.desc[i][self.descdict['Phase']]
wrtchk = True
for j in range(len(phase_info)):
if phase_info[j] == phase_tmp:
wrtchk = False
break
if wrtchk:
phase_info.append(phase_tmp)
print('\nAvailable Phases of grains are:')
print('----------------------------------------------------------')
print(phase_info)
# references
if reference:
for i in range(len(self.desc)):
ref_tmp = self.desc[i][self.descdict['Reference']]
wrtchk = True
for j in range(len(ref_info)):
if ref_info[j] == ref_tmp:
wrtchk = False
break
if wrtchk:
ref_info.append(ref_tmp)
print('\nReferences for grains:')
print('----------------------')
print(ref_info) | [
"def",
"info",
"(",
"self",
",",
"graintype",
"=",
"True",
",",
"group",
"=",
"True",
",",
"reference",
"=",
"False",
",",
"phase",
"=",
"True",
")",
":",
"# create a list with all graintypes",
"gtype_info",
"=",
"[",
"]",
"group_info",
"=",
"[",
"]",
"r... | This routine gives you informations what kind of grains are
currently available in your filtered version. It gives you
the type of grains available. More to be implemented upon need.
Parameters
----------
graintype, group, references, phase : boolean
What do you wanna print for information. There can be a
lot of references, hence references default is False. | [
"This",
"routine",
"gives",
"you",
"informations",
"what",
"kind",
"of",
"grains",
"are",
"currently",
"available",
"in",
"your",
"filtered",
"version",
".",
"It",
"gives",
"you",
"the",
"type",
"of",
"grains",
"available",
".",
"More",
"to",
"be",
"implemen... | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L149-L232 | train | 41,802 |
NuGrid/NuGridPy | nugridpy/grain.py | gdb._filter_desc | def _filter_desc(self, indexing):
'''
Private function to filter data, goes with filter_desc
'''
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
style_tmp= np.zeros((len(indexing),len(self.header_style)),dtype='|S1024')
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[indexing[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp
else:
print('No filter selected or no data found!') | python | def _filter_desc(self, indexing):
'''
Private function to filter data, goes with filter_desc
'''
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
style_tmp= np.zeros((len(indexing),len(self.header_style)),dtype='|S1024')
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[indexing[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp
else:
print('No filter selected or no data found!') | [
"def",
"_filter_desc",
"(",
"self",
",",
"indexing",
")",
":",
"# now filter data",
"if",
"len",
"(",
"indexing",
")",
">",
"0",
":",
"desc_tmp",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"indexing",
")",
",",
"len",
"(",
"self",
".",
"header_de... | Private function to filter data, goes with filter_desc | [
"Private",
"function",
"to",
"filter",
"data",
"goes",
"with",
"filter_desc"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L372-L394 | train | 41,803 |
NuGrid/NuGridPy | nugridpy/grain.py | gdb.filter_data | def filter_data(self, isos, limit, delta=True):
'''
This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space.
Parameters
----------
isos : list
isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
what do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
delta : boolean, optional
do you wanna filter in delta space, then set to True,
otherwise to False. The default is True.
'''
# check availability
dat_index, delta_b, ratio_b = self.check_availability(isos)
if dat_index == -1:
print('Isotopes selected are not available. Check i.datadict (where i is your instance) for availability of isotopes.')
return None
# select if larger or smaller and define limit
if limit[0:1] == '>':
comperator = 'gt'
elif limit[0:1] == '<':
comperator = 'st'
else:
print('Comperator not specified. Limit must be given as \'>5.\' for example.')
return None
try:
limit = float(limit[1:len(limit)])
except ValueError:
print('Limit must be given as \'>5.\' for example.')
return None
# now calculate the actual limit to compare with, depending on if it delta or not or whatsoever
if delta == delta_b: # input and available same
if ratio_b: # one over
if delta:
tmp = self.delta_to_ratio(isos,limit,oneover=True)
comp_lim = self.ratio_to_delta(isos,tmp) # check
else:
comp_lim = old_div(1.,limit) # check
else: # all fine
comp_lim = limit
else: # input and availability not the same
if ratio_b: # one over
if delta: # delta given, ratio one over wanted
comp_lim = self.delta_to_ratio(isos,limit,oneover=True)
else: # ratio given, delta one over wanted
comp_lim = self.ratio_to_delta(isos,limit,oneover=True)
else: # not one over
if delta: # delta given, ratio wanted
comp_lim = self.delta_to_ratio(isos,limit)
else:
comp_lim = self.ratio_to_delta(isos,limit)
# indexing vector
indexing = []
for i in range(len(self.data)):
dat_val = self.data[i][dat_index]
if comperator == 'st':
if dat_val < comp_lim:
indexing.append(i)
else:
if dat_val > comp_lim:
indexing.append(i)
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
self.desc = desc_tmp
self.data = data_tmp
else:
print('No filter selected!') | python | def filter_data(self, isos, limit, delta=True):
'''
This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space.
Parameters
----------
isos : list
isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
what do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
delta : boolean, optional
do you wanna filter in delta space, then set to True,
otherwise to False. The default is True.
'''
# check availability
dat_index, delta_b, ratio_b = self.check_availability(isos)
if dat_index == -1:
print('Isotopes selected are not available. Check i.datadict (where i is your instance) for availability of isotopes.')
return None
# select if larger or smaller and define limit
if limit[0:1] == '>':
comperator = 'gt'
elif limit[0:1] == '<':
comperator = 'st'
else:
print('Comperator not specified. Limit must be given as \'>5.\' for example.')
return None
try:
limit = float(limit[1:len(limit)])
except ValueError:
print('Limit must be given as \'>5.\' for example.')
return None
# now calculate the actual limit to compare with, depending on if it delta or not or whatsoever
if delta == delta_b: # input and available same
if ratio_b: # one over
if delta:
tmp = self.delta_to_ratio(isos,limit,oneover=True)
comp_lim = self.ratio_to_delta(isos,tmp) # check
else:
comp_lim = old_div(1.,limit) # check
else: # all fine
comp_lim = limit
else: # input and availability not the same
if ratio_b: # one over
if delta: # delta given, ratio one over wanted
comp_lim = self.delta_to_ratio(isos,limit,oneover=True)
else: # ratio given, delta one over wanted
comp_lim = self.ratio_to_delta(isos,limit,oneover=True)
else: # not one over
if delta: # delta given, ratio wanted
comp_lim = self.delta_to_ratio(isos,limit)
else:
comp_lim = self.ratio_to_delta(isos,limit)
# indexing vector
indexing = []
for i in range(len(self.data)):
dat_val = self.data[i][dat_index]
if comperator == 'st':
if dat_val < comp_lim:
indexing.append(i)
else:
if dat_val > comp_lim:
indexing.append(i)
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
self.desc = desc_tmp
self.data = data_tmp
else:
print('No filter selected!') | [
"def",
"filter_data",
"(",
"self",
",",
"isos",
",",
"limit",
",",
"delta",
"=",
"True",
")",
":",
"# check availability",
"dat_index",
",",
"delta_b",
",",
"ratio_b",
"=",
"self",
".",
"check_availability",
"(",
"isos",
")",
"if",
"dat_index",
"==",
"-",
... | This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space.
Parameters
----------
isos : list
isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
what do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
delta : boolean, optional
do you wanna filter in delta space, then set to True,
otherwise to False. The default is True. | [
"This",
"subroutine",
"filters",
"isotopic",
"values",
"according",
"to",
"the",
"limit",
"you",
"give",
".",
"You",
"can",
"filter",
"in",
"ratio",
"or",
"in",
"delta",
"space",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L454-L541 | train | 41,804 |
NuGrid/NuGridPy | nugridpy/grain.py | gdb.check_availability | def check_availability(self, isos):
'''
This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
index: where is it.
delta_b: is it a delta value or not?
ratio_ib: True if ratio is inverted, false if not
'''
# make names
iso1name = iso_name_converter(isos[0])
iso2name = iso_name_converter(isos[1])
ratio = iso1name + '/' + iso2name
ratio_inv = iso2name + '/' + iso1name
delta = 'd(' + iso1name + '/' + iso2name + ')'
delta_inv = 'd(' + iso2name + '/' + iso1name + ')'
index = -1
# search for data entry
try:
index = self.datadict[ratio]
delta_b = False
ratio_b = False
except KeyError:
try:
index = self.datadict[ratio_inv]
delta_b = False
ratio_b = True
except KeyError:
try:
index = self.datadict[delta]
delta_b = True
ratio_b = False
except KeyError:
try:
index = self.datadict[delta_inv]
delta_b = True
ratio_b = True
except KeyError:
index = -1
delta_b = None
ratio_b = None
return index, delta_b, ratio_b | python | def check_availability(self, isos):
'''
This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
index: where is it.
delta_b: is it a delta value or not?
ratio_ib: True if ratio is inverted, false if not
'''
# make names
iso1name = iso_name_converter(isos[0])
iso2name = iso_name_converter(isos[1])
ratio = iso1name + '/' + iso2name
ratio_inv = iso2name + '/' + iso1name
delta = 'd(' + iso1name + '/' + iso2name + ')'
delta_inv = 'd(' + iso2name + '/' + iso1name + ')'
index = -1
# search for data entry
try:
index = self.datadict[ratio]
delta_b = False
ratio_b = False
except KeyError:
try:
index = self.datadict[ratio_inv]
delta_b = False
ratio_b = True
except KeyError:
try:
index = self.datadict[delta]
delta_b = True
ratio_b = False
except KeyError:
try:
index = self.datadict[delta_inv]
delta_b = True
ratio_b = True
except KeyError:
index = -1
delta_b = None
ratio_b = None
return index, delta_b, ratio_b | [
"def",
"check_availability",
"(",
"self",
",",
"isos",
")",
":",
"# make names",
"iso1name",
"=",
"iso_name_converter",
"(",
"isos",
"[",
"0",
"]",
")",
"iso2name",
"=",
"iso_name_converter",
"(",
"isos",
"[",
"1",
"]",
")",
"ratio",
"=",
"iso1name",
"+",
... | This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
index: where is it.
delta_b: is it a delta value or not?
ratio_ib: True if ratio is inverted, false if not | [
"This",
"routine",
"checks",
"if",
"the",
"requested",
"set",
"of",
"isotopes",
"is",
"available",
"in",
"the",
"dataset",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L1006-L1059 | train | 41,805 |
NuGrid/NuGridPy | nugridpy/grain.py | gdb.ratio_to_delta | def ratio_to_delta(self, isos_ss, ratio, oneover=False):
'''
Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value
'''
# define if isos_ss is the ratio or the isotopes
if type(isos_ss) == float:
ss_ratio = isos_ss
elif type(isos_ss) == list:
ss_ratio = self.inut.isoratio_init(isos_ss)
else:
print('Check input of isos_ss into ratio_to_delta routine')
return None
# check if one over is necessary or not
if oneover:
ratio = old_div(1,ratio)
# calculate delta value
delta = (old_div(ratio, ss_ratio) - 1.) * 1000.
return delta | python | def ratio_to_delta(self, isos_ss, ratio, oneover=False):
'''
Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value
'''
# define if isos_ss is the ratio or the isotopes
if type(isos_ss) == float:
ss_ratio = isos_ss
elif type(isos_ss) == list:
ss_ratio = self.inut.isoratio_init(isos_ss)
else:
print('Check input of isos_ss into ratio_to_delta routine')
return None
# check if one over is necessary or not
if oneover:
ratio = old_div(1,ratio)
# calculate delta value
delta = (old_div(ratio, ss_ratio) - 1.) * 1000.
return delta | [
"def",
"ratio_to_delta",
"(",
"self",
",",
"isos_ss",
",",
"ratio",
",",
"oneover",
"=",
"False",
")",
":",
"# define if isos_ss is the ratio or the isotopes",
"if",
"type",
"(",
"isos_ss",
")",
"==",
"float",
":",
"ss_ratio",
"=",
"isos_ss",
"elif",
"type",
"... | Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value | [
"Transforms",
"an",
"isotope",
"ratio",
"into",
"a",
"delta",
"value"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L1063-L1100 | train | 41,806 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/storageclient.py | StorageClient.with_filter | def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
:param function(request) filter: A filter function.
:return: A new service using the specified filter.
:rtype: a subclass of :class:`StorageClient`
'''
res = copy.deepcopy(self)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res | python | def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
:param function(request) filter: A filter function.
:return: A new service using the specified filter.
:rtype: a subclass of :class:`StorageClient`
'''
res = copy.deepcopy(self)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res | [
"def",
"with_filter",
"(",
"self",
",",
"filter",
")",
":",
"res",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"old_filter",
"=",
"self",
".",
"_filter",
"def",
"new_filter",
"(",
"request",
")",
":",
"return",
"filter",
"(",
"request",
",",
"old_f... | Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
:param function(request) filter: A filter function.
:return: A new service using the specified filter.
:rtype: a subclass of :class:`StorageClient` | [
"Returns",
"a",
"new",
"service",
"which",
"will",
"process",
"requests",
"with",
"the",
"specified",
"filter",
".",
"Filtering",
"operations",
"can",
"include",
"logging",
"automatic",
"retrying",
"etc",
"...",
"The",
"filter",
"is",
"a",
"lambda",
"which",
"... | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/storageclient.py#L70-L90 | train | 41,807 |
openstack/proliantutils | proliantutils/ilo/client.py | IloClient._validate_snmp | def _validate_snmp(self):
"""Validates SNMP credentials.
:raises exception.IloInvalidInputError
"""
cred = self.snmp_credentials
if cred is not None:
if cred.get('snmp_inspection') is True:
if not all([cred.get('auth_user'),
cred.get('auth_prot_pp'),
cred.get('auth_priv_pp')]):
msg = self._('Either few or all mandatory '
'SNMP credentials '
'are missing.')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
try:
auth_protocol = cred['auth_protocol']
if auth_protocol not in ["SHA", "MD5"]:
msg = self._('Invalid SNMP auth protocol '
'provided. '
'Valid values are SHA or MD5')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Auth protocol not provided by user. '
'The default value of MD5 will '
'be considered.')
LOG.debug(msg)
pass
try:
priv_protocol = cred['priv_protocol']
if priv_protocol not in ["AES", "DES"]:
msg = self._('Invalid SNMP privacy protocol '
'provided. '
'Valid values are AES or DES')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Privacy protocol not provided '
'by user. '
'The default value of DES will '
'be considered.')
LOG.debug(msg)
pass
else:
LOG.debug(self._('snmp_inspection set to False. SNMP'
'inspection will not be performed.'))
else:
LOG.debug(self._('SNMP credentials not provided. SNMP '
'inspection will not be performed.')) | python | def _validate_snmp(self):
"""Validates SNMP credentials.
:raises exception.IloInvalidInputError
"""
cred = self.snmp_credentials
if cred is not None:
if cred.get('snmp_inspection') is True:
if not all([cred.get('auth_user'),
cred.get('auth_prot_pp'),
cred.get('auth_priv_pp')]):
msg = self._('Either few or all mandatory '
'SNMP credentials '
'are missing.')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
try:
auth_protocol = cred['auth_protocol']
if auth_protocol not in ["SHA", "MD5"]:
msg = self._('Invalid SNMP auth protocol '
'provided. '
'Valid values are SHA or MD5')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Auth protocol not provided by user. '
'The default value of MD5 will '
'be considered.')
LOG.debug(msg)
pass
try:
priv_protocol = cred['priv_protocol']
if priv_protocol not in ["AES", "DES"]:
msg = self._('Invalid SNMP privacy protocol '
'provided. '
'Valid values are AES or DES')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Privacy protocol not provided '
'by user. '
'The default value of DES will '
'be considered.')
LOG.debug(msg)
pass
else:
LOG.debug(self._('snmp_inspection set to False. SNMP'
'inspection will not be performed.'))
else:
LOG.debug(self._('SNMP credentials not provided. SNMP '
'inspection will not be performed.')) | [
"def",
"_validate_snmp",
"(",
"self",
")",
":",
"cred",
"=",
"self",
".",
"snmp_credentials",
"if",
"cred",
"is",
"not",
"None",
":",
"if",
"cred",
".",
"get",
"(",
"'snmp_inspection'",
")",
"is",
"True",
":",
"if",
"not",
"all",
"(",
"[",
"cred",
".... | Validates SNMP credentials.
:raises exception.IloInvalidInputError | [
"Validates",
"SNMP",
"credentials",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/client.py#L192-L242 | train | 41,808 |
openstack/proliantutils | proliantutils/ilo/client.py | IloClient._call_method | def _call_method(self, method_name, *args, **kwargs):
"""Call the corresponding method using RIBCL, RIS or REDFISH
Make the decision to invoke the corresponding method using RIBCL,
RIS or REDFISH way. In case of none, throw out ``NotImplementedError``
"""
if self.use_redfish_only:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
raise NotImplementedError()
else:
the_operation_object = self.ribcl
if 'Gen10' in self.model:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
if (self.is_ribcl_enabled is not None
and not self.is_ribcl_enabled):
raise NotImplementedError()
elif ('Gen9' in self.model) and (method_name in
SUPPORTED_RIS_METHODS):
the_operation_object = self.ris
method = getattr(the_operation_object, method_name)
LOG.debug(self._("Using %(class)s for method %(method)s."),
{'class': type(the_operation_object).__name__,
'method': method_name})
return method(*args, **kwargs) | python | def _call_method(self, method_name, *args, **kwargs):
"""Call the corresponding method using RIBCL, RIS or REDFISH
Make the decision to invoke the corresponding method using RIBCL,
RIS or REDFISH way. In case of none, throw out ``NotImplementedError``
"""
if self.use_redfish_only:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
raise NotImplementedError()
else:
the_operation_object = self.ribcl
if 'Gen10' in self.model:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
if (self.is_ribcl_enabled is not None
and not self.is_ribcl_enabled):
raise NotImplementedError()
elif ('Gen9' in self.model) and (method_name in
SUPPORTED_RIS_METHODS):
the_operation_object = self.ris
method = getattr(the_operation_object, method_name)
LOG.debug(self._("Using %(class)s for method %(method)s."),
{'class': type(the_operation_object).__name__,
'method': method_name})
return method(*args, **kwargs) | [
"def",
"_call_method",
"(",
"self",
",",
"method_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"use_redfish_only",
":",
"if",
"method_name",
"in",
"SUPPORTED_REDFISH_METHODS",
":",
"the_operation_object",
"=",
"self",
".",
"r... | Call the corresponding method using RIBCL, RIS or REDFISH
Make the decision to invoke the corresponding method using RIBCL,
RIS or REDFISH way. In case of none, throw out ``NotImplementedError`` | [
"Call",
"the",
"corresponding",
"method",
"using",
"RIBCL",
"RIS",
"or",
"REDFISH"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/client.py#L244-L274 | train | 41,809 |
def set_vm_status(self, device='FLOPPY',
                  boot_option='BOOT_ONCE', write_protect='YES'):
    """Configure the Virtual Media drive status and boot options.

    Forwards the request to whichever management protocol backend
    (RIBCL/RIS/Redfish) is appropriate for this server, allowing the
    boot options for booting from the virtual media.
    """
    return self._call_method(
        'set_vm_status', device, boot_option, write_protect)
"def",
"set_vm_status",
"(",
"self",
",",
"device",
"=",
"'FLOPPY'",
",",
"boot_option",
"=",
"'BOOT_ONCE'",
",",
"write_protect",
"=",
"'YES'",
")",
":",
"return",
"self",
".",
"_call_method",
"(",
"'set_vm_status'",
",",
"device",
",",
"boot_option",
",",
... | Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media. | [
"Sets",
"the",
"Virtual",
"Media",
"drive",
"status",
"and",
"allows",
"the"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/client.py#L438-L445 | train | 41,810 |
def get_essential_properties(self):
    """Get the essential scheduling properties.

    :returns: a dictionary containing memory size, disk size,
              number of cpus, cpu arch, port numbers and
              mac addresses.
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedError, if the command is not supported
             on the server.
    """
    data = self._call_method('get_essential_properties')
    if data['properties']['local_gb'] != 0:
        return data
    # The management interface could not report a disk size; fall back
    # to SNMP inspection when credentials allow it.
    cred = self.snmp_credentials
    if not (cred and cred.get('snmp_inspection')):
        LOG.debug(self._("SNMP credentials were not set and "
                         "RIBCL/Redfish failed to get the disk size. "
                         "Returning local_gb as 0."))
        return data
    disksize = snmp.get_local_gb(self.host, cred)
    if disksize:
        data['properties']['local_gb'] = disksize
    else:
        LOG.debug(self._('SNMP inspection failed to '
                         'get the disk size. Returning '
                         'local_gb as 0.'))
    return data
"def",
"get_essential_properties",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"_call_method",
"(",
"'get_essential_properties'",
")",
"if",
"(",
"data",
"[",
"'properties'",
"]",
"[",
"'local_gb'",
"]",
"==",
"0",
")",
":",
"cred",
"=",
"self",
".",
... | Get the essential scheduling properties
:returns: a dictionary containing memory size, disk size,
number of cpus, cpu arch, port numbers and
mac addresses.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Get",
"the",
"essential",
"scheduling",
"properties"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/client.py#L594-L621 | train | 41,811 |
def get_server_capabilities(self):
    """Get hardware properties which can be used for scheduling.

    :return: a dictionary of server capabilities, or None when no
             capability could be collected.
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedError, if the command is not supported
             on the server.
    """
    capabilities = self._call_method('get_server_capabilities')
    # TODO(nisha): Assumption is that Redfish always see the pci_device
    # member name field populated similarly to IPMI.
    # If redfish is not able to get nic_capacity, we can fall back to
    # IPMI way of retrieving nic_capacity in the future. As of now
    # the IPMI is not tested on Gen10, hence assuming that
    # Redfish will always be able to give the data.
    if 'Gen10' not in self.model:
        fw_rev = self._call_method(
            'get_ilo_firmware_version_as_major_minor')
        # NOTE(vmud213): Even if it is None, pass it on to
        # get_nic_capacity as we still want to try getting nic capacity
        # through ipmitool irrespective of what firmware we are using.
        nic_capacity = ipmi.get_nic_capacity(self.ipmi_host_info, fw_rev)
        if nic_capacity:
            capabilities['nic_capacity'] = nic_capacity
    # An empty dict deliberately falls through and returns None,
    # matching the historical contract of this method.
    if capabilities:
        return capabilities
"def",
"get_server_capabilities",
"(",
"self",
")",
":",
"capabilities",
"=",
"self",
".",
"_call_method",
"(",
"'get_server_capabilities'",
")",
"# TODO(nisha): Assumption is that Redfish always see the pci_device",
"# member name field populated similarly to IPMI.",
"# If redfish i... | Get hardware properties which can be used for scheduling
:return: a dictionary of server capabilities.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Get",
"hardware",
"properties",
"which",
"can",
"be",
"used",
"for",
"scheduling"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/client.py#L623-L651 | train | 41,812 |
NuGrid/NuGridPy | nugridpy/mesa.py | _read_mesafile | def _read_mesafile(filename,data_rows=0,only='all'):
""" private routine that is not directly called by the user"""
f=open(filename,'r')
vv=[]
v=[]
lines = []
line = ''
for i in range(0,6):
line = f.readline()
lines.extend([line])
hval = lines[2].split()
hlist = lines[1].split()
header_attr = {}
for a,b in zip(hlist,hval):
header_attr[a] = float(b)
if only is 'header_attr':
return header_attr
cols = {}
colnum = lines[4].split()
colname = lines[5].split()
for a,b in zip(colname,colnum):
cols[a] = int(b)
data = []
old_percent = 0
for i in range(data_rows):
# writing reading status
percent = int(i*100/np.max([1, data_rows-1]))
if percent >= old_percent + 5:
sys.stdout.flush()
sys.stdout.write("\r reading " + "...%d%%" % percent)
old_percent = percent
line = f.readline()
v=line.split()
try:
vv=np.array(v,dtype='float64')
except ValueError:
for item in v:
if item.__contains__('.') and not item.__contains__('E'):
v[v.index(item)]='0'
data.append(vv)
print(' \n')
f.close()
a=np.array(data)
data = []
return header_attr, cols, a | python | def _read_mesafile(filename,data_rows=0,only='all'):
""" private routine that is not directly called by the user"""
f=open(filename,'r')
vv=[]
v=[]
lines = []
line = ''
for i in range(0,6):
line = f.readline()
lines.extend([line])
hval = lines[2].split()
hlist = lines[1].split()
header_attr = {}
for a,b in zip(hlist,hval):
header_attr[a] = float(b)
if only is 'header_attr':
return header_attr
cols = {}
colnum = lines[4].split()
colname = lines[5].split()
for a,b in zip(colname,colnum):
cols[a] = int(b)
data = []
old_percent = 0
for i in range(data_rows):
# writing reading status
percent = int(i*100/np.max([1, data_rows-1]))
if percent >= old_percent + 5:
sys.stdout.flush()
sys.stdout.write("\r reading " + "...%d%%" % percent)
old_percent = percent
line = f.readline()
v=line.split()
try:
vv=np.array(v,dtype='float64')
except ValueError:
for item in v:
if item.__contains__('.') and not item.__contains__('E'):
v[v.index(item)]='0'
data.append(vv)
print(' \n')
f.close()
a=np.array(data)
data = []
return header_attr, cols, a | [
"def",
"_read_mesafile",
"(",
"filename",
",",
"data_rows",
"=",
"0",
",",
"only",
"=",
"'all'",
")",
":",
"f",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
"vv",
"=",
"[",
"]",
"v",
"=",
"[",
"]",
"lines",
"=",
"[",
"]",
"line",
"=",
"''",
... | private routine that is not directly called by the user | [
"private",
"routine",
"that",
"is",
"not",
"directly",
"called",
"by",
"the",
"user"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3725-L3774 | train | 41,813 |
NuGrid/NuGridPy | nugridpy/mesa.py | mesa_profile._profiles_index | def _profiles_index(self):
"""
read profiles.index and make hash array
Notes
-----
sets the attributes.
log_ind : hash array that returns profile.data or log.data
file number from model number.
model : the models for which profile.data or log.data is
available
"""
prof_ind_name = self.prof_ind_name
f = open(self.sldir+'/'+prof_ind_name,'r')
line = f.readline()
numlines=int(line.split()[0])
print(str(numlines)+' in profiles.index file ...')
model=[]
log_file_num=[]
for line in f:
model.append(int(line.split()[0]))
log_file_num.append(int(line.split()[2]))
log_ind={} # profile.data number from model
for a,b in zip(model,log_file_num):
log_ind[a] = b
self.log_ind=log_ind
self.model=model | python | def _profiles_index(self):
"""
read profiles.index and make hash array
Notes
-----
sets the attributes.
log_ind : hash array that returns profile.data or log.data
file number from model number.
model : the models for which profile.data or log.data is
available
"""
prof_ind_name = self.prof_ind_name
f = open(self.sldir+'/'+prof_ind_name,'r')
line = f.readline()
numlines=int(line.split()[0])
print(str(numlines)+' in profiles.index file ...')
model=[]
log_file_num=[]
for line in f:
model.append(int(line.split()[0]))
log_file_num.append(int(line.split()[2]))
log_ind={} # profile.data number from model
for a,b in zip(model,log_file_num):
log_ind[a] = b
self.log_ind=log_ind
self.model=model | [
"def",
"_profiles_index",
"(",
"self",
")",
":",
"prof_ind_name",
"=",
"self",
".",
"prof_ind_name",
"f",
"=",
"open",
"(",
"self",
".",
"sldir",
"+",
"'/'",
"+",
"prof_ind_name",
",",
"'r'",
")",
"line",
"=",
"f",
".",
"readline",
"(",
")",
"numlines"... | read profiles.index and make hash array
Notes
-----
sets the attributes.
log_ind : hash array that returns profile.data or log.data
file number from model number.
model : the models for which profile.data or log.data is
available | [
"read",
"profiles",
".",
"index",
"and",
"make",
"hash",
"array"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L421-L455 | train | 41,814 |
NuGrid/NuGridPy | nugridpy/mesa.py | mesa_profile._log_file_ind | def _log_file_ind(self,inum):
"""
Information about available profile.data or log.data files.
Parameters
----------
inum : integer
Attempt to get number of inum's profile.data file.
inum_max: max number of profile.data or log.data files
available
"""
self._profiles_index()
if inum <= 0:
print("Smallest argument is 1")
return
inum_max = len(self.log_ind)
inum -= 1
if inum > inum_max:
print('There are only '+str(inum_max)+' profile file available.')
log_data_number = -1
return log_data_number
else:
log_data_number=self.log_ind[self.model[inum]]
print('The '+str(inum+1)+'. profile.data file is '+ \
str(log_data_number))
return log_data_number | python | def _log_file_ind(self,inum):
"""
Information about available profile.data or log.data files.
Parameters
----------
inum : integer
Attempt to get number of inum's profile.data file.
inum_max: max number of profile.data or log.data files
available
"""
self._profiles_index()
if inum <= 0:
print("Smallest argument is 1")
return
inum_max = len(self.log_ind)
inum -= 1
if inum > inum_max:
print('There are only '+str(inum_max)+' profile file available.')
log_data_number = -1
return log_data_number
else:
log_data_number=self.log_ind[self.model[inum]]
print('The '+str(inum+1)+'. profile.data file is '+ \
str(log_data_number))
return log_data_number | [
"def",
"_log_file_ind",
"(",
"self",
",",
"inum",
")",
":",
"self",
".",
"_profiles_index",
"(",
")",
"if",
"inum",
"<=",
"0",
":",
"print",
"(",
"\"Smallest argument is 1\"",
")",
"return",
"inum_max",
"=",
"len",
"(",
"self",
".",
"log_ind",
")",
"inum... | Information about available profile.data or log.data files.
Parameters
----------
inum : integer
Attempt to get number of inum's profile.data file.
inum_max: max number of profile.data or log.data files
available | [
"Information",
"about",
"available",
"profile",
".",
"data",
"or",
"log",
".",
"data",
"files",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L459-L488 | train | 41,815 |
def get(self, str_name):
    """Return the column of data named ``str_name``.

    Parameters
    ----------
    str_name : string
        Name of the column as printed in the profilennn.data or
        lognnn.data file; the available columns are listed in
        ``self.cols``.

    Returns
    -------
    numpy.ndarray
        The requested column converted to floats.
    """
    # self.cols stores 1-based column numbers; convert to 0-based.
    icol = self.cols[str_name] - 1
    return self.data[:, icol].astype('float')
"def",
"get",
"(",
"self",
",",
"str_name",
")",
":",
"column_array",
"=",
"self",
".",
"data",
"[",
":",
",",
"self",
".",
"cols",
"[",
"str_name",
"]",
"-",
"1",
"]",
".",
"astype",
"(",
"'float'",
")",
"return",
"column_array"
] | return a column of data with the name str_name.
Parameters
----------
str_name : string
Is the name of the column as printed in the
profilennn.data or lognnn.data file; get the available
columns from self.cols (where you replace self with the
name of your instance) | [
"return",
"a",
"column",
"of",
"data",
"with",
"the",
"name",
"str_name",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L490-L505 | train | 41,816 |
def write_LEAFS_model(self, nzn=30000000, dr=5.e4,
                      rhostrip=5.e-4):
    """Write an ascii initial model for LEAFS.

    Produces the file ``M875.inimod`` that is read by Sam's version of
    inimod.F90.  The stellar core is kept up to where the density drops
    to ``rhostrip``; density and Ye are then resampled on a uniform
    radial grid with spacing ``dr``.

    Parameters
    ----------
    nzn : integer, optional
        Maximum number of radial zones to write.
    dr : float, optional
        Radial grid spacing in cm.
    rhostrip : float, optional
        Density (cgs) below which the envelope is stripped.
    """
    from scipy import interpolate

    # centre-to-surface profiles, core only
    rho = 10.**self.get('logRho')[::-1]
    cut = np.abs(rho - rhostrip).argmin() + 1
    rho = rho[:cut]
    rhoc = rho[0]
    rad = (10.**self.get('logR') * ast.rsun_cm)[::-1][:cut]
    ye = self.get('ye')[::-1][:cut]
    print('there will be about ', old_div(rad[-1], dr), 'mass cells...')

    # prepend an r = 0 point so the interpolants cover the centre
    rad = np.insert(rad, 0, 0)
    ye = np.insert(ye, 0, ye[0])
    rho = np.insert(rho, 0, rho[0])
    print(rad)

    fye = interpolate.interp1d(rad, ye)
    frho = interpolate.interp1d(rad, rho)
    newrad, newrho, newye = [], [], []
    Tc = 10.**self.get('logT')[-1]
    for i in range(nzn):
        r = i * dr
        if r > rad[-1]:
            break
        newye.append(fye(r))
        newrho.append(frho(r))
        newrad.append(r)

    with open('M875.inimod', 'w') as f:
        f.write(str(Tc) + ' \n')
        f.write(str(rhoc) + ' \n')
        for i in range(len(newye)):
            f.write(str(i + 1) + ' ' + str(newrad[i]) + ' ' +
                    str(newrho[i]) + ' ' + str(newye[i]) + ' \n')
"def",
"write_LEAFS_model",
"(",
"self",
",",
"nzn",
"=",
"30000000",
",",
"dr",
"=",
"5.e4",
",",
"rhostrip",
"=",
"5.e-4",
")",
":",
"from",
"scipy",
"import",
"interpolate",
"ye",
"=",
"self",
".",
"get",
"(",
"'ye'",
")",
"newye",
"=",
"[",
"]",
... | write an ascii file that will be read by Sam's version of
inimod.F90 in order to make an initial model for LEAFS | [
"write",
"an",
"ascii",
"file",
"that",
"will",
"be",
"read",
"by",
"Sam",
"s",
"version",
"of",
"inimod",
".",
"F90",
"in",
"order",
"to",
"make",
"an",
"initial",
"model",
"for",
"LEAFS"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L692-L744 | train | 41,817 |
def energy_profile(self, ixaxis):
    """Plot the radial profile of the key energy-generation rates.

    Shows log10 of the positive and negative nuclear energy generation
    rate (eps_nuc) and of the non-nuclear neutrino losses (eps_neu).

    Parameters
    ----------
    ixaxis : string
        'mass' to plot against enclosed mass, anything else to plot
        against radius in Mm.
    """
    mass = self.get('mass')
    radius_cm = self.get('radius') * ast.rsun_cm
    eps_nuc = self.get('eps_nuc')
    eps_neu = self.get('non_nuc_neu')
    if ixaxis == 'mass':
        xaxis = mass
        xlab = 'Mass / M$_\odot$'
    else:
        # convert radius from cm to Mm
        xaxis = old_div(radius_cm, 1.e8)
        xlab = 'Radius (Mm)'
    for ydata, fmt, lbl in (
            (eps_nuc, 'k-', '$\epsilon_\mathrm{nuc}>0$'),
            (-eps_nuc, 'k--', '$\epsilon_\mathrm{nuc}<0$'),
            (eps_neu, 'r-', '$\epsilon_\\nu$')):
        pl.plot(xaxis, np.log10(ydata), fmt, label=lbl)
    pl.xlabel(xlab)
    pl.ylabel('$\log(\epsilon_\mathrm{nuc},\epsilon_\\nu)$')
    pl.legend(loc='best').draw_frame(False)
"def",
"energy_profile",
"(",
"self",
",",
"ixaxis",
")",
":",
"mass",
"=",
"self",
".",
"get",
"(",
"'mass'",
")",
"radius",
"=",
"self",
".",
"get",
"(",
"'radius'",
")",
"*",
"ast",
".",
"rsun_cm",
"eps_nuc",
"=",
"self",
".",
"get",
"(",
"'eps_... | Plot radial profile of key energy generations eps_nuc,
eps_neu etc.
Parameters
----------
ixaxis : 'mass' or 'radius' | [
"Plot",
"radial",
"profile",
"of",
"key",
"energy",
"generations",
"eps_nuc",
"eps_neu",
"etc",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L746-L780 | train | 41,818 |
def _read_starlog(self):
    """Read the history.data or star.log file (again).

    Creates the cleaned '<name>sa' companion file if it does not exist
    (or rebuilds it when ``self.clean_starlog`` is set), then parses it
    with ``_read_mesafile`` and stores ``cols``, ``header_attr`` and
    ``data`` on the instance.
    """
    sldir = self.sldir
    slname = self.slname
    slaname = slname + 'sa'
    if not os.path.exists(sldir + '/' + slaname):
        print('No ' + self.slname + 'sa file found, create new one from '
              + self.slname)
        _cleanstarlog(sldir + '/' + slname)
    else:
        if self.clean_starlog:
            print('Requested new ' + self.slname + 'sa; create new from '
                  + self.slname)
            _cleanstarlog(sldir + '/' + slname)
        else:
            print('Using old ' + self.slname + 'sa file ...')
    filename = sldir + '/' + slaname
    # Bug fix: count the lines in Python instead of shelling out to
    # "wc" via os.popen -- portable (no POSIX shell needed) and it no
    # longer leaks the popen file handle.
    with open(filename, 'r') as f:
        total_lines = sum(1 for _ in f)
    num_cycles = total_lines - 6  # 6 header lines precede the data
    header_attr, cols, data = _read_mesafile(filename,
                                             data_rows=num_cycles)
    self.cols = cols
    self.header_attr = header_attr
    self.data = data
"def",
"_read_starlog",
"(",
"self",
")",
":",
"sldir",
"=",
"self",
".",
"sldir",
"slname",
"=",
"self",
".",
"slname",
"slaname",
"=",
"slname",
"+",
"'sa'",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sldir",
"+",
"'/'",
"+",
"slaname",
... | read history.data or star.log file again | [
"read",
"history",
".",
"data",
"or",
"star",
".",
"log",
"file",
"again"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L898-L926 | train | 41,819 |
def hrd(self, ifig=None, label=None, colour=None, s2ms=False,
        dashes=None, **kwargs):
    """Plot an HR diagram.

    Parameters
    ----------
    ifig : integer or string
        Figure label; if None the current figure is used.
        The default value is None.
    label : string
        Label for the model.  The default value is None.
    colour : string
        The colour of the line.  The default value is None.
    s2ms : boolean, optional
        "Skip to Main Sequence"?  The default is False.
    dashes : list, optional
        Custom dashing style.  If None, ignore.  The default is None.
    """
    if ifig is not None:
        pl.figure(ifig)
    if s2ms:
        # first model where central H has dropped by 3e-3 (ZAMS proxy)
        h1 = self.get('center_h1')
        skip = np.where(h1[0] - h1 >= 3.e-3)[0][0]
    else:
        skip = 0
    teff = self.get('log_Teff')[skip:]
    lum = self.get('log_L')[skip:]
    plot_kw = {}
    if label is not None:
        plot_kw['label'] = label
    if colour is not None:
        plot_kw['color'] = colour
    line, = pl.plot(teff, lum, **plot_kw, **kwargs)
    if dashes is not None:
        line.set_dashes(dashes)
    if label is not None:
        pl.legend(loc='best').draw_frame(False)
    pyl.xlabel('$\log T_{\\rm eff}$')
    pyl.ylabel('$\log L$')
    # HR diagrams are drawn with Teff decreasing to the right
    x1, x2 = pl.xlim()
    if x2 > x1:
        pl.gca().invert_xaxis()
"def",
"hrd",
"(",
"self",
",",
"ifig",
"=",
"None",
",",
"label",
"=",
"None",
",",
"colour",
"=",
"None",
",",
"s2ms",
"=",
"False",
",",
"dashes",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# fsize=18",
"#",
"# params = {'axes.la... | Plot an HR diagram
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"Skip to Main Sequence"?
The default is False.
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None. | [
"Plot",
"an",
"HR",
"diagram"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L973-L1056 | train | 41,820 |
def hrd_key(self, key_str):
    """Plot an HR diagram labelled with ``key_str``.

    Parameters
    ----------
    key_str : string
        A label string for the legend.
    """
    teff = self.data[:, self.cols['log_Teff'] - 1]
    lum = self.data[:, self.cols['log_L'] - 1]
    pyl.plot(teff, lum, label=key_str)
    pyl.legend()
    pyl.xlabel('log Teff')
    pyl.ylabel('log L')
    # HR diagrams are drawn with Teff decreasing to the right
    x1, x2 = pl.xlim()
    if x2 > x1:
        self._xlimrev()
"def",
"hrd_key",
"(",
"self",
",",
"key_str",
")",
":",
"pyl",
".",
"plot",
"(",
"self",
".",
"data",
"[",
":",
",",
"self",
".",
"cols",
"[",
"'log_Teff'",
"]",
"-",
"1",
"]",
",",
"self",
".",
"data",
"[",
":",
",",
"self",
".",
"cols",
"[... | plot an HR diagram
Parameters
----------
key_str : string
A label string | [
"plot",
"an",
"HR",
"diagram"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1059-L1077 | train | 41,821 |
def hrd_new(self, input_label="", skip=0):
    """Plot an HR diagram, optionally skipping the first ``skip`` models.

    Parameters
    ----------
    input_label : string, optional
        Extra text appended to the automatic "M=..., Z=..." label.
        The default is "".
    skip : integer, optional
        Skip the first n lines.  The default is 0.
    """
    xl_old = pyl.gca().get_xlim()
    my_label = ("M=" + str(self.header_attr['initial_mass'])
                + ", Z=" + str(self.header_attr['initial_z']))
    if input_label != "":
        my_label += "; " + str(input_label)
    teff = self.data[skip:, self.cols['log_Teff'] - 1]
    lum = self.data[skip:, self.cols['log_L'] - 1]
    pyl.plot(teff, lum, label=my_label)
    pyl.legend(loc=0)
    xl_new = pyl.gca().get_xlim()
    pyl.xlabel('log Teff')
    pyl.ylabel('log L')
    # Keep Teff decreasing to the right, widening the limits to cover
    # both the previous and the new track (a limit of 0 marks a fresh axis).
    if any(array(xl_old) == 0):
        pyl.gca().set_xlim(max(xl_new), min(xl_new))
    elif any(array(xl_new) == 0):
        pyl.gca().set_xlim(max(xl_old), min(xl_old))
    else:
        pyl.gca().set_xlim([max(xl_old + xl_new), min(xl_old + xl_new)])
"def",
"hrd_new",
"(",
"self",
",",
"input_label",
"=",
"\"\"",
",",
"skip",
"=",
"0",
")",
":",
"xl_old",
"=",
"pyl",
".",
"gca",
"(",
")",
".",
"get_xlim",
"(",
")",
"if",
"input_label",
"==",
"\"\"",
":",
"my_label",
"=",
"\"M=\"",
"+",
"str",
... | plot an HR diagram with options to skip the first N lines and
add a label string
Parameters
----------
input_label : string, optional
Diagram label. The default is "".
skip : integer, optional
Skip the first n lines. The default is 0. | [
"plot",
"an",
"HR",
"diagram",
"with",
"options",
"to",
"skip",
"the",
"first",
"N",
"lines",
"and",
"add",
"a",
"label",
"string"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1079-L1108 | train | 41,822 |
def xche4_teff(self, ifig=None, lims=[1., 0., 3.4, 4.7], label=None,
               colour=None, s2ms=True, dashes=None):
    """Plot effective temperature against central helium abundance.

    Parameters
    ----------
    ifig : integer or string
        Figure label; if None the current figure is used.
        The default value is None.
    lims : list [x_lower, x_upper, y_lower, y_upper]
    label : string
        Label for the model.  The default value is None.
    colour : string
        The colour of the line.  The default value is None.
    s2ms : boolean, optional
        "Skip to Main Sequence".  The default is True.
    dashes : list, optional
        Custom dashing style.  If None, ignore.  The default is None.
    """
    fsize = 18
    params = {'axes.labelsize': fsize,
              'font.family': 'Times New Roman',
              'figure.facecolor': 'white',
              'text.fontsize': fsize,
              'legend.fontsize': fsize,
              'xtick.labelsize': fsize * 0.8,
              'ytick.labelsize': fsize * 0.8,
              'text.usetex': False}
    # Newer matplotlib releases reject some of these rc keys (e.g.
    # 'text.fontsize'); ignore that instead of failing.  Bug fix: catch
    # Exception rather than a bare 'except:', which also swallowed
    # KeyboardInterrupt and SystemExit.
    try:
        pl.rcParams.update(params)
    except Exception:
        pass
    if s2ms:
        # skip to the main sequence: first model where central H has
        # dropped by 1e-3
        h1 = self.get('center_h1')
        skip = np.where(h1[0] - h1 >= 1.e-3)[0][0]
    else:
        skip = 0
    x = self.get('center_he4')[skip:]
    y = self.get('log_Teff')[skip:]
    if ifig is not None:
        pl.figure(ifig)
    if label is not None:
        if colour is not None:
            line, = pl.plot(x, y, label=label, color=colour)
        else:
            line, = pl.plot(x, y, label=label)
        pl.legend(loc='best').draw_frame(False)
    else:
        if colour is not None:
            line, = pl.plot(x, y, color=colour)
        else:
            line, = pl.plot(x, y)
    if dashes is not None:
        line.set_dashes(dashes)
    if label is not None:
        pl.legend(loc='best').draw_frame(False)
    pl.xlim(lims[:2])
    pl.ylim(lims[2:])
    pl.xlabel('$X_{\\rm c}(\,^4{\\rm He}\,)$')
    pl.ylabel('$\log\,T_{\\rm eff}$')
s2ms=True,dashes=None):
"""
Plot effective temperature against central helium abundance.
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"Skip to Main Sequence"
The default is True
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
fsize=18
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'text.fontsize': fsize,
'legend.fontsize': fsize,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'text.usetex': False}
try:
pl.rcParams.update(params)
except:
pass
if s2ms:
h1=self.get('center_h1')
idx=np.where(h1[0]-h1>=1.e-3)[0][0]
skip=idx
else:
skip=0
x = self.get('center_he4')[skip:]
y = self.get('log_Teff')[skip:]
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
line,=pl.plot(x,y,label=label,color=colour)
else:
line,=pl.plot(x,y,label=label)
pl.legend(loc='best').draw_frame(False)
else:
if colour is not None:
line,=pl.plot(x,y,color=colour)
else:
line,=pl.plot(x,y)
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
pl.xlim(lims[:2])
pl.ylim(lims[2:])
pl.xlabel('$X_{\\rm c}(\,^4{\\rm He}\,)$')
pl.ylabel('$\log\,T_{\\rm eff}$') | [
"def",
"xche4_teff",
"(",
"self",
",",
"ifig",
"=",
"None",
",",
"lims",
"=",
"[",
"1.",
",",
"0.",
",",
"3.4",
",",
"4.7",
"]",
",",
"label",
"=",
"None",
",",
"colour",
"=",
"None",
",",
"s2ms",
"=",
"True",
",",
"dashes",
"=",
"None",
")",
... | Plot effective temperature against central helium abundance.
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"Skip to Main Sequence"
The default is True
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None. | [
"Plot",
"effective",
"temperature",
"against",
"central",
"helium",
"abundance",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1110-L1184 | train | 41,823 |
NuGrid/NuGridPy | nugridpy/mesa.py | history_data.tcrhoc | def tcrhoc(self,ifig=None,lims=[3.,10.,8.,10.],label=None,colour=None,
dashes=None):
"""
Central temperature again central density plot
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
# fsize=18
#
# params = {'axes.labelsize': fsize,
# # 'font.family': 'serif',
# 'font.family': 'Times New Roman',
# 'figure.facecolor': 'white',
# 'text.fontsize': fsize,
# 'legend.fontsize': fsize,
# 'xtick.labelsize': fsize*0.8,
# 'ytick.labelsize': fsize*0.8,
# 'text.usetex': False}
#
# try:
# pl.rcParams.update(params)
# except:
# pass
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),label=label,
color=colour)
else:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),label=label)
else:
if colour is not None:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),
color=colour)
else:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'))
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
pl.xlim(lims[:2])
pl.ylim(lims[2:])
pl.xlabel('log $\\rho_{\\rm c}$')
pl.ylabel('log $T_{\\rm c}$') | python | def tcrhoc(self,ifig=None,lims=[3.,10.,8.,10.],label=None,colour=None,
dashes=None):
"""
Central temperature again central density plot
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
# fsize=18
#
# params = {'axes.labelsize': fsize,
# # 'font.family': 'serif',
# 'font.family': 'Times New Roman',
# 'figure.facecolor': 'white',
# 'text.fontsize': fsize,
# 'legend.fontsize': fsize,
# 'xtick.labelsize': fsize*0.8,
# 'ytick.labelsize': fsize*0.8,
# 'text.usetex': False}
#
# try:
# pl.rcParams.update(params)
# except:
# pass
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),label=label,
color=colour)
else:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),label=label)
else:
if colour is not None:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),
color=colour)
else:
line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'))
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
pl.xlim(lims[:2])
pl.ylim(lims[2:])
pl.xlabel('log $\\rho_{\\rm c}$')
pl.ylabel('log $T_{\\rm c}$') | [
"def",
"tcrhoc",
"(",
"self",
",",
"ifig",
"=",
"None",
",",
"lims",
"=",
"[",
"3.",
",",
"10.",
",",
"8.",
",",
"10.",
"]",
",",
"label",
"=",
"None",
",",
"colour",
"=",
"None",
",",
"dashes",
"=",
"None",
")",
":",
"# fsize=18",
"#",
... | Central temperature again central density plot
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None. | [
"Central",
"temperature",
"again",
"central",
"density",
"plot"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1187-L1251 | train | 41,824 |
NuGrid/NuGridPy | nugridpy/mesa.py | history_data.mdot_t | def mdot_t(self,ifig=None,lims=[7.4,2.6,-8.5,-4.5],label=None,colour=None,s2ms=False,
dashes=None):
"""
Plot mass loss history as a function of log-time-left
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"skip to main sequence"
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
fsize=18
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'text.fontsize': fsize,
'legend.fontsize': fsize,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'text.usetex': False}
try:
pl.rcParams.update(params)
except:
pass
if ifig is not None:
pl.figure(ifig)
if s2ms:
h1=self.get('center_h1')
idx=np.where(h1[0]-h1>=3.e-3)[0][0]
skip=idx
else:
skip=0
gage= self.get('star_age')
lage=np.zeros(len(gage))
agemin = max(old_div(abs(gage[-1]-gage[-2]),5.),1.e-10)
for i in np.arange(len(gage)):
if gage[-1]-gage[i]>agemin:
lage[i]=np.log10(gage[-1]-gage[i]+agemin)
else :
lage[i]=np.log10(agemin)
x = lage[skip:]
y = self.get('log_abs_mdot')[skip:]
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
line,=pl.plot(x,y,label=label,color=colour)
else:
line,=pl.plot(x,y,label=label)
else:
if colour is not None:
line,=pl.plot(x,y,color=colour)
else:
line,=pl.plot(x,y)
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
pl.xlim(lims[:2])
pl.ylim(lims[2:])
pl.ylabel('$\mathrm{log}_{10}(\|\dot{M}\|/M_\odot\,\mathrm{yr}^{-1})$')
pl.xlabel('$\mathrm{log}_{10}(t^*/\mathrm{yr})$') | python | def mdot_t(self,ifig=None,lims=[7.4,2.6,-8.5,-4.5],label=None,colour=None,s2ms=False,
dashes=None):
"""
Plot mass loss history as a function of log-time-left
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"skip to main sequence"
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
fsize=18
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'text.fontsize': fsize,
'legend.fontsize': fsize,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'text.usetex': False}
try:
pl.rcParams.update(params)
except:
pass
if ifig is not None:
pl.figure(ifig)
if s2ms:
h1=self.get('center_h1')
idx=np.where(h1[0]-h1>=3.e-3)[0][0]
skip=idx
else:
skip=0
gage= self.get('star_age')
lage=np.zeros(len(gage))
agemin = max(old_div(abs(gage[-1]-gage[-2]),5.),1.e-10)
for i in np.arange(len(gage)):
if gage[-1]-gage[i]>agemin:
lage[i]=np.log10(gage[-1]-gage[i]+agemin)
else :
lage[i]=np.log10(agemin)
x = lage[skip:]
y = self.get('log_abs_mdot')[skip:]
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
line,=pl.plot(x,y,label=label,color=colour)
else:
line,=pl.plot(x,y,label=label)
else:
if colour is not None:
line,=pl.plot(x,y,color=colour)
else:
line,=pl.plot(x,y)
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
pl.xlim(lims[:2])
pl.ylim(lims[2:])
pl.ylabel('$\mathrm{log}_{10}(\|\dot{M}\|/M_\odot\,\mathrm{yr}^{-1})$')
pl.xlabel('$\mathrm{log}_{10}(t^*/\mathrm{yr})$') | [
"def",
"mdot_t",
"(",
"self",
",",
"ifig",
"=",
"None",
",",
"lims",
"=",
"[",
"7.4",
",",
"2.6",
",",
"-",
"8.5",
",",
"-",
"4.5",
"]",
",",
"label",
"=",
"None",
",",
"colour",
"=",
"None",
",",
"s2ms",
"=",
"False",
",",
"dashes",
"=",
"No... | Plot mass loss history as a function of log-time-left
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"skip to main sequence"
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None. | [
"Plot",
"mass",
"loss",
"history",
"as",
"a",
"function",
"of",
"log",
"-",
"time",
"-",
"left"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1253-L1337 | train | 41,825 |
NuGrid/NuGridPy | nugridpy/mesa.py | history_data.t_surfabu | def t_surfabu(self, num_frame, xax, t0_model=0,
title='surface abundance', t_eps=1.e-3,
plot_CO_ratio=False):
"""
t_surfabu plots surface abundance evolution as a function of
time.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into, if <0 don't open
figure.
xax : string
Either model, time or logrevtime to indicate what is to be
used on the x-axis.
t0_model : integer, optional
Model for the zero point in time, for AGB plots this would
be usually the model of the 1st TP, which can be found with
the Kippenhahn plot. The default is 0.
title : string, optional
Figure title. The default is "surface abundance".
t_eps : float, optional
Time eps at end for logrevtime. The default is 1.e-3.
plot_CO_ratio : boolean, optional
On second axis True/False. The default is False.
"""
if num_frame >= 0:
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')[t0_model:]
elif xax == 'model':
xaxisarray = self.get('model_number')[t0_model:]
elif xax == 'logrevtime':
xaxisarray = self.get('star_age')
xaxisarray=np.log10(max(xaxisarray[t0_model:])+t_eps-xaxisarray[t0_model:])
else:
print('t-surfabu error: invalid string for x-axis selction.'+ \
' needs to be "time" or "model"')
star_mass = self.get('star_mass')
surface_c12 = self.get('surface_c12')
surface_c13 = self.get('surface_c13')
surface_n14 = self.get('surface_n14')
surface_o16 = self.get('surface_o16')
target_n14 = -3.5
COratio=old_div((surface_c12*4.),(surface_o16*3.))
t0_mod=xaxisarray[t0_model]
log10_c12=np.log10(surface_c12[t0_model:])
symbs=['k:','-','--','-.','b:','-','--','k-.',':','-','--','-.']
pyl.plot(xaxisarray,log10_c12,\
symbs[0],label='$^{12}\mathrm{C}$')
pyl.plot(xaxisarray,np.log10(surface_c13[t0_model:]),\
symbs[1],label='$^{13}\mathrm{C}$')
pyl.plot(xaxisarray,np.log10(surface_n14[t0_model:]),\
symbs[2],label='$^{14}\mathrm{N}$')
pyl.plot(xaxisarray,np.log10(surface_o16[t0_model:]),\
symbs[3],label='$^{16}\mathrm{O}$')
# pyl.plot([min(xaxisarray[t0_model:]-t0_mod),max(xaxisarray[t0_model:]-t0_mod)],[target_n14,target_n14])
pyl.ylabel('mass fraction $\log X$')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number')
elif xax == 'logrevtime':
pyl.xlabel('$\\log t-tfinal$')
if plot_CO_ratio:
pyl.twinx()
pyl.plot(xaxisarray,COratio[t0_model:],'-k',label='CO ratio')
pyl.ylabel('C/O ratio')
pyl.legend(loc=4)
pyl.title(title)
if xax == 'logrevtime':
self._xlimrev() | python | def t_surfabu(self, num_frame, xax, t0_model=0,
title='surface abundance', t_eps=1.e-3,
plot_CO_ratio=False):
"""
t_surfabu plots surface abundance evolution as a function of
time.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into, if <0 don't open
figure.
xax : string
Either model, time or logrevtime to indicate what is to be
used on the x-axis.
t0_model : integer, optional
Model for the zero point in time, for AGB plots this would
be usually the model of the 1st TP, which can be found with
the Kippenhahn plot. The default is 0.
title : string, optional
Figure title. The default is "surface abundance".
t_eps : float, optional
Time eps at end for logrevtime. The default is 1.e-3.
plot_CO_ratio : boolean, optional
On second axis True/False. The default is False.
"""
if num_frame >= 0:
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')[t0_model:]
elif xax == 'model':
xaxisarray = self.get('model_number')[t0_model:]
elif xax == 'logrevtime':
xaxisarray = self.get('star_age')
xaxisarray=np.log10(max(xaxisarray[t0_model:])+t_eps-xaxisarray[t0_model:])
else:
print('t-surfabu error: invalid string for x-axis selction.'+ \
' needs to be "time" or "model"')
star_mass = self.get('star_mass')
surface_c12 = self.get('surface_c12')
surface_c13 = self.get('surface_c13')
surface_n14 = self.get('surface_n14')
surface_o16 = self.get('surface_o16')
target_n14 = -3.5
COratio=old_div((surface_c12*4.),(surface_o16*3.))
t0_mod=xaxisarray[t0_model]
log10_c12=np.log10(surface_c12[t0_model:])
symbs=['k:','-','--','-.','b:','-','--','k-.',':','-','--','-.']
pyl.plot(xaxisarray,log10_c12,\
symbs[0],label='$^{12}\mathrm{C}$')
pyl.plot(xaxisarray,np.log10(surface_c13[t0_model:]),\
symbs[1],label='$^{13}\mathrm{C}$')
pyl.plot(xaxisarray,np.log10(surface_n14[t0_model:]),\
symbs[2],label='$^{14}\mathrm{N}$')
pyl.plot(xaxisarray,np.log10(surface_o16[t0_model:]),\
symbs[3],label='$^{16}\mathrm{O}$')
# pyl.plot([min(xaxisarray[t0_model:]-t0_mod),max(xaxisarray[t0_model:]-t0_mod)],[target_n14,target_n14])
pyl.ylabel('mass fraction $\log X$')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number')
elif xax == 'logrevtime':
pyl.xlabel('$\\log t-tfinal$')
if plot_CO_ratio:
pyl.twinx()
pyl.plot(xaxisarray,COratio[t0_model:],'-k',label='CO ratio')
pyl.ylabel('C/O ratio')
pyl.legend(loc=4)
pyl.title(title)
if xax == 'logrevtime':
self._xlimrev() | [
"def",
"t_surfabu",
"(",
"self",
",",
"num_frame",
",",
"xax",
",",
"t0_model",
"=",
"0",
",",
"title",
"=",
"'surface abundance'",
",",
"t_eps",
"=",
"1.e-3",
",",
"plot_CO_ratio",
"=",
"False",
")",
":",
"if",
"num_frame",
">=",
"0",
":",
"pyl",
".",... | t_surfabu plots surface abundance evolution as a function of
time.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into, if <0 don't open
figure.
xax : string
Either model, time or logrevtime to indicate what is to be
used on the x-axis.
t0_model : integer, optional
Model for the zero point in time, for AGB plots this would
be usually the model of the 1st TP, which can be found with
the Kippenhahn plot. The default is 0.
title : string, optional
Figure title. The default is "surface abundance".
t_eps : float, optional
Time eps at end for logrevtime. The default is 1.e-3.
plot_CO_ratio : boolean, optional
On second axis True/False. The default is False. | [
"t_surfabu",
"plots",
"surface",
"abundance",
"evolution",
"as",
"a",
"function",
"of",
"time",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1660-L1742 | train | 41,826 |
NuGrid/NuGridPy | nugridpy/mesa.py | history_data.t_lumi | def t_lumi(self,num_frame,xax):
"""
Luminosity evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selction. needs to be "time" or "model"')
logLH = self.get('log_LH')
logLHe = self.get('log_LHe')
pyl.plot(xaxisarray,logLH,label='L_(H)')
pyl.plot(xaxisarray,logLHe,label='L(He)')
pyl.ylabel('log L')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number') | python | def t_lumi(self,num_frame,xax):
"""
Luminosity evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selction. needs to be "time" or "model"')
logLH = self.get('log_LH')
logLHe = self.get('log_LHe')
pyl.plot(xaxisarray,logLH,label='L_(H)')
pyl.plot(xaxisarray,logLHe,label='L(He)')
pyl.ylabel('log L')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number') | [
"def",
"t_lumi",
"(",
"self",
",",
"num_frame",
",",
"xax",
")",
":",
"pyl",
".",
"figure",
"(",
"num_frame",
")",
"if",
"xax",
"==",
"'time'",
":",
"xaxisarray",
"=",
"self",
".",
"get",
"(",
"'star_age'",
")",
"elif",
"xax",
"==",
"'model'",
":",
... | Luminosity evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis | [
"Luminosity",
"evolution",
"as",
"a",
"function",
"of",
"time",
"or",
"model",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1747-L1783 | train | 41,827 |
NuGrid/NuGridPy | nugridpy/mesa.py | history_data.t_surf_parameter | def t_surf_parameter(self, num_frame, xax):
"""
Surface parameter evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selction. needs to be "time" or "model"')
logL = self.get('log_L')
logTeff = self.get('log_Teff')
pyl.plot(xaxisarray,logL,'-k',label='log L')
pyl.plot(xaxisarray,logTeff,'-k',label='log Teff')
pyl.ylabel('log L, log Teff')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number') | python | def t_surf_parameter(self, num_frame, xax):
"""
Surface parameter evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selction. needs to be "time" or "model"')
logL = self.get('log_L')
logTeff = self.get('log_Teff')
pyl.plot(xaxisarray,logL,'-k',label='log L')
pyl.plot(xaxisarray,logTeff,'-k',label='log Teff')
pyl.ylabel('log L, log Teff')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number') | [
"def",
"t_surf_parameter",
"(",
"self",
",",
"num_frame",
",",
"xax",
")",
":",
"pyl",
".",
"figure",
"(",
"num_frame",
")",
"if",
"xax",
"==",
"'time'",
":",
"xaxisarray",
"=",
"self",
".",
"get",
"(",
"'star_age'",
")",
"elif",
"xax",
"==",
"'model'"... | Surface parameter evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis | [
"Surface",
"parameter",
"evolution",
"as",
"a",
"function",
"of",
"time",
"or",
"model",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1785-L1821 | train | 41,828 |
NuGrid/NuGridPy | nugridpy/mesa.py | history_data.find_first_TP | def find_first_TP(self):
"""
Find first TP of the TPAGB phase and returns the model
number at its LHe maximum.
Parameters
----------
"""
star_mass = self.get('star_mass')
he_lumi = self.get('log_LHe')
h_lumi = self.get('log_LH')
mx2_bot = self.get('mx2_bot')*star_mass
try:
h1_boundary_mass = self.get('h1_boundary_mass')
he4_boundary_mass = self.get('he4_boundary_mass')
except:
try:
h1_boundary_mass = self.get('he_core_mass')
he4_boundary_mass = self.get('c_core_mass')
except:
pass
TP_bot=np.array(self.get('conv_mx2_bot'))*np.array(self.get('star_mass'))
TP_top=np.array(self.get('conv_mx2_top'))*np.array(self.get('star_mass'))
lum_array=[]
activate=False
models=[]
pdcz_size=[]
for i in range(len(h1_boundary_mass)):
if (h1_boundary_mass[i]-he4_boundary_mass[i] <0.2) and (he4_boundary_mass[i]>0.2):
if (mx2_bot[i]>he4_boundary_mass[i]) and (he_lumi[i]>h_lumi[i]):
if TP_top[i]>he4_boundary_mass[i]:
pdcz_size.append(TP_top[i]-TP_bot[i])
activate=True
lum_array.append(he_lumi[i])
models.append(i)
#print(TP_bot[i],TP_top[i])
if (activate == True) and (he_lumi[i]<h_lumi[i]):
#if fake tp
if max(pdcz_size)<1e-5:
active=False
lum_array=[]
models=[]
print('fake tp')
else:
break
t0_model = models[np.argmax(lum_array)]
return t0_model | python | def find_first_TP(self):
"""
Find first TP of the TPAGB phase and returns the model
number at its LHe maximum.
Parameters
----------
"""
star_mass = self.get('star_mass')
he_lumi = self.get('log_LHe')
h_lumi = self.get('log_LH')
mx2_bot = self.get('mx2_bot')*star_mass
try:
h1_boundary_mass = self.get('h1_boundary_mass')
he4_boundary_mass = self.get('he4_boundary_mass')
except:
try:
h1_boundary_mass = self.get('he_core_mass')
he4_boundary_mass = self.get('c_core_mass')
except:
pass
TP_bot=np.array(self.get('conv_mx2_bot'))*np.array(self.get('star_mass'))
TP_top=np.array(self.get('conv_mx2_top'))*np.array(self.get('star_mass'))
lum_array=[]
activate=False
models=[]
pdcz_size=[]
for i in range(len(h1_boundary_mass)):
if (h1_boundary_mass[i]-he4_boundary_mass[i] <0.2) and (he4_boundary_mass[i]>0.2):
if (mx2_bot[i]>he4_boundary_mass[i]) and (he_lumi[i]>h_lumi[i]):
if TP_top[i]>he4_boundary_mass[i]:
pdcz_size.append(TP_top[i]-TP_bot[i])
activate=True
lum_array.append(he_lumi[i])
models.append(i)
#print(TP_bot[i],TP_top[i])
if (activate == True) and (he_lumi[i]<h_lumi[i]):
#if fake tp
if max(pdcz_size)<1e-5:
active=False
lum_array=[]
models=[]
print('fake tp')
else:
break
t0_model = models[np.argmax(lum_array)]
return t0_model | [
"def",
"find_first_TP",
"(",
"self",
")",
":",
"star_mass",
"=",
"self",
".",
"get",
"(",
"'star_mass'",
")",
"he_lumi",
"=",
"self",
".",
"get",
"(",
"'log_LHe'",
")",
"h_lumi",
"=",
"self",
".",
"get",
"(",
"'log_LH'",
")",
"mx2_bot",
"=",
"self",
... | Find first TP of the TPAGB phase and returns the model
number at its LHe maximum.
Parameters
---------- | [
"Find",
"first",
"TP",
"of",
"the",
"TPAGB",
"phase",
"and",
"returns",
"the",
"model",
"number",
"at",
"its",
"LHe",
"maximum",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3150-L3201 | train | 41,829 |
NuGrid/NuGridPy | nugridpy/mesa.py | history_data.calc_DUP_parameter | def calc_DUP_parameter(self, modeln, label, fig=10, color='r', marker_type='*',
h_core_mass=False):
"""
Method to calculate the DUP parameter evolution for different
TPs specified specified by their model number.
Parameters
----------
fig : integer
Figure number to plot.
modeln : list
Array containing pairs of models each corresponding to a
TP. First model where h boundary mass will be taken before
DUP, second model where DUP reaches lowest mass.
leg : string
Plot label.
color : string
Color of the plot.
marker_type : string
marker type.
h_core_mass : boolean, optional
If True: plot dependence from h free core , else star mass.
The default is False.
"""
number_DUP=(old_div(len(modeln),2) -1) #START WITH SECOND
try:
h1_bnd_m=self.get('h1_boundary_mass')
except:
try:
h1_bnd_m=self.get('he_core_mass')
except:
pass
star_mass=self.get('star_mass')
age=self.get("star_age")
firstTP=h1_bnd_m[modeln[0]]
first_m_dredge=h1_bnd_m[modeln[1]]
DUP_parameter=np.zeros(number_DUP)
DUP_xaxis=np.zeros(number_DUP)
j=0
for i in np.arange(2,len(modeln),2):
TP=h1_bnd_m[modeln[i]]
m_dredge=h1_bnd_m[modeln[i+1]]
if i ==2:
last_m_dredge=first_m_dredge
#print "testest"
#print modeln[i]
if h_core_mass==True:
DUP_xaxis[j]=h1_bnd_m[modeln[i]] #age[modeln[i]] - age[modeln[0]]
else:
DUP_xaxis[j]=star_mass[modeln[i]]
#DUP_xaxis[j]=modeln[i]
DUP_parameter[j]=old_div((TP-m_dredge),(TP-last_m_dredge))
last_m_dredge=m_dredge
j+=1
pl.figure(fig)
pl.rcParams.update({'font.size': 18})
pl.rc('xtick', labelsize=18)
pl.rc('ytick', labelsize=18)
pl.plot(DUP_xaxis,DUP_parameter,marker=marker_type,markersize=12,mfc=color,color='k',linestyle='-',label=label)
if h_core_mass==True:
pl.xlabel("$M_H$",fontsize=20)
else:
pl.xlabel("M/M$_{\odot}$",fontsize=24)
pl.ylabel("$\lambda_{DUP}$",fontsize=24)
pl.minorticks_on()
pl.legend() | python | def calc_DUP_parameter(self, modeln, label, fig=10, color='r', marker_type='*',
h_core_mass=False):
"""
Method to calculate the DUP parameter evolution for different
TPs specified specified by their model number.
Parameters
----------
fig : integer
Figure number to plot.
modeln : list
Array containing pairs of models each corresponding to a
TP. First model where h boundary mass will be taken before
DUP, second model where DUP reaches lowest mass.
leg : string
Plot label.
color : string
Color of the plot.
marker_type : string
marker type.
h_core_mass : boolean, optional
If True: plot dependence from h free core , else star mass.
The default is False.
"""
number_DUP=(old_div(len(modeln),2) -1) #START WITH SECOND
try:
h1_bnd_m=self.get('h1_boundary_mass')
except:
try:
h1_bnd_m=self.get('he_core_mass')
except:
pass
star_mass=self.get('star_mass')
age=self.get("star_age")
firstTP=h1_bnd_m[modeln[0]]
first_m_dredge=h1_bnd_m[modeln[1]]
DUP_parameter=np.zeros(number_DUP)
DUP_xaxis=np.zeros(number_DUP)
j=0
for i in np.arange(2,len(modeln),2):
TP=h1_bnd_m[modeln[i]]
m_dredge=h1_bnd_m[modeln[i+1]]
if i ==2:
last_m_dredge=first_m_dredge
#print "testest"
#print modeln[i]
if h_core_mass==True:
DUP_xaxis[j]=h1_bnd_m[modeln[i]] #age[modeln[i]] - age[modeln[0]]
else:
DUP_xaxis[j]=star_mass[modeln[i]]
#DUP_xaxis[j]=modeln[i]
DUP_parameter[j]=old_div((TP-m_dredge),(TP-last_m_dredge))
last_m_dredge=m_dredge
j+=1
pl.figure(fig)
pl.rcParams.update({'font.size': 18})
pl.rc('xtick', labelsize=18)
pl.rc('ytick', labelsize=18)
pl.plot(DUP_xaxis,DUP_parameter,marker=marker_type,markersize=12,mfc=color,color='k',linestyle='-',label=label)
if h_core_mass==True:
pl.xlabel("$M_H$",fontsize=20)
else:
pl.xlabel("M/M$_{\odot}$",fontsize=24)
pl.ylabel("$\lambda_{DUP}$",fontsize=24)
pl.minorticks_on()
pl.legend() | [
"def",
"calc_DUP_parameter",
"(",
"self",
",",
"modeln",
",",
"label",
",",
"fig",
"=",
"10",
",",
"color",
"=",
"'r'",
",",
"marker_type",
"=",
"'*'",
",",
"h_core_mass",
"=",
"False",
")",
":",
"number_DUP",
"=",
"(",
"old_div",
"(",
"len",
"(",
"m... | Method to calculate the DUP parameter evolution for different
TPs specified specified by their model number.
Parameters
----------
fig : integer
Figure number to plot.
modeln : list
Array containing pairs of models each corresponding to a
TP. First model where h boundary mass will be taken before
DUP, second model where DUP reaches lowest mass.
leg : string
Plot label.
color : string
Color of the plot.
marker_type : string
marker type.
h_core_mass : boolean, optional
If True: plot dependence from h free core , else star mass.
The default is False. | [
"Method",
"to",
"calculate",
"the",
"DUP",
"parameter",
"evolution",
"for",
"different",
"TPs",
"specified",
"specified",
"by",
"their",
"model",
"number",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3641-L3709 | train | 41,830 |
openstack/proliantutils | proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py | _create_usm_user_obj | def _create_usm_user_obj(snmp_cred):
"""Creates the UsmUserData obj for the given credentials.
This method creates an instance for the method hlapi.UsmUserData.
The UsmUserData() allows the 'auth_protocol' and 'priv_protocol'
to be undefined by user if their pass phrases are provided.
:param snmp_cred: Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns UsmUserData object as per given credentials.
"""
auth_protocol = snmp_cred.get('auth_protocol')
priv_protocol = snmp_cred.get('priv_protocol')
auth_user = snmp_cred.get('auth_user')
auth_prot_pp = snmp_cred.get('auth_prot_pp')
auth_priv_pp = snmp_cred.get('auth_priv_pp')
if ((not auth_protocol) and priv_protocol):
priv_protocol = (
MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
privProtocol=priv_protocol)
elif ((not priv_protocol) and auth_protocol):
auth_protocol = (
MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
authProtocol=auth_protocol)
elif not all([priv_protocol and auth_protocol]):
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp)
else:
auth_protocol = (
MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])
priv_protocol = (
MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
authProtocol=auth_protocol,
privProtocol=priv_protocol)
return usm_user_obj | python | def _create_usm_user_obj(snmp_cred):
"""Creates the UsmUserData obj for the given credentials.
This method creates an instance for the method hlapi.UsmUserData.
The UsmUserData() allows the 'auth_protocol' and 'priv_protocol'
to be undefined by user if their pass phrases are provided.
:param snmp_cred: Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns UsmUserData object as per given credentials.
"""
auth_protocol = snmp_cred.get('auth_protocol')
priv_protocol = snmp_cred.get('priv_protocol')
auth_user = snmp_cred.get('auth_user')
auth_prot_pp = snmp_cred.get('auth_prot_pp')
auth_priv_pp = snmp_cred.get('auth_priv_pp')
if ((not auth_protocol) and priv_protocol):
priv_protocol = (
MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
privProtocol=priv_protocol)
elif ((not priv_protocol) and auth_protocol):
auth_protocol = (
MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
authProtocol=auth_protocol)
elif not all([priv_protocol and auth_protocol]):
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp)
else:
auth_protocol = (
MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])
priv_protocol = (
MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
authProtocol=auth_protocol,
privProtocol=priv_protocol)
return usm_user_obj | [
"def",
"_create_usm_user_obj",
"(",
"snmp_cred",
")",
":",
"auth_protocol",
"=",
"snmp_cred",
".",
"get",
"(",
"'auth_protocol'",
")",
"priv_protocol",
"=",
"snmp_cred",
".",
"get",
"(",
"'priv_protocol'",
")",
"auth_user",
"=",
"snmp_cred",
".",
"get",
"(",
"... | Creates the UsmUserData obj for the given credentials.
This method creates an instance for the method hlapi.UsmUserData.
The UsmUserData() allows the 'auth_protocol' and 'priv_protocol'
to be undefined by user if their pass phrases are provided.
:param snmp_cred: Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns UsmUserData object as per given credentials. | [
"Creates",
"the",
"UsmUserData",
"obj",
"for",
"the",
"given",
"credentials",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py#L55-L100 | train | 41,831 |
openstack/proliantutils | proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py | _parse_mibs | def _parse_mibs(iLOIP, snmp_credentials):
"""Parses the MIBs.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials: a Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns the dictionary of parsed MIBs.
:raises exception.InvalidInputError if pysnmp is unable to get
SNMP data due to wrong inputs provided.
:raises exception.IloError if pysnmp raises any exception.
"""
result = {}
usm_user_obj = _create_usm_user_obj(snmp_credentials)
try:
for(errorIndication,
errorStatus,
errorIndex,
varBinds) in hlapi.nextCmd(
hlapi.SnmpEngine(),
usm_user_obj,
hlapi.UdpTransportTarget((iLOIP, 161), timeout=3, retries=3),
hlapi.ContextData(),
# cpqida cpqDaPhyDrvTable Drive Array Physical Drive Table
hlapi.ObjectType(
hlapi.ObjectIdentity('1.3.6.1.4.1.232.3.2.5.1')),
# cpqscsi SCSI Physical Drive Table
hlapi.ObjectType(
hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.2.4.1')),
# cpqscsi SAS Physical Drive Table
hlapi.ObjectType(
hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.5.2.1')),
lexicographicMode=False,
ignoreNonIncreasingOid=True):
if errorIndication:
LOG.error(errorIndication)
msg = "SNMP failed to traverse MIBs %s", errorIndication
raise exception.IloSNMPInvalidInputFailure(msg)
else:
if errorStatus:
msg = ('Parsing MIBs failed. %s at %s' % (
errorStatus.prettyPrint(),
errorIndex and varBinds[-1][int(errorIndex)-1]
or '?'
)
)
LOG.error(msg)
raise exception.IloSNMPInvalidInputFailure(msg)
else:
for varBindTableRow in varBinds:
name, val = tuple(varBindTableRow)
oid, label, suffix = (
mibViewController.getNodeName(name))
key = name.prettyPrint()
# Don't traverse outside the tables we requested
if not (key.find("SNMPv2-SMI::enterprises.232.3") >= 0
or (key.find(
"SNMPv2-SMI::enterprises.232.5") >= 0)):
break
if key not in result:
result[key] = {}
result[key][label[-1]] = {}
result[key][label[-1]][suffix] = val
except Exception as e:
msg = "SNMP library failed with error %s", e
LOG.error(msg)
raise exception.IloSNMPExceptionFailure(msg)
return result | python | def _parse_mibs(iLOIP, snmp_credentials):
"""Parses the MIBs.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials: a Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns the dictionary of parsed MIBs.
:raises exception.InvalidInputError if pysnmp is unable to get
SNMP data due to wrong inputs provided.
:raises exception.IloError if pysnmp raises any exception.
"""
result = {}
usm_user_obj = _create_usm_user_obj(snmp_credentials)
try:
for(errorIndication,
errorStatus,
errorIndex,
varBinds) in hlapi.nextCmd(
hlapi.SnmpEngine(),
usm_user_obj,
hlapi.UdpTransportTarget((iLOIP, 161), timeout=3, retries=3),
hlapi.ContextData(),
# cpqida cpqDaPhyDrvTable Drive Array Physical Drive Table
hlapi.ObjectType(
hlapi.ObjectIdentity('1.3.6.1.4.1.232.3.2.5.1')),
# cpqscsi SCSI Physical Drive Table
hlapi.ObjectType(
hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.2.4.1')),
# cpqscsi SAS Physical Drive Table
hlapi.ObjectType(
hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.5.2.1')),
lexicographicMode=False,
ignoreNonIncreasingOid=True):
if errorIndication:
LOG.error(errorIndication)
msg = "SNMP failed to traverse MIBs %s", errorIndication
raise exception.IloSNMPInvalidInputFailure(msg)
else:
if errorStatus:
msg = ('Parsing MIBs failed. %s at %s' % (
errorStatus.prettyPrint(),
errorIndex and varBinds[-1][int(errorIndex)-1]
or '?'
)
)
LOG.error(msg)
raise exception.IloSNMPInvalidInputFailure(msg)
else:
for varBindTableRow in varBinds:
name, val = tuple(varBindTableRow)
oid, label, suffix = (
mibViewController.getNodeName(name))
key = name.prettyPrint()
# Don't traverse outside the tables we requested
if not (key.find("SNMPv2-SMI::enterprises.232.3") >= 0
or (key.find(
"SNMPv2-SMI::enterprises.232.5") >= 0)):
break
if key not in result:
result[key] = {}
result[key][label[-1]] = {}
result[key][label[-1]][suffix] = val
except Exception as e:
msg = "SNMP library failed with error %s", e
LOG.error(msg)
raise exception.IloSNMPExceptionFailure(msg)
return result | [
"def",
"_parse_mibs",
"(",
"iLOIP",
",",
"snmp_credentials",
")",
":",
"result",
"=",
"{",
"}",
"usm_user_obj",
"=",
"_create_usm_user_obj",
"(",
"snmp_credentials",
")",
"try",
":",
"for",
"(",
"errorIndication",
",",
"errorStatus",
",",
"errorIndex",
",",
"v... | Parses the MIBs.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials: a Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns the dictionary of parsed MIBs.
:raises exception.InvalidInputError if pysnmp is unable to get
SNMP data due to wrong inputs provided.
:raises exception.IloError if pysnmp raises any exception. | [
"Parses",
"the",
"MIBs",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py#L103-L175 | train | 41,832 |
openstack/proliantutils | proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py | _get_disksize_MiB | def _get_disksize_MiB(iLOIP, cred):
"""Reads the dictionary of parsed MIBs and gets the disk size.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials in a dictionary having following mandatory
keys.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns the dictionary of disk sizes of all physical drives.
"""
# '1.3.6.1.4.1.232.5.5.1.1', # cpqscsi SAS HBA Table
# '1.3.6.1.4.1.232.3.2.3.1', # cpqida Drive Array Logical Drive Table
result = _parse_mibs(iLOIP, cred)
disksize = {}
for uuid in sorted(result):
for key in result[uuid]:
# We only track the Physical Disk Size
if key.find('PhyDrvSize') >= 0:
disksize[uuid] = dict()
for suffix in sorted(result[uuid][key]):
size = result[uuid][key][suffix]
disksize[uuid][key] = str(size)
return disksize | python | def _get_disksize_MiB(iLOIP, cred):
"""Reads the dictionary of parsed MIBs and gets the disk size.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials in a dictionary having following mandatory
keys.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns the dictionary of disk sizes of all physical drives.
"""
# '1.3.6.1.4.1.232.5.5.1.1', # cpqscsi SAS HBA Table
# '1.3.6.1.4.1.232.3.2.3.1', # cpqida Drive Array Logical Drive Table
result = _parse_mibs(iLOIP, cred)
disksize = {}
for uuid in sorted(result):
for key in result[uuid]:
# We only track the Physical Disk Size
if key.find('PhyDrvSize') >= 0:
disksize[uuid] = dict()
for suffix in sorted(result[uuid][key]):
size = result[uuid][key][suffix]
disksize[uuid][key] = str(size)
return disksize | [
"def",
"_get_disksize_MiB",
"(",
"iLOIP",
",",
"cred",
")",
":",
"# '1.3.6.1.4.1.232.5.5.1.1', # cpqscsi SAS HBA Table",
"# '1.3.6.1.4.1.232.3.2.3.1', # cpqida Drive Array Logical Drive Table",
"result",
"=",
"_parse_mibs",
"(",
"iLOIP",
",",
"cred",
")",
"disksize",
"=",
"... | Reads the dictionary of parsed MIBs and gets the disk size.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials in a dictionary having following mandatory
keys.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns the dictionary of disk sizes of all physical drives. | [
"Reads",
"the",
"dictionary",
"of",
"parsed",
"MIBs",
"and",
"gets",
"the",
"disk",
"size",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py#L178-L205 | train | 41,833 |
openstack/proliantutils | proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py | get_local_gb | def get_local_gb(iLOIP, snmp_credentials):
"""Gets the maximum disk size among all disks.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials in a dictionary having following mandatory
keys.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
"""
disk_sizes = _get_disksize_MiB(iLOIP, snmp_credentials)
max_size = 0
for uuid in disk_sizes:
for key in disk_sizes[uuid]:
if int(disk_sizes[uuid][key]) > max_size:
max_size = int(disk_sizes[uuid][key])
max_size_gb = max_size/1024
return max_size_gb | python | def get_local_gb(iLOIP, snmp_credentials):
"""Gets the maximum disk size among all disks.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials in a dictionary having following mandatory
keys.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
"""
disk_sizes = _get_disksize_MiB(iLOIP, snmp_credentials)
max_size = 0
for uuid in disk_sizes:
for key in disk_sizes[uuid]:
if int(disk_sizes[uuid][key]) > max_size:
max_size = int(disk_sizes[uuid][key])
max_size_gb = max_size/1024
return max_size_gb | [
"def",
"get_local_gb",
"(",
"iLOIP",
",",
"snmp_credentials",
")",
":",
"disk_sizes",
"=",
"_get_disksize_MiB",
"(",
"iLOIP",
",",
"snmp_credentials",
")",
"max_size",
"=",
"0",
"for",
"uuid",
"in",
"disk_sizes",
":",
"for",
"key",
"in",
"disk_sizes",
"[",
"... | Gets the maximum disk size among all disks.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
:param snmp_credentials in a dictionary having following mandatory
keys.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol. | [
"Gets",
"the",
"maximum",
"disk",
"size",
"among",
"all",
"disks",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py#L208-L228 | train | 41,834 |
openstack/proliantutils | proliantutils/redfish/resources/system/ethernet_interface.py | EthernetInterfaceCollection.summary | def summary(self):
"""property to return the summary MAC addresses and state
This filters the MACs whose health is OK,
and in 'Enabled' State would be returned.
The returned format will be {<port_id>: <mac_address>}.
This is because RIBCL returns the data in format
{'Port 1': 'aa:bb:cc:dd:ee:ff'} and ironic ilo drivers inspection
consumes the data in this format.
Note: 'Id' is referred to as "Port number".
"""
mac_dict = {}
for eth in self.get_members():
if eth.mac_address is not None:
if (eth.status is not None and
eth.status.health == sys_cons.HEALTH_OK
and eth.status.state ==
sys_cons.HEALTH_STATE_ENABLED):
mac_dict.update(
{'Port ' + eth.identity: eth.mac_address})
return mac_dict | python | def summary(self):
"""property to return the summary MAC addresses and state
This filters the MACs whose health is OK,
and in 'Enabled' State would be returned.
The returned format will be {<port_id>: <mac_address>}.
This is because RIBCL returns the data in format
{'Port 1': 'aa:bb:cc:dd:ee:ff'} and ironic ilo drivers inspection
consumes the data in this format.
Note: 'Id' is referred to as "Port number".
"""
mac_dict = {}
for eth in self.get_members():
if eth.mac_address is not None:
if (eth.status is not None and
eth.status.health == sys_cons.HEALTH_OK
and eth.status.state ==
sys_cons.HEALTH_STATE_ENABLED):
mac_dict.update(
{'Port ' + eth.identity: eth.mac_address})
return mac_dict | [
"def",
"summary",
"(",
"self",
")",
":",
"mac_dict",
"=",
"{",
"}",
"for",
"eth",
"in",
"self",
".",
"get_members",
"(",
")",
":",
"if",
"eth",
".",
"mac_address",
"is",
"not",
"None",
":",
"if",
"(",
"eth",
".",
"status",
"is",
"not",
"None",
"a... | property to return the summary MAC addresses and state
This filters the MACs whose health is OK,
and in 'Enabled' State would be returned.
The returned format will be {<port_id>: <mac_address>}.
This is because RIBCL returns the data in format
{'Port 1': 'aa:bb:cc:dd:ee:ff'} and ironic ilo drivers inspection
consumes the data in this format.
Note: 'Id' is referred to as "Port number". | [
"property",
"to",
"return",
"the",
"summary",
"MAC",
"addresses",
"and",
"state"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/ethernet_interface.py#L36-L56 | train | 41,835 |
openstack/proliantutils | proliantutils/hpssa/manager.py | _update_physical_disk_details | def _update_physical_disk_details(raid_config, server):
"""Adds the physical disk details to the RAID configuration passed."""
raid_config['physical_disks'] = []
physical_drives = server.get_physical_drives()
for physical_drive in physical_drives:
physical_drive_dict = physical_drive.get_physical_drive_dict()
raid_config['physical_disks'].append(physical_drive_dict) | python | def _update_physical_disk_details(raid_config, server):
"""Adds the physical disk details to the RAID configuration passed."""
raid_config['physical_disks'] = []
physical_drives = server.get_physical_drives()
for physical_drive in physical_drives:
physical_drive_dict = physical_drive.get_physical_drive_dict()
raid_config['physical_disks'].append(physical_drive_dict) | [
"def",
"_update_physical_disk_details",
"(",
"raid_config",
",",
"server",
")",
":",
"raid_config",
"[",
"'physical_disks'",
"]",
"=",
"[",
"]",
"physical_drives",
"=",
"server",
".",
"get_physical_drives",
"(",
")",
"for",
"physical_drive",
"in",
"physical_drives",... | Adds the physical disk details to the RAID configuration passed. | [
"Adds",
"the",
"physical",
"disk",
"details",
"to",
"the",
"RAID",
"configuration",
"passed",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L31-L37 | train | 41,836 |
openstack/proliantutils | proliantutils/hpssa/manager.py | validate | def validate(raid_config):
"""Validates the RAID configuration provided.
This method validates the RAID configuration provided against
a JSON schema.
:param raid_config: The RAID configuration to be validated.
:raises: InvalidInputError, if validation of the input fails.
"""
raid_schema_fobj = open(RAID_CONFIG_SCHEMA, 'r')
raid_config_schema = json.load(raid_schema_fobj)
try:
jsonschema.validate(raid_config, raid_config_schema)
except json_schema_exc.ValidationError as e:
raise exception.InvalidInputError(e.message)
for logical_disk in raid_config['logical_disks']:
# If user has provided 'number_of_physical_disks' or
# 'physical_disks', validate that they have mentioned at least
# minimum number of physical disks required for that RAID level.
raid_level = logical_disk['raid_level']
min_disks_reqd = constants.RAID_LEVEL_MIN_DISKS[raid_level]
no_of_disks_specified = None
if 'number_of_physical_disks' in logical_disk:
no_of_disks_specified = logical_disk['number_of_physical_disks']
elif 'physical_disks' in logical_disk:
no_of_disks_specified = len(logical_disk['physical_disks'])
if (no_of_disks_specified and
no_of_disks_specified < min_disks_reqd):
msg = ("RAID level %(raid_level)s requires at least %(number)s "
"disks." % {'raid_level': raid_level,
'number': min_disks_reqd})
raise exception.InvalidInputError(msg) | python | def validate(raid_config):
"""Validates the RAID configuration provided.
This method validates the RAID configuration provided against
a JSON schema.
:param raid_config: The RAID configuration to be validated.
:raises: InvalidInputError, if validation of the input fails.
"""
raid_schema_fobj = open(RAID_CONFIG_SCHEMA, 'r')
raid_config_schema = json.load(raid_schema_fobj)
try:
jsonschema.validate(raid_config, raid_config_schema)
except json_schema_exc.ValidationError as e:
raise exception.InvalidInputError(e.message)
for logical_disk in raid_config['logical_disks']:
# If user has provided 'number_of_physical_disks' or
# 'physical_disks', validate that they have mentioned at least
# minimum number of physical disks required for that RAID level.
raid_level = logical_disk['raid_level']
min_disks_reqd = constants.RAID_LEVEL_MIN_DISKS[raid_level]
no_of_disks_specified = None
if 'number_of_physical_disks' in logical_disk:
no_of_disks_specified = logical_disk['number_of_physical_disks']
elif 'physical_disks' in logical_disk:
no_of_disks_specified = len(logical_disk['physical_disks'])
if (no_of_disks_specified and
no_of_disks_specified < min_disks_reqd):
msg = ("RAID level %(raid_level)s requires at least %(number)s "
"disks." % {'raid_level': raid_level,
'number': min_disks_reqd})
raise exception.InvalidInputError(msg) | [
"def",
"validate",
"(",
"raid_config",
")",
":",
"raid_schema_fobj",
"=",
"open",
"(",
"RAID_CONFIG_SCHEMA",
",",
"'r'",
")",
"raid_config_schema",
"=",
"json",
".",
"load",
"(",
"raid_schema_fobj",
")",
"try",
":",
"jsonschema",
".",
"validate",
"(",
"raid_co... | Validates the RAID configuration provided.
This method validates the RAID configuration provided against
a JSON schema.
:param raid_config: The RAID configuration to be validated.
:raises: InvalidInputError, if validation of the input fails. | [
"Validates",
"the",
"RAID",
"configuration",
"provided",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L40-L75 | train | 41,837 |
openstack/proliantutils | proliantutils/hpssa/manager.py | _select_controllers_by | def _select_controllers_by(server, select_condition, msg):
"""Filters out the hpssa controllers based on the condition.
This method updates the server with only the controller which satisfies
the condition. The controllers which doesn't satisfies the selection
condition will be removed from the list.
:param server: The object containing all the supported hpssa controllers
details.
:param select_condition: A lambda function to select the controllers based
on requirement.
:param msg: A String which describes the controller selection.
:raises exception.HPSSAOperationError, if all the controller are in HBA
mode.
"""
all_controllers = server.controllers
supported_controllers = [c for c in all_controllers if select_condition(c)]
if not supported_controllers:
reason = ("None of the available SSA controllers %(controllers)s "
"have %(msg)s"
% {'controllers': ', '.join([c.id for c in all_controllers]),
'msg': msg})
raise exception.HPSSAOperationError(reason=reason)
server.controllers = supported_controllers | python | def _select_controllers_by(server, select_condition, msg):
"""Filters out the hpssa controllers based on the condition.
This method updates the server with only the controller which satisfies
the condition. The controllers which doesn't satisfies the selection
condition will be removed from the list.
:param server: The object containing all the supported hpssa controllers
details.
:param select_condition: A lambda function to select the controllers based
on requirement.
:param msg: A String which describes the controller selection.
:raises exception.HPSSAOperationError, if all the controller are in HBA
mode.
"""
all_controllers = server.controllers
supported_controllers = [c for c in all_controllers if select_condition(c)]
if not supported_controllers:
reason = ("None of the available SSA controllers %(controllers)s "
"have %(msg)s"
% {'controllers': ', '.join([c.id for c in all_controllers]),
'msg': msg})
raise exception.HPSSAOperationError(reason=reason)
server.controllers = supported_controllers | [
"def",
"_select_controllers_by",
"(",
"server",
",",
"select_condition",
",",
"msg",
")",
":",
"all_controllers",
"=",
"server",
".",
"controllers",
"supported_controllers",
"=",
"[",
"c",
"for",
"c",
"in",
"all_controllers",
"if",
"select_condition",
"(",
"c",
... | Filters out the hpssa controllers based on the condition.
This method updates the server with only the controller which satisfies
the condition. The controllers which doesn't satisfies the selection
condition will be removed from the list.
:param server: The object containing all the supported hpssa controllers
details.
:param select_condition: A lambda function to select the controllers based
on requirement.
:param msg: A String which describes the controller selection.
:raises exception.HPSSAOperationError, if all the controller are in HBA
mode. | [
"Filters",
"out",
"the",
"hpssa",
"controllers",
"based",
"on",
"the",
"condition",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L78-L103 | train | 41,838 |
openstack/proliantutils | proliantutils/hpssa/manager.py | create_configuration | def create_configuration(raid_config):
"""Create a RAID configuration on this server.
This method creates the given RAID configuration on the
server based on the input passed.
:param raid_config: The dictionary containing the requested
RAID configuration. This data structure should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100},
<info-for-logical-disk-2>
]}
:returns: the current raid configuration. This is same as raid_config
with some extra properties like root_device_hint, volume_name,
controller, physical_disks, etc filled for each logical disk
after its creation.
:raises exception.InvalidInputError, if input is invalid.
:raises exception.HPSSAOperationError, if all the controllers are in HBA
mode.
"""
server = objects.Server()
select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
False)
_select_controllers_by(server, select_controllers, 'RAID enabled')
validate(raid_config)
# Make sure we create the large disks first. This is avoid the
# situation that we avoid giving large disks to smaller requests.
# For example, consider this:
# - two logical disks - LD1(50), LD(100)
# - have 4 physical disks - PD1(50), PD2(50), PD3(100), PD4(100)
#
# In this case, for RAID1 configuration, if we were to consider
# LD1 first and allocate PD3 and PD4 for it, then allocation would
# fail. So follow a particular order for allocation.
#
# Also make sure we create the MAX logical_disks the last to make sure
# we allot only the remaining space available.
logical_disks_sorted = (
sorted((x for x in raid_config['logical_disks']
if x['size_gb'] != "MAX"),
reverse=True,
key=lambda x: x['size_gb']) +
[x for x in raid_config['logical_disks'] if x['size_gb'] == "MAX"])
if any(logical_disk['share_physical_disks']
for logical_disk in logical_disks_sorted
if 'share_physical_disks' in logical_disk):
logical_disks_sorted = _sort_shared_logical_disks(logical_disks_sorted)
# We figure out the new disk created by recording the wwns
# before and after the create, and then figuring out the
# newly found wwn from it.
wwns_before_create = set([x.wwn for x in
server.get_logical_drives()])
for logical_disk in logical_disks_sorted:
if 'physical_disks' not in logical_disk:
disk_allocator.allocate_disks(logical_disk, server,
raid_config)
controller_id = logical_disk['controller']
controller = server.get_controller_by_id(controller_id)
if not controller:
msg = ("Unable to find controller named '%(controller)s'."
" The available controllers are '%(ctrl_list)s'." %
{'controller': controller_id,
'ctrl_list': ', '.join(
[c.id for c in server.controllers])})
raise exception.InvalidInputError(reason=msg)
if 'physical_disks' in logical_disk:
for physical_disk in logical_disk['physical_disks']:
disk_obj = controller.get_physical_drive_by_id(physical_disk)
if not disk_obj:
msg = ("Unable to find physical disk '%(physical_disk)s' "
"on '%(controller)s'" %
{'physical_disk': physical_disk,
'controller': controller_id})
raise exception.InvalidInputError(msg)
controller.create_logical_drive(logical_disk)
# Now find the new logical drive created.
server.refresh()
wwns_after_create = set([x.wwn for x in
server.get_logical_drives()])
new_wwn = wwns_after_create - wwns_before_create
if not new_wwn:
reason = ("Newly created logical disk with raid_level "
"'%(raid_level)s' and size %(size_gb)s GB not "
"found." % {'raid_level': logical_disk['raid_level'],
'size_gb': logical_disk['size_gb']})
raise exception.HPSSAOperationError(reason=reason)
new_logical_disk = server.get_logical_drive_by_wwn(new_wwn.pop())
new_log_drive_properties = new_logical_disk.get_logical_drive_dict()
logical_disk.update(new_log_drive_properties)
wwns_before_create = wwns_after_create.copy()
_update_physical_disk_details(raid_config, server)
return raid_config | python | def create_configuration(raid_config):
"""Create a RAID configuration on this server.
This method creates the given RAID configuration on the
server based on the input passed.
:param raid_config: The dictionary containing the requested
RAID configuration. This data structure should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100},
<info-for-logical-disk-2>
]}
:returns: the current raid configuration. This is same as raid_config
with some extra properties like root_device_hint, volume_name,
controller, physical_disks, etc filled for each logical disk
after its creation.
:raises exception.InvalidInputError, if input is invalid.
:raises exception.HPSSAOperationError, if all the controllers are in HBA
mode.
"""
server = objects.Server()
select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
False)
_select_controllers_by(server, select_controllers, 'RAID enabled')
validate(raid_config)
# Make sure we create the large disks first. This is avoid the
# situation that we avoid giving large disks to smaller requests.
# For example, consider this:
# - two logical disks - LD1(50), LD(100)
# - have 4 physical disks - PD1(50), PD2(50), PD3(100), PD4(100)
#
# In this case, for RAID1 configuration, if we were to consider
# LD1 first and allocate PD3 and PD4 for it, then allocation would
# fail. So follow a particular order for allocation.
#
# Also make sure we create the MAX logical_disks the last to make sure
# we allot only the remaining space available.
logical_disks_sorted = (
sorted((x for x in raid_config['logical_disks']
if x['size_gb'] != "MAX"),
reverse=True,
key=lambda x: x['size_gb']) +
[x for x in raid_config['logical_disks'] if x['size_gb'] == "MAX"])
if any(logical_disk['share_physical_disks']
for logical_disk in logical_disks_sorted
if 'share_physical_disks' in logical_disk):
logical_disks_sorted = _sort_shared_logical_disks(logical_disks_sorted)
# We figure out the new disk created by recording the wwns
# before and after the create, and then figuring out the
# newly found wwn from it.
wwns_before_create = set([x.wwn for x in
server.get_logical_drives()])
for logical_disk in logical_disks_sorted:
if 'physical_disks' not in logical_disk:
disk_allocator.allocate_disks(logical_disk, server,
raid_config)
controller_id = logical_disk['controller']
controller = server.get_controller_by_id(controller_id)
if not controller:
msg = ("Unable to find controller named '%(controller)s'."
" The available controllers are '%(ctrl_list)s'." %
{'controller': controller_id,
'ctrl_list': ', '.join(
[c.id for c in server.controllers])})
raise exception.InvalidInputError(reason=msg)
if 'physical_disks' in logical_disk:
for physical_disk in logical_disk['physical_disks']:
disk_obj = controller.get_physical_drive_by_id(physical_disk)
if not disk_obj:
msg = ("Unable to find physical disk '%(physical_disk)s' "
"on '%(controller)s'" %
{'physical_disk': physical_disk,
'controller': controller_id})
raise exception.InvalidInputError(msg)
controller.create_logical_drive(logical_disk)
# Now find the new logical drive created.
server.refresh()
wwns_after_create = set([x.wwn for x in
server.get_logical_drives()])
new_wwn = wwns_after_create - wwns_before_create
if not new_wwn:
reason = ("Newly created logical disk with raid_level "
"'%(raid_level)s' and size %(size_gb)s GB not "
"found." % {'raid_level': logical_disk['raid_level'],
'size_gb': logical_disk['size_gb']})
raise exception.HPSSAOperationError(reason=reason)
new_logical_disk = server.get_logical_drive_by_wwn(new_wwn.pop())
new_log_drive_properties = new_logical_disk.get_logical_drive_dict()
logical_disk.update(new_log_drive_properties)
wwns_before_create = wwns_after_create.copy()
_update_physical_disk_details(raid_config, server)
return raid_config | [
"def",
"create_configuration",
"(",
"raid_config",
")",
":",
"server",
"=",
"objects",
".",
"Server",
"(",
")",
"select_controllers",
"=",
"lambda",
"x",
":",
"not",
"x",
".",
"properties",
".",
"get",
"(",
"'HBA Mode Enabled'",
",",
"False",
")",
"_select_c... | Create a RAID configuration on this server.
This method creates the given RAID configuration on the
server based on the input passed.
:param raid_config: The dictionary containing the requested
RAID configuration. This data structure should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100},
<info-for-logical-disk-2>
]}
:returns: the current raid configuration. This is same as raid_config
with some extra properties like root_device_hint, volume_name,
controller, physical_disks, etc filled for each logical disk
after its creation.
:raises exception.InvalidInputError, if input is invalid.
:raises exception.HPSSAOperationError, if all the controllers are in HBA
mode. | [
"Create",
"a",
"RAID",
"configuration",
"on",
"this",
"server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L106-L212 | train | 41,839 |
openstack/proliantutils | proliantutils/hpssa/manager.py | _sort_shared_logical_disks | def _sort_shared_logical_disks(logical_disks):
"""Sort the logical disks based on the following conditions.
When the share_physical_disks is True make sure we create the volume
which needs more disks first. This avoids the situation of insufficient
disks for some logical volume request.
For example,
- two logical disk with number of disks - LD1(3), LD2(4)
- have 4 physical disks
In this case, if we consider LD1 first then LD2 will fail since not
enough disks available to create LD2. So follow a order for allocation
when share_physical_disks is True.
Also RAID1 can share only when there is logical volume with only 2 disks.
So make sure we create RAID 1 first when share_physical_disks is True.
And RAID 1+0 can share only when the logical volume with even number of
disks.
:param logical_disks: 'logical_disks' to be sorted for shared logical
disks.
:returns: the logical disks sorted based the above conditions.
"""
is_shared = (lambda x: True if ('share_physical_disks' in x and
x['share_physical_disks']) else False)
num_of_disks = (lambda x: x['number_of_physical_disks']
if 'number_of_physical_disks' in x else
constants.RAID_LEVEL_MIN_DISKS[x['raid_level']])
# Separate logical disks based on share_physical_disks value.
# 'logical_disks_shared' when share_physical_disks is True and
# 'logical_disks_nonshared' when share_physical_disks is False
logical_disks_shared = []
logical_disks_nonshared = []
for x in logical_disks:
target = (logical_disks_shared if is_shared(x)
else logical_disks_nonshared)
target.append(x)
# Separete logical disks with raid 1 from the 'logical_disks_shared' into
# 'logical_disks_shared_raid1' and remaining as
# 'logical_disks_shared_excl_raid1'.
logical_disks_shared_raid1 = []
logical_disks_shared_excl_raid1 = []
for x in logical_disks_shared:
target = (logical_disks_shared_raid1 if x['raid_level'] == '1'
else logical_disks_shared_excl_raid1)
target.append(x)
# Sort the 'logical_disks_shared' in reverse order based on
# 'number_of_physical_disks' attribute, if provided, otherwise minimum
# disks required to create the logical volume.
logical_disks_shared = sorted(logical_disks_shared_excl_raid1,
reverse=True,
key=num_of_disks)
# Move RAID 1+0 to first in 'logical_disks_shared' when number of physical
# disks needed to create logical volume cannot be shared with odd number of
# disks and disks higher than that of RAID 1+0.
check = True
for x in logical_disks_shared:
if x['raid_level'] == "1+0":
x_num = num_of_disks(x)
for y in logical_disks_shared:
if y['raid_level'] != "1+0":
y_num = num_of_disks(y)
if x_num < y_num:
check = (True if y_num % 2 == 0 else False)
if check:
break
if not check:
logical_disks_shared.remove(x)
logical_disks_shared.insert(0, x)
check = True
# Final 'logical_disks_sorted' list should have non shared logical disks
# first, followed by shared logical disks with RAID 1, and finally by the
# shared logical disks sorted based on number of disks and RAID 1+0
# condition.
logical_disks_sorted = (logical_disks_nonshared +
logical_disks_shared_raid1 +
logical_disks_shared)
return logical_disks_sorted | python | def _sort_shared_logical_disks(logical_disks):
"""Sort the logical disks based on the following conditions.
When the share_physical_disks is True make sure we create the volume
which needs more disks first. This avoids the situation of insufficient
disks for some logical volume request.
For example,
- two logical disk with number of disks - LD1(3), LD2(4)
- have 4 physical disks
In this case, if we consider LD1 first then LD2 will fail since not
enough disks available to create LD2. So follow a order for allocation
when share_physical_disks is True.
Also RAID1 can share only when there is logical volume with only 2 disks.
So make sure we create RAID 1 first when share_physical_disks is True.
And RAID 1+0 can share only when the logical volume with even number of
disks.
:param logical_disks: 'logical_disks' to be sorted for shared logical
disks.
:returns: the logical disks sorted based the above conditions.
"""
is_shared = (lambda x: True if ('share_physical_disks' in x and
x['share_physical_disks']) else False)
num_of_disks = (lambda x: x['number_of_physical_disks']
if 'number_of_physical_disks' in x else
constants.RAID_LEVEL_MIN_DISKS[x['raid_level']])
# Separate logical disks based on share_physical_disks value.
# 'logical_disks_shared' when share_physical_disks is True and
# 'logical_disks_nonshared' when share_physical_disks is False
logical_disks_shared = []
logical_disks_nonshared = []
for x in logical_disks:
target = (logical_disks_shared if is_shared(x)
else logical_disks_nonshared)
target.append(x)
# Separete logical disks with raid 1 from the 'logical_disks_shared' into
# 'logical_disks_shared_raid1' and remaining as
# 'logical_disks_shared_excl_raid1'.
logical_disks_shared_raid1 = []
logical_disks_shared_excl_raid1 = []
for x in logical_disks_shared:
target = (logical_disks_shared_raid1 if x['raid_level'] == '1'
else logical_disks_shared_excl_raid1)
target.append(x)
# Sort the 'logical_disks_shared' in reverse order based on
# 'number_of_physical_disks' attribute, if provided, otherwise minimum
# disks required to create the logical volume.
logical_disks_shared = sorted(logical_disks_shared_excl_raid1,
reverse=True,
key=num_of_disks)
# Move RAID 1+0 to first in 'logical_disks_shared' when number of physical
# disks needed to create logical volume cannot be shared with odd number of
# disks and disks higher than that of RAID 1+0.
check = True
for x in logical_disks_shared:
if x['raid_level'] == "1+0":
x_num = num_of_disks(x)
for y in logical_disks_shared:
if y['raid_level'] != "1+0":
y_num = num_of_disks(y)
if x_num < y_num:
check = (True if y_num % 2 == 0 else False)
if check:
break
if not check:
logical_disks_shared.remove(x)
logical_disks_shared.insert(0, x)
check = True
# Final 'logical_disks_sorted' list should have non shared logical disks
# first, followed by shared logical disks with RAID 1, and finally by the
# shared logical disks sorted based on number of disks and RAID 1+0
# condition.
logical_disks_sorted = (logical_disks_nonshared +
logical_disks_shared_raid1 +
logical_disks_shared)
return logical_disks_sorted | [
"def",
"_sort_shared_logical_disks",
"(",
"logical_disks",
")",
":",
"is_shared",
"=",
"(",
"lambda",
"x",
":",
"True",
"if",
"(",
"'share_physical_disks'",
"in",
"x",
"and",
"x",
"[",
"'share_physical_disks'",
"]",
")",
"else",
"False",
")",
"num_of_disks",
"... | Sort the logical disks based on the following conditions.
When the share_physical_disks is True make sure we create the volume
which needs more disks first. This avoids the situation of insufficient
disks for some logical volume request.
For example,
- two logical disk with number of disks - LD1(3), LD2(4)
- have 4 physical disks
In this case, if we consider LD1 first then LD2 will fail since not
enough disks available to create LD2. So follow a order for allocation
when share_physical_disks is True.
Also RAID1 can share only when there is logical volume with only 2 disks.
So make sure we create RAID 1 first when share_physical_disks is True.
And RAID 1+0 can share only when the logical volume with even number of
disks.
:param logical_disks: 'logical_disks' to be sorted for shared logical
disks.
:returns: the logical disks sorted based the above conditions. | [
"Sort",
"the",
"logical",
"disks",
"based",
"on",
"the",
"following",
"conditions",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L215-L297 | train | 41,840 |
openstack/proliantutils | proliantutils/hpssa/manager.py | delete_configuration | def delete_configuration():
"""Delete a RAID configuration on this server.
:returns: the current RAID configuration after deleting all
the logical disks.
"""
server = objects.Server()
select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
False)
_select_controllers_by(server, select_controllers, 'RAID enabled')
for controller in server.controllers:
# Trigger delete only if there is some RAID array, otherwise
# hpssacli/ssacli will fail saying "no logical drives found.".
if controller.raid_arrays:
controller.delete_all_logical_drives()
return get_configuration() | python | def delete_configuration():
"""Delete a RAID configuration on this server.
:returns: the current RAID configuration after deleting all
the logical disks.
"""
server = objects.Server()
select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
False)
_select_controllers_by(server, select_controllers, 'RAID enabled')
for controller in server.controllers:
# Trigger delete only if there is some RAID array, otherwise
# hpssacli/ssacli will fail saying "no logical drives found.".
if controller.raid_arrays:
controller.delete_all_logical_drives()
return get_configuration() | [
"def",
"delete_configuration",
"(",
")",
":",
"server",
"=",
"objects",
".",
"Server",
"(",
")",
"select_controllers",
"=",
"lambda",
"x",
":",
"not",
"x",
".",
"properties",
".",
"get",
"(",
"'HBA Mode Enabled'",
",",
"False",
")",
"_select_controllers_by",
... | Delete a RAID configuration on this server.
:returns: the current RAID configuration after deleting all
the logical disks. | [
"Delete",
"a",
"RAID",
"configuration",
"on",
"this",
"server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L300-L317 | train | 41,841 |
openstack/proliantutils | proliantutils/hpssa/manager.py | get_configuration | def get_configuration():
"""Get the current RAID configuration.
Get the RAID configuration from the server and return it
as a dictionary.
:returns: A dictionary of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
}
"""
server = objects.Server()
logical_drives = server.get_logical_drives()
raid_config = {}
raid_config['logical_disks'] = []
for logical_drive in logical_drives:
logical_drive_dict = logical_drive.get_logical_drive_dict()
raid_config['logical_disks'].append(logical_drive_dict)
_update_physical_disk_details(raid_config, server)
return raid_config | python | def get_configuration():
"""Get the current RAID configuration.
Get the RAID configuration from the server and return it
as a dictionary.
:returns: A dictionary of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
}
"""
server = objects.Server()
logical_drives = server.get_logical_drives()
raid_config = {}
raid_config['logical_disks'] = []
for logical_drive in logical_drives:
logical_drive_dict = logical_drive.get_logical_drive_dict()
raid_config['logical_disks'].append(logical_drive_dict)
_update_physical_disk_details(raid_config, server)
return raid_config | [
"def",
"get_configuration",
"(",
")",
":",
"server",
"=",
"objects",
".",
"Server",
"(",
")",
"logical_drives",
"=",
"server",
".",
"get_logical_drives",
"(",
")",
"raid_config",
"=",
"{",
"}",
"raid_config",
"[",
"'logical_disks'",
"]",
"=",
"[",
"]",
"fo... | Get the current RAID configuration.
Get the RAID configuration from the server and return it
as a dictionary.
:returns: A dictionary of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
} | [
"Get",
"the",
"current",
"RAID",
"configuration",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L320-L349 | train | 41,842 |
openstack/proliantutils | proliantutils/hpssa/manager.py | erase_devices | def erase_devices():
"""Erase all the drives on this server.
This method performs sanitize erase on all the supported physical drives
in this server. This erase cannot be performed on logical drives.
:returns: a dictionary of controllers with drives and the erase status.
:raises exception.HPSSAException, if none of the drives support
sanitize erase.
"""
server = objects.Server()
for controller in server.controllers:
drives = [x for x in controller.unassigned_physical_drives
if (x.get_physical_drive_dict().get('erase_status', '')
== 'OK')]
if drives:
controller.erase_devices(drives)
while not has_erase_completed():
time.sleep(300)
server.refresh()
status = {}
for controller in server.controllers:
drive_status = {x.id: x.erase_status
for x in controller.unassigned_physical_drives}
sanitize_supported = controller.properties.get(
'Sanitize Erase Supported', 'False')
if sanitize_supported == 'False':
msg = ("Drives overwritten with zeros because sanitize erase "
"is not supported on the controller.")
else:
msg = ("Sanitize Erase performed on the disks attached to "
"the controller.")
drive_status.update({'Summary': msg})
status[controller.id] = drive_status
return status | python | def erase_devices():
"""Erase all the drives on this server.
This method performs sanitize erase on all the supported physical drives
in this server. This erase cannot be performed on logical drives.
:returns: a dictionary of controllers with drives and the erase status.
:raises exception.HPSSAException, if none of the drives support
sanitize erase.
"""
server = objects.Server()
for controller in server.controllers:
drives = [x for x in controller.unassigned_physical_drives
if (x.get_physical_drive_dict().get('erase_status', '')
== 'OK')]
if drives:
controller.erase_devices(drives)
while not has_erase_completed():
time.sleep(300)
server.refresh()
status = {}
for controller in server.controllers:
drive_status = {x.id: x.erase_status
for x in controller.unassigned_physical_drives}
sanitize_supported = controller.properties.get(
'Sanitize Erase Supported', 'False')
if sanitize_supported == 'False':
msg = ("Drives overwritten with zeros because sanitize erase "
"is not supported on the controller.")
else:
msg = ("Sanitize Erase performed on the disks attached to "
"the controller.")
drive_status.update({'Summary': msg})
status[controller.id] = drive_status
return status | [
"def",
"erase_devices",
"(",
")",
":",
"server",
"=",
"objects",
".",
"Server",
"(",
")",
"for",
"controller",
"in",
"server",
".",
"controllers",
":",
"drives",
"=",
"[",
"x",
"for",
"x",
"in",
"controller",
".",
"unassigned_physical_drives",
"if",
"(",
... | Erase all the drives on this server.
This method performs sanitize erase on all the supported physical drives
in this server. This erase cannot be performed on logical drives.
:returns: a dictionary of controllers with drives and the erase status.
:raises exception.HPSSAException, if none of the drives support
sanitize erase. | [
"Erase",
"all",
"the",
"drives",
"on",
"this",
"server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L362-L402 | train | 41,843 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/queue/_deserialization.py | _parse_queue_message_from_headers | def _parse_queue_message_from_headers(response):
'''
Extracts pop receipt and time next visible from headers.
'''
headers = _parse_response_for_dict(response)
message = QueueMessage()
message.pop_receipt = headers.get('x-ms-popreceipt')
message.time_next_visible = parser.parse(headers.get('x-ms-time-next-visible'))
return message | python | def _parse_queue_message_from_headers(response):
'''
Extracts pop receipt and time next visible from headers.
'''
headers = _parse_response_for_dict(response)
message = QueueMessage()
message.pop_receipt = headers.get('x-ms-popreceipt')
message.time_next_visible = parser.parse(headers.get('x-ms-time-next-visible'))
return message | [
"def",
"_parse_queue_message_from_headers",
"(",
"response",
")",
":",
"headers",
"=",
"_parse_response_for_dict",
"(",
"response",
")",
"message",
"=",
"QueueMessage",
"(",
")",
"message",
".",
"pop_receipt",
"=",
"headers",
".",
"get",
"(",
"'x-ms-popreceipt'",
... | Extracts pop receipt and time next visible from headers. | [
"Extracts",
"pop",
"receipt",
"and",
"time",
"next",
"visible",
"from",
"headers",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/queue/_deserialization.py#L45-L55 | train | 41,844 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/storage.py | Storage.volumes | def volumes(self):
"""This property prepares the list of volumes
:return a list of volumes.
"""
return sys_volumes.VolumeCollection(
self._conn, utils.get_subresource_path_by(self, 'Volumes'),
redfish_version=self.redfish_version) | python | def volumes(self):
"""This property prepares the list of volumes
:return a list of volumes.
"""
return sys_volumes.VolumeCollection(
self._conn, utils.get_subresource_path_by(self, 'Volumes'),
redfish_version=self.redfish_version) | [
"def",
"volumes",
"(",
"self",
")",
":",
"return",
"sys_volumes",
".",
"VolumeCollection",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"'Volumes'",
")",
",",
"redfish_version",
"=",
"self",
".",
"redfish_version",... | This property prepares the list of volumes
:return a list of volumes. | [
"This",
"property",
"prepares",
"the",
"list",
"of",
"volumes"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/storage.py#L46-L53 | train | 41,845 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/storage.py | Storage._drives_list | def _drives_list(self):
"""Gets the list of drives
:return a list of drives.
"""
drives_list = []
for member in self.drives:
drives_list.append(sys_drives.Drive(
self._conn, member.get('@odata.id'), self.redfish_version))
return drives_list | python | def _drives_list(self):
"""Gets the list of drives
:return a list of drives.
"""
drives_list = []
for member in self.drives:
drives_list.append(sys_drives.Drive(
self._conn, member.get('@odata.id'), self.redfish_version))
return drives_list | [
"def",
"_drives_list",
"(",
"self",
")",
":",
"drives_list",
"=",
"[",
"]",
"for",
"member",
"in",
"self",
".",
"drives",
":",
"drives_list",
".",
"append",
"(",
"sys_drives",
".",
"Drive",
"(",
"self",
".",
"_conn",
",",
"member",
".",
"get",
"(",
"... | Gets the list of drives
:return a list of drives. | [
"Gets",
"the",
"list",
"of",
"drives"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/storage.py#L55-L64 | train | 41,846 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/storage.py | Storage.has_ssd | def has_ssd(self):
"""Return true if any of the drive is ssd"""
for member in self._drives_list():
if member.media_type == constants.MEDIA_TYPE_SSD:
return True
return False | python | def has_ssd(self):
"""Return true if any of the drive is ssd"""
for member in self._drives_list():
if member.media_type == constants.MEDIA_TYPE_SSD:
return True
return False | [
"def",
"has_ssd",
"(",
"self",
")",
":",
"for",
"member",
"in",
"self",
".",
"_drives_list",
"(",
")",
":",
"if",
"member",
".",
"media_type",
"==",
"constants",
".",
"MEDIA_TYPE_SSD",
":",
"return",
"True",
"return",
"False"
] | Return true if any of the drive is ssd | [
"Return",
"true",
"if",
"any",
"of",
"the",
"drive",
"is",
"ssd"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/storage.py#L78-L83 | train | 41,847 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/storage.py | Storage.has_rotational | def has_rotational(self):
"""Return true if any of the drive is HDD"""
for member in self._drives_list():
if member.media_type == constants.MEDIA_TYPE_HDD:
return True
return False | python | def has_rotational(self):
"""Return true if any of the drive is HDD"""
for member in self._drives_list():
if member.media_type == constants.MEDIA_TYPE_HDD:
return True
return False | [
"def",
"has_rotational",
"(",
"self",
")",
":",
"for",
"member",
"in",
"self",
".",
"_drives_list",
"(",
")",
":",
"if",
"member",
".",
"media_type",
"==",
"constants",
".",
"MEDIA_TYPE_HDD",
":",
"return",
"True",
"return",
"False"
] | Return true if any of the drive is HDD | [
"Return",
"true",
"if",
"any",
"of",
"the",
"drive",
"is",
"HDD"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/storage.py#L87-L92 | train | 41,848 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/storage.py | Storage.has_nvme_ssd | def has_nvme_ssd(self):
"""Return True if the drive is SSD and protocol is NVMe"""
for member in self._drives_list():
if (member.media_type == constants.MEDIA_TYPE_SSD and
member.protocol == constants.PROTOCOL_NVMe):
return True
return False | python | def has_nvme_ssd(self):
"""Return True if the drive is SSD and protocol is NVMe"""
for member in self._drives_list():
if (member.media_type == constants.MEDIA_TYPE_SSD and
member.protocol == constants.PROTOCOL_NVMe):
return True
return False | [
"def",
"has_nvme_ssd",
"(",
"self",
")",
":",
"for",
"member",
"in",
"self",
".",
"_drives_list",
"(",
")",
":",
"if",
"(",
"member",
".",
"media_type",
"==",
"constants",
".",
"MEDIA_TYPE_SSD",
"and",
"member",
".",
"protocol",
"==",
"constants",
".",
"... | Return True if the drive is SSD and protocol is NVMe | [
"Return",
"True",
"if",
"the",
"drive",
"is",
"SSD",
"and",
"protocol",
"is",
"NVMe"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/storage.py#L96-L102 | train | 41,849 |
Azure/azure-multiapi-storage-python | azure/multiapi/cosmosdb/v2017_04_17/common/cloudstorageaccount.py | CloudStorageAccount.create_table_service | def create_table_service(self):
'''
Creates a TableService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.table.tableservice.TableService`
'''
try:
from ..table.tableservice import TableService
return TableService(self.account_name, self.account_key,
sas_token=self.sas_token,
is_emulated=self.is_emulated)
except ImportError:
raise Exception('The package azure-storage-table is required. '
+ 'Please install it using "pip install azure-storage-table"') | python | def create_table_service(self):
'''
Creates a TableService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.table.tableservice.TableService`
'''
try:
from ..table.tableservice import TableService
return TableService(self.account_name, self.account_key,
sas_token=self.sas_token,
is_emulated=self.is_emulated)
except ImportError:
raise Exception('The package azure-storage-table is required. '
+ 'Please install it using "pip install azure-storage-table"') | [
"def",
"create_table_service",
"(",
"self",
")",
":",
"try",
":",
"from",
".",
".",
"table",
".",
"tableservice",
"import",
"TableService",
"return",
"TableService",
"(",
"self",
".",
"account_name",
",",
"self",
".",
"account_key",
",",
"sas_token",
"=",
"s... | Creates a TableService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.table.tableservice.TableService` | [
"Creates",
"a",
"TableService",
"object",
"with",
"the",
"settings",
"specified",
"in",
"the",
"CloudStorageAccount",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/cosmosdb/v2017_04_17/common/cloudstorageaccount.py#L60-L75 | train | 41,850 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/queue/queueservice.py | QueueService.get_queue_service_properties | def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue service, including
logging, analytics and CORS rules.
:param int timeout:
The server timeout, expressed in seconds.
:return: The queue service properties.
:rtype: :class:`~azure.storage.models.ServiceProperties`
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path()
request.query = [
('restype', 'service'),
('comp', 'properties'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_service_properties(response.body) | python | def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue service, including
logging, analytics and CORS rules.
:param int timeout:
The server timeout, expressed in seconds.
:return: The queue service properties.
:rtype: :class:`~azure.storage.models.ServiceProperties`
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path()
request.query = [
('restype', 'service'),
('comp', 'properties'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_service_properties(response.body) | [
"def",
"get_queue_service_properties",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'GET'",
"request",
".",
"host",
"=",
"self",
".",
"_get_host",
"(",
")",
"request",
".",
"... | Gets the properties of a storage account's Queue service, including
logging, analytics and CORS rules.
:param int timeout:
The server timeout, expressed in seconds.
:return: The queue service properties.
:rtype: :class:`~azure.storage.models.ServiceProperties` | [
"Gets",
"the",
"properties",
"of",
"a",
"storage",
"account",
"s",
"Queue",
"service",
"including",
"logging",
"analytics",
"and",
"CORS",
"rules",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/queue/queueservice.py#L276-L297 | train | 41,851 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/queue/queueservice.py | QueueService.get_queue_acl | def get_queue_acl(self, queue_name, timeout=None):
'''
Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
:param str queue_name:
The name of an existing queue.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [
('comp', 'acl'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_signed_identifiers(response.body) | python | def get_queue_acl(self, queue_name, timeout=None):
'''
Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
:param str queue_name:
The name of an existing queue.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [
('comp', 'acl'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_signed_identifiers(response.body) | [
"def",
"get_queue_acl",
"(",
"self",
",",
"queue_name",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'queue_name'",
",",
"queue_name",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'GET'",
"request",
".",... | Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
:param str queue_name:
The name of an existing queue.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict of str to :class:`~azure.storage.models.AccessPolicy` | [
"Returns",
"details",
"about",
"any",
"stored",
"access",
"policies",
"specified",
"on",
"the",
"queue",
"that",
"may",
"be",
"used",
"with",
"Shared",
"Access",
"Signatures",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/queue/queueservice.py#L577-L600 | train | 41,852 |
openstack/proliantutils | proliantutils/redfish/resources/system/secure_boot.py | SecureBoot.get_allowed_reset_keys_values | def get_allowed_reset_keys_values(self):
"""Get the allowed values for resetting the system.
:returns: A set with the allowed values.
"""
reset_keys_action = self._get_reset_keys_action_element()
if not reset_keys_action.allowed_values:
LOG.warning('Could not figure out the allowed values for the '
'reset keys in secure boot %s', self.path)
return set(mappings.SECUREBOOT_RESET_KEYS_MAP_REV)
return set([mappings.SECUREBOOT_RESET_KEYS_MAP[v] for v in
set(mappings.SECUREBOOT_RESET_KEYS_MAP).
intersection(reset_keys_action.allowed_values)]) | python | def get_allowed_reset_keys_values(self):
"""Get the allowed values for resetting the system.
:returns: A set with the allowed values.
"""
reset_keys_action = self._get_reset_keys_action_element()
if not reset_keys_action.allowed_values:
LOG.warning('Could not figure out the allowed values for the '
'reset keys in secure boot %s', self.path)
return set(mappings.SECUREBOOT_RESET_KEYS_MAP_REV)
return set([mappings.SECUREBOOT_RESET_KEYS_MAP[v] for v in
set(mappings.SECUREBOOT_RESET_KEYS_MAP).
intersection(reset_keys_action.allowed_values)]) | [
"def",
"get_allowed_reset_keys_values",
"(",
"self",
")",
":",
"reset_keys_action",
"=",
"self",
".",
"_get_reset_keys_action_element",
"(",
")",
"if",
"not",
"reset_keys_action",
".",
"allowed_values",
":",
"LOG",
".",
"warning",
"(",
"'Could not figure out the allowed... | Get the allowed values for resetting the system.
:returns: A set with the allowed values. | [
"Get",
"the",
"allowed",
"values",
"for",
"resetting",
"the",
"system",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/secure_boot.py#L84-L98 | train | 41,853 |
openstack/proliantutils | proliantutils/redfish/resources/system/secure_boot.py | SecureBoot.reset_keys | def reset_keys(self, target_value):
"""Resets the secure boot keys.
:param target_value: The target value to be set.
:raises: InvalidInputError, if the target value is not
allowed.
:raises: SushyError, on an error from iLO.
"""
valid_keys_resets = self.get_allowed_reset_keys_values()
if target_value not in valid_keys_resets:
msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
'invalid. Valid values are: %(valid_keys_reset_values)s' %
{'parameter': 'target_value', 'target_value': target_value,
'valid_keys_reset_values': valid_keys_resets})
raise exception.InvalidInputError(msg)
value = mappings.SECUREBOOT_RESET_KEYS_MAP_REV[target_value]
target_uri = (
self._get_reset_keys_action_element().target_uri)
self._conn.post(target_uri, data={'ResetKeysType': value}) | python | def reset_keys(self, target_value):
"""Resets the secure boot keys.
:param target_value: The target value to be set.
:raises: InvalidInputError, if the target value is not
allowed.
:raises: SushyError, on an error from iLO.
"""
valid_keys_resets = self.get_allowed_reset_keys_values()
if target_value not in valid_keys_resets:
msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
'invalid. Valid values are: %(valid_keys_reset_values)s' %
{'parameter': 'target_value', 'target_value': target_value,
'valid_keys_reset_values': valid_keys_resets})
raise exception.InvalidInputError(msg)
value = mappings.SECUREBOOT_RESET_KEYS_MAP_REV[target_value]
target_uri = (
self._get_reset_keys_action_element().target_uri)
self._conn.post(target_uri, data={'ResetKeysType': value}) | [
"def",
"reset_keys",
"(",
"self",
",",
"target_value",
")",
":",
"valid_keys_resets",
"=",
"self",
".",
"get_allowed_reset_keys_values",
"(",
")",
"if",
"target_value",
"not",
"in",
"valid_keys_resets",
":",
"msg",
"=",
"(",
"'The parameter \"%(parameter)s\" value \"%... | Resets the secure boot keys.
:param target_value: The target value to be set.
:raises: InvalidInputError, if the target value is not
allowed.
:raises: SushyError, on an error from iLO. | [
"Resets",
"the",
"secure",
"boot",
"keys",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/secure_boot.py#L100-L120 | train | 41,854 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_deserialization.py | _parse_response_for_dict | def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard
http headers.'''
if response is None:
return None
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'connection']
return_dict = _HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict | python | def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard
http headers.'''
if response is None:
return None
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'connection']
return_dict = _HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict | [
"def",
"_parse_response_for_dict",
"(",
"response",
")",
":",
"if",
"response",
"is",
"None",
":",
"return",
"None",
"http_headers",
"=",
"[",
"'server'",
",",
"'date'",
",",
"'location'",
",",
"'host'",
",",
"'via'",
",",
"'proxy-connection'",
",",
"'connecti... | Extracts name-values from response header. Filter out the standard
http headers. | [
"Extracts",
"name",
"-",
"values",
"from",
"response",
"header",
".",
"Filter",
"out",
"the",
"standard",
"http",
"headers",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_deserialization.py#L106-L120 | train | 41,855 |
openstack/proliantutils | proliantutils/redfish/resources/system/iscsi.py | ISCSIResource.iscsi_settings | def iscsi_settings(self):
"""Property to provide reference to iSCSI settings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return ISCSISettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) | python | def iscsi_settings(self):
"""Property to provide reference to iSCSI settings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return ISCSISettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) | [
"def",
"iscsi_settings",
"(",
"self",
")",
":",
"return",
"ISCSISettings",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"[",
"\"@Redfish.Settings\"",
",",
"\"SettingsObject\"",
"]",
")",
",",
"redfish_version",
"=",
... | Property to provide reference to iSCSI settings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | [
"Property",
"to",
"provide",
"reference",
"to",
"iSCSI",
"settings",
"instance"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/iscsi.py#L42-L51 | train | 41,856 |
openstack/proliantutils | proliantutils/redfish/resources/system/iscsi.py | ISCSISettings.update_iscsi_settings | def update_iscsi_settings(self, iscsi_data):
"""Update iscsi data
:param data: default iscsi config data
"""
self._conn.patch(self.path, data=iscsi_data) | python | def update_iscsi_settings(self, iscsi_data):
"""Update iscsi data
:param data: default iscsi config data
"""
self._conn.patch(self.path, data=iscsi_data) | [
"def",
"update_iscsi_settings",
"(",
"self",
",",
"iscsi_data",
")",
":",
"self",
".",
"_conn",
".",
"patch",
"(",
"self",
".",
"path",
",",
"data",
"=",
"iscsi_data",
")"
] | Update iscsi data
:param data: default iscsi config data | [
"Update",
"iscsi",
"data"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/iscsi.py#L61-L66 | train | 41,857 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/smart_storage.py | HPESmartStorage.array_controllers | def array_controllers(self):
"""This property gets the list of instances for array controllers
This property gets the list of instances for array controllers
:returns: a list of instances of array controllers.
"""
return array_controller.HPEArrayControllerCollection(
self._conn, utils.get_subresource_path_by(
self, ['Links', 'ArrayControllers']),
redfish_version=self.redfish_version) | python | def array_controllers(self):
"""This property gets the list of instances for array controllers
This property gets the list of instances for array controllers
:returns: a list of instances of array controllers.
"""
return array_controller.HPEArrayControllerCollection(
self._conn, utils.get_subresource_path_by(
self, ['Links', 'ArrayControllers']),
redfish_version=self.redfish_version) | [
"def",
"array_controllers",
"(",
"self",
")",
":",
"return",
"array_controller",
".",
"HPEArrayControllerCollection",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"[",
"'Links'",
",",
"'ArrayControllers'",
"]",
")",
... | This property gets the list of instances for array controllers
This property gets the list of instances for array controllers
:returns: a list of instances of array controllers. | [
"This",
"property",
"gets",
"the",
"list",
"of",
"instances",
"for",
"array",
"controllers"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/smart_storage.py#L37-L46 | train | 41,858 |
openstack/proliantutils | proliantutils/ilo/common.py | wait_for_operation_to_complete | def wait_for_operation_to_complete(
has_operation_completed, retries=10, delay_bw_retries=5,
delay_before_attempts=10, failover_exc=exception.IloError,
failover_msg=("Operation did not complete even after multiple "
"attempts."), is_silent_loop_exit=False):
"""Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError.
"""
retry_count = retries
# Delay for ``delay_before_attempts`` secs, before beginning any attempt
time.sleep(delay_before_attempts)
while retry_count:
try:
LOG.debug("Calling '%s', retries left: %d",
has_operation_completed.__name__, retry_count)
if has_operation_completed():
break
except exception.IloError:
pass
time.sleep(delay_bw_retries)
retry_count -= 1
else:
LOG.debug("Max retries exceeded with: '%s'",
has_operation_completed.__name__)
if not is_silent_loop_exit:
raise failover_exc(failover_msg) | python | def wait_for_operation_to_complete(
has_operation_completed, retries=10, delay_bw_retries=5,
delay_before_attempts=10, failover_exc=exception.IloError,
failover_msg=("Operation did not complete even after multiple "
"attempts."), is_silent_loop_exit=False):
"""Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError.
"""
retry_count = retries
# Delay for ``delay_before_attempts`` secs, before beginning any attempt
time.sleep(delay_before_attempts)
while retry_count:
try:
LOG.debug("Calling '%s', retries left: %d",
has_operation_completed.__name__, retry_count)
if has_operation_completed():
break
except exception.IloError:
pass
time.sleep(delay_bw_retries)
retry_count -= 1
else:
LOG.debug("Max retries exceeded with: '%s'",
has_operation_completed.__name__)
if not is_silent_loop_exit:
raise failover_exc(failover_msg) | [
"def",
"wait_for_operation_to_complete",
"(",
"has_operation_completed",
",",
"retries",
"=",
"10",
",",
"delay_bw_retries",
"=",
"5",
",",
"delay_before_attempts",
"=",
"10",
",",
"failover_exc",
"=",
"exception",
".",
"IloError",
",",
"failover_msg",
"=",
"(",
"... | Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError. | [
"Attempts",
"the",
"provided",
"operation",
"for",
"a",
"specified",
"number",
"of",
"times",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L37-L81 | train | 41,859 |
openstack/proliantutils | proliantutils/ilo/common.py | wait_for_ilo_after_reset | def wait_for_ilo_after_reset(ilo_object):
"""Continuously polls for iLO to come up after reset."""
is_ilo_up_after_reset = lambda: ilo_object.get_product_name() is not None
is_ilo_up_after_reset.__name__ = 'is_ilo_up_after_reset'
wait_for_operation_to_complete(
is_ilo_up_after_reset,
failover_exc=exception.IloConnectionError,
failover_msg='iLO is not up after reset.'
) | python | def wait_for_ilo_after_reset(ilo_object):
"""Continuously polls for iLO to come up after reset."""
is_ilo_up_after_reset = lambda: ilo_object.get_product_name() is not None
is_ilo_up_after_reset.__name__ = 'is_ilo_up_after_reset'
wait_for_operation_to_complete(
is_ilo_up_after_reset,
failover_exc=exception.IloConnectionError,
failover_msg='iLO is not up after reset.'
) | [
"def",
"wait_for_ilo_after_reset",
"(",
"ilo_object",
")",
":",
"is_ilo_up_after_reset",
"=",
"lambda",
":",
"ilo_object",
".",
"get_product_name",
"(",
")",
"is",
"not",
"None",
"is_ilo_up_after_reset",
".",
"__name__",
"=",
"'is_ilo_up_after_reset'",
"wait_for_operati... | Continuously polls for iLO to come up after reset. | [
"Continuously",
"polls",
"for",
"iLO",
"to",
"come",
"up",
"after",
"reset",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L84-L94 | train | 41,860 |
openstack/proliantutils | proliantutils/ilo/common.py | wait_for_ribcl_firmware_update_to_complete | def wait_for_ribcl_firmware_update_to_complete(ribcl_object):
"""Continuously checks for iLO firmware update to complete."""
def is_ilo_reset_initiated():
"""Checks for initiation of iLO reset
Invokes the ``get_product_name`` api and returns
i) True, if exception gets raised as that marks the iLO reset
initiation.
ii) False, if the call gets through without any failure, marking
that iLO is yet to be reset.
"""
try:
LOG.debug(ribcl_object._('Checking for iLO reset...'))
ribcl_object.get_product_name()
return False
except exception.IloError:
LOG.debug(ribcl_object._('iLO is being reset...'))
return True
# Note(deray): wait for 5 secs, before checking if iLO reset got triggered
# at every interval of 6 secs. This looping call happens for 10 times.
# Once it comes out of the wait of iLO reset trigger, then it starts
# waiting for iLO to be up again after reset.
wait_for_operation_to_complete(
is_ilo_reset_initiated,
delay_bw_retries=6,
delay_before_attempts=5,
is_silent_loop_exit=True
)
wait_for_ilo_after_reset(ribcl_object) | python | def wait_for_ribcl_firmware_update_to_complete(ribcl_object):
"""Continuously checks for iLO firmware update to complete."""
def is_ilo_reset_initiated():
"""Checks for initiation of iLO reset
Invokes the ``get_product_name`` api and returns
i) True, if exception gets raised as that marks the iLO reset
initiation.
ii) False, if the call gets through without any failure, marking
that iLO is yet to be reset.
"""
try:
LOG.debug(ribcl_object._('Checking for iLO reset...'))
ribcl_object.get_product_name()
return False
except exception.IloError:
LOG.debug(ribcl_object._('iLO is being reset...'))
return True
# Note(deray): wait for 5 secs, before checking if iLO reset got triggered
# at every interval of 6 secs. This looping call happens for 10 times.
# Once it comes out of the wait of iLO reset trigger, then it starts
# waiting for iLO to be up again after reset.
wait_for_operation_to_complete(
is_ilo_reset_initiated,
delay_bw_retries=6,
delay_before_attempts=5,
is_silent_loop_exit=True
)
wait_for_ilo_after_reset(ribcl_object) | [
"def",
"wait_for_ribcl_firmware_update_to_complete",
"(",
"ribcl_object",
")",
":",
"def",
"is_ilo_reset_initiated",
"(",
")",
":",
"\"\"\"Checks for initiation of iLO reset\n\n Invokes the ``get_product_name`` api and returns\n i) True, if exception gets raised as that marks... | Continuously checks for iLO firmware update to complete. | [
"Continuously",
"checks",
"for",
"iLO",
"firmware",
"update",
"to",
"complete",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L144-L174 | train | 41,861 |
openstack/proliantutils | proliantutils/ilo/common.py | get_filename_and_extension_of | def get_filename_and_extension_of(target_file):
"""Gets the base filename and extension of the target file.
:param target_file: the complete path of the target file
:returns: base filename and extension
"""
base_target_filename = os.path.basename(target_file)
file_name, file_ext_with_dot = os.path.splitext(base_target_filename)
return file_name, file_ext_with_dot | python | def get_filename_and_extension_of(target_file):
"""Gets the base filename and extension of the target file.
:param target_file: the complete path of the target file
:returns: base filename and extension
"""
base_target_filename = os.path.basename(target_file)
file_name, file_ext_with_dot = os.path.splitext(base_target_filename)
return file_name, file_ext_with_dot | [
"def",
"get_filename_and_extension_of",
"(",
"target_file",
")",
":",
"base_target_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"target_file",
")",
"file_name",
",",
"file_ext_with_dot",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"base_target_filena... | Gets the base filename and extension of the target file.
:param target_file: the complete path of the target file
:returns: base filename and extension | [
"Gets",
"the",
"base",
"filename",
"and",
"extension",
"of",
"the",
"target",
"file",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L184-L192 | train | 41,862 |
openstack/proliantutils | proliantutils/ilo/common.py | add_exec_permission_to | def add_exec_permission_to(target_file):
"""Add executable permissions to the file
:param target_file: the target file whose permission to be changed
"""
mode = os.stat(target_file).st_mode
os.chmod(target_file, mode | stat.S_IXUSR) | python | def add_exec_permission_to(target_file):
"""Add executable permissions to the file
:param target_file: the target file whose permission to be changed
"""
mode = os.stat(target_file).st_mode
os.chmod(target_file, mode | stat.S_IXUSR) | [
"def",
"add_exec_permission_to",
"(",
"target_file",
")",
":",
"mode",
"=",
"os",
".",
"stat",
"(",
"target_file",
")",
".",
"st_mode",
"os",
".",
"chmod",
"(",
"target_file",
",",
"mode",
"|",
"stat",
".",
"S_IXUSR",
")"
] | Add executable permissions to the file
:param target_file: the target file whose permission to be changed | [
"Add",
"executable",
"permissions",
"to",
"the",
"file"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L195-L201 | train | 41,863 |
openstack/proliantutils | proliantutils/ilo/common.py | get_major_minor | def get_major_minor(ilo_ver_str):
"""Extract the major and minor number from the passed string
:param ilo_ver_str: the string that contains the version information
:returns: String of the form "<major>.<minor>" or None
"""
if not ilo_ver_str:
return None
try:
# Note(vmud213):This logic works for all strings
# that contain the version info as <major>.<minor>
# Formats of the strings:
# Release version -> "2.50 Feb 18 2016"
# Debug version -> "iLO 4 v2.50"
# random version -> "XYZ ABC 2.30"
pattern = re.search(ILO_VER_STR_PATTERN, ilo_ver_str)
if pattern:
matched = pattern.group(0)
if matched:
return matched
return None
except Exception:
return None | python | def get_major_minor(ilo_ver_str):
"""Extract the major and minor number from the passed string
:param ilo_ver_str: the string that contains the version information
:returns: String of the form "<major>.<minor>" or None
"""
if not ilo_ver_str:
return None
try:
# Note(vmud213):This logic works for all strings
# that contain the version info as <major>.<minor>
# Formats of the strings:
# Release version -> "2.50 Feb 18 2016"
# Debug version -> "iLO 4 v2.50"
# random version -> "XYZ ABC 2.30"
pattern = re.search(ILO_VER_STR_PATTERN, ilo_ver_str)
if pattern:
matched = pattern.group(0)
if matched:
return matched
return None
except Exception:
return None | [
"def",
"get_major_minor",
"(",
"ilo_ver_str",
")",
":",
"if",
"not",
"ilo_ver_str",
":",
"return",
"None",
"try",
":",
"# Note(vmud213):This logic works for all strings",
"# that contain the version info as <major>.<minor>",
"# Formats of the strings:",
"# Release version -> \"... | Extract the major and minor number from the passed string
:param ilo_ver_str: the string that contains the version information
:returns: String of the form "<major>.<minor>" or None | [
"Extract",
"the",
"major",
"and",
"minor",
"number",
"from",
"the",
"passed",
"string"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L204-L226 | train | 41,864 |
openstack/proliantutils | proliantutils/ilo/common.py | get_supported_boot_modes | def get_supported_boot_modes(supported_boot_mode_constant):
"""Retrieves the server supported boot modes
It retrieves the server supported boot modes as a namedtuple
containing 'boot_mode_bios' as 'true'/'false' (in string format)
and 'boot_mode_uefi' again as true'/'false'.
:param supported_boot_mode_constant: supported boot_mode constant
:returns: A namedtuple containing ``boot_mode_bios`` and
``boot_mode_uefi`` with 'true'/'false' set accordingly for
legacy BIOS and UEFI boot modes.
"""
boot_mode_bios = 'false'
boot_mode_uefi = 'false'
if (supported_boot_mode_constant ==
constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY):
boot_mode_bios = 'true'
elif (supported_boot_mode_constant ==
constants.SUPPORTED_BOOT_MODE_UEFI_ONLY):
boot_mode_uefi = 'true'
elif (supported_boot_mode_constant ==
constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI):
boot_mode_bios = 'true'
boot_mode_uefi = 'true'
return SupportedBootModes(boot_mode_bios=boot_mode_bios,
boot_mode_uefi=boot_mode_uefi) | python | def get_supported_boot_modes(supported_boot_mode_constant):
"""Retrieves the server supported boot modes
It retrieves the server supported boot modes as a namedtuple
containing 'boot_mode_bios' as 'true'/'false' (in string format)
and 'boot_mode_uefi' again as true'/'false'.
:param supported_boot_mode_constant: supported boot_mode constant
:returns: A namedtuple containing ``boot_mode_bios`` and
``boot_mode_uefi`` with 'true'/'false' set accordingly for
legacy BIOS and UEFI boot modes.
"""
boot_mode_bios = 'false'
boot_mode_uefi = 'false'
if (supported_boot_mode_constant ==
constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY):
boot_mode_bios = 'true'
elif (supported_boot_mode_constant ==
constants.SUPPORTED_BOOT_MODE_UEFI_ONLY):
boot_mode_uefi = 'true'
elif (supported_boot_mode_constant ==
constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI):
boot_mode_bios = 'true'
boot_mode_uefi = 'true'
return SupportedBootModes(boot_mode_bios=boot_mode_bios,
boot_mode_uefi=boot_mode_uefi) | [
"def",
"get_supported_boot_modes",
"(",
"supported_boot_mode_constant",
")",
":",
"boot_mode_bios",
"=",
"'false'",
"boot_mode_uefi",
"=",
"'false'",
"if",
"(",
"supported_boot_mode_constant",
"==",
"constants",
".",
"SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY",
")",
":",
"boot_m... | Retrieves the server supported boot modes
It retrieves the server supported boot modes as a namedtuple
containing 'boot_mode_bios' as 'true'/'false' (in string format)
and 'boot_mode_uefi' again as true'/'false'.
:param supported_boot_mode_constant: supported boot_mode constant
:returns: A namedtuple containing ``boot_mode_bios`` and
``boot_mode_uefi`` with 'true'/'false' set accordingly for
legacy BIOS and UEFI boot modes. | [
"Retrieves",
"the",
"server",
"supported",
"boot",
"modes"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L229-L254 | train | 41,865 |
def refresh(self):
    """Re-read the RAID configuration and rebuild child objects.

    Discards every cached Controller object held by this server,
    re-runs the hpssacli/ssacli query, reconstructs the controllers
    from the fresh output and records the refresh timestamp.

    :raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
    """
    raid_info = _convert_to_dict(self._get_all_details())
    self.controllers = [Controller(ctrl_id, ctrl_info, self)
                        for ctrl_id, ctrl_info in raid_info.items()]
    self.last_updated = time.time()
self.last_updated = time.time() | python | def refresh(self):
"""Refresh the server and it's child objects.
This method removes all the cache information in the server
and it's child objects, and fetches the information again from
the server using hpssacli/ssacli command.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
"""
config = self._get_all_details()
raid_info = _convert_to_dict(config)
self.controllers = []
for key, value in raid_info.items():
self.controllers.append(Controller(key, value, self))
self.last_updated = time.time() | [
"def",
"refresh",
"(",
"self",
")",
":",
"config",
"=",
"self",
".",
"_get_all_details",
"(",
")",
"raid_info",
"=",
"_convert_to_dict",
"(",
"config",
")",
"self",
".",
"controllers",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"raid_info",
".",
... | Refresh the server and it's child objects.
This method removes all the cache information in the server
and it's child objects, and fetches the information again from
the server using hpssacli/ssacli command.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed. | [
"Refresh",
"the",
"server",
"and",
"it",
"s",
"child",
"objects",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L239-L256 | train | 41,866 |
def get_controller_by_id(self, id):
    """Return the controller with the given id.

    :param id: id of the controller, for example
        'Smart Array P822 in Slot 2'
    :returns: Controller object which has the id or None if the
        controller is not found.
    """
    return next((ctrl for ctrl in self.controllers if ctrl.id == id),
                None)
return None | python | def get_controller_by_id(self, id):
"""Get the controller object given the id.
This method returns the controller object for given id.
:param id: id of the controller, for example
'Smart Array P822 in Slot 2'
:returns: Controller object which has the id or None if the
controller is not found.
"""
for controller in self.controllers:
if controller.id == id:
return controller
return None | [
"def",
"get_controller_by_id",
"(",
"self",
",",
"id",
")",
":",
"for",
"controller",
"in",
"self",
".",
"controllers",
":",
"if",
"controller",
".",
"id",
"==",
"id",
":",
"return",
"controller",
"return",
"None"
] | Get the controller object given the id.
This method returns the controller object for given id.
:param id: id of the controller, for example
'Smart Array P822 in Slot 2'
:returns: Controller object which has the id or None if the
controller is not found. | [
"Get",
"the",
"controller",
"object",
"given",
"the",
"id",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L258-L271 | train | 41,867 |
def get_logical_drives(self):
    """Return every RAID logical drive known to this server.

    Walks all controllers and their RAID arrays, collecting the
    logical drives found on each, in controller/array order.

    :returns: a list of LogicalDrive objects.
    """
    return [logical_drive
            for controller in self.controllers
            for array in controller.raid_arrays
            for logical_drive in array.logical_drives]
return logical_drives | python | def get_logical_drives(self):
"""Get all the RAID logical drives in the Server.
This method returns all the RAID logical drives on the server
by examining all the controllers.
:returns: a list of LogicalDrive objects.
"""
logical_drives = []
for controller in self.controllers:
for array in controller.raid_arrays:
for logical_drive in array.logical_drives:
logical_drives.append(logical_drive)
return logical_drives | [
"def",
"get_logical_drives",
"(",
"self",
")",
":",
"logical_drives",
"=",
"[",
"]",
"for",
"controller",
"in",
"self",
".",
"controllers",
":",
"for",
"array",
"in",
"controller",
".",
"raid_arrays",
":",
"for",
"logical_drive",
"in",
"array",
".",
"logical... | Get all the RAID logical drives in the Server.
This method returns all the RAID logical drives on the server
by examining all the controllers.
:returns: a list of LogicalDrive objects. | [
"Get",
"all",
"the",
"RAID",
"logical",
"drives",
"in",
"the",
"Server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L273-L286 | train | 41,868 |
def get_physical_drives(self):
    """Return every physical drive known to this server.

    For each controller, the unassigned physical drives are collected
    first, followed by the drives belonging to its RAID arrays.

    :returns: a list of PhysicalDrive objects.
    """
    drives = []
    for controller in self.controllers:
        drives.extend(controller.unassigned_physical_drives)
        for array in controller.raid_arrays:
            drives.extend(array.physical_drives)
    return drives
return physical_drives | python | def get_physical_drives(self):
"""Get all the RAID physical drives on the Server.
This method returns all the physical drives on the server
by examining all the controllers.
:returns: a list of PhysicalDrive objects.
"""
physical_drives = []
for controller in self.controllers:
# First add unassigned physical drives.
for physical_drive in controller.unassigned_physical_drives:
physical_drives.append(physical_drive)
# Now add physical drives part of RAID arrays.
for array in controller.raid_arrays:
for physical_drive in array.physical_drives:
physical_drives.append(physical_drive)
return physical_drives | [
"def",
"get_physical_drives",
"(",
"self",
")",
":",
"physical_drives",
"=",
"[",
"]",
"for",
"controller",
"in",
"self",
".",
"controllers",
":",
"# First add unassigned physical drives.",
"for",
"physical_drive",
"in",
"controller",
".",
"unassigned_physical_drives",
... | Get all the RAID physical drives on the Server.
This method returns all the physical drives on the server
by examining all the controllers.
:returns: a list of PhysicalDrive objects. | [
"Get",
"all",
"the",
"RAID",
"physical",
"drives",
"on",
"the",
"Server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L288-L306 | train | 41,869 |
def get_logical_drive_by_wwn(self, wwn):
    """Return the logical drive with the given wwn.

    :param wwn: wwn of the logical drive
    :returns: LogicalDrive object which has the wwn or None if
        logical drive is not found.
    """
    return next((drive for drive in self.get_logical_drives()
                 if drive.wwn == wwn), None)
return None | python | def get_logical_drive_by_wwn(self, wwn):
"""Get the logical drive object given the wwn.
This method returns the logical drive object with the given wwn.
:param wwn: wwn of the logical drive
:returns: LogicalDrive object which has the wwn or None if
logical drive is not found.
"""
disk = [x for x in self.get_logical_drives() if x.wwn == wwn]
if disk:
return disk[0]
return None | [
"def",
"get_logical_drive_by_wwn",
"(",
"self",
",",
"wwn",
")",
":",
"disk",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"get_logical_drives",
"(",
")",
"if",
"x",
".",
"wwn",
"==",
"wwn",
"]",
"if",
"disk",
":",
"return",
"disk",
"[",
"0",
"]",... | Get the logical drive object given the wwn.
This method returns the logical drive object with the given wwn.
:param wwn: wwn of the logical drive
:returns: LogicalDrive object which has the wwn or None if
logical drive is not found. | [
"Get",
"the",
"logical",
"drive",
"object",
"given",
"the",
"wwn",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L308-L320 | train | 41,870 |
def get_physical_drive_by_id(self, id):
    """Return the physical drive with the given id on this controller.

    The controller's unassigned physical drives are searched first,
    then the drives assigned to each of its RAID arrays.

    :param id: id of physical drive, for example '5I:1:1'.
    :returns: PhysicalDrive object having the id, or None if
        physical drive is not found.
    """
    pools = [self.unassigned_physical_drives]
    pools.extend(array.physical_drives for array in self.raid_arrays)
    for pool in pools:
        for drive in pool:
            if drive.id == id:
                return drive
    return None
return None | python | def get_physical_drive_by_id(self, id):
"""Get a PhysicalDrive object for given id.
This method examines both assigned and unassigned physical
drives of the controller and returns the physical drive.
:param id: id of physical drive, for example '5I:1:1'.
:returns: PhysicalDrive object having the id, or None if
physical drive is not found.
"""
for phy_drive in self.unassigned_physical_drives:
if phy_drive.id == id:
return phy_drive
for array in self.raid_arrays:
for phy_drive in array.physical_drives:
if phy_drive.id == id:
return phy_drive
return None | [
"def",
"get_physical_drive_by_id",
"(",
"self",
",",
"id",
")",
":",
"for",
"phy_drive",
"in",
"self",
".",
"unassigned_physical_drives",
":",
"if",
"phy_drive",
".",
"id",
"==",
"id",
":",
"return",
"phy_drive",
"for",
"array",
"in",
"self",
".",
"raid_arra... | Get a PhysicalDrive object for given id.
This method examines both assigned and unassigned physical
drives of the controller and returns the physical drive.
:param id: id of physical drive, for example '5I:1:1'.
:returns: PhysicalDrive object having the id, or None if
physical drive is not found. | [
"Get",
"a",
"PhysicalDrive",
"object",
"for",
"given",
"id",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L350-L367 | train | 41,871 |
def create_logical_drive(self, logical_drive_info):
    """Create a logical drive on this controller.

    Builds the hpssacli/ssacli argument list from the supplied raid
    config entry and runs it, answering 'y' to the confirmation prompt.

    :param logical_drive_info: a dictionary containing the details
        of the logical drive as specified in raid config.
    :raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
    """
    cmd_args = []
    if 'array' in logical_drive_info:
        cmd_args += ['array', logical_drive_info['array']]
    cmd_args += ['create', "type=logicaldrive"]

    if 'physical_disks' in logical_drive_info:
        cmd_args.append(
            "drives=%s" % ','.join(logical_drive_info['physical_disks']))

    # For RAID levels (like 5+0 and 6+0), HPSSA names them differently.
    # Check if we have mapping stored, otherwise use the same.
    requested_level = logical_drive_info['raid_level']
    hpssa_level = constants.RAID_LEVEL_INPUT_TO_HPSSA_MAPPING.get(
        requested_level, requested_level)
    cmd_args.append("raid=%s" % hpssa_level)

    # If size_gb is MAX, don't pass a size argument; HPSSA then
    # allocates the maximum possible size to the logical disk.
    if logical_drive_info['size_gb'] != "MAX":
        cmd_args.append("size=%s" % (logical_drive_info['size_gb'] * 1024))

    self.execute_cmd(*cmd_args, process_input='y')
self.execute_cmd(*cmd_args, process_input='y') | python | def create_logical_drive(self, logical_drive_info):
"""Create a logical drive on the controller.
This method creates a logical drive on the controller when the
logical drive details and physical drive ids are passed to it.
:param logical_drive_info: a dictionary containing the details
of the logical drive as specified in raid config.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
"""
cmd_args = []
if 'array' in logical_drive_info:
cmd_args.extend(['array', logical_drive_info['array']])
cmd_args.extend(['create', "type=logicaldrive"])
if 'physical_disks' in logical_drive_info:
phy_drive_ids = ','.join(logical_drive_info['physical_disks'])
cmd_args.append("drives=%s" % phy_drive_ids)
raid_level = logical_drive_info['raid_level']
# For RAID levels (like 5+0 and 6+0), HPSSA names them differently.
# Check if we have mapping stored, otherwise use the same.
raid_level = constants.RAID_LEVEL_INPUT_TO_HPSSA_MAPPING.get(
raid_level, raid_level)
cmd_args.append("raid=%s" % raid_level)
# If size_gb is MAX, then don't pass size argument. HPSSA will
# automatically allocate the maximum # disks size possible to the
# logical disk.
if logical_drive_info['size_gb'] != "MAX":
size_mb = logical_drive_info['size_gb'] * 1024
cmd_args.append("size=%s" % size_mb)
self.execute_cmd(*cmd_args, process_input='y') | [
"def",
"create_logical_drive",
"(",
"self",
",",
"logical_drive_info",
")",
":",
"cmd_args",
"=",
"[",
"]",
"if",
"'array'",
"in",
"logical_drive_info",
":",
"cmd_args",
".",
"extend",
"(",
"[",
"'array'",
",",
"logical_drive_info",
"[",
"'array'",
"]",
"]",
... | Create a logical drive on the controller.
This method creates a logical drive on the controller when the
logical drive details and physical drive ids are passed to it.
:param logical_drive_info: a dictionary containing the details
of the logical drive as specified in raid config.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed. | [
"Create",
"a",
"logical",
"drive",
"on",
"the",
"controller",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L384-L418 | train | 41,872 |
openstack/proliantutils | proliantutils/hpssa/objects.py | Controller._get_erase_command | def _get_erase_command(self, drive, pattern):
"""Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
:returns: A list of ssacli command arguments.
"""
cmd_args = []
cmd_args.append("pd %s" % drive)
cmd_args.extend(['modify', 'erase', pattern])
if pattern != 'erasepattern=zero':
cmd_args.append('unrestricted=off')
cmd_args.append('forced')
return cmd_args | python | def _get_erase_command(self, drive, pattern):
"""Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
:returns: A list of ssacli command arguments.
"""
cmd_args = []
cmd_args.append("pd %s" % drive)
cmd_args.extend(['modify', 'erase', pattern])
if pattern != 'erasepattern=zero':
cmd_args.append('unrestricted=off')
cmd_args.append('forced')
return cmd_args | [
"def",
"_get_erase_command",
"(",
"self",
",",
"drive",
",",
"pattern",
")",
":",
"cmd_args",
"=",
"[",
"]",
"cmd_args",
".",
"append",
"(",
"\"pd %s\"",
"%",
"drive",
")",
"cmd_args",
".",
"extend",
"(",
"[",
"'modify'",
",",
"'erase'",
",",
"pattern",
... | Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
:returns: A list of ssacli command arguments. | [
"Return",
"the",
"command",
"arguments",
"based",
"on",
"the",
"pattern",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L428-L449 | train | 41,873 |
def erase_devices(self, drives):
    """Sanitize-erase the given physical drives on this controller.

    Each HDD is overwritten with a data pattern and each SSD has its
    storage blocks erased; the drives stay unavailable until the erase
    completes or fails.  When sanitize erase is not supported for a
    disk, the method falls back to writing zeros to it.

    :param drives: A list of drive objects in the controller.
    :raises: HPSSAOperationError, if sanitize erase is not supported.
    """
    for drive in drives:
        # HDDs take a pattern overwrite; other disk types (SSD) take
        # a block erase.
        if drive.disk_type == constants.DISK_TYPE_HDD:
            pattern = 'overwrite'
        else:
            pattern = 'block'
        cmd_args = self._get_erase_command(
            drive.id, 'erasepattern=%s' % pattern)
        stdout = self.execute_cmd(*cmd_args)
        LOG.debug("Sanitize disk erase invoked with erase pattern as "
                  "'%(pattern)s' on disk type: %(disk_type)s."
                  % {'pattern': pattern, 'disk_type': drive.disk_type})
        if "not supported" in str(stdout):
            # Fall back to zero-fill when sanitize erase is rejected.
            new_pattern = 'zero'
            cmd_args = self._get_erase_command(drive.id,
                                               'erasepattern=zero')
            self.execute_cmd(*cmd_args)
            LOG.debug("Sanitize disk erase invoked with erase pattern as "
                      "'%(pattern)s' is not supported on disk type: "
                      "%(disk_type)s. Now its invoked with erase pattern "
                      "as %(new_pattern)s."
                      % {'pattern': pattern, 'disk_type': drive.disk_type,
                         'new_pattern': new_pattern})
'new_pattern': new_pattern}) | python | def erase_devices(self, drives):
"""Perform Erase on all the drives in the controller.
This method erases all the hdd and ssd drives in the controller
by overwriting the drives with patterns for hdd and erasing storage
blocks for ssd drives. The drives would be unavailable until
successful completion or failure of erase operation.
If the sanitize erase is not supported on any disk it will try to
populate zeros on disk drives.
:param drives: A list of drive objects in the controller.
:raises: HPSSAOperationError, if sanitize erase is not supported.
"""
for drive in drives:
pattern = 'overwrite' if (
drive.disk_type == constants.DISK_TYPE_HDD) else 'block'
cmd_args = self._get_erase_command(
drive.id, 'erasepattern=%s' % pattern)
stdout = self.execute_cmd(*cmd_args)
LOG.debug("Sanitize disk erase invoked with erase pattern as "
"'%(pattern)s' on disk type: %(disk_type)s."
% {'pattern': pattern, 'disk_type': drive.disk_type})
if "not supported" in str(stdout):
new_pattern = 'zero'
cmd_args = self._get_erase_command(drive.id,
'erasepattern=zero')
self.execute_cmd(*cmd_args)
LOG.debug("Sanitize disk erase invoked with erase pattern as "
"'%(pattern)s' is not supported on disk type: "
"%(disk_type)s. Now its invoked with erase pattern "
"as %(new_pattern)s."
% {'pattern': pattern, 'disk_type': drive.disk_type,
'new_pattern': new_pattern}) | [
"def",
"erase_devices",
"(",
"self",
",",
"drives",
")",
":",
"for",
"drive",
"in",
"drives",
":",
"pattern",
"=",
"'overwrite'",
"if",
"(",
"drive",
".",
"disk_type",
"==",
"constants",
".",
"DISK_TYPE_HDD",
")",
"else",
"'block'",
"cmd_args",
"=",
"self"... | Perform Erase on all the drives in the controller.
This method erases all the hdd and ssd drives in the controller
by overwriting the drives with patterns for hdd and erasing storage
blocks for ssd drives. The drives would be unavailable until
successful completion or failure of erase operation.
If the sanitize erase is not supported on any disk it will try to
populate zeros on disk drives.
:param drives: A list of drive objects in the controller.
:raises: HPSSAOperationError, if sanitize erase is not supported. | [
"Perform",
"Erase",
"on",
"all",
"the",
"drives",
"in",
"the",
"controller",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L451-L485 | train | 41,874 |
def can_accomodate(self, logical_disk):
    """Check if this RAID array can accomodate the logical disk.

    This method uses hpssacli/ssacli command's option to check if the
    logical disk with desired size and RAID level can be created
    on this RAID array.

    :param logical_disk: Dictionary of logical disk to be created.
    :returns: True, if logical disk can be created on the RAID array
        False, otherwise.
    :raises: HPSSAOperationError, if the hpssacli/ssacli query fails
        for any reason other than an unsupported RAID level.
    """
    raid_level = constants.RAID_LEVEL_INPUT_TO_HPSSA_MAPPING.get(
        logical_disk['raid_level'], logical_disk['raid_level'])
    # "size=?" makes hpssacli/ssacli report the maximum possible size
    # instead of actually creating the logical drive.
    args = ("array", self.id, "create", "type=logicaldrive",
            "raid=%s" % raid_level, "size=?")

    if logical_disk['size_gb'] != "MAX":
        desired_disk_size = logical_disk['size_gb']
    else:
        desired_disk_size = constants.MINIMUM_DISK_SIZE

    try:
        stdout, stderr = self.parent.execute_cmd(
            *args, dont_transform_to_hpssa_exception=True)
    except processutils.ProcessExecutionError as ex:
        # hpssacli/ssacli returns error code 1 when RAID level of the
        # logical disk is not supported on the array.
        # If that's the case, just return saying the logical disk
        # cannot be accomodated in the array.
        # If exit_code is not 1, then it's some other error that we
        # don't expect to appear and hence raise it back.
        if ex.exit_code == 1:
            return False
        else:
            raise exception.HPSSAOperationError(reason=ex)
    except Exception as ex:
        raise exception.HPSSAOperationError(reason=ex)

    # TODO(rameshg87): This always returns in MB, but confirm with
    # HPSSA folks.
    # Raw string fixes the invalid '\d' escape sequence that the
    # original non-raw pattern produced (DeprecationWarning on
    # modern CPython).
    match = re.search(r'Max: (\d+)', stdout)
    if not match:
        return False

    max_size_gb = int(match.group(1)) / 1024
    return desired_disk_size <= max_size_gb
return desired_disk_size <= max_size_gb | python | def can_accomodate(self, logical_disk):
"""Check if this RAID array can accomodate the logical disk.
This method uses hpssacli/ssacli command's option to check if the
logical disk with desired size and RAID level can be created
on this RAID array.
:param logical_disk: Dictionary of logical disk to be created.
:returns: True, if logical disk can be created on the RAID array
False, otherwise.
"""
raid_level = constants.RAID_LEVEL_INPUT_TO_HPSSA_MAPPING.get(
logical_disk['raid_level'], logical_disk['raid_level'])
args = ("array", self.id, "create", "type=logicaldrive",
"raid=%s" % raid_level, "size=?")
if logical_disk['size_gb'] != "MAX":
desired_disk_size = logical_disk['size_gb']
else:
desired_disk_size = constants.MINIMUM_DISK_SIZE
try:
stdout, stderr = self.parent.execute_cmd(
*args, dont_transform_to_hpssa_exception=True)
except processutils.ProcessExecutionError as ex:
# hpssacli/ssacli returns error code 1 when RAID level of the
# logical disk is not supported on the array.
# If that's the case, just return saying the logical disk
# cannot be accomodated in the array.
# If exist_code is not 1, then it's some other error that we
# don't expect to appear and hence raise it back.
if ex.exit_code == 1:
return False
else:
raise exception.HPSSAOperationError(reason=ex)
except Exception as ex:
raise exception.HPSSAOperationError(reason=ex)
# TODO(rameshg87): This always returns in MB, but confirm with
# HPSSA folks.
match = re.search('Max: (\d+)', stdout)
if not match:
return False
max_size_gb = int(match.group(1)) / 1024
return desired_disk_size <= max_size_gb | [
"def",
"can_accomodate",
"(",
"self",
",",
"logical_disk",
")",
":",
"raid_level",
"=",
"constants",
".",
"RAID_LEVEL_INPUT_TO_HPSSA_MAPPING",
".",
"get",
"(",
"logical_disk",
"[",
"'raid_level'",
"]",
",",
"logical_disk",
"[",
"'raid_level'",
"]",
")",
"args",
... | Check if this RAID array can accomodate the logical disk.
This method uses hpssacli/ssacli command's option to check if the
logical disk with desired size and RAID level can be created
on this RAID array.
:param logical_disk: Dictionary of logical disk to be created.
:returns: True, if logical disk can be created on the RAID array
False, otherwise. | [
"Check",
"if",
"this",
"RAID",
"array",
"can",
"accomodate",
"the",
"logical",
"disk",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L517-L562 | train | 41,875 |
def get_physical_drive_dict(self):
    """Return the details of this physical drive as a dictionary.

    A drive whose parent is a RaidArray is reported as 'active' with
    the array's controller; an unassigned drive is reported as 'ready'
    with its direct parent controller.
    """
    in_raid_array = isinstance(self.parent, RaidArray)
    if in_raid_array:
        controller = self.parent.parent.id
    else:
        controller = self.parent.id
    return {'size_gb': self.size_gb,
            'controller': controller,
            'id': self.id,
            'disk_type': self.disk_type,
            'interface_type': self.interface_type,
            'model': self.model,
            'firmware': self.firmware,
            'status': 'active' if in_raid_array else 'ready',
            'erase_status': self.erase_status}
'erase_status': self.erase_status} | python | def get_physical_drive_dict(self):
"""Returns a dictionary of with the details of the physical drive."""
if isinstance(self.parent, RaidArray):
controller = self.parent.parent.id
status = 'active'
else:
controller = self.parent.id
status = 'ready'
return {'size_gb': self.size_gb,
'controller': controller,
'id': self.id,
'disk_type': self.disk_type,
'interface_type': self.interface_type,
'model': self.model,
'firmware': self.firmware,
'status': status,
'erase_status': self.erase_status} | [
"def",
"get_physical_drive_dict",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"parent",
",",
"RaidArray",
")",
":",
"controller",
"=",
"self",
".",
"parent",
".",
"parent",
".",
"id",
"status",
"=",
"'active'",
"else",
":",
"controller",
... | Returns a dictionary of with the details of the physical drive. | [
"Returns",
"a",
"dictionary",
"of",
"with",
"the",
"details",
"of",
"the",
"physical",
"drive",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L686-L704 | train | 41,876 |
inveniosoftware/invenio-files-rest | invenio_files_rest/helpers.py | send_stream | def send_stream(stream, filename, size, mtime, mimetype=None, restricted=True,
as_attachment=False, etag=None, content_md5=None,
chunk_size=None, conditional=True, trusted=False):
"""Send the contents of a file to the client.
.. warning::
It is very easy to be exposed to Cross-Site Scripting (XSS) attacks if
you serve user uploaded files. Here are some recommendations:
1. Serve user uploaded files from a separate domain
(not a subdomain). This way a malicious file can only attack
other user uploaded files.
2. Prevent the browser from rendering and executing HTML files (by
setting ``trusted=False``).
3. Force the browser to download the file as an attachment
(``as_attachment=True``).
:param stream: The file stream to send.
:param filename: The file name.
:param size: The file size.
:param mtime: A Unix timestamp that represents last modified time (UTC).
:param mimetype: The file mimetype. If ``None``, the module will try to
guess. (Default: ``None``)
:param restricted: If the file is not restricted, the module will set the
cache-control. (Default: ``True``)
:param as_attachment: If the file is an attachment. (Default: ``False``)
:param etag: If defined, it will be set as HTTP E-Tag.
:param content_md5: If defined, a HTTP Content-MD5 header will be set.
:param chunk_size: The chunk size.
:param conditional: Make the response conditional to the request.
(Default: ``True``)
:param trusted: Do not enable this option unless you know what you are
doing. By default this function will send HTTP headers and MIME types
that prevents your browser from rendering e.g. a HTML file which could
contain a malicious script tag.
(Default: ``False``)
:returns: A Flask response instance.
"""
# NOTE(review): leading indentation was lost in this extract; every
# code token below is unchanged from the original.
chunk_size = chunk_size_or_default(chunk_size)
# Guess mimetype from filename if not provided.
if mimetype is None and filename:
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
# Construct headers
headers = Headers()
headers['Content-Length'] = size
if content_md5:
headers['Content-MD5'] = content_md5
# Untrusted content: harden the response with defensive HTTP headers
# so the browser never renders or executes the payload inline.
if not trusted:
# Sanitize MIME type
mimetype = sanitize_mimetype(mimetype, filename=filename)
# See https://www.owasp.org/index.php/OWASP_Secure_Headers_Project
# Prevent JavaScript execution
headers['Content-Security-Policy'] = "default-src 'none';"
# Prevent MIME type sniffing for browser.
headers['X-Content-Type-Options'] = 'nosniff'
# Prevent opening of downloaded file by IE
headers['X-Download-Options'] = 'noopen'
# Prevent cross domain requests from Flash/Acrobat.
headers['X-Permitted-Cross-Domain-Policies'] = 'none'
# Prevent files from being embedded in frame, iframe and object tags.
headers['X-Frame-Options'] = 'deny'
# Enable XSS protection (IE, Chrome, Safari)
headers['X-XSS-Protection'] = '1; mode=block'
# Force Content-Disposition for application/octet-stream to prevent
# Content-Type sniffing.
if as_attachment or mimetype == 'application/octet-stream':
# See https://github.com/pallets/flask/commit/0049922f2e690a6d
# Latin-1-encodable names use plain 'filename'; anything else falls
# back to the RFC 5987 'filename*' extended form.
try:
filenames = {'filename': filename.encode('latin-1')}
except UnicodeEncodeError:
filenames = {'filename*': "UTF-8''%s" % url_quote(filename)}
encoded_filename = (unicodedata.normalize('NFKD', filename)
.encode('latin-1', 'ignore'))
if encoded_filename:
filenames['filename'] = encoded_filename
headers.add('Content-Disposition', 'attachment', **filenames)
else:
headers.add('Content-Disposition', 'inline')
# Construct response object.
# direct_passthrough hands the file wrapper to the WSGI server
# unmodified so the stream is sent in chunk_size pieces.
rv = current_app.response_class(
FileWrapper(stream, buffer_size=chunk_size),
mimetype=mimetype,
headers=headers,
direct_passthrough=True,
)
# Set etag if defined
if etag:
rv.set_etag(etag)
# Set last modified time
if mtime is not None:
rv.last_modified = int(mtime)
# Set cache-control
if not restricted:
rv.cache_control.public = True
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
# Honor If-None-Match / If-Modified-Since etc. when requested.
if conditional:
rv = rv.make_conditional(request)
return rv | python | def send_stream(stream, filename, size, mtime, mimetype=None, restricted=True,
as_attachment=False, etag=None, content_md5=None,
chunk_size=None, conditional=True, trusted=False):
"""Send the contents of a file to the client.
.. warning::
It is very easy to be exposed to Cross-Site Scripting (XSS) attacks if
you serve user uploaded files. Here are some recommendations:
1. Serve user uploaded files from a separate domain
(not a subdomain). This way a malicious file can only attack
other user uploaded files.
2. Prevent the browser from rendering and executing HTML files (by
setting ``trusted=False``).
3. Force the browser to download the file as an attachment
(``as_attachment=True``).
:param stream: The file stream to send.
:param filename: The file name.
:param size: The file size.
:param mtime: A Unix timestamp that represents last modified time (UTC).
:param mimetype: The file mimetype. If ``None``, the module will try to
guess. (Default: ``None``)
:param restricted: If the file is not restricted, the module will set the
cache-control. (Default: ``True``)
:param as_attachment: If the file is an attachment. (Default: ``False``)
:param etag: If defined, it will be set as HTTP E-Tag.
:param content_md5: If defined, a HTTP Content-MD5 header will be set.
:param chunk_size: The chunk size.
:param conditional: Make the response conditional to the request.
(Default: ``True``)
:param trusted: Do not enable this option unless you know what you are
doing. By default this function will send HTTP headers and MIME types
that prevents your browser from rendering e.g. a HTML file which could
contain a malicious script tag.
(Default: ``False``)
:returns: A Flask response instance.
"""
chunk_size = chunk_size_or_default(chunk_size)
# Guess mimetype from filename if not provided.
if mimetype is None and filename:
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
# Construct headers
headers = Headers()
headers['Content-Length'] = size
if content_md5:
headers['Content-MD5'] = content_md5
if not trusted:
# Sanitize MIME type
mimetype = sanitize_mimetype(mimetype, filename=filename)
# See https://www.owasp.org/index.php/OWASP_Secure_Headers_Project
# Prevent JavaScript execution
headers['Content-Security-Policy'] = "default-src 'none';"
# Prevent MIME type sniffing for browser.
headers['X-Content-Type-Options'] = 'nosniff'
# Prevent opening of downloaded file by IE
headers['X-Download-Options'] = 'noopen'
# Prevent cross domain requests from Flash/Acrobat.
headers['X-Permitted-Cross-Domain-Policies'] = 'none'
# Prevent files from being embedded in frame, iframe and object tags.
headers['X-Frame-Options'] = 'deny'
# Enable XSS protection (IE, Chrome, Safari)
headers['X-XSS-Protection'] = '1; mode=block'
# Force Content-Disposition for application/octet-stream to prevent
# Content-Type sniffing.
if as_attachment or mimetype == 'application/octet-stream':
# See https://github.com/pallets/flask/commit/0049922f2e690a6d
try:
filenames = {'filename': filename.encode('latin-1')}
except UnicodeEncodeError:
filenames = {'filename*': "UTF-8''%s" % url_quote(filename)}
encoded_filename = (unicodedata.normalize('NFKD', filename)
.encode('latin-1', 'ignore'))
if encoded_filename:
filenames['filename'] = encoded_filename
headers.add('Content-Disposition', 'attachment', **filenames)
else:
headers.add('Content-Disposition', 'inline')
# Construct response object.
rv = current_app.response_class(
FileWrapper(stream, buffer_size=chunk_size),
mimetype=mimetype,
headers=headers,
direct_passthrough=True,
)
# Set etag if defined
if etag:
rv.set_etag(etag)
# Set last modified time
if mtime is not None:
rv.last_modified = int(mtime)
# Set cache-control
if not restricted:
rv.cache_control.public = True
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if conditional:
rv = rv.make_conditional(request)
return rv | [
"def",
"send_stream",
"(",
"stream",
",",
"filename",
",",
"size",
",",
"mtime",
",",
"mimetype",
"=",
"None",
",",
"restricted",
"=",
"True",
",",
"as_attachment",
"=",
"False",
",",
"etag",
"=",
"None",
",",
"content_md5",
"=",
"None",
",",
"chunk_size... | Send the contents of a file to the client.
.. warning::
It is very easy to be exposed to Cross-Site Scripting (XSS) attacks if
you serve user uploaded files. Here are some recommendations:
1. Serve user uploaded files from a separate domain
(not a subdomain). This way a malicious file can only attack
other user uploaded files.
2. Prevent the browser from rendering and executing HTML files (by
setting ``trusted=False``).
3. Force the browser to download the file as an attachment
(``as_attachment=True``).
:param stream: The file stream to send.
:param filename: The file name.
:param size: The file size.
:param mtime: A Unix timestamp that represents last modified time (UTC).
:param mimetype: The file mimetype. If ``None``, the module will try to
guess. (Default: ``None``)
:param restricted: If the file is not restricted, the module will set the
cache-control. (Default: ``True``)
:param as_attachment: If the file is an attachment. (Default: ``False``)
:param etag: If defined, it will be set as HTTP E-Tag.
:param content_md5: If defined, a HTTP Content-MD5 header will be set.
:param chunk_size: The chunk size.
:param conditional: Make the response conditional to the request.
(Default: ``True``)
:param trusted: Do not enable this option unless you know what you are
doing. By default this function will send HTTP headers and MIME types
that prevents your browser from rendering e.g. a HTML file which could
contain a malicious script tag.
(Default: ``False``)
:returns: A Flask response instance. | [
"Send",
"the",
"contents",
"of",
"a",
"file",
"to",
"the",
"client",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/helpers.py#L64-L177 | train | 41,877 |
inveniosoftware/invenio-files-rest | invenio_files_rest/helpers.py | sanitize_mimetype | def sanitize_mimetype(mimetype, filename=None):
"""Sanitize a MIME type so the browser does not render the file."""
# Allow some few mime type like plain text, images and audio.
if mimetype in MIMETYPE_WHITELIST:
return mimetype
# Rewrite HTML, JavaScript, CSS etc to text/plain.
if mimetype in MIMETYPE_PLAINTEXT or \
(filename and filename.lower() in MIMETYPE_TEXTFILES):
return 'text/plain'
# Default
return 'application/octet-stream' | python | def sanitize_mimetype(mimetype, filename=None):
"""Sanitize a MIME type so the browser does not render the file."""
# Allow some few mime type like plain text, images and audio.
if mimetype in MIMETYPE_WHITELIST:
return mimetype
# Rewrite HTML, JavaScript, CSS etc to text/plain.
if mimetype in MIMETYPE_PLAINTEXT or \
(filename and filename.lower() in MIMETYPE_TEXTFILES):
return 'text/plain'
# Default
return 'application/octet-stream' | [
"def",
"sanitize_mimetype",
"(",
"mimetype",
",",
"filename",
"=",
"None",
")",
":",
"# Allow some few mime type like plain text, images and audio.",
"if",
"mimetype",
"in",
"MIMETYPE_WHITELIST",
":",
"return",
"mimetype",
"# Rewrite HTML, JavaScript, CSS etc to text/plain.",
"... | Sanitize a MIME type so the browser does not render the file. | [
"Sanitize",
"a",
"MIME",
"type",
"so",
"the",
"browser",
"does",
"not",
"render",
"the",
"file",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/helpers.py#L180-L190 | train | 41,878 |
inveniosoftware/invenio-files-rest | invenio_files_rest/helpers.py | make_path | def make_path(base_uri, path, filename, path_dimensions, split_length):
"""Generate a path as base location for file instance.
:param base_uri: The base URI.
:param path: The relative path.
:param path_dimensions: Number of chunks the path should be split into.
:param split_length: The length of any chunk.
:returns: A string representing the full path.
"""
assert len(path) > path_dimensions * split_length
uri_parts = []
for i in range(path_dimensions):
uri_parts.append(path[0:split_length])
path = path[split_length:]
uri_parts.append(path)
uri_parts.append(filename)
return os.path.join(base_uri, *uri_parts) | python | def make_path(base_uri, path, filename, path_dimensions, split_length):
"""Generate a path as base location for file instance.
:param base_uri: The base URI.
:param path: The relative path.
:param path_dimensions: Number of chunks the path should be split into.
:param split_length: The length of any chunk.
:returns: A string representing the full path.
"""
assert len(path) > path_dimensions * split_length
uri_parts = []
for i in range(path_dimensions):
uri_parts.append(path[0:split_length])
path = path[split_length:]
uri_parts.append(path)
uri_parts.append(filename)
return os.path.join(base_uri, *uri_parts) | [
"def",
"make_path",
"(",
"base_uri",
",",
"path",
",",
"filename",
",",
"path_dimensions",
",",
"split_length",
")",
":",
"assert",
"len",
"(",
"path",
")",
">",
"path_dimensions",
"*",
"split_length",
"uri_parts",
"=",
"[",
"]",
"for",
"i",
"in",
"range",... | Generate a path as base location for file instance.
:param base_uri: The base URI.
:param path: The relative path.
:param path_dimensions: Number of chunks the path should be split into.
:param split_length: The length of any chunk.
:returns: A string representing the full path. | [
"Generate",
"a",
"path",
"as",
"base",
"location",
"for",
"file",
"instance",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/helpers.py#L193-L211 | train | 41,879 |
inveniosoftware/invenio-files-rest | invenio_files_rest/helpers.py | populate_from_path | def populate_from_path(bucket, source, checksum=True, key_prefix='',
chunk_size=None):
"""Populate a ``bucket`` from all files in path.
:param bucket: The bucket (instance or id) to create the object in.
:param source: The file or directory path.
:param checksum: If ``True`` then a MD5 checksum will be computed for each
file. (Default: ``True``)
:param key_prefix: The key prefix for the bucket.
:param chunk_size: Chunk size to read from file.
:returns: A iterator for all
:class:`invenio_files_rest.models.ObjectVersion` instances.
"""
from .models import FileInstance, ObjectVersion
def create_file(key, path):
"""Create new ``ObjectVersion`` from path or existing ``FileInstance``.
It checks MD5 checksum and size of existing ``FileInstance``s.
"""
key = key_prefix + key
if checksum:
file_checksum = compute_md5_checksum(
open(path, 'rb'), chunk_size=chunk_size)
file_instance = FileInstance.query.filter_by(
checksum=file_checksum, size=os.path.getsize(path)
).first()
if file_instance:
return ObjectVersion.create(
bucket, key, _file_id=file_instance.id
)
return ObjectVersion.create(bucket, key, stream=open(path, 'rb'))
if os.path.isfile(source):
yield create_file(os.path.basename(source), source)
else:
for root, dirs, files in os.walk(source, topdown=False):
for name in files:
filename = os.path.join(root, name)
assert filename.startswith(source)
parts = [p for p in filename[len(source):].split(os.sep) if p]
yield create_file('/'.join(parts), os.path.join(root, name)) | python | def populate_from_path(bucket, source, checksum=True, key_prefix='',
chunk_size=None):
"""Populate a ``bucket`` from all files in path.
:param bucket: The bucket (instance or id) to create the object in.
:param source: The file or directory path.
:param checksum: If ``True`` then a MD5 checksum will be computed for each
file. (Default: ``True``)
:param key_prefix: The key prefix for the bucket.
:param chunk_size: Chunk size to read from file.
:returns: A iterator for all
:class:`invenio_files_rest.models.ObjectVersion` instances.
"""
from .models import FileInstance, ObjectVersion
def create_file(key, path):
"""Create new ``ObjectVersion`` from path or existing ``FileInstance``.
It checks MD5 checksum and size of existing ``FileInstance``s.
"""
key = key_prefix + key
if checksum:
file_checksum = compute_md5_checksum(
open(path, 'rb'), chunk_size=chunk_size)
file_instance = FileInstance.query.filter_by(
checksum=file_checksum, size=os.path.getsize(path)
).first()
if file_instance:
return ObjectVersion.create(
bucket, key, _file_id=file_instance.id
)
return ObjectVersion.create(bucket, key, stream=open(path, 'rb'))
if os.path.isfile(source):
yield create_file(os.path.basename(source), source)
else:
for root, dirs, files in os.walk(source, topdown=False):
for name in files:
filename = os.path.join(root, name)
assert filename.startswith(source)
parts = [p for p in filename[len(source):].split(os.sep) if p]
yield create_file('/'.join(parts), os.path.join(root, name)) | [
"def",
"populate_from_path",
"(",
"bucket",
",",
"source",
",",
"checksum",
"=",
"True",
",",
"key_prefix",
"=",
"''",
",",
"chunk_size",
"=",
"None",
")",
":",
"from",
".",
"models",
"import",
"FileInstance",
",",
"ObjectVersion",
"def",
"create_file",
"(",... | Populate a ``bucket`` from all files in path.
:param bucket: The bucket (instance or id) to create the object in.
:param source: The file or directory path.
:param checksum: If ``True`` then a MD5 checksum will be computed for each
file. (Default: ``True``)
:param key_prefix: The key prefix for the bucket.
:param chunk_size: Chunk size to read from file.
:returns: A iterator for all
:class:`invenio_files_rest.models.ObjectVersion` instances. | [
"Populate",
"a",
"bucket",
"from",
"all",
"files",
"in",
"path",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/helpers.py#L251-L293 | train | 41,880 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.set_options | def set_options(self, options):
"""
Sets instance variables based on an options dict
"""
# COMMAND LINE OPTIONS
self.wipe = options.get("wipe")
self.test_run = options.get("test_run")
self.quiet = options.get("test_run")
self.container_name = options.get("container")
self.verbosity = int(options.get("verbosity"))
self.syncmedia = options.get("syncmedia")
self.syncstatic = options.get("syncstatic")
if self.test_run:
self.verbosity = 2
cli_includes = options.get("includes")
cli_excludes = options.get("excludes")
# CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
if self.syncmedia and self.syncstatic:
raise CommandError("options --media and --static are mutually exclusive")
if not self.container_name:
if self.syncmedia:
self.container_name = CUMULUS["CONTAINER"]
elif self.syncstatic:
self.container_name = CUMULUS["STATIC_CONTAINER"]
else:
raise CommandError("must select one of the required options, either --media or --static")
settings_includes = CUMULUS["INCLUDE_LIST"]
settings_excludes = CUMULUS["EXCLUDE_LIST"]
# PATH SETTINGS
if self.syncmedia:
self.file_root = os.path.abspath(settings.MEDIA_ROOT)
self.file_url = settings.MEDIA_URL
elif self.syncstatic:
self.file_root = os.path.abspath(settings.STATIC_ROOT)
self.file_url = settings.STATIC_URL
if not self.file_root.endswith("/"):
self.file_root = self.file_root + "/"
if self.file_url.startswith("/"):
self.file_url = self.file_url[1:]
# SYNCSTATIC VARS
# combine includes and excludes from the cli and django settings file
self.includes = list(set(cli_includes + settings_includes))
self.excludes = list(set(cli_excludes + settings_excludes))
# transform glob patterns to regular expressions
self.local_filenames = []
self.create_count = 0
self.upload_count = 0
self.update_count = 0
self.skip_count = 0
self.delete_count = 0 | python | def set_options(self, options):
"""
Sets instance variables based on an options dict
"""
# COMMAND LINE OPTIONS
self.wipe = options.get("wipe")
self.test_run = options.get("test_run")
self.quiet = options.get("test_run")
self.container_name = options.get("container")
self.verbosity = int(options.get("verbosity"))
self.syncmedia = options.get("syncmedia")
self.syncstatic = options.get("syncstatic")
if self.test_run:
self.verbosity = 2
cli_includes = options.get("includes")
cli_excludes = options.get("excludes")
# CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
if self.syncmedia and self.syncstatic:
raise CommandError("options --media and --static are mutually exclusive")
if not self.container_name:
if self.syncmedia:
self.container_name = CUMULUS["CONTAINER"]
elif self.syncstatic:
self.container_name = CUMULUS["STATIC_CONTAINER"]
else:
raise CommandError("must select one of the required options, either --media or --static")
settings_includes = CUMULUS["INCLUDE_LIST"]
settings_excludes = CUMULUS["EXCLUDE_LIST"]
# PATH SETTINGS
if self.syncmedia:
self.file_root = os.path.abspath(settings.MEDIA_ROOT)
self.file_url = settings.MEDIA_URL
elif self.syncstatic:
self.file_root = os.path.abspath(settings.STATIC_ROOT)
self.file_url = settings.STATIC_URL
if not self.file_root.endswith("/"):
self.file_root = self.file_root + "/"
if self.file_url.startswith("/"):
self.file_url = self.file_url[1:]
# SYNCSTATIC VARS
# combine includes and excludes from the cli and django settings file
self.includes = list(set(cli_includes + settings_includes))
self.excludes = list(set(cli_excludes + settings_excludes))
# transform glob patterns to regular expressions
self.local_filenames = []
self.create_count = 0
self.upload_count = 0
self.update_count = 0
self.skip_count = 0
self.delete_count = 0 | [
"def",
"set_options",
"(",
"self",
",",
"options",
")",
":",
"# COMMAND LINE OPTIONS",
"self",
".",
"wipe",
"=",
"options",
".",
"get",
"(",
"\"wipe\"",
")",
"self",
".",
"test_run",
"=",
"options",
".",
"get",
"(",
"\"test_run\"",
")",
"self",
".",
"qui... | Sets instance variables based on an options dict | [
"Sets",
"instance",
"variables",
"based",
"on",
"an",
"options",
"dict"
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L45-L97 | train | 41,881 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.match_cloud | def match_cloud(self, includes, excludes):
"""
Returns the cloud objects that match the include and exclude patterns.
"""
cloud_objs = [cloud_obj.name for cloud_obj in self.container.get_objects()]
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
excludes = [o for o in cloud_objs if re.match(excludes_pattern, o)]
includes = [o for o in cloud_objs if re.match(includes_pattern, o)]
return [o for o in includes if o not in excludes] | python | def match_cloud(self, includes, excludes):
"""
Returns the cloud objects that match the include and exclude patterns.
"""
cloud_objs = [cloud_obj.name for cloud_obj in self.container.get_objects()]
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
excludes = [o for o in cloud_objs if re.match(excludes_pattern, o)]
includes = [o for o in cloud_objs if re.match(includes_pattern, o)]
return [o for o in includes if o not in excludes] | [
"def",
"match_cloud",
"(",
"self",
",",
"includes",
",",
"excludes",
")",
":",
"cloud_objs",
"=",
"[",
"cloud_obj",
".",
"name",
"for",
"cloud_obj",
"in",
"self",
".",
"container",
".",
"get_objects",
"(",
")",
"]",
"includes_pattern",
"=",
"r\"|\"",
".",
... | Returns the cloud objects that match the include and exclude patterns. | [
"Returns",
"the",
"cloud",
"objects",
"that",
"match",
"the",
"include",
"and",
"exclude",
"patterns",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L143-L152 | train | 41,882 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.upload_files | def upload_files(self, abspaths, relpaths, remote_objects):
"""
Determines files to be uploaded and call ``upload_file`` on each.
"""
for relpath in relpaths:
abspath = [p for p in abspaths if p[len(self.file_root):] == relpath][0]
cloud_datetime = remote_objects[relpath] if relpath in remote_objects else None
local_datetime = datetime.datetime.utcfromtimestamp(os.stat(abspath).st_mtime)
if cloud_datetime and local_datetime < cloud_datetime:
self.skip_count += 1
if not self.quiet:
print("Skipped {0}: not modified.".format(relpath))
continue
if relpath in remote_objects:
self.update_count += 1
else:
self.create_count += 1
self.upload_file(abspath, relpath) | python | def upload_files(self, abspaths, relpaths, remote_objects):
"""
Determines files to be uploaded and call ``upload_file`` on each.
"""
for relpath in relpaths:
abspath = [p for p in abspaths if p[len(self.file_root):] == relpath][0]
cloud_datetime = remote_objects[relpath] if relpath in remote_objects else None
local_datetime = datetime.datetime.utcfromtimestamp(os.stat(abspath).st_mtime)
if cloud_datetime and local_datetime < cloud_datetime:
self.skip_count += 1
if not self.quiet:
print("Skipped {0}: not modified.".format(relpath))
continue
if relpath in remote_objects:
self.update_count += 1
else:
self.create_count += 1
self.upload_file(abspath, relpath) | [
"def",
"upload_files",
"(",
"self",
",",
"abspaths",
",",
"relpaths",
",",
"remote_objects",
")",
":",
"for",
"relpath",
"in",
"relpaths",
":",
"abspath",
"=",
"[",
"p",
"for",
"p",
"in",
"abspaths",
"if",
"p",
"[",
"len",
"(",
"self",
".",
"file_root"... | Determines files to be uploaded and call ``upload_file`` on each. | [
"Determines",
"files",
"to",
"be",
"uploaded",
"and",
"call",
"upload_file",
"on",
"each",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L177-L195 | train | 41,883 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.upload_file | def upload_file(self, abspath, cloud_filename):
"""
Uploads a file to the container.
"""
if not self.test_run:
content = open(abspath, "rb")
content_type = get_content_type(cloud_filename, content)
headers = get_headers(cloud_filename, content_type)
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
size = content.size
else:
size = os.stat(abspath).st_size
self.container.create(
obj_name=cloud_filename,
data=content,
content_type=content_type,
content_length=size,
content_encoding=headers.get("Content-Encoding", None),
headers=headers,
ttl=CUMULUS["FILE_TTL"],
etag=None,
)
self.upload_count += 1
if not self.quiet or self.verbosity > 1:
print("Uploaded: {0}".format(cloud_filename)) | python | def upload_file(self, abspath, cloud_filename):
"""
Uploads a file to the container.
"""
if not self.test_run:
content = open(abspath, "rb")
content_type = get_content_type(cloud_filename, content)
headers = get_headers(cloud_filename, content_type)
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
size = content.size
else:
size = os.stat(abspath).st_size
self.container.create(
obj_name=cloud_filename,
data=content,
content_type=content_type,
content_length=size,
content_encoding=headers.get("Content-Encoding", None),
headers=headers,
ttl=CUMULUS["FILE_TTL"],
etag=None,
)
self.upload_count += 1
if not self.quiet or self.verbosity > 1:
print("Uploaded: {0}".format(cloud_filename)) | [
"def",
"upload_file",
"(",
"self",
",",
"abspath",
",",
"cloud_filename",
")",
":",
"if",
"not",
"self",
".",
"test_run",
":",
"content",
"=",
"open",
"(",
"abspath",
",",
"\"rb\"",
")",
"content_type",
"=",
"get_content_type",
"(",
"cloud_filename",
",",
... | Uploads a file to the container. | [
"Uploads",
"a",
"file",
"to",
"the",
"container",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L197-L224 | train | 41,884 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.delete_extra_files | def delete_extra_files(self, relpaths, cloud_objs):
"""
Deletes any objects from the container that do not exist locally.
"""
for cloud_obj in cloud_objs:
if cloud_obj not in relpaths:
if not self.test_run:
self.delete_cloud_obj(cloud_obj)
self.delete_count += 1
if not self.quiet or self.verbosity > 1:
print("Deleted: {0}".format(cloud_obj)) | python | def delete_extra_files(self, relpaths, cloud_objs):
"""
Deletes any objects from the container that do not exist locally.
"""
for cloud_obj in cloud_objs:
if cloud_obj not in relpaths:
if not self.test_run:
self.delete_cloud_obj(cloud_obj)
self.delete_count += 1
if not self.quiet or self.verbosity > 1:
print("Deleted: {0}".format(cloud_obj)) | [
"def",
"delete_extra_files",
"(",
"self",
",",
"relpaths",
",",
"cloud_objs",
")",
":",
"for",
"cloud_obj",
"in",
"cloud_objs",
":",
"if",
"cloud_obj",
"not",
"in",
"relpaths",
":",
"if",
"not",
"self",
".",
"test_run",
":",
"self",
".",
"delete_cloud_obj",
... | Deletes any objects from the container that do not exist locally. | [
"Deletes",
"any",
"objects",
"from",
"the",
"container",
"that",
"do",
"not",
"exist",
"locally",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L226-L236 | train | 41,885 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.delete_cloud_obj | def delete_cloud_obj(self, cloud_obj):
"""
Deletes an object from the container.
"""
self._connection.delete_object(
container=self.container_name,
obj=cloud_obj,
) | python | def delete_cloud_obj(self, cloud_obj):
"""
Deletes an object from the container.
"""
self._connection.delete_object(
container=self.container_name,
obj=cloud_obj,
) | [
"def",
"delete_cloud_obj",
"(",
"self",
",",
"cloud_obj",
")",
":",
"self",
".",
"_connection",
".",
"delete_object",
"(",
"container",
"=",
"self",
".",
"container_name",
",",
"obj",
"=",
"cloud_obj",
",",
")"
] | Deletes an object from the container. | [
"Deletes",
"an",
"object",
"from",
"the",
"container",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L238-L245 | train | 41,886 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.wipe_container | def wipe_container(self):
"""
Completely wipes out the contents of the container.
"""
if self.test_run:
print("Wipe would delete {0} objects.".format(len(self.container.object_count)))
else:
if not self.quiet or self.verbosity > 1:
print("Deleting {0} objects...".format(len(self.container.object_count)))
self._connection.delete_all_objects() | python | def wipe_container(self):
"""
Completely wipes out the contents of the container.
"""
if self.test_run:
print("Wipe would delete {0} objects.".format(len(self.container.object_count)))
else:
if not self.quiet or self.verbosity > 1:
print("Deleting {0} objects...".format(len(self.container.object_count)))
self._connection.delete_all_objects() | [
"def",
"wipe_container",
"(",
"self",
")",
":",
"if",
"self",
".",
"test_run",
":",
"print",
"(",
"\"Wipe would delete {0} objects.\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"container",
".",
"object_count",
")",
")",
")",
"else",
":",
"if",
"not",
... | Completely wipes out the contents of the container. | [
"Completely",
"wipes",
"out",
"the",
"contents",
"of",
"the",
"container",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L247-L256 | train | 41,887 |
django-cumulus/django-cumulus | cumulus/management/commands/syncfiles.py | Command.print_tally | def print_tally(self):
"""
Prints the final tally to stdout.
"""
self.update_count = self.upload_count - self.create_count
if self.test_run:
print("Test run complete with the following results:")
print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
self.skip_count, self.create_count, self.update_count, self.delete_count)) | python | def print_tally(self):
"""
Prints the final tally to stdout.
"""
self.update_count = self.upload_count - self.create_count
if self.test_run:
print("Test run complete with the following results:")
print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
self.skip_count, self.create_count, self.update_count, self.delete_count)) | [
"def",
"print_tally",
"(",
"self",
")",
":",
"self",
".",
"update_count",
"=",
"self",
".",
"upload_count",
"-",
"self",
".",
"create_count",
"if",
"self",
".",
"test_run",
":",
"print",
"(",
"\"Test run complete with the following results:\"",
")",
"print",
"("... | Prints the final tally to stdout. | [
"Prints",
"the",
"final",
"tally",
"to",
"stdout",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L258-L266 | train | 41,888 |
django-cumulus/django-cumulus | cumulus/management/commands/container_list.py | Command.handle | def handle(self, *args, **options):
"""
Lists all the items in a container to stdout.
"""
self._connection = Auth()._get_connection()
if len(args) == 0:
containers = self._connection.list_containers()
if not containers:
print("No containers were found for this account.")
elif len(args) == 1:
containers = self._connection.list_container_object_names(args[0])
if not containers:
print("No matching container found.")
else:
raise CommandError("Pass one and only one [container_name] as an argument")
for container in containers:
print(container) | python | def handle(self, *args, **options):
"""
Lists all the items in a container to stdout.
"""
self._connection = Auth()._get_connection()
if len(args) == 0:
containers = self._connection.list_containers()
if not containers:
print("No containers were found for this account.")
elif len(args) == 1:
containers = self._connection.list_container_object_names(args[0])
if not containers:
print("No matching container found.")
else:
raise CommandError("Pass one and only one [container_name] as an argument")
for container in containers:
print(container) | [
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"_connection",
"=",
"Auth",
"(",
")",
".",
"_get_connection",
"(",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"containers",
"=",
"self",
"."... | Lists all the items in a container to stdout. | [
"Lists",
"all",
"the",
"items",
"in",
"a",
"container",
"to",
"stdout",
"."
] | 64feb07b857af28f226be4899e875c29405e261d | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/container_list.py#L13-L31 | train | 41,889 |
inveniosoftware/invenio-files-rest | invenio_files_rest/tasks.py | progress_updater | def progress_updater(size, total):
"""Progress reporter for checksum verification."""
current_task.update_state(
state=state('PROGRESS'),
meta=dict(size=size, total=total)
) | python | def progress_updater(size, total):
"""Progress reporter for checksum verification."""
current_task.update_state(
state=state('PROGRESS'),
meta=dict(size=size, total=total)
) | [
"def",
"progress_updater",
"(",
"size",
",",
"total",
")",
":",
"current_task",
".",
"update_state",
"(",
"state",
"=",
"state",
"(",
"'PROGRESS'",
")",
",",
"meta",
"=",
"dict",
"(",
"size",
"=",
"size",
",",
"total",
"=",
"total",
")",
")"
] | Progress reporter for checksum verification. | [
"Progress",
"reporter",
"for",
"checksum",
"verification",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L32-L37 | train | 41,890 |
inveniosoftware/invenio-files-rest | invenio_files_rest/tasks.py | verify_checksum | def verify_checksum(file_id, pessimistic=False, chunk_size=None, throws=True,
checksum_kwargs=None):
"""Verify checksum of a file instance.
:param file_id: The file ID.
"""
f = FileInstance.query.get(uuid.UUID(file_id))
# Anything might happen during the task, so being pessimistic and marking
# the file as unchecked is a reasonable precaution
if pessimistic:
f.clear_last_check()
db.session.commit()
f.verify_checksum(
progress_callback=progress_updater, chunk_size=chunk_size,
throws=throws, checksum_kwargs=checksum_kwargs)
db.session.commit() | python | def verify_checksum(file_id, pessimistic=False, chunk_size=None, throws=True,
checksum_kwargs=None):
"""Verify checksum of a file instance.
:param file_id: The file ID.
"""
f = FileInstance.query.get(uuid.UUID(file_id))
# Anything might happen during the task, so being pessimistic and marking
# the file as unchecked is a reasonable precaution
if pessimistic:
f.clear_last_check()
db.session.commit()
f.verify_checksum(
progress_callback=progress_updater, chunk_size=chunk_size,
throws=throws, checksum_kwargs=checksum_kwargs)
db.session.commit() | [
"def",
"verify_checksum",
"(",
"file_id",
",",
"pessimistic",
"=",
"False",
",",
"chunk_size",
"=",
"None",
",",
"throws",
"=",
"True",
",",
"checksum_kwargs",
"=",
"None",
")",
":",
"f",
"=",
"FileInstance",
".",
"query",
".",
"get",
"(",
"uuid",
".",
... | Verify checksum of a file instance.
:param file_id: The file ID. | [
"Verify",
"checksum",
"of",
"a",
"file",
"instance",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L41-L57 | train | 41,891 |
inveniosoftware/invenio-files-rest | invenio_files_rest/tasks.py | schedule_checksum_verification | def schedule_checksum_verification(frequency=None, batch_interval=None,
max_count=None, max_size=None,
files_query=None,
checksum_kwargs=None):
"""Schedule a batch of files for checksum verification.
The purpose of this task is to be periodically called through `celerybeat`,
in order achieve a repeated verification cycle of all file checksums, while
following a set of constraints in order to throttle the execution rate of
the checks.
:param dict frequency: Time period over which a full check of all files
should be performed. The argument is a dictionary that will be passed
as arguments to the `datetime.timedelta` class. Defaults to a month (30
days).
:param dict batch_interval: How often a batch is sent. If not supplied,
this information will be extracted, if possible, from the
celery.conf['CELERYBEAT_SCHEDULE'] entry of this task. The argument is
a dictionary that will be passed as arguments to the
`datetime.timedelta` class.
:param int max_count: Max count of files of a single batch. When set to `0`
it's automatically calculated to be distributed equally through the
number of total batches.
:param int max_size: Max size of a single batch in bytes. When set to `0`
it's automatically calculated to be distributed equally through the
number of total batches.
:param str files_query: Import path for a function returning a
FileInstance query for files that should be checked.
:param dict checksum_kwargs: Passed to ``FileInstance.verify_checksum``.
"""
assert max_count is not None or max_size is not None
frequency = timedelta(**frequency) if frequency else timedelta(days=30)
if batch_interval:
batch_interval = timedelta(**batch_interval)
else:
celery_schedule = current_celery.conf.get('CELERYBEAT_SCHEDULE', {})
batch_interval = batch_interval or next(
(v['schedule'] for v in celery_schedule.values()
if v.get('task') == schedule_checksum_verification.name), None)
if not batch_interval or not isinstance(batch_interval, timedelta):
raise Exception(u'No "batch_interval" could be decided')
total_batches = int(
frequency.total_seconds() / batch_interval.total_seconds())
files = obj_or_import_string(
files_query, default=default_checksum_verification_files_query)()
files = files.order_by(
sa.func.coalesce(FileInstance.last_check_at, date.min))
if max_count is not None:
all_files_count = files.count()
min_count = int(math.ceil(all_files_count / total_batches))
max_count = min_count if max_count == 0 else max_count
if max_count < min_count:
current_app.logger.warning(
u'The "max_count" you specified ({0}) is smaller than the '
'minimum batch file count required ({1}) in order to achieve '
'the file checks over the specified period ({2}).'
.format(max_count, min_count, frequency))
files = files.limit(max_count)
if max_size is not None:
all_files_size = db.session.query(
sa.func.sum(FileInstance.size)).scalar()
min_size = int(math.ceil(all_files_size / total_batches))
max_size = min_size if max_size == 0 else max_size
if max_size < min_size:
current_app.logger.warning(
u'The "max_size" you specified ({0}) is smaller than the '
'minimum batch total file size required ({1}) in order to '
'achieve the file checks over the specified period ({2}).'
.format(max_size, min_size, frequency))
files = files.yield_per(1000)
scheduled_file_ids = []
total_size = 0
for f in files:
# Add at least the first file, since it might be larger than "max_size"
scheduled_file_ids.append(str(f.id))
total_size += f.size
if max_size and max_size <= total_size:
break
group(
verify_checksum.s(
file_id, pessimistic=True, throws=False,
checksum_kwargs=(checksum_kwargs or {}))
for file_id in scheduled_file_ids
).apply_async() | python | def schedule_checksum_verification(frequency=None, batch_interval=None,
max_count=None, max_size=None,
files_query=None,
checksum_kwargs=None):
"""Schedule a batch of files for checksum verification.
The purpose of this task is to be periodically called through `celerybeat`,
in order achieve a repeated verification cycle of all file checksums, while
following a set of constraints in order to throttle the execution rate of
the checks.
:param dict frequency: Time period over which a full check of all files
should be performed. The argument is a dictionary that will be passed
as arguments to the `datetime.timedelta` class. Defaults to a month (30
days).
:param dict batch_interval: How often a batch is sent. If not supplied,
this information will be extracted, if possible, from the
celery.conf['CELERYBEAT_SCHEDULE'] entry of this task. The argument is
a dictionary that will be passed as arguments to the
`datetime.timedelta` class.
:param int max_count: Max count of files of a single batch. When set to `0`
it's automatically calculated to be distributed equally through the
number of total batches.
:param int max_size: Max size of a single batch in bytes. When set to `0`
it's automatically calculated to be distributed equally through the
number of total batches.
:param str files_query: Import path for a function returning a
FileInstance query for files that should be checked.
:param dict checksum_kwargs: Passed to ``FileInstance.verify_checksum``.
"""
assert max_count is not None or max_size is not None
frequency = timedelta(**frequency) if frequency else timedelta(days=30)
if batch_interval:
batch_interval = timedelta(**batch_interval)
else:
celery_schedule = current_celery.conf.get('CELERYBEAT_SCHEDULE', {})
batch_interval = batch_interval or next(
(v['schedule'] for v in celery_schedule.values()
if v.get('task') == schedule_checksum_verification.name), None)
if not batch_interval or not isinstance(batch_interval, timedelta):
raise Exception(u'No "batch_interval" could be decided')
total_batches = int(
frequency.total_seconds() / batch_interval.total_seconds())
files = obj_or_import_string(
files_query, default=default_checksum_verification_files_query)()
files = files.order_by(
sa.func.coalesce(FileInstance.last_check_at, date.min))
if max_count is not None:
all_files_count = files.count()
min_count = int(math.ceil(all_files_count / total_batches))
max_count = min_count if max_count == 0 else max_count
if max_count < min_count:
current_app.logger.warning(
u'The "max_count" you specified ({0}) is smaller than the '
'minimum batch file count required ({1}) in order to achieve '
'the file checks over the specified period ({2}).'
.format(max_count, min_count, frequency))
files = files.limit(max_count)
if max_size is not None:
all_files_size = db.session.query(
sa.func.sum(FileInstance.size)).scalar()
min_size = int(math.ceil(all_files_size / total_batches))
max_size = min_size if max_size == 0 else max_size
if max_size < min_size:
current_app.logger.warning(
u'The "max_size" you specified ({0}) is smaller than the '
'minimum batch total file size required ({1}) in order to '
'achieve the file checks over the specified period ({2}).'
.format(max_size, min_size, frequency))
files = files.yield_per(1000)
scheduled_file_ids = []
total_size = 0
for f in files:
# Add at least the first file, since it might be larger than "max_size"
scheduled_file_ids.append(str(f.id))
total_size += f.size
if max_size and max_size <= total_size:
break
group(
verify_checksum.s(
file_id, pessimistic=True, throws=False,
checksum_kwargs=(checksum_kwargs or {}))
for file_id in scheduled_file_ids
).apply_async() | [
"def",
"schedule_checksum_verification",
"(",
"frequency",
"=",
"None",
",",
"batch_interval",
"=",
"None",
",",
"max_count",
"=",
"None",
",",
"max_size",
"=",
"None",
",",
"files_query",
"=",
"None",
",",
"checksum_kwargs",
"=",
"None",
")",
":",
"assert",
... | Schedule a batch of files for checksum verification.
The purpose of this task is to be periodically called through `celerybeat`,
in order achieve a repeated verification cycle of all file checksums, while
following a set of constraints in order to throttle the execution rate of
the checks.
:param dict frequency: Time period over which a full check of all files
should be performed. The argument is a dictionary that will be passed
as arguments to the `datetime.timedelta` class. Defaults to a month (30
days).
:param dict batch_interval: How often a batch is sent. If not supplied,
this information will be extracted, if possible, from the
celery.conf['CELERYBEAT_SCHEDULE'] entry of this task. The argument is
a dictionary that will be passed as arguments to the
`datetime.timedelta` class.
:param int max_count: Max count of files of a single batch. When set to `0`
it's automatically calculated to be distributed equally through the
number of total batches.
:param int max_size: Max size of a single batch in bytes. When set to `0`
it's automatically calculated to be distributed equally through the
number of total batches.
:param str files_query: Import path for a function returning a
FileInstance query for files that should be checked.
:param dict checksum_kwargs: Passed to ``FileInstance.verify_checksum``. | [
"Schedule",
"a",
"batch",
"of",
"files",
"for",
"checksum",
"verification",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L66-L154 | train | 41,892 |
inveniosoftware/invenio-files-rest | invenio_files_rest/tasks.py | migrate_file | def migrate_file(src_id, location_name, post_fixity_check=False):
"""Task to migrate a file instance to a new location.
.. note:: If something goes wrong during the content copy, the destination
file instance is removed.
:param src_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param location_name: Where to migrate the file.
:param post_fixity_check: Verify checksum after migration.
(Default: ``False``)
"""
location = Location.get_by_name(location_name)
f_src = FileInstance.get(src_id)
# Create destination
f_dst = FileInstance.create()
db.session.commit()
try:
# Copy contents
f_dst.copy_contents(
f_src,
progress_callback=progress_updater,
default_location=location.uri,
)
db.session.commit()
except Exception:
# Remove destination file instance if an error occurred.
db.session.delete(f_dst)
db.session.commit()
raise
# Update all objects pointing to file.
ObjectVersion.relink_all(f_src, f_dst)
db.session.commit()
# Start a fixity check
if post_fixity_check:
verify_checksum.delay(str(f_dst.id)) | python | def migrate_file(src_id, location_name, post_fixity_check=False):
"""Task to migrate a file instance to a new location.
.. note:: If something goes wrong during the content copy, the destination
file instance is removed.
:param src_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param location_name: Where to migrate the file.
:param post_fixity_check: Verify checksum after migration.
(Default: ``False``)
"""
location = Location.get_by_name(location_name)
f_src = FileInstance.get(src_id)
# Create destination
f_dst = FileInstance.create()
db.session.commit()
try:
# Copy contents
f_dst.copy_contents(
f_src,
progress_callback=progress_updater,
default_location=location.uri,
)
db.session.commit()
except Exception:
# Remove destination file instance if an error occurred.
db.session.delete(f_dst)
db.session.commit()
raise
# Update all objects pointing to file.
ObjectVersion.relink_all(f_src, f_dst)
db.session.commit()
# Start a fixity check
if post_fixity_check:
verify_checksum.delay(str(f_dst.id)) | [
"def",
"migrate_file",
"(",
"src_id",
",",
"location_name",
",",
"post_fixity_check",
"=",
"False",
")",
":",
"location",
"=",
"Location",
".",
"get_by_name",
"(",
"location_name",
")",
"f_src",
"=",
"FileInstance",
".",
"get",
"(",
"src_id",
")",
"# Create de... | Task to migrate a file instance to a new location.
.. note:: If something goes wrong during the content copy, the destination
file instance is removed.
:param src_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param location_name: Where to migrate the file.
:param post_fixity_check: Verify checksum after migration.
(Default: ``False``) | [
"Task",
"to",
"migrate",
"a",
"file",
"instance",
"to",
"a",
"new",
"location",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L158-L196 | train | 41,893 |
inveniosoftware/invenio-files-rest | invenio_files_rest/tasks.py | remove_file_data | def remove_file_data(file_id, silent=True):
"""Remove file instance and associated data.
:param file_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param silent: It stops propagation of a possible arised IntegrityError
exception. (Default: ``True``)
:raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes
wrong and silent is set to ``False``.
"""
try:
# First remove FileInstance from database and commit transaction to
# ensure integrity constraints are checked and enforced.
f = FileInstance.get(file_id)
if not f.writable:
return
f.delete()
db.session.commit()
# Next, remove the file on disk. This leaves the possibility of having
# a file on disk dangling in case the database removal works, and the
# disk file removal doesn't work.
f.storage().delete()
except IntegrityError:
if not silent:
raise | python | def remove_file_data(file_id, silent=True):
"""Remove file instance and associated data.
:param file_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param silent: It stops propagation of a possible arised IntegrityError
exception. (Default: ``True``)
:raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes
wrong and silent is set to ``False``.
"""
try:
# First remove FileInstance from database and commit transaction to
# ensure integrity constraints are checked and enforced.
f = FileInstance.get(file_id)
if not f.writable:
return
f.delete()
db.session.commit()
# Next, remove the file on disk. This leaves the possibility of having
# a file on disk dangling in case the database removal works, and the
# disk file removal doesn't work.
f.storage().delete()
except IntegrityError:
if not silent:
raise | [
"def",
"remove_file_data",
"(",
"file_id",
",",
"silent",
"=",
"True",
")",
":",
"try",
":",
"# First remove FileInstance from database and commit transaction to",
"# ensure integrity constraints are checked and enforced.",
"f",
"=",
"FileInstance",
".",
"get",
"(",
"file_id"... | Remove file instance and associated data.
:param file_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param silent: It stops propagation of a possible arised IntegrityError
exception. (Default: ``True``)
:raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes
wrong and silent is set to ``False``. | [
"Remove",
"file",
"instance",
"and",
"associated",
"data",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L200-L223 | train | 41,894 |
inveniosoftware/invenio-files-rest | invenio_files_rest/tasks.py | merge_multipartobject | def merge_multipartobject(upload_id, version_id=None):
"""Merge multipart object.
:param upload_id: The :class:`invenio_files_rest.models.MultipartObject`
upload ID.
:param version_id: Optionally you can define which file version.
(Default: ``None``)
:returns: The :class:`invenio_files_rest.models.ObjectVersion` version
ID.
"""
mp = MultipartObject.query.filter_by(upload_id=upload_id).one_or_none()
if not mp:
raise RuntimeError('Upload ID does not exists.')
if not mp.completed:
raise RuntimeError('MultipartObject is not completed.')
try:
obj = mp.merge_parts(
version_id=version_id,
progress_callback=progress_updater
)
db.session.commit()
return str(obj.version_id)
except Exception:
db.session.rollback()
raise | python | def merge_multipartobject(upload_id, version_id=None):
"""Merge multipart object.
:param upload_id: The :class:`invenio_files_rest.models.MultipartObject`
upload ID.
:param version_id: Optionally you can define which file version.
(Default: ``None``)
:returns: The :class:`invenio_files_rest.models.ObjectVersion` version
ID.
"""
mp = MultipartObject.query.filter_by(upload_id=upload_id).one_or_none()
if not mp:
raise RuntimeError('Upload ID does not exists.')
if not mp.completed:
raise RuntimeError('MultipartObject is not completed.')
try:
obj = mp.merge_parts(
version_id=version_id,
progress_callback=progress_updater
)
db.session.commit()
return str(obj.version_id)
except Exception:
db.session.rollback()
raise | [
"def",
"merge_multipartobject",
"(",
"upload_id",
",",
"version_id",
"=",
"None",
")",
":",
"mp",
"=",
"MultipartObject",
".",
"query",
".",
"filter_by",
"(",
"upload_id",
"=",
"upload_id",
")",
".",
"one_or_none",
"(",
")",
"if",
"not",
"mp",
":",
"raise"... | Merge multipart object.
:param upload_id: The :class:`invenio_files_rest.models.MultipartObject`
upload ID.
:param version_id: Optionally you can define which file version.
(Default: ``None``)
:returns: The :class:`invenio_files_rest.models.ObjectVersion` version
ID. | [
"Merge",
"multipart",
"object",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L227-L252 | train | 41,895 |
inveniosoftware/invenio-files-rest | invenio_files_rest/tasks.py | remove_expired_multipartobjects | def remove_expired_multipartobjects():
"""Remove expired multipart objects."""
delta = current_app.config['FILES_REST_MULTIPART_EXPIRES']
expired_dt = datetime.utcnow() - delta
file_ids = []
for mp in MultipartObject.query_expired(expired_dt):
file_ids.append(str(mp.file_id))
mp.delete()
for fid in file_ids:
remove_file_data.delay(fid) | python | def remove_expired_multipartobjects():
"""Remove expired multipart objects."""
delta = current_app.config['FILES_REST_MULTIPART_EXPIRES']
expired_dt = datetime.utcnow() - delta
file_ids = []
for mp in MultipartObject.query_expired(expired_dt):
file_ids.append(str(mp.file_id))
mp.delete()
for fid in file_ids:
remove_file_data.delay(fid) | [
"def",
"remove_expired_multipartobjects",
"(",
")",
":",
"delta",
"=",
"current_app",
".",
"config",
"[",
"'FILES_REST_MULTIPART_EXPIRES'",
"]",
"expired_dt",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"delta",
"file_ids",
"=",
"[",
"]",
"for",
"mp",
"in",... | Remove expired multipart objects. | [
"Remove",
"expired",
"multipart",
"objects",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L256-L267 | train | 41,896 |
inveniosoftware/invenio-files-rest | invenio_files_rest/storage/pyfs.py | pyfs_storage_factory | def pyfs_storage_factory(fileinstance=None, default_location=None,
default_storage_class=None,
filestorage_class=PyFSFileStorage, fileurl=None,
size=None, modified=None, clean_dir=True):
"""Get factory function for creating a PyFS file storage instance."""
# Either the FileInstance needs to be specified or all filestorage
# class parameters need to be specified
assert fileinstance or (fileurl and size)
if fileinstance:
# FIXME: Code here should be refactored since it assumes a lot on the
# directory structure where the file instances are written
fileurl = None
size = fileinstance.size
modified = fileinstance.updated
if fileinstance.uri:
# Use already existing URL.
fileurl = fileinstance.uri
else:
assert default_location
# Generate a new URL.
fileurl = make_path(
default_location,
str(fileinstance.id),
'data',
current_app.config['FILES_REST_STORAGE_PATH_DIMENSIONS'],
current_app.config['FILES_REST_STORAGE_PATH_SPLIT_LENGTH'],
)
return filestorage_class(
fileurl, size=size, modified=modified, clean_dir=clean_dir) | python | def pyfs_storage_factory(fileinstance=None, default_location=None,
default_storage_class=None,
filestorage_class=PyFSFileStorage, fileurl=None,
size=None, modified=None, clean_dir=True):
"""Get factory function for creating a PyFS file storage instance."""
# Either the FileInstance needs to be specified or all filestorage
# class parameters need to be specified
assert fileinstance or (fileurl and size)
if fileinstance:
# FIXME: Code here should be refactored since it assumes a lot on the
# directory structure where the file instances are written
fileurl = None
size = fileinstance.size
modified = fileinstance.updated
if fileinstance.uri:
# Use already existing URL.
fileurl = fileinstance.uri
else:
assert default_location
# Generate a new URL.
fileurl = make_path(
default_location,
str(fileinstance.id),
'data',
current_app.config['FILES_REST_STORAGE_PATH_DIMENSIONS'],
current_app.config['FILES_REST_STORAGE_PATH_SPLIT_LENGTH'],
)
return filestorage_class(
fileurl, size=size, modified=modified, clean_dir=clean_dir) | [
"def",
"pyfs_storage_factory",
"(",
"fileinstance",
"=",
"None",
",",
"default_location",
"=",
"None",
",",
"default_storage_class",
"=",
"None",
",",
"filestorage_class",
"=",
"PyFSFileStorage",
",",
"fileurl",
"=",
"None",
",",
"size",
"=",
"None",
",",
"modif... | Get factory function for creating a PyFS file storage instance. | [
"Get",
"factory",
"function",
"for",
"creating",
"a",
"PyFS",
"file",
"storage",
"instance",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/storage/pyfs.py#L131-L162 | train | 41,897 |
inveniosoftware/invenio-files-rest | invenio_files_rest/storage/pyfs.py | PyFSFileStorage._get_fs | def _get_fs(self, create_dir=True):
"""Return tuple with filesystem and filename."""
filedir = dirname(self.fileurl)
filename = basename(self.fileurl)
return (
opener.opendir(filedir, writeable=True, create_dir=create_dir),
filename
) | python | def _get_fs(self, create_dir=True):
"""Return tuple with filesystem and filename."""
filedir = dirname(self.fileurl)
filename = basename(self.fileurl)
return (
opener.opendir(filedir, writeable=True, create_dir=create_dir),
filename
) | [
"def",
"_get_fs",
"(",
"self",
",",
"create_dir",
"=",
"True",
")",
":",
"filedir",
"=",
"dirname",
"(",
"self",
".",
"fileurl",
")",
"filename",
"=",
"basename",
"(",
"self",
".",
"fileurl",
")",
"return",
"(",
"opener",
".",
"opendir",
"(",
"filedir"... | Return tuple with filesystem and filename. | [
"Return",
"tuple",
"with",
"filesystem",
"and",
"filename",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/storage/pyfs.py#L42-L50 | train | 41,898 |
inveniosoftware/invenio-files-rest | invenio_files_rest/storage/pyfs.py | PyFSFileStorage.open | def open(self, mode='rb'):
"""Open file.
The caller is responsible for closing the file.
"""
fs, path = self._get_fs()
return fs.open(path, mode=mode) | python | def open(self, mode='rb'):
"""Open file.
The caller is responsible for closing the file.
"""
fs, path = self._get_fs()
return fs.open(path, mode=mode) | [
"def",
"open",
"(",
"self",
",",
"mode",
"=",
"'rb'",
")",
":",
"fs",
",",
"path",
"=",
"self",
".",
"_get_fs",
"(",
")",
"return",
"fs",
".",
"open",
"(",
"path",
",",
"mode",
"=",
"mode",
")"
] | Open file.
The caller is responsible for closing the file. | [
"Open",
"file",
"."
] | 59a950da61cc8d5882a03c6fde6db2e2ed10befd | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/storage/pyfs.py#L52-L58 | train | 41,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.