code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def saveTemplate(self):
"""Get a json structure of the current inputs,
to be able to load later"""
savedict = {}
for comp_editor in self.widgets():
stim = comp_editor.component()
comp_editor.saveToObject()
savedict[stim.name] = stim.stateDict()
savedict['delay'] = self.delaySpnbx.value()
return savedict | def function[saveTemplate, parameter[self]]:
constant[Get a json structure of the current inputs,
to be able to load later]
variable[savedict] assign[=] dictionary[[], []]
for taget[name[comp_editor]] in starred[call[name[self].widgets, parameter[]]] begin[:]
variable[stim] assign[=] call[name[comp_editor].component, parameter[]]
call[name[comp_editor].saveToObject, parameter[]]
call[name[savedict]][name[stim].name] assign[=] call[name[stim].stateDict, parameter[]]
call[name[savedict]][constant[delay]] assign[=] call[name[self].delaySpnbx.value, parameter[]]
return[name[savedict]] | keyword[def] identifier[saveTemplate] ( identifier[self] ):
literal[string]
identifier[savedict] ={}
keyword[for] identifier[comp_editor] keyword[in] identifier[self] . identifier[widgets] ():
identifier[stim] = identifier[comp_editor] . identifier[component] ()
identifier[comp_editor] . identifier[saveToObject] ()
identifier[savedict] [ identifier[stim] . identifier[name] ]= identifier[stim] . identifier[stateDict] ()
identifier[savedict] [ literal[string] ]= identifier[self] . identifier[delaySpnbx] . identifier[value] ()
keyword[return] identifier[savedict] | def saveTemplate(self):
"""Get a json structure of the current inputs,
to be able to load later"""
savedict = {}
for comp_editor in self.widgets():
stim = comp_editor.component()
comp_editor.saveToObject()
savedict[stim.name] = stim.stateDict() # depends on [control=['for'], data=['comp_editor']]
savedict['delay'] = self.delaySpnbx.value()
return savedict |
def balanced_binning_reference(y, ax=None, target='Frequency', bins=4, **kwargs):
"""
BalancedBinningReference generates a histogram with vertical lines
showing the recommended value point to bin your data so they can be evenly
distributed in each bin.
Parameters
----------
y : an array of one dimension or a pandas Series
ax : matplotlib Axes, default: None
This is inherited from FeatureVisualizer and is defined within
``BalancedBinningReference``.
target : string, default: "Frequency"
The name of the ``y`` variable
bins : number of bins to generate the histogram, default: 4
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
"""
# Initialize the visualizer
visualizer = BalancedBinningReference(ax=ax, bins=bins, target=target, **kwargs)
# Fit and poof the visualizer
visualizer.fit(y)
visualizer.poof() | def function[balanced_binning_reference, parameter[y, ax, target, bins]]:
constant[
BalancedBinningReference generates a histogram with vertical lines
showing the recommended value point to bin your data so they can be evenly
distributed in each bin.
Parameters
----------
y : an array of one dimension or a pandas Series
ax : matplotlib Axes, default: None
This is inherited from FeatureVisualizer and is defined within
``BalancedBinningReference``.
target : string, default: "Frequency"
The name of the ``y`` variable
bins : number of bins to generate the histogram, default: 4
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
]
variable[visualizer] assign[=] call[name[BalancedBinningReference], parameter[]]
call[name[visualizer].fit, parameter[name[y]]]
call[name[visualizer].poof, parameter[]] | keyword[def] identifier[balanced_binning_reference] ( identifier[y] , identifier[ax] = keyword[None] , identifier[target] = literal[string] , identifier[bins] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[visualizer] = identifier[BalancedBinningReference] ( identifier[ax] = identifier[ax] , identifier[bins] = identifier[bins] , identifier[target] = identifier[target] ,** identifier[kwargs] )
identifier[visualizer] . identifier[fit] ( identifier[y] )
identifier[visualizer] . identifier[poof] () | def balanced_binning_reference(y, ax=None, target='Frequency', bins=4, **kwargs):
"""
BalancedBinningReference generates a histogram with vertical lines
showing the recommended value point to bin your data so they can be evenly
distributed in each bin.
Parameters
----------
y : an array of one dimension or a pandas Series
ax : matplotlib Axes, default: None
This is inherited from FeatureVisualizer and is defined within
``BalancedBinningReference``.
target : string, default: "Frequency"
The name of the ``y`` variable
bins : number of bins to generate the histogram, default: 4
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
"""
# Initialize the visualizer
visualizer = BalancedBinningReference(ax=ax, bins=bins, target=target, **kwargs)
# Fit and poof the visualizer
visualizer.fit(y)
visualizer.poof() |
def xml(self, fn=None, src='word/document.xml', XMLClass=XML, **params):
"return the src with the given transformation applied, if any."
if src in self.xml_cache: return self.xml_cache[src]
if src not in self.zipfile.namelist(): return
x = XMLClass(
fn=fn or (self.fn and self.fn.replace('.docx', '.xml')) or None,
root=self.zipfile.read(src))
self.xml_cache[src] = x
return x | def function[xml, parameter[self, fn, src, XMLClass]]:
constant[return the src with the given transformation applied, if any.]
if compare[name[src] in name[self].xml_cache] begin[:]
return[call[name[self].xml_cache][name[src]]]
if compare[name[src] <ast.NotIn object at 0x7da2590d7190> call[name[self].zipfile.namelist, parameter[]]] begin[:]
return[None]
variable[x] assign[=] call[name[XMLClass], parameter[]]
call[name[self].xml_cache][name[src]] assign[=] name[x]
return[name[x]] | keyword[def] identifier[xml] ( identifier[self] , identifier[fn] = keyword[None] , identifier[src] = literal[string] , identifier[XMLClass] = identifier[XML] ,** identifier[params] ):
literal[string]
keyword[if] identifier[src] keyword[in] identifier[self] . identifier[xml_cache] : keyword[return] identifier[self] . identifier[xml_cache] [ identifier[src] ]
keyword[if] identifier[src] keyword[not] keyword[in] identifier[self] . identifier[zipfile] . identifier[namelist] (): keyword[return]
identifier[x] = identifier[XMLClass] (
identifier[fn] = identifier[fn] keyword[or] ( identifier[self] . identifier[fn] keyword[and] identifier[self] . identifier[fn] . identifier[replace] ( literal[string] , literal[string] )) keyword[or] keyword[None] ,
identifier[root] = identifier[self] . identifier[zipfile] . identifier[read] ( identifier[src] ))
identifier[self] . identifier[xml_cache] [ identifier[src] ]= identifier[x]
keyword[return] identifier[x] | def xml(self, fn=None, src='word/document.xml', XMLClass=XML, **params):
"""return the src with the given transformation applied, if any."""
if src in self.xml_cache:
return self.xml_cache[src] # depends on [control=['if'], data=['src']]
if src not in self.zipfile.namelist():
return # depends on [control=['if'], data=[]]
x = XMLClass(fn=fn or (self.fn and self.fn.replace('.docx', '.xml')) or None, root=self.zipfile.read(src))
self.xml_cache[src] = x
return x |
def get_string_list(self, key):
"""Get a list of strings."""
strings = []
size = self.beginReadArray(key)
for i in range(size):
self.setArrayIndex(i)
entry = str(self._value("entry"))
strings.append(entry)
self.endArray()
return strings | def function[get_string_list, parameter[self, key]]:
constant[Get a list of strings.]
variable[strings] assign[=] list[[]]
variable[size] assign[=] call[name[self].beginReadArray, parameter[name[key]]]
for taget[name[i]] in starred[call[name[range], parameter[name[size]]]] begin[:]
call[name[self].setArrayIndex, parameter[name[i]]]
variable[entry] assign[=] call[name[str], parameter[call[name[self]._value, parameter[constant[entry]]]]]
call[name[strings].append, parameter[name[entry]]]
call[name[self].endArray, parameter[]]
return[name[strings]] | keyword[def] identifier[get_string_list] ( identifier[self] , identifier[key] ):
literal[string]
identifier[strings] =[]
identifier[size] = identifier[self] . identifier[beginReadArray] ( identifier[key] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[size] ):
identifier[self] . identifier[setArrayIndex] ( identifier[i] )
identifier[entry] = identifier[str] ( identifier[self] . identifier[_value] ( literal[string] ))
identifier[strings] . identifier[append] ( identifier[entry] )
identifier[self] . identifier[endArray] ()
keyword[return] identifier[strings] | def get_string_list(self, key):
"""Get a list of strings."""
strings = []
size = self.beginReadArray(key)
for i in range(size):
self.setArrayIndex(i)
entry = str(self._value('entry'))
strings.append(entry) # depends on [control=['for'], data=['i']]
self.endArray()
return strings |
def _read_opt_pad(self, code, *, desc):
"""Read HOPOPT padding options.
Structure of HOPOPT padding options [RFC 8200]:
* Pad1 Option:
+-+-+-+-+-+-+-+-+
| 0 |
+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
* PadN Option:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
| 1 | Opt Data Len | Option Data
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
1 8 hopopt.opt.length Length of Option Data
2 16 hopopt.pad.padding Padding
"""
_type = self._read_opt_type(code)
if code == 0:
opt = dict(
desc=desc,
type=_type,
length=1,
)
elif code == 1:
_size = self._read_unpack(1)
_padn = self._read_fileng(_size)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
padding=_padn,
)
else:
raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
return opt | def function[_read_opt_pad, parameter[self, code]]:
constant[Read HOPOPT padding options.
Structure of HOPOPT padding options [RFC 8200]:
* Pad1 Option:
+-+-+-+-+-+-+-+-+
| 0 |
+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
* PadN Option:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
| 1 | Opt Data Len | Option Data
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
1 8 hopopt.opt.length Length of Option Data
2 16 hopopt.pad.padding Padding
]
variable[_type] assign[=] call[name[self]._read_opt_type, parameter[name[code]]]
if compare[name[code] equal[==] constant[0]] begin[:]
variable[opt] assign[=] call[name[dict], parameter[]]
return[name[opt]] | keyword[def] identifier[_read_opt_pad] ( identifier[self] , identifier[code] ,*, identifier[desc] ):
literal[string]
identifier[_type] = identifier[self] . identifier[_read_opt_type] ( identifier[code] )
keyword[if] identifier[code] == literal[int] :
identifier[opt] = identifier[dict] (
identifier[desc] = identifier[desc] ,
identifier[type] = identifier[_type] ,
identifier[length] = literal[int] ,
)
keyword[elif] identifier[code] == literal[int] :
identifier[_size] = identifier[self] . identifier[_read_unpack] ( literal[int] )
identifier[_padn] = identifier[self] . identifier[_read_fileng] ( identifier[_size] )
identifier[opt] = identifier[dict] (
identifier[desc] = identifier[desc] ,
identifier[type] = identifier[_type] ,
identifier[length] = identifier[_size] + literal[int] ,
identifier[padding] = identifier[_padn] ,
)
keyword[else] :
keyword[raise] identifier[ProtocolError] ( literal[string] )
keyword[return] identifier[opt] | def _read_opt_pad(self, code, *, desc):
"""Read HOPOPT padding options.
Structure of HOPOPT padding options [RFC 8200]:
* Pad1 Option:
+-+-+-+-+-+-+-+-+
| 0 |
+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
* PadN Option:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
| 1 | Opt Data Len | Option Data
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
1 8 hopopt.opt.length Length of Option Data
2 16 hopopt.pad.padding Padding
"""
_type = self._read_opt_type(code)
if code == 0:
opt = dict(desc=desc, type=_type, length=1) # depends on [control=['if'], data=[]]
elif code == 1:
_size = self._read_unpack(1)
_padn = self._read_fileng(_size)
opt = dict(desc=desc, type=_type, length=_size + 2, padding=_padn) # depends on [control=['if'], data=[]]
else:
raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
return opt |
def pprint_sequence(sequence, annotations=None, block_length=10,
blocks_per_line=6, format=PlaintextFormat):
"""
Pretty-print sequence for use with a monospace font.
>>> sequence = 'MIMANQPLWLDSEVEMNHYQQSHIKSKSPYFPEDKHICWIKIFKAFGT' * 4
>>> print pprint_sequence(sequence, format=PlaintextFormat)
1 MIMANQPLWL DSEVEMNHYQ QSHIKSKSPY FPEDKHICWI KIFKAFGTMI MANQPLWLDS
61 EVEMNHYQQS HIKSKSPYFP EDKHICWIKI FKAFGTMIMA NQPLWLDSEV EMNHYQQSHI
121 KSKSPYFPED KHICWIKIFK AFGTMIMANQ PLWLDSEVEM NHYQQSHIKS KSPYFPEDKH
181 ICWIKIFKAF GT
:arg sequence: Sequence to pretty-print.
:type sequence: str or any sliceable yielding slices representable as
strings.
:arg annotations: For each annotation level, a list of (`start`, `stop`)
pairs defining an annotated region.
:type annotations: list
:arg block_length: Length of space-separated blocks.
:type block_length: int
:arg blocks_per_line: Number of blocks per line.
:type blocks_per_line: int
:arg format: Output format to use for pretty-printing. Some formats are
pre-defined as :data:`HtmlFormat`, :data:`AnsiFormat`, and
:data:`PlaintextFormat`.
:type format: :class:`Format`
:return: Pretty-printed version of `sequence`.
:rtype: str
All regions (`start`, `stop`) are defined as in slicing notation, so
zero-based and `stop` is not included.
The `annotations` argument is a list of annotations. An annotation is a
list of regions as (`start`, `stop`) tuples. The level of each annotation
is its index in `annotations`.
Annotation regions can overlap (overlap within one level is ignored) and
do not need to be sorted.
The number of annotation levels supported depends on `format`.
:data:`HtmlFormat` supports 10 levels, :data:`AnsiFormat` supports 3
levels and annotations are ignored completely with
:data:`PlaintextFormat`.
"""
annotations = annotations or []
partitioning = partition_range(len(sequence), annotations)
# The maximum length for positions is the 10_log of the length of the
# sequence.
margin = int(math.floor(math.log(max(len(sequence), 1), 10))
+ 1) + len(format.margin[0])
result = (format.margin[0] + '1').rjust(margin) + format.margin[1] + ' '
for p in range(0, len(sequence), block_length):
# Partitioning of the block starting at position p.
block = [(max(start, p), min(stop, p + block_length), levels)
for start, stop, levels in partitioning
if start < p + block_length and stop > p]
result += ' '
for start, stop, levels in block:
delimiters = [(left, right) for level, (left, right)
in enumerate(format.annotations) if level in levels]
result += (''.join(left for left, right in reversed(delimiters)) +
str(sequence[start:stop]) +
''.join(right for left, right in delimiters))
if (not (p + block_length) % (block_length * blocks_per_line) and
p + block_length < len(sequence)):
result += ('\n' + (format.margin[0] +
str(p + block_length + 1)).rjust(margin) +
format.margin[1] + ' ')
return result | def function[pprint_sequence, parameter[sequence, annotations, block_length, blocks_per_line, format]]:
constant[
Pretty-print sequence for use with a monospace font.
>>> sequence = 'MIMANQPLWLDSEVEMNHYQQSHIKSKSPYFPEDKHICWIKIFKAFGT' * 4
>>> print pprint_sequence(sequence, format=PlaintextFormat)
1 MIMANQPLWL DSEVEMNHYQ QSHIKSKSPY FPEDKHICWI KIFKAFGTMI MANQPLWLDS
61 EVEMNHYQQS HIKSKSPYFP EDKHICWIKI FKAFGTMIMA NQPLWLDSEV EMNHYQQSHI
121 KSKSPYFPED KHICWIKIFK AFGTMIMANQ PLWLDSEVEM NHYQQSHIKS KSPYFPEDKH
181 ICWIKIFKAF GT
:arg sequence: Sequence to pretty-print.
:type sequence: str or any sliceable yielding slices representable as
strings.
:arg annotations: For each annotation level, a list of (`start`, `stop`)
pairs defining an annotated region.
:type annotations: list
:arg block_length: Length of space-separated blocks.
:type block_length: int
:arg blocks_per_line: Number of blocks per line.
:type blocks_per_line: int
:arg format: Output format to use for pretty-printing. Some formats are
pre-defined as :data:`HtmlFormat`, :data:`AnsiFormat`, and
:data:`PlaintextFormat`.
:type format: :class:`Format`
:return: Pretty-printed version of `sequence`.
:rtype: str
All regions (`start`, `stop`) are defined as in slicing notation, so
zero-based and `stop` is not included.
The `annotations` argument is a list of annotations. An annotation is a
list of regions as (`start`, `stop`) tuples. The level of each annotation
is its index in `annotations`.
Annotation regions can overlap (overlap within one level is ignored) and
do not need to be sorted.
The number of annotation levels supported depends on `format`.
:data:`HtmlFormat` supports 10 levels, :data:`AnsiFormat` supports 3
levels and annotations are ignored completely with
:data:`PlaintextFormat`.
]
variable[annotations] assign[=] <ast.BoolOp object at 0x7da18bcc9180>
variable[partitioning] assign[=] call[name[partition_range], parameter[call[name[len], parameter[name[sequence]]], name[annotations]]]
variable[margin] assign[=] binary_operation[call[name[int], parameter[binary_operation[call[name[math].floor, parameter[call[name[math].log, parameter[call[name[max], parameter[call[name[len], parameter[name[sequence]]], constant[1]]], constant[10]]]]] + constant[1]]]] + call[name[len], parameter[call[name[format].margin][constant[0]]]]]
variable[result] assign[=] binary_operation[binary_operation[call[binary_operation[call[name[format].margin][constant[0]] + constant[1]].rjust, parameter[name[margin]]] + call[name[format].margin][constant[1]]] + constant[ ]]
for taget[name[p]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[sequence]]], name[block_length]]]] begin[:]
variable[block] assign[=] <ast.ListComp object at 0x7da18bccabc0>
<ast.AugAssign object at 0x7da18bccace0>
for taget[tuple[[<ast.Name object at 0x7da18bccac80>, <ast.Name object at 0x7da18bccbeb0>, <ast.Name object at 0x7da18bccba90>]]] in starred[name[block]] begin[:]
variable[delimiters] assign[=] <ast.ListComp object at 0x7da18bcc8a60>
<ast.AugAssign object at 0x7da18bcc9540>
if <ast.BoolOp object at 0x7da18bcc83d0> begin[:]
<ast.AugAssign object at 0x7da18bcca080>
return[name[result]] | keyword[def] identifier[pprint_sequence] ( identifier[sequence] , identifier[annotations] = keyword[None] , identifier[block_length] = literal[int] ,
identifier[blocks_per_line] = literal[int] , identifier[format] = identifier[PlaintextFormat] ):
literal[string]
identifier[annotations] = identifier[annotations] keyword[or] []
identifier[partitioning] = identifier[partition_range] ( identifier[len] ( identifier[sequence] ), identifier[annotations] )
identifier[margin] = identifier[int] ( identifier[math] . identifier[floor] ( identifier[math] . identifier[log] ( identifier[max] ( identifier[len] ( identifier[sequence] ), literal[int] ), literal[int] ))
+ literal[int] )+ identifier[len] ( identifier[format] . identifier[margin] [ literal[int] ])
identifier[result] =( identifier[format] . identifier[margin] [ literal[int] ]+ literal[string] ). identifier[rjust] ( identifier[margin] )+ identifier[format] . identifier[margin] [ literal[int] ]+ literal[string]
keyword[for] identifier[p] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[sequence] ), identifier[block_length] ):
identifier[block] =[( identifier[max] ( identifier[start] , identifier[p] ), identifier[min] ( identifier[stop] , identifier[p] + identifier[block_length] ), identifier[levels] )
keyword[for] identifier[start] , identifier[stop] , identifier[levels] keyword[in] identifier[partitioning]
keyword[if] identifier[start] < identifier[p] + identifier[block_length] keyword[and] identifier[stop] > identifier[p] ]
identifier[result] += literal[string]
keyword[for] identifier[start] , identifier[stop] , identifier[levels] keyword[in] identifier[block] :
identifier[delimiters] =[( identifier[left] , identifier[right] ) keyword[for] identifier[level] ,( identifier[left] , identifier[right] )
keyword[in] identifier[enumerate] ( identifier[format] . identifier[annotations] ) keyword[if] identifier[level] keyword[in] identifier[levels] ]
identifier[result] +=( literal[string] . identifier[join] ( identifier[left] keyword[for] identifier[left] , identifier[right] keyword[in] identifier[reversed] ( identifier[delimiters] ))+
identifier[str] ( identifier[sequence] [ identifier[start] : identifier[stop] ])+
literal[string] . identifier[join] ( identifier[right] keyword[for] identifier[left] , identifier[right] keyword[in] identifier[delimiters] ))
keyword[if] ( keyword[not] ( identifier[p] + identifier[block_length] )%( identifier[block_length] * identifier[blocks_per_line] ) keyword[and]
identifier[p] + identifier[block_length] < identifier[len] ( identifier[sequence] )):
identifier[result] +=( literal[string] +( identifier[format] . identifier[margin] [ literal[int] ]+
identifier[str] ( identifier[p] + identifier[block_length] + literal[int] )). identifier[rjust] ( identifier[margin] )+
identifier[format] . identifier[margin] [ literal[int] ]+ literal[string] )
keyword[return] identifier[result] | def pprint_sequence(sequence, annotations=None, block_length=10, blocks_per_line=6, format=PlaintextFormat):
"""
Pretty-print sequence for use with a monospace font.
>>> sequence = 'MIMANQPLWLDSEVEMNHYQQSHIKSKSPYFPEDKHICWIKIFKAFGT' * 4
>>> print pprint_sequence(sequence, format=PlaintextFormat)
1 MIMANQPLWL DSEVEMNHYQ QSHIKSKSPY FPEDKHICWI KIFKAFGTMI MANQPLWLDS
61 EVEMNHYQQS HIKSKSPYFP EDKHICWIKI FKAFGTMIMA NQPLWLDSEV EMNHYQQSHI
121 KSKSPYFPED KHICWIKIFK AFGTMIMANQ PLWLDSEVEM NHYQQSHIKS KSPYFPEDKH
181 ICWIKIFKAF GT
:arg sequence: Sequence to pretty-print.
:type sequence: str or any sliceable yielding slices representable as
strings.
:arg annotations: For each annotation level, a list of (`start`, `stop`)
pairs defining an annotated region.
:type annotations: list
:arg block_length: Length of space-separated blocks.
:type block_length: int
:arg blocks_per_line: Number of blocks per line.
:type blocks_per_line: int
:arg format: Output format to use for pretty-printing. Some formats are
pre-defined as :data:`HtmlFormat`, :data:`AnsiFormat`, and
:data:`PlaintextFormat`.
:type format: :class:`Format`
:return: Pretty-printed version of `sequence`.
:rtype: str
All regions (`start`, `stop`) are defined as in slicing notation, so
zero-based and `stop` is not included.
The `annotations` argument is a list of annotations. An annotation is a
list of regions as (`start`, `stop`) tuples. The level of each annotation
is its index in `annotations`.
Annotation regions can overlap (overlap within one level is ignored) and
do not need to be sorted.
The number of annotation levels supported depends on `format`.
:data:`HtmlFormat` supports 10 levels, :data:`AnsiFormat` supports 3
levels and annotations are ignored completely with
:data:`PlaintextFormat`.
"""
annotations = annotations or []
partitioning = partition_range(len(sequence), annotations)
# The maximum length for positions is the 10_log of the length of the
# sequence.
margin = int(math.floor(math.log(max(len(sequence), 1), 10)) + 1) + len(format.margin[0])
result = (format.margin[0] + '1').rjust(margin) + format.margin[1] + ' '
for p in range(0, len(sequence), block_length):
# Partitioning of the block starting at position p.
block = [(max(start, p), min(stop, p + block_length), levels) for (start, stop, levels) in partitioning if start < p + block_length and stop > p]
result += ' '
for (start, stop, levels) in block:
delimiters = [(left, right) for (level, (left, right)) in enumerate(format.annotations) if level in levels]
result += ''.join((left for (left, right) in reversed(delimiters))) + str(sequence[start:stop]) + ''.join((right for (left, right) in delimiters)) # depends on [control=['for'], data=[]]
if not (p + block_length) % (block_length * blocks_per_line) and p + block_length < len(sequence):
result += '\n' + (format.margin[0] + str(p + block_length + 1)).rjust(margin) + format.margin[1] + ' ' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
return result |
def formatException(self, record):
"""
Format and return the specified exception information as a string.
:type record logging.LogRecord
:rtype: dict
"""
if record.exc_info is None:
return {}
(exc_type, exc_message, trace) = record.exc_info
return {
'e': {
'class': str(type(exc_type).__name__), # ZeroDivisionError
'message': str(exc_message), # integer division or modulo by zero
'trace': list(traceback.format_tb(trace)),
}
} | def function[formatException, parameter[self, record]]:
constant[
Format and return the specified exception information as a string.
:type record logging.LogRecord
:rtype: dict
]
if compare[name[record].exc_info is constant[None]] begin[:]
return[dictionary[[], []]]
<ast.Tuple object at 0x7da1b28fca90> assign[=] name[record].exc_info
return[dictionary[[<ast.Constant object at 0x7da1b28fee60>], [<ast.Dict object at 0x7da1b28ffe20>]]] | keyword[def] identifier[formatException] ( identifier[self] , identifier[record] ):
literal[string]
keyword[if] identifier[record] . identifier[exc_info] keyword[is] keyword[None] :
keyword[return] {}
( identifier[exc_type] , identifier[exc_message] , identifier[trace] )= identifier[record] . identifier[exc_info]
keyword[return] {
literal[string] :{
literal[string] : identifier[str] ( identifier[type] ( identifier[exc_type] ). identifier[__name__] ),
literal[string] : identifier[str] ( identifier[exc_message] ),
literal[string] : identifier[list] ( identifier[traceback] . identifier[format_tb] ( identifier[trace] )),
}
} | def formatException(self, record):
"""
Format and return the specified exception information as a string.
:type record logging.LogRecord
:rtype: dict
"""
if record.exc_info is None:
return {} # depends on [control=['if'], data=[]]
(exc_type, exc_message, trace) = record.exc_info # ZeroDivisionError
# integer division or modulo by zero
return {'e': {'class': str(type(exc_type).__name__), 'message': str(exc_message), 'trace': list(traceback.format_tb(trace))}} |
def ints(self, qlist):
"""
Converts a sequence of pegasus_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The pegasus_index node labels
Returns
-------
rlist : iterable of tuples
The linear_lindex node lables corresponding to qlist
"""
m, m1 = self.args
return (((m * u + w) * 12 + k) * m1 + z for (u, w, k, z) in qlist) | def function[ints, parameter[self, qlist]]:
constant[
Converts a sequence of pegasus_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The pegasus_index node labels
Returns
-------
rlist : iterable of tuples
The linear_lindex node lables corresponding to qlist
]
<ast.Tuple object at 0x7da1b08fa5c0> assign[=] name[self].args
return[<ast.GeneratorExp object at 0x7da1b08fba90>] | keyword[def] identifier[ints] ( identifier[self] , identifier[qlist] ):
literal[string]
identifier[m] , identifier[m1] = identifier[self] . identifier[args]
keyword[return] ((( identifier[m] * identifier[u] + identifier[w] )* literal[int] + identifier[k] )* identifier[m1] + identifier[z] keyword[for] ( identifier[u] , identifier[w] , identifier[k] , identifier[z] ) keyword[in] identifier[qlist] ) | def ints(self, qlist):
"""
Converts a sequence of pegasus_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The pegasus_index node labels
Returns
-------
rlist : iterable of tuples
The linear_lindex node lables corresponding to qlist
"""
(m, m1) = self.args
return (((m * u + w) * 12 + k) * m1 + z for (u, w, k, z) in qlist) |
def get_graph(graph, conn, **kwargs):
""" Returns all the triples for a specific are graph
args:
graph: the URI of the graph to retreive
conn: the rdfframework triplestore connection
"""
sparql = render_without_request("sparqlGraphDataTemplate.rq",
prefix=NSM.prefix(),
graph=graph)
return conn.query(sparql, **kwargs) | def function[get_graph, parameter[graph, conn]]:
constant[ Returns all the triples for a specific are graph
args:
graph: the URI of the graph to retreive
conn: the rdfframework triplestore connection
]
variable[sparql] assign[=] call[name[render_without_request], parameter[constant[sparqlGraphDataTemplate.rq]]]
return[call[name[conn].query, parameter[name[sparql]]]] | keyword[def] identifier[get_graph] ( identifier[graph] , identifier[conn] ,** identifier[kwargs] ):
literal[string]
identifier[sparql] = identifier[render_without_request] ( literal[string] ,
identifier[prefix] = identifier[NSM] . identifier[prefix] (),
identifier[graph] = identifier[graph] )
keyword[return] identifier[conn] . identifier[query] ( identifier[sparql] ,** identifier[kwargs] ) | def get_graph(graph, conn, **kwargs):
""" Returns all the triples for a specific are graph
args:
graph: the URI of the graph to retreive
conn: the rdfframework triplestore connection
"""
sparql = render_without_request('sparqlGraphDataTemplate.rq', prefix=NSM.prefix(), graph=graph)
return conn.query(sparql, **kwargs) |
def _sort_cards(self, cards: Generator) -> list:
'''sort cards by blocknum and blockseq'''
return sorted([card.__dict__ for card in cards],
key=itemgetter('blocknum', 'blockseq', 'cardseq')) | def function[_sort_cards, parameter[self, cards]]:
constant[sort cards by blocknum and blockseq]
return[call[name[sorted], parameter[<ast.ListComp object at 0x7da1b2380eb0>]]] | keyword[def] identifier[_sort_cards] ( identifier[self] , identifier[cards] : identifier[Generator] )-> identifier[list] :
literal[string]
keyword[return] identifier[sorted] ([ identifier[card] . identifier[__dict__] keyword[for] identifier[card] keyword[in] identifier[cards] ],
identifier[key] = identifier[itemgetter] ( literal[string] , literal[string] , literal[string] )) | def _sort_cards(self, cards: Generator) -> list:
"""sort cards by blocknum and blockseq"""
return sorted([card.__dict__ for card in cards], key=itemgetter('blocknum', 'blockseq', 'cardseq')) |
def set_profile_picture(self, filename):
"""
Sets the profile picture
:param filename: The filename on disk of the image to set
"""
log.info("[+] Setting the profile picture to file '{}'".format(filename))
profile_pictures.set_profile_picture(filename, self.kik_node + '@talk.kik.com', self.username, self.password) | def function[set_profile_picture, parameter[self, filename]]:
constant[
Sets the profile picture
:param filename: The filename on disk of the image to set
]
call[name[log].info, parameter[call[constant[[+] Setting the profile picture to file '{}'].format, parameter[name[filename]]]]]
call[name[profile_pictures].set_profile_picture, parameter[name[filename], binary_operation[name[self].kik_node + constant[@talk.kik.com]], name[self].username, name[self].password]] | keyword[def] identifier[set_profile_picture] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[filename] ))
identifier[profile_pictures] . identifier[set_profile_picture] ( identifier[filename] , identifier[self] . identifier[kik_node] + literal[string] , identifier[self] . identifier[username] , identifier[self] . identifier[password] ) | def set_profile_picture(self, filename):
"""
Sets the profile picture
:param filename: The filename on disk of the image to set
"""
log.info("[+] Setting the profile picture to file '{}'".format(filename))
profile_pictures.set_profile_picture(filename, self.kik_node + '@talk.kik.com', self.username, self.password) |
def rescan_file(self, this_hash, timeout=None):
    """Ask the scanning service to re-scan a previously submitted file.

    :param this_hash: an md5/sha1/sha256 hash, or a CSV list combining up to
                      25 of them for a single batch request. The file(s) must
                      already be present in the file store.
    :param timeout: seconds the HTTP request should wait before timing out.
    :return: JSON response containing scan_id and permalink (with status
             code), or ``{'error': ...}`` when the HTTP request itself fails.
    """
    url = self.base + 'file/rescan'
    query = {'apikey': self.api_key, 'resource': this_hash}
    try:
        response = requests.post(url, params=query, proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return {'error': str(exc)}
    return _return_response_and_status_code(response)
constant[ Rescan a previously submitted filed or schedule an scan to be performed in the future.
:param this_hash: a md5/sha1/sha256 hash. You can also specify a CSV list made up of a combination of any of
the three allowed hashes (up to 25 items), this allows you to perform a batch request with
one single call. Note that the file must already be present in our file store.
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: JSON response that contains scan_id and permalink.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ef2f80>, <ast.Constant object at 0x7da1b0ef2830>], [<ast.Attribute object at 0x7da1b0ef2590>, <ast.Name object at 0x7da1b0ef19f0>]]
<ast.Try object at 0x7da1b0ef1540>
return[call[name[_return_response_and_status_code], parameter[name[response]]]] | keyword[def] identifier[rescan_file] ( identifier[self] , identifier[this_hash] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[self] . identifier[api_key] , literal[string] : identifier[this_hash] }
keyword[try] :
identifier[response] = identifier[requests] . identifier[post] ( identifier[self] . identifier[base] + literal[string] , identifier[params] = identifier[params] , identifier[proxies] = identifier[self] . identifier[proxies] , identifier[timeout] = identifier[timeout] )
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[e] :
keyword[return] identifier[dict] ( identifier[error] = identifier[str] ( identifier[e] ))
keyword[return] identifier[_return_response_and_status_code] ( identifier[response] ) | def rescan_file(self, this_hash, timeout=None):
""" Rescan a previously submitted filed or schedule an scan to be performed in the future.
:param this_hash: a md5/sha1/sha256 hash. You can also specify a CSV list made up of a combination of any of
the three allowed hashes (up to 25 items), this allows you to perform a batch request with
one single call. Note that the file must already be present in our file store.
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: JSON response that contains scan_id and permalink.
"""
params = {'apikey': self.api_key, 'resource': this_hash}
try:
response = requests.post(self.base + 'file/rescan', params=params, proxies=self.proxies, timeout=timeout) # depends on [control=['try'], data=[]]
except requests.RequestException as e:
return dict(error=str(e)) # depends on [control=['except'], data=['e']]
return _return_response_and_status_code(response) |
def _send(self, metric):
    """Publish a single metric to gmond via the gmetric client.

    tmax=60/dmax=0 and slope "both" mirror gmetric's usual defaults;
    units and group are left empty.
    """
    name = self.get_name_from_path(metric.path)
    # FIXME: Badness, shouldn't *assume* double type
    self.gmetric.send(name, metric.value, "double", "", "both", "60", "0", "")
constant[
Send data to gmond.
]
variable[metric_name] assign[=] call[name[self].get_name_from_path, parameter[name[metric].path]]
variable[tmax] assign[=] constant[60]
variable[dmax] assign[=] constant[0]
variable[slope] assign[=] constant[both]
variable[metric_type] assign[=] constant[double]
variable[units] assign[=] constant[]
variable[group] assign[=] constant[]
call[name[self].gmetric.send, parameter[name[metric_name], name[metric].value, name[metric_type], name[units], name[slope], name[tmax], name[dmax], name[group]]] | keyword[def] identifier[_send] ( identifier[self] , identifier[metric] ):
literal[string]
identifier[metric_name] = identifier[self] . identifier[get_name_from_path] ( identifier[metric] . identifier[path] )
identifier[tmax] = literal[string]
identifier[dmax] = literal[string]
identifier[slope] = literal[string]
identifier[metric_type] = literal[string]
identifier[units] = literal[string]
identifier[group] = literal[string]
identifier[self] . identifier[gmetric] . identifier[send] ( identifier[metric_name] ,
identifier[metric] . identifier[value] ,
identifier[metric_type] ,
identifier[units] ,
identifier[slope] ,
identifier[tmax] ,
identifier[dmax] ,
identifier[group] ) | def _send(self, metric):
"""
Send data to gmond.
"""
metric_name = self.get_name_from_path(metric.path)
tmax = '60'
dmax = '0'
slope = 'both'
# FIXME: Badness, shouldn't *assume* double type
metric_type = 'double'
units = ''
group = ''
self.gmetric.send(metric_name, metric.value, metric_type, units, slope, tmax, dmax, group) |
def must_be_same(self, klass):
    """Check that this pre-existing node is being looked up as *klass*.

    A lookup as the generic ``Entry`` class is always acceptable; any
    other mismatch raises ``TypeError``.
    """
    if not (isinstance(self, klass) or klass is Entry):
        raise TypeError("Tried to lookup %s '%s' as a %s." % (
            self.__class__.__name__, self.get_internal_path(), klass.__name__))
constant[
This node, which already existed, is being looked up as the
specified klass. Raise an exception if it isn't.
]
if <ast.BoolOp object at 0x7da20c6c5930> begin[:]
return[None]
<ast.Raise object at 0x7da2041dab30> | keyword[def] identifier[must_be_same] ( identifier[self] , identifier[klass] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] , identifier[klass] ) keyword[or] identifier[klass] keyword[is] identifier[Entry] :
keyword[return]
keyword[raise] identifier[TypeError] ( literal[string] %( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[self] . identifier[get_internal_path] (), identifier[klass] . identifier[__name__] )) | def must_be_same(self, klass):
"""
This node, which already existed, is being looked up as the
specified klass. Raise an exception if it isn't.
"""
if isinstance(self, klass) or klass is Entry:
return # depends on [control=['if'], data=[]]
raise TypeError("Tried to lookup %s '%s' as a %s." % (self.__class__.__name__, self.get_internal_path(), klass.__name__)) |
def children(self):
    """~TermList: the children of all the terms in the list."""
    # Flatten every term's children, keeping only the first occurrence
    # of each child while preserving order.
    all_children = (child for term in self for child in term.children)
    return TermList(unique_everseen(all_children))
constant[~TermList: the children of all the terms in the list.
]
return[call[name[TermList], parameter[call[name[unique_everseen], parameter[<ast.GeneratorExp object at 0x7da1b1113520>]]]]] | keyword[def] identifier[children] ( identifier[self] ):
literal[string]
keyword[return] identifier[TermList] ( identifier[unique_everseen] (
identifier[y] keyword[for] identifier[x] keyword[in] identifier[self] keyword[for] identifier[y] keyword[in] identifier[x] . identifier[children]
)) | def children(self):
"""~TermList: the children of all the terms in the list.
"""
return TermList(unique_everseen((y for x in self for y in x.children))) |
async def punsubscribe(self, *args):
    """
    Unsubscribe from the supplied patterns. If empty, unsubscribe from
    all patterns.
    """
    # Normalize a single iterable argument into a flat argument list.
    patterns = list_or_args(args[0], args[1:]) if args else args
    return await self.execute_command('PUNSUBSCRIBE', *patterns)
literal[string]
keyword[if] identifier[args] :
identifier[args] = identifier[list_or_args] ( identifier[args] [ literal[int] ], identifier[args] [ literal[int] :])
keyword[return] keyword[await] identifier[self] . identifier[execute_command] ( literal[string] ,* identifier[args] ) | async def punsubscribe(self, *args):
"""
Unsubscribe from the supplied patterns. If empy, unsubscribe from
all patterns.
"""
if args:
args = list_or_args(args[0], args[1:]) # depends on [control=['if'], data=[]]
return await self.execute_command('PUNSUBSCRIBE', *args) |
def do_EOF(self, args):
    """Exit on system end of file character"""
    if _debug:
        ConsoleCmd._debug("do_EOF %r", args)
    # EOF (Ctrl-D) is treated exactly like an explicit exit command.
    return self.do_exit(args)
constant[Exit on system end of file character]
if name[_debug] begin[:]
call[name[ConsoleCmd]._debug, parameter[constant[do_EOF %r], name[args]]]
return[call[name[self].do_exit, parameter[name[args]]]] | keyword[def] identifier[do_EOF] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[_debug] : identifier[ConsoleCmd] . identifier[_debug] ( literal[string] , identifier[args] )
keyword[return] identifier[self] . identifier[do_exit] ( identifier[args] ) | def do_EOF(self, args):
"""Exit on system end of file character"""
if _debug:
ConsoleCmd._debug('do_EOF %r', args) # depends on [control=['if'], data=[]]
return self.do_exit(args) |
def create_peptidequant_lookup(fns, pqdb, poolnames, pepseq_colnr,
                               ms1_qcolpattern=None, isobqcolpattern=None,
                               psmnrpattern=None, fdrcolpattern=None,
                               pepcolpattern=None):
    """Calls lower level function to create a peptide quant lookup"""
    # Each column pattern is paired positionally with the DB store method
    # that persists values matched by that pattern.
    colpatterns = [ms1_qcolpattern, fdrcolpattern, pepcolpattern]
    store_methods = [pqdb.store_precursor_quants, pqdb.store_fdr,
                     pqdb.store_pep]
    create_pep_protein_quant_lookup(fns, pqdb, poolnames, pepseq_colnr,
                                    colpatterns, store_methods,
                                    isobqcolpattern, psmnrpattern)
constant[Calls lower level function to create a peptide quant lookup]
variable[patterns] assign[=] list[[<ast.Name object at 0x7da1b24e2380>, <ast.Name object at 0x7da1b24e0d00>, <ast.Name object at 0x7da1b24e0d90>]]
variable[storefuns] assign[=] list[[<ast.Attribute object at 0x7da1b24e2740>, <ast.Attribute object at 0x7da1b24e28f0>, <ast.Attribute object at 0x7da1b24e22c0>]]
call[name[create_pep_protein_quant_lookup], parameter[name[fns], name[pqdb], name[poolnames], name[pepseq_colnr], name[patterns], name[storefuns], name[isobqcolpattern], name[psmnrpattern]]] | keyword[def] identifier[create_peptidequant_lookup] ( identifier[fns] , identifier[pqdb] , identifier[poolnames] , identifier[pepseq_colnr] ,
identifier[ms1_qcolpattern] = keyword[None] , identifier[isobqcolpattern] = keyword[None] ,
identifier[psmnrpattern] = keyword[None] , identifier[fdrcolpattern] = keyword[None] ,
identifier[pepcolpattern] = keyword[None] ):
literal[string]
identifier[patterns] =[ identifier[ms1_qcolpattern] , identifier[fdrcolpattern] , identifier[pepcolpattern] ]
identifier[storefuns] =[ identifier[pqdb] . identifier[store_precursor_quants] , identifier[pqdb] . identifier[store_fdr] ,
identifier[pqdb] . identifier[store_pep] ]
identifier[create_pep_protein_quant_lookup] ( identifier[fns] , identifier[pqdb] , identifier[poolnames] , identifier[pepseq_colnr] ,
identifier[patterns] , identifier[storefuns] ,
identifier[isobqcolpattern] , identifier[psmnrpattern] ) | def create_peptidequant_lookup(fns, pqdb, poolnames, pepseq_colnr, ms1_qcolpattern=None, isobqcolpattern=None, psmnrpattern=None, fdrcolpattern=None, pepcolpattern=None):
"""Calls lower level function to create a peptide quant lookup"""
patterns = [ms1_qcolpattern, fdrcolpattern, pepcolpattern]
storefuns = [pqdb.store_precursor_quants, pqdb.store_fdr, pqdb.store_pep]
create_pep_protein_quant_lookup(fns, pqdb, poolnames, pepseq_colnr, patterns, storefuns, isobqcolpattern, psmnrpattern) |
def unbuffered_input(self):
    '''Context manager that puts the terminal into unbuffered input mode.

    Terminals normally buffer keystrokes until return/enter is pressed;
    a real-time application wants each keystroke as it happens. This is
    achieved by switching the tty stream to 'cbreak' mode — a middle
    ground between fully 'cooked' and fully 'raw' input where special
    keys such as :kbd:`Control-c` still interrupt the process. The
    original terminal attributes are always restored on exit.

    :meth:`Root.run` uses this context manager so applications behave
    correctly without doing this themselves.
    '''
    # Not a real terminal (e.g. a pipe): nothing to configure.
    if not self.is_a_tty:
        yield
        return
    saved_attrs = termios.tcgetattr(self.stream)
    tty.setcbreak(self.stream)
    try:
        yield
    finally:
        # Restore the original mode even if the body raised.
        termios.tcsetattr(self.stream, termios.TCSADRAIN, saved_attrs)
constant[Context manager for setting the terminal to use unbuffered input.
Normally, your terminal will collect together a user's input
keystrokes and deliver them to you in one neat parcel when they hit
the return/enter key. In a real-time interactive application we instead
want to receive each keystroke as it happens.
This context manager achieves that by setting 'cbreak' mode on the
the output tty stream. cbreak is a mode inbetween 'cooked mode', where
all the user's input is preprocessed, and 'raw mode' where none of it
is. Basically, in cbreak mode input like :kbd:`Control-c` will still
interrupt (i.e. 'break') the process, hence the name. Wikipedia is your
friend on this one!
:meth:`Root.run` uses this context manager for you to make your
application work in the correct way.
]
if name[self].is_a_tty begin[:]
variable[orig_tty_attrs] assign[=] call[name[termios].tcgetattr, parameter[name[self].stream]]
call[name[tty].setcbreak, parameter[name[self].stream]]
<ast.Try object at 0x7da18bc720e0> | keyword[def] identifier[unbuffered_input] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_a_tty] :
identifier[orig_tty_attrs] = identifier[termios] . identifier[tcgetattr] ( identifier[self] . identifier[stream] )
identifier[tty] . identifier[setcbreak] ( identifier[self] . identifier[stream] )
keyword[try] :
keyword[yield]
keyword[finally] :
identifier[termios] . identifier[tcsetattr] (
identifier[self] . identifier[stream] , identifier[termios] . identifier[TCSADRAIN] , identifier[orig_tty_attrs] )
keyword[else] :
keyword[yield] | def unbuffered_input(self):
"""Context manager for setting the terminal to use unbuffered input.
Normally, your terminal will collect together a user's input
keystrokes and deliver them to you in one neat parcel when they hit
the return/enter key. In a real-time interactive application we instead
want to receive each keystroke as it happens.
This context manager achieves that by setting 'cbreak' mode on the
the output tty stream. cbreak is a mode inbetween 'cooked mode', where
all the user's input is preprocessed, and 'raw mode' where none of it
is. Basically, in cbreak mode input like :kbd:`Control-c` will still
interrupt (i.e. 'break') the process, hence the name. Wikipedia is your
friend on this one!
:meth:`Root.run` uses this context manager for you to make your
application work in the correct way.
"""
if self.is_a_tty:
orig_tty_attrs = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
try:
yield # depends on [control=['try'], data=[]]
finally:
termios.tcsetattr(self.stream, termios.TCSADRAIN, orig_tty_attrs) # depends on [control=['if'], data=[]]
else:
yield |
def reload_class(self, verbose=True, reload_module=True):
    """
    special class reloading function
    This function is often injected as rrr of classes

    Reloads the modules that define this instance's class and all of its
    ancestor classes (excluding ``object``), then rebinds the freshly
    loaded methods onto ``self`` via ``reload_class_methods``.

    Args:
        verbose (bool or int): print progress; values > 1 print extra detail.
        reload_module (bool): when True, re-import each defining module
            (preferring the module's own ``rrr`` hook, falling back to
            ``imp.reload``) before rebinding methods.

    Raises:
        Exception: any error from the reload machinery is logged via
            ``ut.printex`` and then re-raised.
    """
    # NOTE(review): utool is imported lazily inside the function —
    # presumably to avoid an import cycle; confirm.
    import utool as ut
    verbose = verbose or VERBOSE_CLASS
    classname = self.__class__.__name__
    try:
        modname = self.__class__.__module__
        if verbose:
            print('[class] reloading ' + classname + ' from ' + modname)
        # --HACK--
        # Give the instance a chance to tear down state before the reload.
        if hasattr(self, '_on_reload'):
            if verbose > 1:
                print('[class] calling _on_reload for ' + classname)
            self._on_reload()
        elif verbose > 1:
            print('[class] ' + classname + ' does not have an _on_reload function')
        # Do for all inheriting classes
        # Post-order walk of the inheritance tree: ancestors are collected
        # before the class itself, and ``object`` is excluded. The function
        # receives itself as a parameter so the inner def can recurse.
        def find_base_clases(_class, find_base_clases=None):
            class_list = []
            for _baseclass in _class.__bases__:
                parents = find_base_clases(_baseclass, find_base_clases)
                class_list.extend(parents)
            if _class is not object:
                class_list.append(_class)
            return class_list
        head_class = self.__class__
        # Determine if parents need reloading
        class_list = find_base_clases(head_class, find_base_clases)
        # HACK
        # HashComparable2 is deliberately never reloaded.
        ignore = {HashComparable2}
        class_list = [_class for _class in class_list
                      if _class not in ignore]
        for _class in class_list:
            if verbose:
                print('[class] reloading parent ' + _class.__name__ +
                      ' from ' + _class.__module__)
            if _class.__module__ == '__main__':
                # Attempt to find the module that is the main module
                # This may be very hacky and potentially break
                main_module_ = sys.modules[_class.__module__]
                main_modname = ut.get_modname_from_modpath(main_module_.__file__)
                module_ = sys.modules[main_modname]
            else:
                module_ = sys.modules[_class.__module__]
            if hasattr(module_, 'rrr'):
                # Prefer the module's own injected reload hook.
                if reload_module:
                    module_.rrr(verbose=verbose)
            else:
                if reload_module:
                    import imp
                    if verbose:
                        print('[class] reloading ' + _class.__module__ + ' with imp')
                    try:
                        imp.reload(module_)
                    except (ImportError, AttributeError):
                        print('[class] fallback reloading ' + _class.__module__ +
                              ' with imp')
                        # one last thing to try. probably used ut.import_module_from_fpath
                        # when importing this module
                        imp.load_source(module_.__name__, module_.__file__)
            # Reset class attributes
            # Fetch the freshly loaded class object and rebind its methods
            # onto this (stale) instance.
            _newclass = getattr(module_, _class.__name__)
            reload_class_methods(self, _newclass, verbose=verbose)
        # --HACK--
        # TODO: handle injected definitions
        # Give the instance a chance to rebuild state after the reload.
        if hasattr(self, '_initialize_self'):
            if verbose > 1:
                print('[class] calling _initialize_self for ' + classname)
            self._initialize_self()
        elif verbose > 1:
            print('[class] ' + classname + ' does not have an _initialize_self function')
    except Exception as ex:
        ut.printex(ex, 'Error Reloading Class', keys=[
            'modname', 'module', 'class_', 'class_list', 'self', ])
        raise
constant[
special class reloading function
This function is often injected as rrr of classes
]
import module[utool] as alias[ut]
variable[verbose] assign[=] <ast.BoolOp object at 0x7da1b2334b80>
variable[classname] assign[=] name[self].__class__.__name__
<ast.Try object at 0x7da1b2334fa0> | keyword[def] identifier[reload_class] ( identifier[self] , identifier[verbose] = keyword[True] , identifier[reload_module] = keyword[True] ):
literal[string]
keyword[import] identifier[utool] keyword[as] identifier[ut]
identifier[verbose] = identifier[verbose] keyword[or] identifier[VERBOSE_CLASS]
identifier[classname] = identifier[self] . identifier[__class__] . identifier[__name__]
keyword[try] :
identifier[modname] = identifier[self] . identifier[__class__] . identifier[__module__]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] + identifier[classname] + literal[string] + identifier[modname] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] identifier[verbose] > literal[int] :
identifier[print] ( literal[string] + identifier[classname] )
identifier[self] . identifier[_on_reload] ()
keyword[elif] identifier[verbose] > literal[int] :
identifier[print] ( literal[string] + identifier[classname] + literal[string] )
keyword[def] identifier[find_base_clases] ( identifier[_class] , identifier[find_base_clases] = keyword[None] ):
identifier[class_list] =[]
keyword[for] identifier[_baseclass] keyword[in] identifier[_class] . identifier[__bases__] :
identifier[parents] = identifier[find_base_clases] ( identifier[_baseclass] , identifier[find_base_clases] )
identifier[class_list] . identifier[extend] ( identifier[parents] )
keyword[if] identifier[_class] keyword[is] keyword[not] identifier[object] :
identifier[class_list] . identifier[append] ( identifier[_class] )
keyword[return] identifier[class_list]
identifier[head_class] = identifier[self] . identifier[__class__]
identifier[class_list] = identifier[find_base_clases] ( identifier[head_class] , identifier[find_base_clases] )
identifier[ignore] ={ identifier[HashComparable2] }
identifier[class_list] =[ identifier[_class] keyword[for] identifier[_class] keyword[in] identifier[class_list]
keyword[if] identifier[_class] keyword[not] keyword[in] identifier[ignore] ]
keyword[for] identifier[_class] keyword[in] identifier[class_list] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] + identifier[_class] . identifier[__name__] +
literal[string] + identifier[_class] . identifier[__module__] )
keyword[if] identifier[_class] . identifier[__module__] == literal[string] :
identifier[main_module_] = identifier[sys] . identifier[modules] [ identifier[_class] . identifier[__module__] ]
identifier[main_modname] = identifier[ut] . identifier[get_modname_from_modpath] ( identifier[main_module_] . identifier[__file__] )
identifier[module_] = identifier[sys] . identifier[modules] [ identifier[main_modname] ]
keyword[else] :
identifier[module_] = identifier[sys] . identifier[modules] [ identifier[_class] . identifier[__module__] ]
keyword[if] identifier[hasattr] ( identifier[module_] , literal[string] ):
keyword[if] identifier[reload_module] :
identifier[module_] . identifier[rrr] ( identifier[verbose] = identifier[verbose] )
keyword[else] :
keyword[if] identifier[reload_module] :
keyword[import] identifier[imp]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] + identifier[_class] . identifier[__module__] + literal[string] )
keyword[try] :
identifier[imp] . identifier[reload] ( identifier[module_] )
keyword[except] ( identifier[ImportError] , identifier[AttributeError] ):
identifier[print] ( literal[string] + identifier[_class] . identifier[__module__] +
literal[string] )
identifier[imp] . identifier[load_source] ( identifier[module_] . identifier[__name__] , identifier[module_] . identifier[__file__] )
identifier[_newclass] = identifier[getattr] ( identifier[module_] , identifier[_class] . identifier[__name__] )
identifier[reload_class_methods] ( identifier[self] , identifier[_newclass] , identifier[verbose] = identifier[verbose] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] identifier[verbose] > literal[int] :
identifier[print] ( literal[string] + identifier[classname] )
identifier[self] . identifier[_initialize_self] ()
keyword[elif] identifier[verbose] > literal[int] :
identifier[print] ( literal[string] + identifier[classname] + literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[ut] . identifier[printex] ( identifier[ex] , literal[string] , identifier[keys] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,])
keyword[raise] | def reload_class(self, verbose=True, reload_module=True):
"""
special class reloading function
This function is often injected as rrr of classes
"""
import utool as ut
verbose = verbose or VERBOSE_CLASS
classname = self.__class__.__name__
try:
modname = self.__class__.__module__
if verbose:
print('[class] reloading ' + classname + ' from ' + modname) # depends on [control=['if'], data=[]]
# --HACK--
if hasattr(self, '_on_reload'):
if verbose > 1:
print('[class] calling _on_reload for ' + classname) # depends on [control=['if'], data=[]]
self._on_reload() # depends on [control=['if'], data=[]]
elif verbose > 1:
print('[class] ' + classname + ' does not have an _on_reload function') # depends on [control=['if'], data=[]]
# Do for all inheriting classes
def find_base_clases(_class, find_base_clases=None):
class_list = []
for _baseclass in _class.__bases__:
parents = find_base_clases(_baseclass, find_base_clases)
class_list.extend(parents) # depends on [control=['for'], data=['_baseclass']]
if _class is not object:
class_list.append(_class) # depends on [control=['if'], data=['_class']]
return class_list
head_class = self.__class__
# Determine if parents need reloading
class_list = find_base_clases(head_class, find_base_clases)
# HACK
ignore = {HashComparable2}
class_list = [_class for _class in class_list if _class not in ignore]
for _class in class_list:
if verbose:
print('[class] reloading parent ' + _class.__name__ + ' from ' + _class.__module__) # depends on [control=['if'], data=[]]
if _class.__module__ == '__main__':
# Attempt to find the module that is the main module
# This may be very hacky and potentially break
main_module_ = sys.modules[_class.__module__]
main_modname = ut.get_modname_from_modpath(main_module_.__file__)
module_ = sys.modules[main_modname] # depends on [control=['if'], data=[]]
else:
module_ = sys.modules[_class.__module__]
if hasattr(module_, 'rrr'):
if reload_module:
module_.rrr(verbose=verbose) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif reload_module:
import imp
if verbose:
print('[class] reloading ' + _class.__module__ + ' with imp') # depends on [control=['if'], data=[]]
try:
imp.reload(module_) # depends on [control=['try'], data=[]]
except (ImportError, AttributeError):
print('[class] fallback reloading ' + _class.__module__ + ' with imp')
# one last thing to try. probably used ut.import_module_from_fpath
# when importing this module
imp.load_source(module_.__name__, module_.__file__) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Reset class attributes
_newclass = getattr(module_, _class.__name__)
reload_class_methods(self, _newclass, verbose=verbose) # depends on [control=['for'], data=['_class']]
# --HACK--
# TODO: handle injected definitions
if hasattr(self, '_initialize_self'):
if verbose > 1:
print('[class] calling _initialize_self for ' + classname) # depends on [control=['if'], data=[]]
self._initialize_self() # depends on [control=['if'], data=[]]
elif verbose > 1:
print('[class] ' + classname + ' does not have an _initialize_self function') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as ex:
ut.printex(ex, 'Error Reloading Class', keys=['modname', 'module', 'class_', 'class_list', 'self'])
raise # depends on [control=['except'], data=['ex']] |
def default_roles(*role_list):
    """Decorate task with these roles by default, but override with -R, -H"""
    def selectively_attach(func):
        """Only decorate if nothing specified on command line"""
        # Command-line roles/hosts win over the defaults.
        if env.roles or env.hosts:
            if env.hosts:
                func = hosts(*env.hosts)(func)
            if env.roles:
                func = roles(*env.roles)(func)
            return func
        return roles(*role_list)(func)
    return selectively_attach
constant[Decorate task with these roles by default, but override with -R, -H]
def function[selectively_attach, parameter[func]]:
constant[Only decorate if nothing specified on command line]
if <ast.BoolOp object at 0x7da1b09d21d0> begin[:]
return[call[call[name[roles], parameter[<ast.Starred object at 0x7da1b09d3f70>]], parameter[name[func]]]]
return[name[selectively_attach]] | keyword[def] identifier[default_roles] (* identifier[role_list] ):
literal[string]
keyword[def] identifier[selectively_attach] ( identifier[func] ):
literal[string]
keyword[if] keyword[not] identifier[env] . identifier[roles] keyword[and] keyword[not] identifier[env] . identifier[hosts] :
keyword[return] identifier[roles] (* identifier[role_list] )( identifier[func] )
keyword[else] :
keyword[if] identifier[env] . identifier[hosts] :
identifier[func] = identifier[hosts] (* identifier[env] . identifier[hosts] )( identifier[func] )
keyword[if] identifier[env] . identifier[roles] :
identifier[func] = identifier[roles] (* identifier[env] . identifier[roles] )( identifier[func] )
keyword[return] identifier[func]
keyword[return] identifier[selectively_attach] | def default_roles(*role_list):
"""Decorate task with these roles by default, but override with -R, -H"""
def selectively_attach(func):
"""Only decorate if nothing specified on command line"""
# pylint: disable=W0142
if not env.roles and (not env.hosts):
return roles(*role_list)(func) # depends on [control=['if'], data=[]]
else:
if env.hosts:
func = hosts(*env.hosts)(func) # depends on [control=['if'], data=[]]
if env.roles:
func = roles(*env.roles)(func) # depends on [control=['if'], data=[]]
return func
return selectively_attach |
def query(self, query, filters=None, columns=None, sort=None, start=0, rows=30):
    """
    Queries Solr and returns parsed results.

    query - text query to search for
    filters - dictionary of filters to apply when searching, in the form
        of { "field": "filter_value" }
    columns - columns to return, list of strings (defaults to all stored
        fields plus the relevance "score")
    sort - list of fields to sort on in format of ["field asc", "field desc", ... ]
    start - start number of first result (used in pagination)
    rows - number of rows to return (used for pagination, defaults to 30)

    Returns the parsed result object, or None if the request failed or
    the Solr response header reported a non-zero status.
    """
    if not columns:
        columns = ["*", "score"]
    fields = {"q": query,
              "json.nl": "map",  # Return facets as JSON objects
              "fl": ",".join(columns),  # Return score along with results
              "start": str(start),
              "rows": str(rows),
              "wt": "json"}
    # Use shards parameter only if there are several cores active
    if len(self.endpoints) > 1:
        fields["shards"] = self._get_shards()
    # Prepare filters. NOTE(review): filter values are interpolated
    # unescaped into the fq parameter; callers must ensure values are
    # Solr-query safe.
    if filters is not None:
        filter_list = []
        for filter_field, value in filters.items():
            filter_list.append("%s:%s" % (filter_field, value))
        fields["fq"] = " AND ".join(filter_list)
    # Append sorting parameters
    if sort is not None:
        fields["sort"] = ",".join(sort)
    # Do request to Solr server to default endpoint (other cores will be
    # queried with shard functionality)
    assert self.default_endpoint in self.endpoints
    request_url = _get_url(self.endpoints[self.default_endpoint], "select")
    results = self._send_solr_query(request_url, fields)
    if not results:
        return None
    assert "responseHeader" in results
    # A non-zero status in the response header signals a server-side error
    if results.get("responseHeader").get("status") != 0:
        logger.error("Server error while retrieving results: %s", results)
        return None
    assert "response" in results
    result_obj = self._parse_response(results)
    return result_obj
constant[
Queries Solr and returns results
query - Text query to search for
filters - dictionary of filters to apply when searching in form of { "field":"filter_value" }
columns - columns to return, list of strings
sort - list of fields to sort on in format of ["field asc", "field desc", ... ]
start - start number of first result (used in pagination)
rows - number of rows to return (used for pagination, defaults to 30)
]
if <ast.UnaryOp object at 0x7da1b0a70250> begin[:]
variable[columns] assign[=] list[[<ast.Constant object at 0x7da1b0a70d60>, <ast.Constant object at 0x7da1b0a71b70>]]
variable[fields] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a70100>, <ast.Constant object at 0x7da1b0a714b0>, <ast.Constant object at 0x7da1b0a71480>, <ast.Constant object at 0x7da1b0a71e10>, <ast.Constant object at 0x7da1b0a71fc0>, <ast.Constant object at 0x7da1b0a70160>], [<ast.Name object at 0x7da1b0a71e40>, <ast.Constant object at 0x7da1b0a72110>, <ast.Call object at 0x7da1b0a71e70>, <ast.Call object at 0x7da1b0a720b0>, <ast.Call object at 0x7da1b0a71f00>, <ast.Constant object at 0x7da1b0a71cf0>]]
if compare[call[name[len], parameter[name[self].endpoints]] greater[>] constant[1]] begin[:]
call[name[fields]][constant[shards]] assign[=] call[name[self]._get_shards, parameter[]]
if <ast.UnaryOp object at 0x7da1b0a703d0> begin[:]
variable[filter_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0a718a0>, <ast.Name object at 0x7da1b0a71540>]]] in starred[call[name[filters].items, parameter[]]] begin[:]
call[name[filter_list].append, parameter[binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0a70dc0>, <ast.Name object at 0x7da1b0a70f10>]]]]]
call[name[fields]][constant[fq]] assign[=] call[constant[ AND ].join, parameter[name[filter_list]]]
if <ast.UnaryOp object at 0x7da1b0b35840> begin[:]
call[name[fields]][constant[sort]] assign[=] call[constant[,].join, parameter[name[sort]]]
assert[compare[name[self].default_endpoint in name[self].endpoints]]
variable[request_url] assign[=] call[name[_get_url], parameter[call[name[self].endpoints][name[self].default_endpoint], constant[select]]]
variable[results] assign[=] call[name[self]._send_solr_query, parameter[name[request_url], name[fields]]]
if <ast.UnaryOp object at 0x7da1b0b376d0> begin[:]
return[constant[None]]
assert[compare[constant[responseHeader] in name[results]]]
if <ast.UnaryOp object at 0x7da1b0b34850> begin[:]
call[name[logger].error, parameter[constant[Server error while retrieving results: %s], name[results]]]
return[constant[None]]
assert[compare[constant[response] in name[results]]]
variable[result_obj] assign[=] call[name[self]._parse_response, parameter[name[results]]]
return[name[result_obj]] | keyword[def] identifier[query] ( identifier[self] , identifier[query] , identifier[filters] = keyword[None] , identifier[columns] = keyword[None] , identifier[sort] = keyword[None] , identifier[start] = literal[int] , identifier[rows] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[columns] :
identifier[columns] =[ literal[string] , literal[string] ]
identifier[fields] ={ literal[string] : identifier[query] ,
literal[string] : literal[string] ,
literal[string] : literal[string] . identifier[join] ( identifier[columns] ),
literal[string] : identifier[str] ( identifier[start] ),
literal[string] : identifier[str] ( identifier[rows] ),
literal[string] : literal[string] }
keyword[if] identifier[len] ( identifier[self] . identifier[endpoints] )> literal[int] :
identifier[fields] [ literal[string] ]= identifier[self] . identifier[_get_shards] ()
keyword[if] keyword[not] identifier[filters] keyword[is] keyword[None] :
identifier[filter_list] =[]
keyword[for] identifier[filter_field] , identifier[value] keyword[in] identifier[filters] . identifier[items] ():
identifier[filter_list] . identifier[append] ( literal[string] %( identifier[filter_field] , identifier[value] ))
identifier[fields] [ literal[string] ]= literal[string] . identifier[join] ( identifier[filter_list] )
keyword[if] keyword[not] identifier[sort] keyword[is] keyword[None] :
identifier[fields] [ literal[string] ]= literal[string] . identifier[join] ( identifier[sort] )
keyword[assert] identifier[self] . identifier[default_endpoint] keyword[in] identifier[self] . identifier[endpoints]
identifier[request_url] = identifier[_get_url] ( identifier[self] . identifier[endpoints] [ identifier[self] . identifier[default_endpoint] ], literal[string] )
identifier[results] = identifier[self] . identifier[_send_solr_query] ( identifier[request_url] , identifier[fields] )
keyword[if] keyword[not] identifier[results] :
keyword[return] keyword[None]
keyword[assert] literal[string] keyword[in] identifier[results]
keyword[if] keyword[not] identifier[results] . identifier[get] ( literal[string] ). identifier[get] ( literal[string] )== literal[int] :
identifier[logger] . identifier[error] ( literal[string] , identifier[results] )
keyword[return] keyword[None]
keyword[assert] literal[string] keyword[in] identifier[results]
identifier[result_obj] = identifier[self] . identifier[_parse_response] ( identifier[results] )
keyword[return] identifier[result_obj] | def query(self, query, filters=None, columns=None, sort=None, start=0, rows=30):
"""
Queries Solr and returns results
query - Text query to search for
filters - dictionary of filters to apply when searching in form of { "field":"filter_value" }
columns - columns to return, list of strings
sort - list of fields to sort on in format of ["field asc", "field desc", ... ]
start - start number of first result (used in pagination)
rows - number of rows to return (used for pagination, defaults to 30)
"""
if not columns:
columns = ['*', 'score'] # depends on [control=['if'], data=[]] # Return facets as JSON objects
# Return score along with results
fields = {'q': query, 'json.nl': 'map', 'fl': ','.join(columns), 'start': str(start), 'rows': str(rows), 'wt': 'json'}
# Use shards parameter only if there are several cores active
if len(self.endpoints) > 1:
fields['shards'] = self._get_shards() # depends on [control=['if'], data=[]]
# Prepare filters
if not filters is None:
filter_list = []
for (filter_field, value) in filters.items():
filter_list.append('%s:%s' % (filter_field, value)) # depends on [control=['for'], data=[]]
fields['fq'] = ' AND '.join(filter_list) # depends on [control=['if'], data=[]]
# Append sorting parameters
if not sort is None:
fields['sort'] = ','.join(sort) # depends on [control=['if'], data=[]]
# Do request to Solr server to default endpoint (other cores will be queried with shard functionality)
assert self.default_endpoint in self.endpoints
request_url = _get_url(self.endpoints[self.default_endpoint], 'select')
results = self._send_solr_query(request_url, fields)
if not results:
return None # depends on [control=['if'], data=[]]
assert 'responseHeader' in results
# Check for response status
if not results.get('responseHeader').get('status') == 0:
logger.error('Server error while retrieving results: %s', results)
return None # depends on [control=['if'], data=[]]
assert 'response' in results
result_obj = self._parse_response(results)
return result_obj |
def h_from_V(self, V, method='spline'):
    r'''Method to calculate the height of liquid in a fully defined tank
    given a specified volume of liquid in it `V`. `V` must be under the
    maximum volume. If the method is 'spline', and the interpolation table
    is not yet defined, creates it by calling the method set_table. If the
    method is 'chebyshev', and the coefficients have not yet been
    calculated, they are created by calling `set_chebyshev_approximators`.

    Parameters
    ----------
    V : float
        Volume of liquid in the tank up to the desired height, [m^3]
    method : str
        One of 'spline', 'chebyshev', or 'brenth'

    Returns
    -------
    h : float
        Height of liquid at which the volume is as desired, [m]

    Raises
    ------
    Exception
        If `method` is not one of the supported method names.
    '''
    if method == 'spline':
        # Lazily build the interpolation table on first use
        if not self.table:
            self.set_table()
        return float(self.interp_h_from_V(V))
    elif method == 'chebyshev':
        # Lazily fit the Chebyshev approximators on first use
        if not self.chebyshev:
            self.set_chebyshev_approximators()
        return self.h_from_V_cheb(V)
    elif method == 'brenth':
        # Numerically invert V_from_h; the root is bracketed between a
        # full tank (h_max) and an empty tank (0).
        to_solve = lambda h: self.V_from_h(h, method='full') - V
        return brenth(to_solve, self.h_max, 0)
    else:
        # Bug fix: the message previously listed 'full' (a V_from_h
        # method) instead of 'spline' as an allowable method here.
        raise Exception("Allowable methods are 'spline', 'chebyshev', "
                        "or 'brenth'.")
constant[Method to calculate the height of liquid in a fully defined tank
given a specified volume of liquid in it `V`. `V` must be under the
maximum volume. If the method is 'spline', and the interpolation table
is not yet defined, creates it by calling the method set_table. If the
method is 'chebyshev', and the coefficients have not yet been
calculated, they are created by calling `set_chebyshev_approximators`.
Parameters
----------
V : float
Volume of liquid in the tank up to the desired height, [m^3]
method : str
One of 'spline', 'chebyshev', or 'brenth'
Returns
-------
h : float
Height of liquid at which the volume is as desired, [m]
]
if compare[name[method] equal[==] constant[spline]] begin[:]
if <ast.UnaryOp object at 0x7da1b12cbc10> begin[:]
call[name[self].set_table, parameter[]]
return[call[name[float], parameter[call[name[self].interp_h_from_V, parameter[name[V]]]]]] | keyword[def] identifier[h_from_V] ( identifier[self] , identifier[V] , identifier[method] = literal[string] ):
literal[string]
keyword[if] identifier[method] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[table] :
identifier[self] . identifier[set_table] ()
keyword[return] identifier[float] ( identifier[self] . identifier[interp_h_from_V] ( identifier[V] ))
keyword[elif] identifier[method] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[chebyshev] :
identifier[self] . identifier[set_chebyshev_approximators] ()
keyword[return] identifier[self] . identifier[h_from_V_cheb] ( identifier[V] )
keyword[elif] identifier[method] == literal[string] :
identifier[to_solve] = keyword[lambda] identifier[h] : identifier[self] . identifier[V_from_h] ( identifier[h] , identifier[method] = literal[string] )- identifier[V]
keyword[return] identifier[brenth] ( identifier[to_solve] , identifier[self] . identifier[h_max] , literal[int] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] ) | def h_from_V(self, V, method='spline'):
"""Method to calculate the height of liquid in a fully defined tank
given a specified volume of liquid in it `V`. `V` must be under the
maximum volume. If the method is 'spline', and the interpolation table
is not yet defined, creates it by calling the method set_table. If the
method is 'chebyshev', and the coefficients have not yet been
calculated, they are created by calling `set_chebyshev_approximators`.
Parameters
----------
V : float
Volume of liquid in the tank up to the desired height, [m^3]
method : str
One of 'spline', 'chebyshev', or 'brenth'
Returns
-------
h : float
Height of liquid at which the volume is as desired, [m]
"""
if method == 'spline':
if not self.table:
self.set_table() # depends on [control=['if'], data=[]]
return float(self.interp_h_from_V(V)) # depends on [control=['if'], data=[]]
elif method == 'chebyshev':
if not self.chebyshev:
self.set_chebyshev_approximators() # depends on [control=['if'], data=[]]
return self.h_from_V_cheb(V) # depends on [control=['if'], data=[]]
elif method == 'brenth':
to_solve = lambda h: self.V_from_h(h, method='full') - V
return brenth(to_solve, self.h_max, 0) # depends on [control=['if'], data=[]]
else:
raise Exception("Allowable methods are 'full' or 'chebyshev', or 'brenth'.") |
async def get_entity(self, entity):
    """
    Turns the given entity into a valid Telegram :tl:`User`, :tl:`Chat`
    or :tl:`Channel`. A list or other iterable of entities may also be
    passed, in which case they are fetched from the network as
    efficiently as possible (batched by kind).

    entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
        A username string is **resolved through the API on every call**;
        resolving usernames is expensive and will trigger flood waits
        after roughly 50 resolutions in a short time. For a *cached*
        username, call `get_input_entity(username) <get_input_entity>`
        first (which uses the cache) and pass its result here. The same
        caution applies to invite links; prefer the ID. Phone numbers
        (of contacts), exact names, integer IDs and :tl:`Peer` objects
        go through `get_input_entity` first and therefore need the
        entity to already be cached, unless an :tl:`InputPeer` is given.

        ``TypeError`` is raised for unsupported types and ``ValueError``
        if the entity cannot be found.

    Returns:
        The :tl:`User`, :tl:`Chat` or :tl:`Channel` matching the input;
        a list when more than one entity was given.
    """
    is_single = not utils.is_list_like(entity)
    if is_single:
        entity = (entity,)

    # Normalise every request into either a raw username string (to be
    # resolved later) or a cached input peer.
    peers = []
    for item in entity:
        if isinstance(item, str):
            peers.append(item)
        else:
            peers.append(await self.get_input_entity(item))

    # Bucket the input peers by kind so each kind can be fetched with
    # the fewest possible API calls.
    user_peers = [p for p in peers
                  if isinstance(p, (types.InputPeerUser, types.InputPeerSelf))]
    chat_ids = [p.chat_id for p in peers
                if isinstance(p, types.InputPeerChat)]
    channel_peers = [p for p in peers
                     if isinstance(p, types.InputPeerChannel)]

    # GetUsersRequest accepts at most 200 users per call, so batch.
    users = []
    for offset in range(0, len(user_peers), 200):
        batch = user_peers[offset:offset + 200]
        users.extend(await self(functions.users.GetUsersRequest(batch)))

    chats = chat_ids
    if chat_ids:  # TODO Handle chats slice?
        chats = (await self(
            functions.messages.GetChatsRequest(chat_ids))).chats

    channels = channel_peers
    if channel_peers:
        channels = (await self(
            functions.channels.GetChannelsRequest(channel_peers))).chats

    # Index every fetched user, chat and channel by its marked peer ID.
    by_id = {}
    for fetched in itertools.chain(users, chats, channels):
        by_id[utils.get_peer_id(fetched)] = fetched

    # Saved usernames could be looked up before hitting the network,
    # which would cut down on ResolveUsername calls, but doing so would
    # miss username changes.
    result = []
    for item in peers:
        if isinstance(item, str):
            result.append(await self._get_entity_from_string(item))
        elif isinstance(item, types.InputPeerSelf):
            result.append(next(
                u for u in by_id.values()
                if isinstance(u, types.User) and u.is_self
            ))
        else:
            result.append(by_id[utils.get_peer_id(item)])

    return result[0] if is_single else result
literal[string]
identifier[single] = keyword[not] identifier[utils] . identifier[is_list_like] ( identifier[entity] )
keyword[if] identifier[single] :
identifier[entity] =( identifier[entity] ,)
identifier[inputs] =[]
keyword[for] identifier[x] keyword[in] identifier[entity] :
keyword[if] identifier[isinstance] ( identifier[x] , identifier[str] ):
identifier[inputs] . identifier[append] ( identifier[x] )
keyword[else] :
identifier[inputs] . identifier[append] ( keyword[await] identifier[self] . identifier[get_input_entity] ( identifier[x] ))
identifier[users] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[inputs]
keyword[if] identifier[isinstance] ( identifier[x] ,( identifier[types] . identifier[InputPeerUser] , identifier[types] . identifier[InputPeerSelf] ))]
identifier[chats] =[ identifier[x] . identifier[chat_id] keyword[for] identifier[x] keyword[in] identifier[inputs]
keyword[if] identifier[isinstance] ( identifier[x] , identifier[types] . identifier[InputPeerChat] )]
identifier[channels] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[inputs]
keyword[if] identifier[isinstance] ( identifier[x] , identifier[types] . identifier[InputPeerChannel] )]
keyword[if] identifier[users] :
identifier[tmp] =[]
keyword[while] identifier[users] :
identifier[curr] , identifier[users] = identifier[users] [: literal[int] ], identifier[users] [ literal[int] :]
identifier[tmp] . identifier[extend] ( keyword[await] identifier[self] ( identifier[functions] . identifier[users] . identifier[GetUsersRequest] ( identifier[curr] )))
identifier[users] = identifier[tmp]
keyword[if] identifier[chats] :
identifier[chats] =( keyword[await] identifier[self] (
identifier[functions] . identifier[messages] . identifier[GetChatsRequest] ( identifier[chats] ))). identifier[chats]
keyword[if] identifier[channels] :
identifier[channels] =( keyword[await] identifier[self] (
identifier[functions] . identifier[channels] . identifier[GetChannelsRequest] ( identifier[channels] ))). identifier[chats]
identifier[id_entity] ={
identifier[utils] . identifier[get_peer_id] ( identifier[x] ): identifier[x]
keyword[for] identifier[x] keyword[in] identifier[itertools] . identifier[chain] ( identifier[users] , identifier[chats] , identifier[channels] )
}
identifier[result] =[]
keyword[for] identifier[x] keyword[in] identifier[inputs] :
keyword[if] identifier[isinstance] ( identifier[x] , identifier[str] ):
identifier[result] . identifier[append] ( keyword[await] identifier[self] . identifier[_get_entity_from_string] ( identifier[x] ))
keyword[elif] keyword[not] identifier[isinstance] ( identifier[x] , identifier[types] . identifier[InputPeerSelf] ):
identifier[result] . identifier[append] ( identifier[id_entity] [ identifier[utils] . identifier[get_peer_id] ( identifier[x] )])
keyword[else] :
identifier[result] . identifier[append] ( identifier[next] (
identifier[u] keyword[for] identifier[u] keyword[in] identifier[id_entity] . identifier[values] ()
keyword[if] identifier[isinstance] ( identifier[u] , identifier[types] . identifier[User] ) keyword[and] identifier[u] . identifier[is_self]
))
keyword[return] identifier[result] [ literal[int] ] keyword[if] identifier[single] keyword[else] identifier[result] | async def get_entity(self, entity):
"""
Turns the given entity into a valid Telegram :tl:`User`, :tl:`Chat`
or :tl:`Channel`. You can also pass a list or iterable of entities,
and they will be efficiently fetched from the network.
entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
If a username is given, **the username will be resolved** making
an API call every time. Resolving usernames is an expensive
operation and will start hitting flood waits around 50 usernames
in a short period of time.
If you want to get the entity for a *cached* username, you should
first `get_input_entity(username) <get_input_entity>` which will
use the cache), and then use `get_entity` with the result of the
previous call.
Similar limits apply to invite links, and you should use their
ID instead.
Using phone numbers (from people in your contact list), exact
names, integer IDs or :tl:`Peer` rely on a `get_input_entity`
first, which in turn needs the entity to be in cache, unless
a :tl:`InputPeer` was passed.
Unsupported types will raise ``TypeError``.
If the entity can't be found, ``ValueError`` will be raised.
Returns:
:tl:`User`, :tl:`Chat` or :tl:`Channel` corresponding to the
input entity. A list will be returned if more than one was given.
"""
single = not utils.is_list_like(entity)
if single:
entity = (entity,) # depends on [control=['if'], data=[]]
# Group input entities by string (resolve username),
# input users (get users), input chat (get chats) and
# input channels (get channels) to get the most entities
# in the less amount of calls possible.
inputs = []
for x in entity:
if isinstance(x, str):
inputs.append(x) # depends on [control=['if'], data=[]]
else:
inputs.append(await self.get_input_entity(x)) # depends on [control=['for'], data=['x']]
users = [x for x in inputs if isinstance(x, (types.InputPeerUser, types.InputPeerSelf))]
chats = [x.chat_id for x in inputs if isinstance(x, types.InputPeerChat)]
channels = [x for x in inputs if isinstance(x, types.InputPeerChannel)]
if users:
# GetUsersRequest has a limit of 200 per call
tmp = []
while users:
(curr, users) = (users[:200], users[200:])
tmp.extend(await self(functions.users.GetUsersRequest(curr))) # depends on [control=['while'], data=[]]
users = tmp # depends on [control=['if'], data=[]]
if chats: # TODO Handle chats slice?
chats = (await self(functions.messages.GetChatsRequest(chats))).chats # depends on [control=['if'], data=[]]
if channels:
channels = (await self(functions.channels.GetChannelsRequest(channels))).chats # depends on [control=['if'], data=[]]
# Merge users, chats and channels into a single dictionary
id_entity = {utils.get_peer_id(x): x for x in itertools.chain(users, chats, channels)}
# We could check saved usernames and put them into the users,
# chats and channels list from before. While this would reduce
# the amount of ResolveUsername calls, it would fail to catch
# username changes.
result = []
for x in inputs:
if isinstance(x, str):
result.append(await self._get_entity_from_string(x)) # depends on [control=['if'], data=[]]
elif not isinstance(x, types.InputPeerSelf):
result.append(id_entity[utils.get_peer_id(x)]) # depends on [control=['if'], data=[]]
else:
result.append(next((u for u in id_entity.values() if isinstance(u, types.User) and u.is_self))) # depends on [control=['for'], data=['x']]
return result[0] if single else result |
def _validate_duplication(self, subj_and_pred, cl):
    """Return a warning if the member `pred` was already seen on `subj`.

    subj_and_pred is a (subject, predicate) pair and cl is the class the
    member belongs to. Returns a ValidationWarning when the pair is
    already in ``self.checked_attributes``; otherwise returns None
    implicitly (no duplication found).
    """
    subj, pred = subj_and_pred
    # Lazy %-style logging args: the message is only formatted when the
    # INFO level is actually enabled.
    log.info("Validating duplication of member %s", pred)
    if (subj, pred) in self.checked_attributes:
        err = self.err("{0} - duplicated member of {1}",
                       self._field_name_from_uri(pred),
                       self._field_name_from_uri(cl))
        return ValidationWarning(ValidationResult.WARNING, err['err'],
                                 err['line'], err['num'])
constant[returns error if we've already seen the member `pred` on `subj`]
<ast.Tuple object at 0x7da1b24ff310> assign[=] name[subj_and_pred]
call[name[log].info, parameter[binary_operation[constant[Validating duplication of member %s] <ast.Mod object at 0x7da2590d6920> name[pred]]]]
if compare[tuple[[<ast.Name object at 0x7da1b25ed090>, <ast.Name object at 0x7da1b25ecee0>]] in name[self].checked_attributes] begin[:]
variable[err] assign[=] call[name[self].err, parameter[constant[{0} - duplicated member of {1}], call[name[self]._field_name_from_uri, parameter[name[pred]]], call[name[self]._field_name_from_uri, parameter[name[cl]]]]]
return[call[name[ValidationWarning], parameter[name[ValidationResult].WARNING, call[name[err]][constant[err]], call[name[err]][constant[line]], call[name[err]][constant[num]]]]] | keyword[def] identifier[_validate_duplication] ( identifier[self] , identifier[subj_and_pred] , identifier[cl] ):
literal[string]
identifier[subj] , identifier[pred] = identifier[subj_and_pred]
identifier[log] . identifier[info] ( literal[string] % identifier[pred] )
keyword[if] ( identifier[subj] , identifier[pred] ) keyword[in] identifier[self] . identifier[checked_attributes] :
identifier[err] = identifier[self] . identifier[err] ( literal[string] ,
identifier[self] . identifier[_field_name_from_uri] ( identifier[pred] ),
identifier[self] . identifier[_field_name_from_uri] ( identifier[cl] ))
keyword[return] identifier[ValidationWarning] ( identifier[ValidationResult] . identifier[WARNING] , identifier[err] [ literal[string] ],
identifier[err] [ literal[string] ], identifier[err] [ literal[string] ]) | def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
(subj, pred) = subj_and_pred
log.info('Validating duplication of member %s' % pred)
if (subj, pred) in self.checked_attributes:
err = self.err('{0} - duplicated member of {1}', self._field_name_from_uri(pred), self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'], err['line'], err['num']) # depends on [control=['if'], data=[]] |
def log_the_survey_settings(
        log,
        pathToYamlFile):
    """
    *Create a MD log of the survey settings*

    **Key Arguments:**
        - ``log`` -- logger
        - ``pathToYamlFile`` -- yaml results file

    **Return:**
        - ``settings_log`` -- a markdown-formatted summary of the survey
          settings and simulated detection statistics
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    import yaml
    ## LOCAL APPLICATION ##
    from datetime import datetime, date, time
    # Timestamp used in the report title
    now = datetime.now()
    now = now.strftime("%Y%m%dt%H%M%S")
    ################ >ACTION(S) ################
    # IMPORT THE SIMULATION SETTINGS
    # NOTE(review): file() and iteritems() below are Python-2 only; this
    # function cannot run unmodified on Python 3.
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; consider yaml.safe_load.
    fileName = pathToYamlFile
    stream = file(fileName, 'r')
    yamlContent = yaml.load(stream)
    snSurveyDiscoveryTimes = yamlContent['Discoveries Relative to Survey Year']
    lightCurveDiscoveryTimes = yamlContent[
        'Discoveries Relative to Peak Magnitudes']
    snTypes = yamlContent['SN Types']
    redshifts = yamlContent['Redshifts']
    cadenceDictionary = yamlContent['Cadence Dictionary']
    peakAppMagList = yamlContent['Peak Apparent Magnitudes']
    snCampaignLengthList = yamlContent['Campaign Length']
    programSettings = yamlContent["Program Settings"]
    limitingMags = yamlContent["Limiting Magnitudes"]
    # for key in limitingMags:
    #     log.debug('filter: %s, limit: %s' % (key, limitingMags[key]))
    sampleNumber = yamlContent["Simulation Sample"]
    peakMagnitudeDistributions = yamlContent[
        "SN Absolute Peak-Magnitude Distributions"]
    #log.debug('snDistributions[magnitude] %s' % (snDistributions["magnitude"],))
    #log.debug('snDistributions[sigma] %s' % (snDistributions["sigma"],))
    relativeRatesSet = yamlContent["Relative Rate Set to Use"]
    relativeSNRates = yamlContent["Relative SN Rates"][relativeRatesSet]
    #log.debug('relativeSNRates %s' % (relativeSNRates,))
    lowerRedshiftLimit = yamlContent["Lower Redshift Limit"]
    upperRedshiftLimit = yamlContent["Upper Redshift Limit"]
    #log.debug('upperRedshiftLimit %s' % (upperRedshiftLimit,))
    redshiftResolution = yamlContent["Redshift Resolution"]
    extinctionSettings = yamlContent["Extinctions"]
    extinctionType = extinctionSettings["constant or random"]
    hostExtinctionDistributions = extinctionSettings["host"]
    #log.debug('hostExtinctionDistributions %s' % (hostExtinctionDistributions,))
    galacticExtinctionDistribution = extinctionSettings["galactic"]
    #log.debug('galacticExtinctionDistribution %s' % (galacticExtinctionDistribution,))
    surveyCadenceSettings = yamlContent["Survey Cadence"]
    #log.debug('surveyCadenceSettings %s' % (surveyCadenceSettings,))
    snLightCurves = yamlContent["Lightcurves"]
    #log.debug('snlightCurves %s' % (snlightCurves,))
    surveyArea = yamlContent["Sky Area of the Survey (square degrees)"]
    extraSurveyConstraints = yamlContent["Extra Survey Constraints"]
    weatherLossFraction = surveyCadenceSettings[
        "Fraction of Year Lost to Weather etc"]
    observableFraction = surveyCadenceSettings["Observable Fraction of Year"]
    extinctionConstant = extinctionSettings["constant E(b-v)"]
    CCSNRateFraction = yamlContent[
        "CCSN Progenitor Population Fraction of IMF"]
    transientToCCSNRateFraction = yamlContent["Transient to CCSN Ratio"]
    restFrameFilter = yamlContent["Rest Frame Filter for K-corrections"]
    peakMagLimit = extraSurveyConstraints["Faint-Limit of Peak Magnitude"]
    campaignLengthLimit = extraSurveyConstraints[
        "Observable for at least ? number of days"]
    # CALCULATE THE SURVEY VOLUME
    # Volume of the spherical shell between the lower and upper redshift
    # limits, assuming a flat LCDM cosmology (WM=0.3, WV=0.7, H0=70).
    # NOTE(review): dl_mpc is the *luminosity* distance returned by the
    # converter; confirm the volume should not use comoving distance.
    c = converter(log=log)
    dists = c.redshift_to_distance(
        z=float(upperRedshiftLimit),
        WM=0.3,
        WV=0.7,
        H0=70.0
    )
    dl_mpc = dists["dl_mpc"]
    sphereOuter = (4. / 3.) * math.pi * dl_mpc**3
    if float(lowerRedshiftLimit) == 0.:
        surveyVolume = sphereOuter
    else:
        # Subtract the inner sphere below the lower redshift limit
        dists = c.redshift_to_distance(
            z=float(lowerRedshiftLimit),
            WM=0.3,
            WV=0.7,
            H0=70.0
        )
        dl_mpc = dists["dl_mpc"]
        sphereInner = (4. / 3.) * math.pi * dl_mpc**3
        surveyVolume = sphereOuter - sphereInner
    # NOW SCALE TO SKY-AREA
    # 41253 square degrees is the area of the full sky; then Mpc^3 -> Gpc^3
    surveyVolume = surveyVolume * surveyArea / 41253
    surveyVolume = surveyVolume / (1000.)**3
    # Format with a precision appropriate to the magnitude of the value
    if surveyVolume < 1:
        surveyVolume = "%(surveyVolume)0.4f" % locals()
    elif surveyVolume < 100:
        surveyVolume = "%(surveyVolume)0.2f" % locals()
    else:
        surveyVolume = "%(surveyVolume)0.1f" % locals()
    # CALCULATE OVERALL DETECTION FRACTIONS
    discoveryFraction, tooFaintFraction, shortCampaignFraction = calculate_fraction_of_sn_discovered(
        log, surveyCadenceSettings, snSurveyDiscoveryTimes, redshifts, peakAppMagList, snCampaignLengthList, extraSurveyConstraints, lowerRedshiftLimit, upperRedshiftLimit)
    # Convert the discovery fraction to a percentage for the report
    discoveryFraction = discoveryFraction * 100.
    if discoveryFraction < 1:
        discoveryFraction = "%(discoveryFraction)0.4f" % locals()
    elif discoveryFraction < 10:
        discoveryFraction = "%(discoveryFraction)0.2f" % locals()
    else:
        discoveryFraction = "%(discoveryFraction)0.1f" % locals()
    # log.info('yamlContent %s' % (yamlContent,))
    stream.close()
    # Build the markdown report: header + limiting-magnitude table
    settings_log = """
# SN Survey Simulation Results - %s
The *%s*-band liming magnitudes of this simulated survey are:
| Filter | Magnitude |
|:---|:----|
""" % (now, restFrameFilter)
    for k, v in limitingMags.iteritems():
        settings_log += """| %s | %s |\n""" % (k, v,)
    # Survey summary paragraph + per-type rate/magnitude table header
    settings_log += """
A total of **%s** transients where simulated in the survey, within a **redshift-range of %s-%s** and **survey footprint of %s deg<sup>2</sup>** (a **total volume of %s Gpc<sup>-3</sup> yr<sup>-1</sup>**). **%s%% of these simulated transients had solid 'discoveries'**. A constant galactic extinction of `E(B-V) = %s` is used, it's assumed that any given field in the sky is visible for %s of the survey year and the typical fraction of survey time lost to weather of %s is accounted for. Here are the relative rates and peak magnitude distributions of the SN used in the survey:
| SN Type | Relative Rate | Peak Magnitude | Sigma Peak |
|:---|:---|:---|:---|
""" % (sampleNumber, lowerRedshiftLimit, upperRedshiftLimit, surveyArea, surveyVolume, discoveryFraction, extinctionConstant, observableFraction, weatherLossFraction,)
    for k, v in peakMagnitudeDistributions['magnitude'].iteritems():
        settings_log += """| %s | %s | %s | %s |\n""" % (
            k, relativeSNRates[k], v, peakMagnitudeDistributions['sigma'][k])
    # Closing section: extra survey constraints and rate assumptions
    settings_log += """
If a transient is detected by the simulated survey, extra constraints are placed upon the detected object to secure positive identification as the object.
1. The peak apparent magnitude of the object must be brighter than **%s mag**
2. The object must be detectable for long enough to complete a follow-up campaign of longer than **%s days** within 1 survey year with any single filter.
The transient rate for the survey volume is estimated by assuming a rate of **%s** times that of the CCSN rate (itself a fraction of **%s** of the total SFR).
""" % (peakMagLimit, campaignLengthLimit, transientToCCSNRateFraction, CCSNRateFraction)
    return settings_log
constant[
*Create a MD log of the survey settings*
**Key Arguments:**
- ``log`` -- logger
- ``pathToYamlFile`` -- yaml results file
**Return:**
- None
]
import module[yaml]
from relative_module[datetime] import module[datetime], module[date], module[time]
variable[now] assign[=] call[name[datetime].now, parameter[]]
variable[now] assign[=] call[name[now].strftime, parameter[constant[%Y%m%dt%H%M%S]]]
variable[fileName] assign[=] name[pathToYamlFile]
variable[stream] assign[=] call[name[file], parameter[name[fileName], constant[r]]]
variable[yamlContent] assign[=] call[name[yaml].load, parameter[name[stream]]]
variable[snSurveyDiscoveryTimes] assign[=] call[name[yamlContent]][constant[Discoveries Relative to Survey Year]]
variable[lightCurveDiscoveryTimes] assign[=] call[name[yamlContent]][constant[Discoveries Relative to Peak Magnitudes]]
variable[snTypes] assign[=] call[name[yamlContent]][constant[SN Types]]
variable[redshifts] assign[=] call[name[yamlContent]][constant[Redshifts]]
variable[cadenceDictionary] assign[=] call[name[yamlContent]][constant[Cadence Dictionary]]
variable[peakAppMagList] assign[=] call[name[yamlContent]][constant[Peak Apparent Magnitudes]]
variable[snCampaignLengthList] assign[=] call[name[yamlContent]][constant[Campaign Length]]
variable[programSettings] assign[=] call[name[yamlContent]][constant[Program Settings]]
variable[limitingMags] assign[=] call[name[yamlContent]][constant[Limiting Magnitudes]]
variable[sampleNumber] assign[=] call[name[yamlContent]][constant[Simulation Sample]]
variable[peakMagnitudeDistributions] assign[=] call[name[yamlContent]][constant[SN Absolute Peak-Magnitude Distributions]]
variable[relativeRatesSet] assign[=] call[name[yamlContent]][constant[Relative Rate Set to Use]]
variable[relativeSNRates] assign[=] call[call[name[yamlContent]][constant[Relative SN Rates]]][name[relativeRatesSet]]
variable[lowerRedshiftLimit] assign[=] call[name[yamlContent]][constant[Lower Redshift Limit]]
variable[upperRedshiftLimit] assign[=] call[name[yamlContent]][constant[Upper Redshift Limit]]
variable[redshiftResolution] assign[=] call[name[yamlContent]][constant[Redshift Resolution]]
variable[extinctionSettings] assign[=] call[name[yamlContent]][constant[Extinctions]]
variable[extinctionType] assign[=] call[name[extinctionSettings]][constant[constant or random]]
variable[hostExtinctionDistributions] assign[=] call[name[extinctionSettings]][constant[host]]
variable[galacticExtinctionDistribution] assign[=] call[name[extinctionSettings]][constant[galactic]]
variable[surveyCadenceSettings] assign[=] call[name[yamlContent]][constant[Survey Cadence]]
variable[snLightCurves] assign[=] call[name[yamlContent]][constant[Lightcurves]]
variable[surveyArea] assign[=] call[name[yamlContent]][constant[Sky Area of the Survey (square degrees)]]
variable[extraSurveyConstraints] assign[=] call[name[yamlContent]][constant[Extra Survey Constraints]]
variable[weatherLossFraction] assign[=] call[name[surveyCadenceSettings]][constant[Fraction of Year Lost to Weather etc]]
variable[observableFraction] assign[=] call[name[surveyCadenceSettings]][constant[Observable Fraction of Year]]
variable[extinctionConstant] assign[=] call[name[extinctionSettings]][constant[constant E(b-v)]]
variable[CCSNRateFraction] assign[=] call[name[yamlContent]][constant[CCSN Progenitor Population Fraction of IMF]]
variable[transientToCCSNRateFraction] assign[=] call[name[yamlContent]][constant[Transient to CCSN Ratio]]
variable[restFrameFilter] assign[=] call[name[yamlContent]][constant[Rest Frame Filter for K-corrections]]
variable[peakMagLimit] assign[=] call[name[extraSurveyConstraints]][constant[Faint-Limit of Peak Magnitude]]
variable[campaignLengthLimit] assign[=] call[name[extraSurveyConstraints]][constant[Observable for at least ? number of days]]
variable[c] assign[=] call[name[converter], parameter[]]
variable[dists] assign[=] call[name[c].redshift_to_distance, parameter[]]
variable[dl_mpc] assign[=] call[name[dists]][constant[dl_mpc]]
variable[sphereOuter] assign[=] binary_operation[binary_operation[binary_operation[constant[4.0] / constant[3.0]] * name[math].pi] * binary_operation[name[dl_mpc] ** constant[3]]]
if compare[call[name[float], parameter[name[lowerRedshiftLimit]]] equal[==] constant[0.0]] begin[:]
variable[surveyVolume] assign[=] name[sphereOuter]
variable[surveyVolume] assign[=] binary_operation[binary_operation[name[surveyVolume] * name[surveyArea]] / constant[41253]]
variable[surveyVolume] assign[=] binary_operation[name[surveyVolume] / binary_operation[constant[1000.0] ** constant[3]]]
if compare[name[surveyVolume] less[<] constant[1]] begin[:]
variable[surveyVolume] assign[=] binary_operation[constant[%(surveyVolume)0.4f] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
<ast.Tuple object at 0x7da1b13533a0> assign[=] call[name[calculate_fraction_of_sn_discovered], parameter[name[log], name[surveyCadenceSettings], name[snSurveyDiscoveryTimes], name[redshifts], name[peakAppMagList], name[snCampaignLengthList], name[extraSurveyConstraints], name[lowerRedshiftLimit], name[upperRedshiftLimit]]]
variable[discoveryFraction] assign[=] binary_operation[name[discoveryFraction] * constant[100.0]]
if compare[name[discoveryFraction] less[<] constant[1]] begin[:]
variable[discoveryFraction] assign[=] binary_operation[constant[%(discoveryFraction)0.4f] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
call[name[stream].close, parameter[]]
variable[settings_log] assign[=] binary_operation[constant[
# SN Survey Simulation Results - %s
The *%s*-band liming magnitudes of this simulated survey are:
| Filter | Magnitude |
|:---|:----|
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1352920>, <ast.Name object at 0x7da1b13528f0>]]]
for taget[tuple[[<ast.Name object at 0x7da1b1352860>, <ast.Name object at 0x7da1b1352830>]]] in starred[call[name[limitingMags].iteritems, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b1352770>
<ast.AugAssign object at 0x7da1b1352620>
for taget[tuple[[<ast.Name object at 0x7da1b1352320>, <ast.Name object at 0x7da1b13522f0>]]] in starred[call[call[name[peakMagnitudeDistributions]][constant[magnitude]].iteritems, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b13521d0>
<ast.AugAssign object at 0x7da1b1351f00>
return[name[settings_log]] | keyword[def] identifier[log_the_survey_settings] (
identifier[log] ,
identifier[pathToYamlFile] ):
literal[string]
keyword[import] identifier[yaml]
keyword[from] identifier[datetime] keyword[import] identifier[datetime] , identifier[date] , identifier[time]
identifier[now] = identifier[datetime] . identifier[now] ()
identifier[now] = identifier[now] . identifier[strftime] ( literal[string] )
identifier[fileName] = identifier[pathToYamlFile]
identifier[stream] = identifier[file] ( identifier[fileName] , literal[string] )
identifier[yamlContent] = identifier[yaml] . identifier[load] ( identifier[stream] )
identifier[snSurveyDiscoveryTimes] = identifier[yamlContent] [ literal[string] ]
identifier[lightCurveDiscoveryTimes] = identifier[yamlContent] [
literal[string] ]
identifier[snTypes] = identifier[yamlContent] [ literal[string] ]
identifier[redshifts] = identifier[yamlContent] [ literal[string] ]
identifier[cadenceDictionary] = identifier[yamlContent] [ literal[string] ]
identifier[peakAppMagList] = identifier[yamlContent] [ literal[string] ]
identifier[snCampaignLengthList] = identifier[yamlContent] [ literal[string] ]
identifier[programSettings] = identifier[yamlContent] [ literal[string] ]
identifier[limitingMags] = identifier[yamlContent] [ literal[string] ]
identifier[sampleNumber] = identifier[yamlContent] [ literal[string] ]
identifier[peakMagnitudeDistributions] = identifier[yamlContent] [
literal[string] ]
identifier[relativeRatesSet] = identifier[yamlContent] [ literal[string] ]
identifier[relativeSNRates] = identifier[yamlContent] [ literal[string] ][ identifier[relativeRatesSet] ]
identifier[lowerRedshiftLimit] = identifier[yamlContent] [ literal[string] ]
identifier[upperRedshiftLimit] = identifier[yamlContent] [ literal[string] ]
identifier[redshiftResolution] = identifier[yamlContent] [ literal[string] ]
identifier[extinctionSettings] = identifier[yamlContent] [ literal[string] ]
identifier[extinctionType] = identifier[extinctionSettings] [ literal[string] ]
identifier[hostExtinctionDistributions] = identifier[extinctionSettings] [ literal[string] ]
identifier[galacticExtinctionDistribution] = identifier[extinctionSettings] [ literal[string] ]
identifier[surveyCadenceSettings] = identifier[yamlContent] [ literal[string] ]
identifier[snLightCurves] = identifier[yamlContent] [ literal[string] ]
identifier[surveyArea] = identifier[yamlContent] [ literal[string] ]
identifier[extraSurveyConstraints] = identifier[yamlContent] [ literal[string] ]
identifier[weatherLossFraction] = identifier[surveyCadenceSettings] [
literal[string] ]
identifier[observableFraction] = identifier[surveyCadenceSettings] [ literal[string] ]
identifier[extinctionConstant] = identifier[extinctionSettings] [ literal[string] ]
identifier[CCSNRateFraction] = identifier[yamlContent] [
literal[string] ]
identifier[transientToCCSNRateFraction] = identifier[yamlContent] [ literal[string] ]
identifier[restFrameFilter] = identifier[yamlContent] [ literal[string] ]
identifier[peakMagLimit] = identifier[extraSurveyConstraints] [ literal[string] ]
identifier[campaignLengthLimit] = identifier[extraSurveyConstraints] [
literal[string] ]
identifier[c] = identifier[converter] ( identifier[log] = identifier[log] )
identifier[dists] = identifier[c] . identifier[redshift_to_distance] (
identifier[z] = identifier[float] ( identifier[upperRedshiftLimit] ),
identifier[WM] = literal[int] ,
identifier[WV] = literal[int] ,
identifier[H0] = literal[int]
)
identifier[dl_mpc] = identifier[dists] [ literal[string] ]
identifier[sphereOuter] =( literal[int] / literal[int] )* identifier[math] . identifier[pi] * identifier[dl_mpc] ** literal[int]
keyword[if] identifier[float] ( identifier[lowerRedshiftLimit] )== literal[int] :
identifier[surveyVolume] = identifier[sphereOuter]
keyword[else] :
identifier[dists] = identifier[c] . identifier[redshift_to_distance] (
identifier[z] = identifier[float] ( identifier[lowerRedshiftLimit] ),
identifier[WM] = literal[int] ,
identifier[WV] = literal[int] ,
identifier[H0] = literal[int]
)
identifier[dl_mpc] = identifier[dists] [ literal[string] ]
identifier[sphereInner] =( literal[int] / literal[int] )* identifier[math] . identifier[pi] * identifier[dl_mpc] ** literal[int]
identifier[surveyVolume] = identifier[sphereOuter] - identifier[sphereInner]
identifier[surveyVolume] = identifier[surveyVolume] * identifier[surveyArea] / literal[int]
identifier[surveyVolume] = identifier[surveyVolume] /( literal[int] )** literal[int]
keyword[if] identifier[surveyVolume] < literal[int] :
identifier[surveyVolume] = literal[string] % identifier[locals] ()
keyword[elif] identifier[surveyVolume] < literal[int] :
identifier[surveyVolume] = literal[string] % identifier[locals] ()
keyword[else] :
identifier[surveyVolume] = literal[string] % identifier[locals] ()
identifier[discoveryFraction] , identifier[tooFaintFraction] , identifier[shortCampaignFraction] = identifier[calculate_fraction_of_sn_discovered] (
identifier[log] , identifier[surveyCadenceSettings] , identifier[snSurveyDiscoveryTimes] , identifier[redshifts] , identifier[peakAppMagList] , identifier[snCampaignLengthList] , identifier[extraSurveyConstraints] , identifier[lowerRedshiftLimit] , identifier[upperRedshiftLimit] )
identifier[discoveryFraction] = identifier[discoveryFraction] * literal[int]
keyword[if] identifier[discoveryFraction] < literal[int] :
identifier[discoveryFraction] = literal[string] % identifier[locals] ()
keyword[elif] identifier[discoveryFraction] < literal[int] :
identifier[discoveryFraction] = literal[string] % identifier[locals] ()
keyword[else] :
identifier[discoveryFraction] = literal[string] % identifier[locals] ()
identifier[stream] . identifier[close] ()
identifier[settings_log] = literal[string] %( identifier[now] , identifier[restFrameFilter] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[limitingMags] . identifier[iteritems] ():
identifier[settings_log] += literal[string] %( identifier[k] , identifier[v] ,)
identifier[settings_log] += literal[string] %( identifier[sampleNumber] , identifier[lowerRedshiftLimit] , identifier[upperRedshiftLimit] , identifier[surveyArea] , identifier[surveyVolume] , identifier[discoveryFraction] , identifier[extinctionConstant] , identifier[observableFraction] , identifier[weatherLossFraction] ,)
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[peakMagnitudeDistributions] [ literal[string] ]. identifier[iteritems] ():
identifier[settings_log] += literal[string] %(
identifier[k] , identifier[relativeSNRates] [ identifier[k] ], identifier[v] , identifier[peakMagnitudeDistributions] [ literal[string] ][ identifier[k] ])
identifier[settings_log] += literal[string] %( identifier[peakMagLimit] , identifier[campaignLengthLimit] , identifier[transientToCCSNRateFraction] , identifier[CCSNRateFraction] )
keyword[return] identifier[settings_log] | def log_the_survey_settings(log, pathToYamlFile):
"""
*Create a MD log of the survey settings*
**Key Arguments:**
- ``log`` -- logger
- ``pathToYamlFile`` -- yaml results file
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import yaml
## LOCAL APPLICATION ##
from datetime import datetime, date, time
now = datetime.now()
now = now.strftime('%Y%m%dt%H%M%S')
################ >ACTION(S) ################
# IMPORT THE SIMULATION SETTINGS
fileName = pathToYamlFile
stream = file(fileName, 'r')
yamlContent = yaml.load(stream)
snSurveyDiscoveryTimes = yamlContent['Discoveries Relative to Survey Year']
lightCurveDiscoveryTimes = yamlContent['Discoveries Relative to Peak Magnitudes']
snTypes = yamlContent['SN Types']
redshifts = yamlContent['Redshifts']
cadenceDictionary = yamlContent['Cadence Dictionary']
peakAppMagList = yamlContent['Peak Apparent Magnitudes']
snCampaignLengthList = yamlContent['Campaign Length']
programSettings = yamlContent['Program Settings']
limitingMags = yamlContent['Limiting Magnitudes']
# for key in limitingMags:
# log.debug('filter: %s, limit: %s' % (key, limitingMags[key]))
sampleNumber = yamlContent['Simulation Sample']
peakMagnitudeDistributions = yamlContent['SN Absolute Peak-Magnitude Distributions']
#log.debug('snDistributions[magnitude] %s' % (snDistributions["magnitude"],))
#log.debug('snDistributions[sigma] %s' % (snDistributions["sigma"],))
relativeRatesSet = yamlContent['Relative Rate Set to Use']
relativeSNRates = yamlContent['Relative SN Rates'][relativeRatesSet]
#log.debug('relativeSNRates %s' % (relativeSNRates,))
lowerRedshiftLimit = yamlContent['Lower Redshift Limit']
upperRedshiftLimit = yamlContent['Upper Redshift Limit']
#log.debug('upperRedshiftLimit %s' % (upperRedshiftLimit,))
redshiftResolution = yamlContent['Redshift Resolution']
extinctionSettings = yamlContent['Extinctions']
extinctionType = extinctionSettings['constant or random']
hostExtinctionDistributions = extinctionSettings['host']
#log.debug('hostExtinctionDistributions %s' % (hostExtinctionDistributions,))
galacticExtinctionDistribution = extinctionSettings['galactic']
#log.debug('galacticExtinctionDistribution %s' % (galacticExtinctionDistribution,))
surveyCadenceSettings = yamlContent['Survey Cadence']
#log.debug('surveyCadenceSettings %s' % (surveyCadenceSettings,))
snLightCurves = yamlContent['Lightcurves']
#log.debug('snlightCurves %s' % (snlightCurves,))
surveyArea = yamlContent['Sky Area of the Survey (square degrees)']
extraSurveyConstraints = yamlContent['Extra Survey Constraints']
weatherLossFraction = surveyCadenceSettings['Fraction of Year Lost to Weather etc']
observableFraction = surveyCadenceSettings['Observable Fraction of Year']
extinctionConstant = extinctionSettings['constant E(b-v)']
CCSNRateFraction = yamlContent['CCSN Progenitor Population Fraction of IMF']
transientToCCSNRateFraction = yamlContent['Transient to CCSN Ratio']
restFrameFilter = yamlContent['Rest Frame Filter for K-corrections']
peakMagLimit = extraSurveyConstraints['Faint-Limit of Peak Magnitude']
campaignLengthLimit = extraSurveyConstraints['Observable for at least ? number of days']
# CALCULATE THE SURVEY VOLUME
c = converter(log=log)
dists = c.redshift_to_distance(z=float(upperRedshiftLimit), WM=0.3, WV=0.7, H0=70.0)
dl_mpc = dists['dl_mpc']
sphereOuter = 4.0 / 3.0 * math.pi * dl_mpc ** 3
if float(lowerRedshiftLimit) == 0.0:
surveyVolume = sphereOuter # depends on [control=['if'], data=[]]
else:
dists = c.redshift_to_distance(z=float(lowerRedshiftLimit), WM=0.3, WV=0.7, H0=70.0)
dl_mpc = dists['dl_mpc']
sphereInner = 4.0 / 3.0 * math.pi * dl_mpc ** 3
surveyVolume = sphereOuter - sphereInner
# NOW SCALE TO SKY-AREA
surveyVolume = surveyVolume * surveyArea / 41253
surveyVolume = surveyVolume / 1000.0 ** 3
if surveyVolume < 1:
surveyVolume = '%(surveyVolume)0.4f' % locals() # depends on [control=['if'], data=['surveyVolume']]
elif surveyVolume < 100:
surveyVolume = '%(surveyVolume)0.2f' % locals() # depends on [control=['if'], data=['surveyVolume']]
else:
surveyVolume = '%(surveyVolume)0.1f' % locals()
# CALCULATE OVERALL DETECTION FRACTIONS
(discoveryFraction, tooFaintFraction, shortCampaignFraction) = calculate_fraction_of_sn_discovered(log, surveyCadenceSettings, snSurveyDiscoveryTimes, redshifts, peakAppMagList, snCampaignLengthList, extraSurveyConstraints, lowerRedshiftLimit, upperRedshiftLimit)
discoveryFraction = discoveryFraction * 100.0
if discoveryFraction < 1:
discoveryFraction = '%(discoveryFraction)0.4f' % locals() # depends on [control=['if'], data=['discoveryFraction']]
elif discoveryFraction < 10:
discoveryFraction = '%(discoveryFraction)0.2f' % locals() # depends on [control=['if'], data=['discoveryFraction']]
else:
discoveryFraction = '%(discoveryFraction)0.1f' % locals()
# log.info('yamlContent %s' % (yamlContent,))
stream.close()
settings_log = '\n# SN Survey Simulation Results - %s\n\nThe *%s*-band liming magnitudes of this simulated survey are:\n\n| Filter | Magnitude |\n|:---|:----|\n' % (now, restFrameFilter)
for (k, v) in limitingMags.iteritems():
settings_log += '| %s | %s |\n' % (k, v) # depends on [control=['for'], data=[]]
settings_log += "\n\nA total of **%s** transients where simulated in the survey, within a **redshift-range of %s-%s** and **survey footprint of %s deg<sup>2</sup>** (a **total volume of %s Gpc<sup>-3</sup> yr<sup>-1</sup>**). **%s%% of these simulated transients had solid 'discoveries'**. A constant galactic extinction of `E(B-V) = %s` is used, it's assumed that any given field in the sky is visible for %s of the survey year and the typical fraction of survey time lost to weather of %s is accounted for. Here are the relative rates and peak magnitude distributions of the SN used in the survey:\n\n| SN Type | Relative Rate | Peak Magnitude | Sigma Peak |\n|:---|:---|:---|:---|\n" % (sampleNumber, lowerRedshiftLimit, upperRedshiftLimit, surveyArea, surveyVolume, discoveryFraction, extinctionConstant, observableFraction, weatherLossFraction)
for (k, v) in peakMagnitudeDistributions['magnitude'].iteritems():
settings_log += '| %s | %s | %s | %s |\n' % (k, relativeSNRates[k], v, peakMagnitudeDistributions['sigma'][k]) # depends on [control=['for'], data=[]]
settings_log += '\n\nIf a transient is detected by the simulated survey, extra constraints are placed upon the detected object to secure positive identification as the object.\n\n1. The peak apparent magnitude of the object must be brighter than **%s mag**\n2. The object must be detectable for long enough to complete a follow-up campaign of longer than **%s days** within 1 survey year with any single filter.\n\nThe transient rate for the survey volume is estimated by assuming a rate of **%s** times that of the CCSN rate (itself a fraction of **%s** of the total SFR).\n\n' % (peakMagLimit, campaignLengthLimit, transientToCCSNRateFraction, CCSNRateFraction)
return settings_log |
def set_binary_path(name):
    '''
    Record the directory in which the syslog-ng binary lives. Intended to
    be called from states; when syslog-ng was installed through a package
    manager this is unnecessary.

    CLI Example:

    .. code-block:: bash

        salt '*' syslog_ng.set_binary_path name=/usr/sbin
    '''
    global __SYSLOG_NG_BINARY_PATH
    # Remember the previous path so the state result can report the change.
    previous = __SYSLOG_NG_BINARY_PATH
    __SYSLOG_NG_BINARY_PATH = name
    return _format_state_result(
        name,
        result=True,
        changes=_format_changes(previous, name),
    )
constant[
Sets the path, where the syslog-ng binary can be found. This function is
intended to be used from states.
If syslog-ng is installed via a package manager, users don't need to use
this function.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.set_binary_path name=/usr/sbin
]
<ast.Global object at 0x7da1b1c35900>
variable[old] assign[=] name[__SYSLOG_NG_BINARY_PATH]
variable[__SYSLOG_NG_BINARY_PATH] assign[=] name[name]
variable[changes] assign[=] call[name[_format_changes], parameter[name[old], name[name]]]
return[call[name[_format_state_result], parameter[name[name]]]] | keyword[def] identifier[set_binary_path] ( identifier[name] ):
literal[string]
keyword[global] identifier[__SYSLOG_NG_BINARY_PATH]
identifier[old] = identifier[__SYSLOG_NG_BINARY_PATH]
identifier[__SYSLOG_NG_BINARY_PATH] = identifier[name]
identifier[changes] = identifier[_format_changes] ( identifier[old] , identifier[name] )
keyword[return] identifier[_format_state_result] ( identifier[name] , identifier[result] = keyword[True] , identifier[changes] = identifier[changes] ) | def set_binary_path(name):
"""
Sets the path, where the syslog-ng binary can be found. This function is
intended to be used from states.
If syslog-ng is installed via a package manager, users don't need to use
this function.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.set_binary_path name=/usr/sbin
"""
global __SYSLOG_NG_BINARY_PATH
old = __SYSLOG_NG_BINARY_PATH
__SYSLOG_NG_BINARY_PATH = name
changes = _format_changes(old, name)
return _format_state_result(name, result=True, changes=changes) |
def save_current_figure_as(self):
    """Save the figure of the currently selected thumbnail, if any."""
    thumbnail = self.current_thumbnail
    if thumbnail is None:
        # Nothing is selected; silently do nothing.
        return
    self.save_figure_as(thumbnail.canvas.fig, thumbnail.canvas.fmt)
constant[Save the currently selected figure.]
if compare[name[self].current_thumbnail is_not constant[None]] begin[:]
call[name[self].save_figure_as, parameter[name[self].current_thumbnail.canvas.fig, name[self].current_thumbnail.canvas.fmt]] | keyword[def] identifier[save_current_figure_as] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[current_thumbnail] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[save_figure_as] ( identifier[self] . identifier[current_thumbnail] . identifier[canvas] . identifier[fig] ,
identifier[self] . identifier[current_thumbnail] . identifier[canvas] . identifier[fmt] ) | def save_current_figure_as(self):
"""Save the currently selected figure."""
if self.current_thumbnail is not None:
self.save_figure_as(self.current_thumbnail.canvas.fig, self.current_thumbnail.canvas.fmt) # depends on [control=['if'], data=[]] |
def lineage(self):
    """
    Return all nodes between this node and the root, including this one.

    The result is ordered root-first; the recursion goes through
    ``self.parent.lineage()`` so subclass overrides are honoured.
    """
    ancestors = self.parent.lineage() if self.parent else []
    ancestors.append(self)
    return ancestors
constant[
Return all nodes between this node and the root, including this one.
]
if <ast.UnaryOp object at 0x7da1b198eda0> begin[:]
return[list[[<ast.Name object at 0x7da1b198ca90>]]] | keyword[def] identifier[lineage] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[parent] :
keyword[return] [ identifier[self] ]
keyword[else] :
identifier[L] = identifier[self] . identifier[parent] . identifier[lineage] ()
identifier[L] . identifier[append] ( identifier[self] )
keyword[return] identifier[L] | def lineage(self):
"""
Return all nodes between this node and the root, including this one.
"""
if not self.parent:
return [self] # depends on [control=['if'], data=[]]
else:
L = self.parent.lineage()
L.append(self)
return L |
def get_dev_and_path(filename):
    """Determine whether *filename* lives locally or on a remote device.

    Directories from the pyboard take precedence over local directories of
    the same name: paths rooted on the default device (e.g. /flash,
    /sdcard) and paths of the form /dev_name/path belong to that device.

    Returns a ``(dev, dev_filename)`` tuple where ``dev`` is the matched
    device and ``dev_filename`` is the path relative to it; ``dev`` is
    ``None`` when the file is not associated with any remote device.
    """
    if DEFAULT_DEV and DEFAULT_DEV.is_root_path(filename):
        return (DEFAULT_DEV, filename)
    # A trailing slash makes the prefix test match whole path components
    # only (so /pyb matches device /pyb/ but /pybx does not).
    probe = filename + '/'
    with DEV_LOCK:
        for dev in DEVS:
            if not probe.startswith(dev.name_path):
                continue
            remainder = filename[len(dev.name_path) - 1:]
            # The bare device name maps to the device root.
            return (dev, remainder or '/')
    return (None, filename)
return (None, filename) | def function[get_dev_and_path, parameter[filename]]:
constant[Determines if a given file is located locally or remotely. We assume
that any directories from the pyboard take precedence over local
directories of the same name. /flash and /sdcard are associated with
the default device. /dev_name/path where dev_name is the name of a
given device is also considered to be associated with the named device.
If the file is associated with a remote device, then this function
returns a tuple (dev, dev_filename) where dev is the device and
dev_filename is the portion of the filename relative to the device.
If the file is not associated with the remote device, then the dev
portion of the returned tuple will be None.
]
if name[DEFAULT_DEV] begin[:]
if call[name[DEFAULT_DEV].is_root_path, parameter[name[filename]]] begin[:]
return[tuple[[<ast.Name object at 0x7da204962380>, <ast.Name object at 0x7da204962860>]]]
variable[test_filename] assign[=] binary_operation[name[filename] + constant[/]]
with name[DEV_LOCK] begin[:]
for taget[name[dev]] in starred[name[DEVS]] begin[:]
if call[name[test_filename].startswith, parameter[name[dev].name_path]] begin[:]
variable[dev_filename] assign[=] call[name[filename]][<ast.Slice object at 0x7da204963ee0>]
if compare[name[dev_filename] equal[==] constant[]] begin[:]
variable[dev_filename] assign[=] constant[/]
return[tuple[[<ast.Name object at 0x7da204961ba0>, <ast.Name object at 0x7da2049620e0>]]]
return[tuple[[<ast.Constant object at 0x7da204961f00>, <ast.Name object at 0x7da2047e8cd0>]]] | keyword[def] identifier[get_dev_and_path] ( identifier[filename] ):
literal[string]
keyword[if] identifier[DEFAULT_DEV] :
keyword[if] identifier[DEFAULT_DEV] . identifier[is_root_path] ( identifier[filename] ):
keyword[return] ( identifier[DEFAULT_DEV] , identifier[filename] )
identifier[test_filename] = identifier[filename] + literal[string]
keyword[with] identifier[DEV_LOCK] :
keyword[for] identifier[dev] keyword[in] identifier[DEVS] :
keyword[if] identifier[test_filename] . identifier[startswith] ( identifier[dev] . identifier[name_path] ):
identifier[dev_filename] = identifier[filename] [ identifier[len] ( identifier[dev] . identifier[name_path] )- literal[int] :]
keyword[if] identifier[dev_filename] == literal[string] :
identifier[dev_filename] = literal[string]
keyword[return] ( identifier[dev] , identifier[dev_filename] )
keyword[return] ( keyword[None] , identifier[filename] ) | def get_dev_and_path(filename):
"""Determines if a given file is located locally or remotely. We assume
that any directories from the pyboard take precedence over local
directories of the same name. /flash and /sdcard are associated with
the default device. /dev_name/path where dev_name is the name of a
given device is also considered to be associated with the named device.
If the file is associated with a remote device, then this function
returns a tuple (dev, dev_filename) where dev is the device and
dev_filename is the portion of the filename relative to the device.
If the file is not associated with the remote device, then the dev
portion of the returned tuple will be None.
"""
if DEFAULT_DEV:
if DEFAULT_DEV.is_root_path(filename):
return (DEFAULT_DEV, filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
test_filename = filename + '/'
with DEV_LOCK:
for dev in DEVS:
if test_filename.startswith(dev.name_path):
dev_filename = filename[len(dev.name_path) - 1:]
if dev_filename == '':
dev_filename = '/' # depends on [control=['if'], data=['dev_filename']]
return (dev, dev_filename) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dev']] # depends on [control=['with'], data=[]]
return (None, filename) |
def simplify(self):
    """Simplify this unit's expression and return this unit.

    NOTE(review): despite the original wording ("new equivalent unit
    object"), the code mutates ``self.expr`` in place and returns
    ``self``, not a copy — confirm whether callers rely on that.

    >>> import unyt as u
    >>> unit = (u.m**2/u.cm).simplify()
    >>> unit
    100*m
    """
    expr = self.expr
    # Replace the stored expression with its cancelled form; the registry
    # supplies the unit definitions needed during cancellation.
    self.expr = _cancel_mul(expr, self.registry)
    return self
constant[Return a new equivalent unit object with a simplified unit expression
>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m
]
variable[expr] assign[=] name[self].expr
name[self].expr assign[=] call[name[_cancel_mul], parameter[name[expr], name[self].registry]]
return[name[self]] | keyword[def] identifier[simplify] ( identifier[self] ):
literal[string]
identifier[expr] = identifier[self] . identifier[expr]
identifier[self] . identifier[expr] = identifier[_cancel_mul] ( identifier[expr] , identifier[self] . identifier[registry] )
keyword[return] identifier[self] | def simplify(self):
"""Return a new equivalent unit object with a simplified unit expression
>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m
"""
expr = self.expr
self.expr = _cancel_mul(expr, self.registry)
return self |
def handle_data(self, content):
    '''
    Sometimes the weather data is set under an attribute of the "window"
    DOM object. Sometimes it appears as part of a javascript function.
    Catch either possibility.

    Two layouts are handled:

    1. A <script> tag containing ``window.__data = {...};`` -- the JSON
       dict is sliced out of the assignment and searched recursively for
       a dict holding both 'Observation' and 'DailyForecast' keys.
    2. A line of the form ``var adaptorParams = {...};`` -- the
       right-hand side of the assignment is loaded as JSON directly.

    On success the parsed structure is stored on ``self.weather_data``;
    nothing is returned in either case.
    '''
    if self.weather_data is not None:
        # We've already found weather data, no need to continue parsing
        return
    # Drop surrounding whitespace and any trailing javascript statement
    # terminator so the offset math below is predictable.
    content = content.strip().rstrip(';')
    try:
        tag_text = self.get_starttag_text().lower()
    except AttributeError:
        # No enclosing start tag is available for this data chunk.
        tag_text = ''
    if tag_text.startswith('<script'):
        # Look for feed information embedded as a javascript variable
        begin = content.find('window.__data')
        if begin != -1:
            self.logger.debug('Located window.__data')
            # Look for end of JSON dict and end of javascript statement
            end = content.find('};', begin)
            if end == -1:
                self.logger.debug('Failed to locate end of javascript statement')
            else:
                # Strip the "window.__data=" from the beginning
                json_data = self.load_json(
                    content[begin:end + 1].split('=', 1)[1].lstrip()
                )
                if json_data is not None:
                    def _find_weather_data(data):
                        '''
                        Helper designed to minimize impact of potential
                        structural changes to this data.

                        Recursively scans nested dict values (lists are
                        not descended into) and returns the first dict
                        containing both 'Observation' and
                        'DailyForecast' keys, or None if absent.
                        '''
                        if isinstance(data, dict):
                            if 'Observation' in data and 'DailyForecast' in data:
                                return data
                            else:
                                for key in data:
                                    ret = _find_weather_data(data[key])
                                    if ret is not None:
                                        return ret
                        return None
                    weather_data = _find_weather_data(json_data)
                    if weather_data is None:
                        self.logger.debug(
                            'Failed to locate weather data in the '
                            'following data structure: %s', json_data
                        )
                    else:
                        self.weather_data = weather_data
        # Script-tag content never falls through to the line-based scan.
        return
    for line in content.splitlines():
        line = line.strip().rstrip(';')
        if line.startswith('var adaptorParams'):
            # Strip off the "var adaptorParams = " from the beginning,
            # and the javascript semicolon from the end. This will give
            # us JSON that we can load.
            weather_data = self.load_json(line.split('=', 1)[1].lstrip())
            if weather_data is not None:
                self.weather_data = weather_data
                return
constant[
Sometimes the weather data is set under an attribute of the "window"
DOM object. Sometimes it appears as part of a javascript function.
Catch either possibility.
]
if compare[name[self].weather_data is_not constant[None]] begin[:]
return[None]
variable[content] assign[=] call[call[name[content].strip, parameter[]].rstrip, parameter[constant[;]]]
<ast.Try object at 0x7da18ede6410>
if call[name[tag_text].startswith, parameter[constant[<script]]] begin[:]
variable[begin] assign[=] call[name[content].find, parameter[constant[window.__data]]]
if compare[name[begin] not_equal[!=] <ast.UnaryOp object at 0x7da18ede5030>] begin[:]
call[name[self].logger.debug, parameter[constant[Located window.__data]]]
variable[end] assign[=] call[name[content].find, parameter[constant[};], name[begin]]]
if compare[name[end] equal[==] <ast.UnaryOp object at 0x7da18ede75b0>] begin[:]
call[name[self].logger.debug, parameter[constant[Failed to locate end of javascript statement]]]
for taget[name[line]] in starred[call[name[content].splitlines, parameter[]]] begin[:]
variable[line] assign[=] call[call[name[line].strip, parameter[]].rstrip, parameter[constant[;]]]
if call[name[line].startswith, parameter[constant[var adaptorParams]]] begin[:]
variable[weather_data] assign[=] call[name[self].load_json, parameter[call[call[call[name[line].split, parameter[constant[=], constant[1]]]][constant[1]].lstrip, parameter[]]]]
if compare[name[weather_data] is_not constant[None]] begin[:]
name[self].weather_data assign[=] name[weather_data]
return[None] | keyword[def] identifier[handle_data] ( identifier[self] , identifier[content] ):
literal[string]
keyword[if] identifier[self] . identifier[weather_data] keyword[is] keyword[not] keyword[None] :
keyword[return]
identifier[content] = identifier[content] . identifier[strip] (). identifier[rstrip] ( literal[string] )
keyword[try] :
identifier[tag_text] = identifier[self] . identifier[get_starttag_text] (). identifier[lower] ()
keyword[except] identifier[AttributeError] :
identifier[tag_text] = literal[string]
keyword[if] identifier[tag_text] . identifier[startswith] ( literal[string] ):
identifier[begin] = identifier[content] . identifier[find] ( literal[string] )
keyword[if] identifier[begin] !=- literal[int] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[end] = identifier[content] . identifier[find] ( literal[string] , identifier[begin] )
keyword[if] identifier[end] ==- literal[int] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[json_data] = identifier[self] . identifier[load_json] (
identifier[content] [ identifier[begin] : identifier[end] + literal[int] ]. identifier[split] ( literal[string] , literal[int] )[ literal[int] ]. identifier[lstrip] ()
)
keyword[if] identifier[json_data] keyword[is] keyword[not] keyword[None] :
keyword[def] identifier[_find_weather_data] ( identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[if] literal[string] keyword[in] identifier[data] keyword[and] literal[string] keyword[in] identifier[data] :
keyword[return] identifier[data]
keyword[else] :
keyword[for] identifier[key] keyword[in] identifier[data] :
identifier[ret] = identifier[_find_weather_data] ( identifier[data] [ identifier[key] ])
keyword[if] identifier[ret] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[ret]
keyword[return] keyword[None]
identifier[weather_data] = identifier[_find_weather_data] ( identifier[json_data] )
keyword[if] identifier[weather_data] keyword[is] keyword[None] :
identifier[self] . identifier[logger] . identifier[debug] (
literal[string]
literal[string] , identifier[json_data]
)
keyword[else] :
identifier[self] . identifier[weather_data] = identifier[weather_data]
keyword[return]
keyword[for] identifier[line] keyword[in] identifier[content] . identifier[splitlines] ():
identifier[line] = identifier[line] . identifier[strip] (). identifier[rstrip] ( literal[string] )
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[weather_data] = identifier[self] . identifier[load_json] ( identifier[line] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]. identifier[lstrip] ())
keyword[if] identifier[weather_data] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[weather_data] = identifier[weather_data]
keyword[return] | def handle_data(self, content):
"""
Sometimes the weather data is set under an attribute of the "window"
DOM object. Sometimes it appears as part of a javascript function.
Catch either possibility.
"""
if self.weather_data is not None:
# We've already found weather data, no need to continue parsing
return # depends on [control=['if'], data=[]]
content = content.strip().rstrip(';')
try:
tag_text = self.get_starttag_text().lower() # depends on [control=['try'], data=[]]
except AttributeError:
tag_text = '' # depends on [control=['except'], data=[]]
if tag_text.startswith('<script'):
# Look for feed information embedded as a javascript variable
begin = content.find('window.__data')
if begin != -1:
self.logger.debug('Located window.__data')
# Look for end of JSON dict and end of javascript statement
end = content.find('};', begin)
if end == -1:
self.logger.debug('Failed to locate end of javascript statement') # depends on [control=['if'], data=[]]
else:
# Strip the "window.__data=" from the beginning
json_data = self.load_json(content[begin:end + 1].split('=', 1)[1].lstrip())
if json_data is not None:
def _find_weather_data(data):
"""
Helper designed to minimize impact of potential
structural changes to this data.
"""
if isinstance(data, dict):
if 'Observation' in data and 'DailyForecast' in data:
return data # depends on [control=['if'], data=[]]
else:
for key in data:
ret = _find_weather_data(data[key])
if ret is not None:
return ret # depends on [control=['if'], data=['ret']] # depends on [control=['for'], data=['key']]
return None # depends on [control=['if'], data=[]]
weather_data = _find_weather_data(json_data)
if weather_data is None:
self.logger.debug('Failed to locate weather data in the following data structure: %s', json_data) # depends on [control=['if'], data=[]]
else:
self.weather_data = weather_data
return # depends on [control=['if'], data=['json_data']] # depends on [control=['if'], data=['begin']]
for line in content.splitlines():
line = line.strip().rstrip(';')
if line.startswith('var adaptorParams'):
# Strip off the "var adaptorParams = " from the beginning,
# and the javascript semicolon from the end. This will give
# us JSON that we can load.
weather_data = self.load_json(line.split('=', 1)[1].lstrip())
if weather_data is not None:
self.weather_data = weather_data
return # depends on [control=['if'], data=['weather_data']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]] |
def predict_imgs(self, imgs):
        '''
        Run the underlying model on a batch of raw images.
        Expects ``imgs`` to be a numpy ndarray of shape
        (N, height, width, channels) -- presumably (N, 224, 224, 3) for
        this model; TODO confirm against the model's input layer.
        The array is first passed through ``preprocess_input``, so callers
        should supply unpreprocessed pixel data; this is convenient when
        you want preprocessing and prediction done in one call.
        Returns whatever ``self.model.predict`` yields (the per-image
        prediction array).
        '''
        imgs = preprocess_input(imgs)
        return self.model.predict(imgs)
constant[
takes an image input and predicts on it
this expects an ndarray (heightxwidthxchannels)
this model shouldbe a (Nx224x224x3) numpy array
this method it noce if you want to do preprocessing
then predict results on those preprocessed images
this function expects the image array to be jpg
]
variable[imgs] assign[=] call[name[preprocess_input], parameter[name[imgs]]]
return[call[name[self].model.predict, parameter[name[imgs]]]] | keyword[def] identifier[predict_imgs] ( identifier[self] , identifier[imgs] ):
literal[string]
identifier[imgs] = identifier[preprocess_input] ( identifier[imgs] )
keyword[return] identifier[self] . identifier[model] . identifier[predict] ( identifier[imgs] ) | def predict_imgs(self, imgs):
"""
takes an image input and predicts on it
this expects an ndarray (heightxwidthxchannels)
this model shouldbe a (Nx224x224x3) numpy array
this method it noce if you want to do preprocessing
then predict results on those preprocessed images
this function expects the image array to be jpg
"""
imgs = preprocess_input(imgs)
return self.model.predict(imgs) |
def get(self, request, provider=None):
        """Prepare the social friend model before rendering the view.
        Collects the user's social auth connections and, when at least one
        exists, fetches or creates the matching SocialFriendList objects.
        When no connections exist, optionally redirects instead.
        """
        # The connection set lives under a different manager depending on
        # whether django-allauth or social-auth is in use.
        if USING_ALLAUTH:
            connections = request.user.socialaccount_set.all()
        else:
            connections = request.user.social_auth.all()
        self.social_auths = connections
        self.social_friend_lists = []
        if self.social_auths.count():
            # One social_friend_list per connected social network.
            self.social_friend_lists = SocialFriendList.objects.get_or_create_with_social_auths(self.social_auths)
        elif REDIRECT_IF_NO_ACCOUNT:
            # No connected accounts at all: bail out early if configured.
            return HttpResponseRedirect(REDIRECT_URL)
        return super(FriendListView, self).get(request)
constant[prepare the social friend model]
if name[USING_ALLAUTH] begin[:]
name[self].social_auths assign[=] call[name[request].user.socialaccount_set.all, parameter[]]
name[self].social_friend_lists assign[=] list[[]]
if compare[call[name[self].social_auths.count, parameter[]] equal[==] constant[0]] begin[:]
if name[REDIRECT_IF_NO_ACCOUNT] begin[:]
return[call[name[HttpResponseRedirect], parameter[name[REDIRECT_URL]]]]
return[call[call[name[super], parameter[name[FriendListView], name[self]]].get, parameter[name[request]]]]
name[self].social_friend_lists assign[=] call[name[SocialFriendList].objects.get_or_create_with_social_auths, parameter[name[self].social_auths]]
return[call[call[name[super], parameter[name[FriendListView], name[self]]].get, parameter[name[request]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[request] , identifier[provider] = keyword[None] ):
literal[string]
keyword[if] identifier[USING_ALLAUTH] :
identifier[self] . identifier[social_auths] = identifier[request] . identifier[user] . identifier[socialaccount_set] . identifier[all] ()
keyword[else] :
identifier[self] . identifier[social_auths] = identifier[request] . identifier[user] . identifier[social_auth] . identifier[all] ()
identifier[self] . identifier[social_friend_lists] =[]
keyword[if] identifier[self] . identifier[social_auths] . identifier[count] ()== literal[int] :
keyword[if] identifier[REDIRECT_IF_NO_ACCOUNT] :
keyword[return] identifier[HttpResponseRedirect] ( identifier[REDIRECT_URL] )
keyword[return] identifier[super] ( identifier[FriendListView] , identifier[self] ). identifier[get] ( identifier[request] )
identifier[self] . identifier[social_friend_lists] = identifier[SocialFriendList] . identifier[objects] . identifier[get_or_create_with_social_auths] ( identifier[self] . identifier[social_auths] )
keyword[return] identifier[super] ( identifier[FriendListView] , identifier[self] ). identifier[get] ( identifier[request] ) | def get(self, request, provider=None):
"""prepare the social friend model"""
# Get the social auth connections
if USING_ALLAUTH:
self.social_auths = request.user.socialaccount_set.all() # depends on [control=['if'], data=[]]
else:
self.social_auths = request.user.social_auth.all()
self.social_friend_lists = []
# if the user did not connect any social accounts, no need to continue
if self.social_auths.count() == 0:
if REDIRECT_IF_NO_ACCOUNT:
return HttpResponseRedirect(REDIRECT_URL) # depends on [control=['if'], data=[]]
return super(FriendListView, self).get(request) # depends on [control=['if'], data=[]]
# for each social network, get or create social_friend_list
self.social_friend_lists = SocialFriendList.objects.get_or_create_with_social_auths(self.social_auths)
return super(FriendListView, self).get(request) |
def get_parameters(signature, transmute_attrs, arguments_to_ignore=None):
    """Categorize a function's arguments by how they are passed over HTTP.
    The possible categories are:
    * query parameters: passed as query parameters in the url
    * body parameters: retrieved from the request body (includes forms)
    * header parameters: retrieved from the request header
    * path parameters: retrieved from the uri path
    An argument "arg" is categorized by:
    1. checking the transmute parameter declarations attached to the
       function (e.g. func.transmute_query_parameters) for "arg", and
       placing it in the declared category when mentioned.
    2. placing "arg" in the path category when it appears in a path.
    3. otherwise defaulting "arg" to the query category when the method
       set is exactly {GET}, and to the body category for anything else.
    """
    parameters = Parameters()
    consumed = set(arguments_to_ignore or [])
    # Handle the explicitly declared categories first.
    for category in ("query", "header", "path"):
        target = getattr(parameters, category)
        declared = getattr(transmute_attrs, category + "_parameters")
        consumed |= load_parameters(
            target, declared, signature, transmute_attrs
        )
    declared_body = transmute_attrs.body_parameters
    if isinstance(declared_body, str):
        # A string body declaration means the whole body maps to one arg.
        arg = declared_body
        parameters.body = Param(
            argument_name=arg,
            description=transmute_attrs.parameter_descriptions.get(arg),
            arginfo=signature.get_argument(arg),
        )
        consumed.add(arg)
    else:
        consumed |= load_parameters(
            parameters.body, declared_body, signature, transmute_attrs
        )
    # Arguments embedded in the route templates become path parameters.
    for arg in _extract_path_parameters_from_paths(transmute_attrs.paths):
        parameters.path[arg] = Param(
            argument_name=arg,
            description=transmute_attrs.parameter_descriptions.get(arg),
            arginfo=signature.get_argument(arg),
        )
        consumed.add(arg)
    # Anything left falls back to query (GET-only) or body (otherwise).
    if transmute_attrs.methods == set(["GET"]):
        fallback = parameters.query
    else:
        fallback = parameters.body
    for arginfo in signature:
        if arginfo.name in consumed:
            continue
        consumed.add(arginfo.name)
        fallback[arginfo.name] = Param(
            arginfo.name,
            description=transmute_attrs.parameter_descriptions.get(arginfo.name),
            arginfo=arginfo,
        )
    return parameters
constant[given a function, categorize which arguments should be passed by
what types of parameters. The choices are:
* query parameters: passed in as query parameters in the url
* body parameters: retrieved from the request body (includes forms)
* header parameters: retrieved from the request header
* path parameters: retrieved from the uri path
The categorization is performed for an argument "arg" by:
1. examining the transmute parameters attached to the function (e.g.
func.transmute_query_parameters), and checking if "arg" is mentioned. If so,
it is added to the category.
2. If the argument is available in the path, it will be added
as a path parameter.
3. If the method of the function is GET and only GET, then "arg" will be
be added to the expected query parameters. Otherwise, "arg" will be added as
a body parameter.
]
variable[params] assign[=] call[name[Parameters], parameter[]]
variable[used_keys] assign[=] call[name[set], parameter[<ast.BoolOp object at 0x7da1b1be6da0>]]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da1b1be5960>, <ast.Constant object at 0x7da1b1be4610>, <ast.Constant object at 0x7da1b1be6b30>]]] begin[:]
variable[param_set] assign[=] call[name[getattr], parameter[name[params], name[key]]]
variable[explicit_parameters] assign[=] call[name[getattr], parameter[name[transmute_attrs], binary_operation[name[key] + constant[_parameters]]]]
<ast.AugAssign object at 0x7da1b1be4d60>
variable[body_parameters] assign[=] name[transmute_attrs].body_parameters
if call[name[isinstance], parameter[name[body_parameters], name[str]]] begin[:]
variable[name] assign[=] name[body_parameters]
name[params].body assign[=] call[name[Param], parameter[]]
call[name[used_keys].add, parameter[name[name]]]
for taget[name[name]] in starred[call[name[_extract_path_parameters_from_paths], parameter[name[transmute_attrs].paths]]] begin[:]
call[name[params].path][name[name]] assign[=] call[name[Param], parameter[]]
call[name[used_keys].add, parameter[name[name]]]
variable[default_param_key] assign[=] <ast.IfExp object at 0x7da1b1ad1c30>
variable[default_params] assign[=] call[name[getattr], parameter[name[params], name[default_param_key]]]
for taget[name[arginfo]] in starred[name[signature]] begin[:]
if compare[name[arginfo].name in name[used_keys]] begin[:]
continue
call[name[used_keys].add, parameter[name[arginfo].name]]
call[name[default_params]][name[arginfo].name] assign[=] call[name[Param], parameter[name[arginfo].name]]
return[name[params]] | keyword[def] identifier[get_parameters] ( identifier[signature] , identifier[transmute_attrs] , identifier[arguments_to_ignore] = keyword[None] ):
literal[string]
identifier[params] = identifier[Parameters] ()
identifier[used_keys] = identifier[set] ( identifier[arguments_to_ignore] keyword[or] [])
keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[param_set] = identifier[getattr] ( identifier[params] , identifier[key] )
identifier[explicit_parameters] = identifier[getattr] ( identifier[transmute_attrs] , identifier[key] + literal[string] )
identifier[used_keys] |= identifier[load_parameters] (
identifier[param_set] , identifier[explicit_parameters] , identifier[signature] , identifier[transmute_attrs]
)
identifier[body_parameters] = identifier[transmute_attrs] . identifier[body_parameters]
keyword[if] identifier[isinstance] ( identifier[body_parameters] , identifier[str] ):
identifier[name] = identifier[body_parameters]
identifier[params] . identifier[body] = identifier[Param] (
identifier[argument_name] = identifier[name] ,
identifier[description] = identifier[transmute_attrs] . identifier[parameter_descriptions] . identifier[get] ( identifier[name] ),
identifier[arginfo] = identifier[signature] . identifier[get_argument] ( identifier[name] ),
)
identifier[used_keys] . identifier[add] ( identifier[name] )
keyword[else] :
identifier[used_keys] |= identifier[load_parameters] (
identifier[params] . identifier[body] , identifier[transmute_attrs] . identifier[body_parameters] , identifier[signature] , identifier[transmute_attrs]
)
keyword[for] identifier[name] keyword[in] identifier[_extract_path_parameters_from_paths] ( identifier[transmute_attrs] . identifier[paths] ):
identifier[params] . identifier[path] [ identifier[name] ]= identifier[Param] (
identifier[argument_name] = identifier[name] ,
identifier[description] = identifier[transmute_attrs] . identifier[parameter_descriptions] . identifier[get] ( identifier[name] ),
identifier[arginfo] = identifier[signature] . identifier[get_argument] ( identifier[name] ),
)
identifier[used_keys] . identifier[add] ( identifier[name] )
identifier[default_param_key] = literal[string] keyword[if] identifier[transmute_attrs] . identifier[methods] == identifier[set] ([ literal[string] ]) keyword[else] literal[string]
identifier[default_params] = identifier[getattr] ( identifier[params] , identifier[default_param_key] )
keyword[for] identifier[arginfo] keyword[in] identifier[signature] :
keyword[if] identifier[arginfo] . identifier[name] keyword[in] identifier[used_keys] :
keyword[continue]
identifier[used_keys] . identifier[add] ( identifier[arginfo] . identifier[name] )
identifier[default_params] [ identifier[arginfo] . identifier[name] ]= identifier[Param] (
identifier[arginfo] . identifier[name] ,
identifier[description] = identifier[transmute_attrs] . identifier[parameter_descriptions] . identifier[get] ( identifier[arginfo] . identifier[name] ),
identifier[arginfo] = identifier[arginfo] ,
)
keyword[return] identifier[params] | def get_parameters(signature, transmute_attrs, arguments_to_ignore=None):
"""given a function, categorize which arguments should be passed by
what types of parameters. The choices are:
* query parameters: passed in as query parameters in the url
* body parameters: retrieved from the request body (includes forms)
* header parameters: retrieved from the request header
* path parameters: retrieved from the uri path
The categorization is performed for an argument "arg" by:
1. examining the transmute parameters attached to the function (e.g.
func.transmute_query_parameters), and checking if "arg" is mentioned. If so,
it is added to the category.
2. If the argument is available in the path, it will be added
as a path parameter.
3. If the method of the function is GET and only GET, then "arg" will be
be added to the expected query parameters. Otherwise, "arg" will be added as
a body parameter.
"""
params = Parameters()
used_keys = set(arguments_to_ignore or [])
# examine what variables are categorized first.
for key in ['query', 'header', 'path']:
param_set = getattr(params, key)
explicit_parameters = getattr(transmute_attrs, key + '_parameters')
used_keys |= load_parameters(param_set, explicit_parameters, signature, transmute_attrs) # depends on [control=['for'], data=['key']]
body_parameters = transmute_attrs.body_parameters
if isinstance(body_parameters, str):
name = body_parameters
params.body = Param(argument_name=name, description=transmute_attrs.parameter_descriptions.get(name), arginfo=signature.get_argument(name))
used_keys.add(name) # depends on [control=['if'], data=[]]
else:
used_keys |= load_parameters(params.body, transmute_attrs.body_parameters, signature, transmute_attrs)
# extract the parameters from the paths
for name in _extract_path_parameters_from_paths(transmute_attrs.paths):
params.path[name] = Param(argument_name=name, description=transmute_attrs.parameter_descriptions.get(name), arginfo=signature.get_argument(name))
used_keys.add(name) # depends on [control=['for'], data=['name']]
# check the method type, and decide if the parameters should be extracted
# from query parameters or the body
default_param_key = 'query' if transmute_attrs.methods == set(['GET']) else 'body'
default_params = getattr(params, default_param_key)
# parse all positional params
for arginfo in signature:
if arginfo.name in used_keys:
continue # depends on [control=['if'], data=[]]
used_keys.add(arginfo.name)
default_params[arginfo.name] = Param(arginfo.name, description=transmute_attrs.parameter_descriptions.get(arginfo.name), arginfo=arginfo) # depends on [control=['for'], data=['arginfo']]
return params |
def download_urls(self, project, run=None, entity=None):
        """Generate download urls for the files of a run.
        Args:
            project (str): The project to download
            run (str, optional): The run to upload to
            entity (str, optional): The entity to scope this project to. Defaults to wandb models
        Returns:
            A dict mapping file names to file metadata, e.g.
            {
                'weights.h5': { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
                'model.json': { "url": "https://model.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }
            }
        """
        query = gql('''
        query Model($name: String!, $entity: String!, $run: String!) {
            model(name: $name, entityName: $entity) {
                bucket(name: $run) {
                    files {
                        edges {
                            node {
                                name
                                url
                                md5
                                updatedAt
                            }
                        }
                    }
                }
            }
        }
        ''')
        # Fall back to the configured run/entity when not given explicitly.
        variables = {
            'name': project,
            'run': run or self.settings('run'),
            'entity': entity or self.settings('entity'),
        }
        response = self.gql(query, variable_values=variables)
        file_nodes = self._flatten_edges(response['model']['bucket']['files'])
        urls = {}
        for node in file_nodes:
            if node:
                urls[node['name']] = node
        return urls
constant[Generate download urls
Args:
project (str): The project to download
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
A dict of extensions and urls
{
'weights.h5': { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
'model.json': { "url": "https://model.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }
}
]
variable[query] assign[=] call[name[gql], parameter[constant[
query Model($name: String!, $entity: String!, $run: String!) {
model(name: $name, entityName: $entity) {
bucket(name: $run) {
files {
edges {
node {
name
url
md5
updatedAt
}
}
}
}
}
}
]]]
variable[query_result] assign[=] call[name[self].gql, parameter[name[query]]]
variable[files] assign[=] call[name[self]._flatten_edges, parameter[call[call[call[name[query_result]][constant[model]]][constant[bucket]]][constant[files]]]]
return[<ast.DictComp object at 0x7da18f00ce80>] | keyword[def] identifier[download_urls] ( identifier[self] , identifier[project] , identifier[run] = keyword[None] , identifier[entity] = keyword[None] ):
literal[string]
identifier[query] = identifier[gql] ( literal[string] )
identifier[query_result] = identifier[self] . identifier[gql] ( identifier[query] , identifier[variable_values] ={
literal[string] : identifier[project] , literal[string] : identifier[run] keyword[or] identifier[self] . identifier[settings] ( literal[string] ),
literal[string] : identifier[entity] keyword[or] identifier[self] . identifier[settings] ( literal[string] )})
identifier[files] = identifier[self] . identifier[_flatten_edges] ( identifier[query_result] [ literal[string] ][ literal[string] ][ literal[string] ])
keyword[return] { identifier[file] [ literal[string] ]: identifier[file] keyword[for] identifier[file] keyword[in] identifier[files] keyword[if] identifier[file] } | def download_urls(self, project, run=None, entity=None):
"""Generate download urls
Args:
project (str): The project to download
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
A dict of extensions and urls
{
'weights.h5': { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
'model.json': { "url": "https://model.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }
}
"""
query = gql('\n query Model($name: String!, $entity: String!, $run: String!) {\n model(name: $name, entityName: $entity) {\n bucket(name: $run) {\n files {\n edges {\n node {\n name\n url\n md5\n updatedAt\n }\n }\n }\n }\n }\n }\n ')
query_result = self.gql(query, variable_values={'name': project, 'run': run or self.settings('run'), 'entity': entity or self.settings('entity')})
files = self._flatten_edges(query_result['model']['bucket']['files'])
return {file['name']: file for file in files if file} |
def register(self, name):
        """Decorator factory that stores a named function in the session logic.
        Args:
            name: str. Function name.
            func: obj. Parameterless function to register.
        The following named functions must be registered:
        'LaunchRequest' - logic for launch request.
        'SessionEndedRequest': logic for session ended request.
        In addition, all intents must be registered by their names
        specified in the intent schema.
        The aliased decorators: @launch, @intent(name), and @session_ended
        exist as a convenience for registering specific functions.
        """
        def _decorator(handler):
            """Inner decorator, not used directly.
            Args:
                handler: obj. Parameterless function to register.
            Returns:
                func: decorated function.
            """
            self.logic[name] = handler

            @wraps(handler)
            def _outside_context():
                """Wrapper, not used directly."""
                # Registered handlers may only run inside a request context.
                raise RuntimeError('working outside of request context')
            return _outside_context
        return _decorator
constant[Decorator for registering a named function in the sesion logic.
Args:
name: str. Function name.
func: obj. Parameterless function to register.
The following named functions must be registered:
'LaunchRequest' - logic for launch request.
'SessionEndedRequest': logic for session ended request.
In addition, all intents must be registered by their names specified
in the intent schema.
The aliased decorators: @launch, @intent(name), and @session_ended exist
as a convenience for registering specific functions.
]
def function[decorator, parameter[func]]:
constant[Inner decorator, not used directly.
Args:
func: obj. Parameterless function to register.
Returns:
func: decorated function.
]
call[name[self].logic][name[name]] assign[=] name[func]
def function[wrapper, parameter[]]:
constant[Wrapper, not used directly.]
<ast.Raise object at 0x7da1b1588c40>
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[register] ( identifier[self] , identifier[name] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
literal[string]
identifier[self] . identifier[logic] [ identifier[name] ]= identifier[func]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] ():
literal[string]
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def register(self, name):
"""Decorator for registering a named function in the sesion logic.
Args:
name: str. Function name.
func: obj. Parameterless function to register.
The following named functions must be registered:
'LaunchRequest' - logic for launch request.
'SessionEndedRequest': logic for session ended request.
In addition, all intents must be registered by their names specified
in the intent schema.
The aliased decorators: @launch, @intent(name), and @session_ended exist
as a convenience for registering specific functions.
"""
def decorator(func):
"""Inner decorator, not used directly.
Args:
func: obj. Parameterless function to register.
Returns:
func: decorated function.
"""
self.logic[name] = func
@wraps(func)
def wrapper():
"""Wrapper, not used directly."""
raise RuntimeError('working outside of request context')
return wrapper
return decorator |
def RgbToHtml(r, g, b):
        '''Build the CSS hex representation (#RRGGBB) of an RGB color.
        Parameters:
        :r:
            The Red component value [0...1]
        :g:
            The Green component value [0...1]
        :b:
            The Blue component value [0...1]
        Returns:
            A CSS string representation of this color (#RRGGBB).
        >>> Color.RgbToHtml(1, 0.5, 0)
        '#ff8000'
        '''
        # Scale each channel to 0..255, clamping the top end.
        channels = []
        for component in (r, g, b):
            channels.append(min(round(component * 255), 255))
        return '#{0:02x}{1:02x}{2:02x}'.format(*channels)
constant[Convert the color from (r, g, b) to #RRGGBB.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
A CSS string representation of this color (#RRGGBB).
>>> Color.RgbToHtml(1, 0.5, 0)
'#ff8000'
]
return[binary_operation[constant[#%02x%02x%02x] <ast.Mod object at 0x7da2590d6920> call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20c6c5b70>]]]] | keyword[def] identifier[RgbToHtml] ( identifier[r] , identifier[g] , identifier[b] ):
literal[string]
keyword[return] literal[string] % identifier[tuple] (( identifier[min] ( identifier[round] ( identifier[v] * literal[int] ), literal[int] ) keyword[for] identifier[v] keyword[in] ( identifier[r] , identifier[g] , identifier[b] ))) | def RgbToHtml(r, g, b):
"""Convert the color from (r, g, b) to #RRGGBB.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
A CSS string representation of this color (#RRGGBB).
>>> Color.RgbToHtml(1, 0.5, 0)
'#ff8000'
"""
return '#%02x%02x%02x' % tuple((min(round(v * 255), 255) for v in (r, g, b))) |
def _set_persistent_boot(self, values=()):
    """Configures a boot from a specific device.

    Builds a SET_PERSISTENT_BOOT RIBCL request, appends one DEVICE
    element per entry in `values`, sends the request to iLO and returns
    the parsed response.

    :param values: iterable of boot device names to set, in priority
        order. Defaults to an empty tuple.
        NOTE: the default was changed from a mutable list ([]) to an
        immutable tuple; the argument is only iterated, so callers are
        unaffected.
    :returns: the parsed iLO response data.
    """
    xml = self._create_dynamic_xml(
        'SET_PERSISTENT_BOOT', 'SERVER_INFO', 'write')
    # ElementTree.getiterator() only exists on Python 2; iter() is the
    # Python 3 spelling of the same depth-first traversal.
    if six.PY2:
        child_iterator = xml.getiterator()
    else:
        child_iterator = xml.iter()
    for child in child_iterator:
        # The tag test is invariant w.r.t. the inner loop, so check it
        # once per child instead of once per (child, value) pair.
        if child.tag == 'SET_PERSISTENT_BOOT':
            for val in values:
                etree.SubElement(child, 'DEVICE', VALUE=val)
    d = self._request_ilo(xml)
    data = self._parse_output(d)
    return data
constant[Configures a boot from a specific device.]
variable[xml] assign[=] call[name[self]._create_dynamic_xml, parameter[constant[SET_PERSISTENT_BOOT], constant[SERVER_INFO], constant[write]]]
if name[six].PY2 begin[:]
variable[child_iterator] assign[=] call[name[xml].getiterator, parameter[]]
for taget[name[child]] in starred[name[child_iterator]] begin[:]
for taget[name[val]] in starred[name[values]] begin[:]
if compare[name[child].tag equal[==] constant[SET_PERSISTENT_BOOT]] begin[:]
call[name[etree].SubElement, parameter[name[child], constant[DEVICE]]]
variable[d] assign[=] call[name[self]._request_ilo, parameter[name[xml]]]
variable[data] assign[=] call[name[self]._parse_output, parameter[name[d]]]
return[name[data]] | keyword[def] identifier[_set_persistent_boot] ( identifier[self] , identifier[values] =[]):
literal[string]
identifier[xml] = identifier[self] . identifier[_create_dynamic_xml] (
literal[string] , literal[string] , literal[string] )
keyword[if] identifier[six] . identifier[PY2] :
identifier[child_iterator] = identifier[xml] . identifier[getiterator] ()
keyword[else] :
identifier[child_iterator] = identifier[xml] . identifier[iter] ()
keyword[for] identifier[child] keyword[in] identifier[child_iterator] :
keyword[for] identifier[val] keyword[in] identifier[values] :
keyword[if] identifier[child] . identifier[tag] == literal[string] :
identifier[etree] . identifier[SubElement] ( identifier[child] , literal[string] , identifier[VALUE] = identifier[val] )
identifier[d] = identifier[self] . identifier[_request_ilo] ( identifier[xml] )
identifier[data] = identifier[self] . identifier[_parse_output] ( identifier[d] )
keyword[return] identifier[data] | def _set_persistent_boot(self, values=[]):
"""Configures a boot from a specific device."""
xml = self._create_dynamic_xml('SET_PERSISTENT_BOOT', 'SERVER_INFO', 'write')
if six.PY2:
child_iterator = xml.getiterator() # depends on [control=['if'], data=[]]
else:
child_iterator = xml.iter()
for child in child_iterator:
for val in values:
if child.tag == 'SET_PERSISTENT_BOOT':
etree.SubElement(child, 'DEVICE', VALUE=val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['val']] # depends on [control=['for'], data=['child']]
d = self._request_ilo(xml)
data = self._parse_output(d)
return data |
def onMessageDelivered(
    self,
    msg_ids=None,
    delivered_for=None,
    thread_id=None,
    thread_type=ThreadType.USER,
    ts=None,
    metadata=None,
    msg=None,
):
    """
    Called when the client is listening, and somebody marks messages as delivered

    :param msg_ids: The messages that are marked as delivered
    :param delivered_for: The person that marked the messages as delivered
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action, in milliseconds
    :param metadata: Extra metadata about the action
    :param msg: A full set of the data received
    :type thread_type: models.ThreadType
    """
    # `ts` defaults to None, so guard the millisecond->second conversion:
    # previously `ts / 1000` raised TypeError when the handler was invoked
    # with its own default arguments.
    seconds = ts / 1000 if ts is not None else None
    log.info(
        "Messages {} delivered to {} in {} ({}) at {}s".format(
            msg_ids, delivered_for, thread_id, thread_type.name, seconds
        )
    )
constant[
Called when the client is listening, and somebody marks messages as delivered
:param msg_ids: The messages that are marked as delivered
:param delivered_for: The person that marked the messages as delivered
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param metadata: Extra metadata about the action
:param msg: A full set of the data recieved
:type thread_type: models.ThreadType
]
call[name[log].info, parameter[call[constant[Messages {} delivered to {} in {} ({}) at {}s].format, parameter[name[msg_ids], name[delivered_for], name[thread_id], name[thread_type].name, binary_operation[name[ts] / constant[1000]]]]]] | keyword[def] identifier[onMessageDelivered] (
identifier[self] ,
identifier[msg_ids] = keyword[None] ,
identifier[delivered_for] = keyword[None] ,
identifier[thread_id] = keyword[None] ,
identifier[thread_type] = identifier[ThreadType] . identifier[USER] ,
identifier[ts] = keyword[None] ,
identifier[metadata] = keyword[None] ,
identifier[msg] = keyword[None] ,
):
literal[string]
identifier[log] . identifier[info] (
literal[string] . identifier[format] (
identifier[msg_ids] , identifier[delivered_for] , identifier[thread_id] , identifier[thread_type] . identifier[name] , identifier[ts] / literal[int]
)
) | def onMessageDelivered(self, msg_ids=None, delivered_for=None, thread_id=None, thread_type=ThreadType.USER, ts=None, metadata=None, msg=None):
"""
Called when the client is listening, and somebody marks messages as delivered
:param msg_ids: The messages that are marked as delivered
:param delivered_for: The person that marked the messages as delivered
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param metadata: Extra metadata about the action
:param msg: A full set of the data recieved
:type thread_type: models.ThreadType
"""
log.info('Messages {} delivered to {} in {} ({}) at {}s'.format(msg_ids, delivered_for, thread_id, thread_type.name, ts / 1000)) |
def twolsp_checkplot_png(lspinfo1,
                         lspinfo2,
                         times,
                         mags,
                         errs,
                         varepoch='min',
                         magsarefluxes=False,
                         objectinfo=None,
                         findercmap='gray_r',
                         finderconvolve=None,
                         findercachedir='~/.astrobase/stamp-cache',
                         normto='globalmedian',
                         normmingap=4.0,
                         sigclip=4.0,
                         phasewrap=True,
                         phasesort=True,
                         phasebin=0.002,
                         minbinelems=7,
                         plotxlim=(-0.8,0.8),
                         unphasedms=2.0,
                         phasems=2.0,
                         phasebinms=4.0,
                         xliminsetmode=False,
                         bestperiodhighlight=None,
                         plotdpi=100,
                         outfile=None,
                         verbose=True):
    '''This makes a checkplot using results from two independent period-finders.
    Adapted from Luke Bouma's implementation of a similar function in his
    work. This makes a special checkplot that uses two lspinfo dictionaries,
    from two independent period-finding methods. For EBs, it's probably best to
    use Stellingwerf PDM or Schwarzenberg-Czerny AoV as one of these, and the
    Box Least-squared Search method as the other one.
    The checkplot layout in this case is::
        [ pgram1 + objectinfo ] [ pgram2             ] [ unphased LC        ]
        [ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
        [ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
    where:
    - pgram1 is the plot for the periodogram in the lspinfo1 dict
    - pgram1 P1, P2, and P3 are the best three periods from lspinfo1
    - pgram2 is the plot for the periodogram in the lspinfo2 dict
    - pgram2 P1, P2, and P3 are the best three periods from lspinfo2
    Note that we take the output file name from lspinfo1 if lspinfo1 is a string
    filename pointing to a (gzipped) pickle containing the results dict from a
    period-finding routine similar to those in periodbase.
    Parameters
    ----------
    lspinfo1,lspinfo2 : dict or str
        If this is a dict, it must be a dict produced by an
        `astrobase.periodbase` period-finder function or a dict from your own
        period-finder function or routine that is of the form below with at
        least these keys::
            {'periods': np.array of all periods searched by the period-finder,
             'lspvals': np.array of periodogram power value for each period,
             'bestperiod': a float value that is the period with the highest
                           peak in the periodogram, i.e. the most-likely actual
                           period,
             'method': a three-letter code naming the period-finder used; must
                       be one of the keys in the
                       `astrobase.periodbase.METHODLABELS` dict,
             'nbestperiods': a list of the periods corresponding to periodogram
                             peaks (`nbestlspvals` below) to annotate on the
                             periodogram plot so they can be called out
                             visually,
             'nbestlspvals': a list of the power values associated with
                             periodogram peaks to annotate on the periodogram
                             plot so they can be called out visually; should be
                             the same length as `nbestperiods` above}
        `nbestperiods` and `nbestlspvals` must have at least 3 elements each,
        e.g. describing the three 'best' (highest power) peaks in the
        periodogram.
        If lspinfo is a str, then it must be a path to a pickle file (ending
        with the extension '.pkl' or '.pkl.gz') that contains a dict of the form
        described above.
    times,mags,errs : np.array
        The mag/flux time-series arrays to process along with associated errors.
    varepoch : 'min' or float or None or list of lists
        This sets the time of minimum light finding strategy for the checkplot::
            If `varepoch` is None                 -> the epoch used for all
                                                     phased light curve plots
                                                     will be `min(times)`.
            If `varepoch='min'`                   -> automatic epoch finding for
                                                     all periods using light
                                                     curve fits.
            If varepoch is a single float         -> this epoch will be used for
                                                     all phased light curve
                                                     plots.
            If varepoch is a list of floats       -> each epoch will be applied
            with length = `len(nbestperiods)`        to the phased light curve
            from period-finder results               for each period
                                                     specifically.
        If you use a list for varepoch, it must be of length
        `len(lspinfo['nbestperiods'])`.
    magsarefluxes : bool
        If True, indicates the input time-series is fluxes and not mags so the
        plot y-axis direction and range can be set appropriately.
    objectinfo : dict or None
        If provided, this is a dict containing information on the object whose
        light curve is being processed. This function will then be able to look
        up and download a finder chart for this object and write that to the
        output checkplot PNG image. The `objectinfo` dict must be of the form
        and contain at least the keys described below::
            {'objectid': the name of the object,
             'ra': the right ascension of the object in decimal degrees,
             'decl': the declination of the object in decimal degrees,
             'ndet': the number of observations of this object}
        You can also provide magnitudes and proper motions of the object using
        the following keys and the appropriate values in the `objectinfo`
        dict. These will be used to calculate colors, total and reduced proper
        motion, etc. and display these in the output checkplot PNG.
        - SDSS mag keys: 'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz'
        - 2MASS mag keys: 'jmag', 'hmag', 'kmag'
        - Cousins mag keys: 'bmag', 'vmag'
        - GAIA specific keys: 'gmag', 'teff'
        - proper motion keys: 'pmra', 'pmdecl'
    findercmap : str or matplotlib.cm.ColorMap object
        The Colormap object to use for the finder chart image.
    finderconvolve : astropy.convolution.Kernel object or None
        If not None, the Kernel object to use for convolving the finder image.
    findercachedir : str
        The directory where the FITS finder images are downloaded and cached.
    normto : {'globalmedian', 'zero'} or a float
        This sets the LC normalization target::
            'globalmedian' -> norms each mag to global median of the LC column
            'zero'         -> norms each mag to zero
            a float        -> norms each mag to this specified float value.
    normmingap : float
        This defines how much the difference between consecutive measurements is
        allowed to be to consider them as parts of different timegroups. By
        default it is set to 4.0 days.
    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.
        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.
        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.
    phasewrap : bool
        If this is True, the phased time-series will be wrapped around phase
        0.0.
    phasesort : bool
        If this is True, the phased time-series will be sorted in phase.
    phasebin : float or None
        If this is provided, indicates the bin size to use to group together
        measurements closer than this amount in phase. This is in units of
        phase. The binned phased light curve will be overplotted on top of the
        phased light curve. Useful for when one has many measurement points and
        needs to pick out a small trend in an otherwise noisy phased light
        curve.
    minbinelems : int
        The minimum number of elements in each phase bin.
    plotxlim : sequence of two floats or None
        The x-axis limits to use when making the phased light curve plot. By
        default, this is (-0.8, 0.8), which places phase 0.0 at the center of
        the plot and covers approximately two cycles in phase to make any trends
        clear.
    unphasedms : float
        The marker size to use for the main unphased light curve plot symbols.
    phasems : float
        The marker size to use for the main phased light curve plot symbols.
    phasebinms : float
        The marker size to use for the binned phased light curve plot symbols.
    xliminsetmode : bool
        If this is True, the generated phased light curve plot will use the
        values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
        `plotxlim` is a range smaller than the full phase range), and will show
        the full phased light curve plot as an smaller inset. Useful for
        planetary transit light curves.
    bestperiodhighlight : str or None
        If not None, this is a str with a matplotlib color specification to use
        as the background color to highlight the phased light curve plot of the
        'best' period and epoch combination. If None, no highlight will be
        applied.
    outfile : str or None
        The file name of the file to save the checkplot to. If this is None,
        will write to a file called 'checkplot.png' in the current working
        directory.
    plotdpi : int
        Sets the resolution in DPI for PNG plots (default = 100).
    verbose : bool
        If False, turns off many of the informational messages. Useful for
        when an external function is driving lots of `checkplot_png` calls.
    Returns
    -------
    str
        The file path to the generated checkplot PNG file.
    '''
    # generate the plot filename
    # (derived from the lspinfo1 pickle path when no explicit outfile given)
    if not outfile and isinstance(lspinfo1,str):
        plotfpath = os.path.join(
            os.path.dirname(lspinfo1),
            'twolsp-checkplot-%s.png' % (
                os.path.basename(lspinfo1),
            )
        )
    elif outfile:
        plotfpath = outfile
    else:
        # fall back to a generic name in the current working directory
        plotfpath = 'twolsp-checkplot.png'
    # get the first LSP from a pickle file transparently
    if isinstance(lspinfo1,str) and os.path.exists(lspinfo1):
        if verbose:
            LOGINFO('loading LSP info from pickle %s' % lspinfo1)
        if '.gz' in lspinfo1:
            with gzip.open(lspinfo1,'rb') as infd:
                lspinfo1 = pickle.load(infd)
        else:
            with open(lspinfo1,'rb') as infd:
                lspinfo1 = pickle.load(infd)
    # get the second LSP from a pickle file transparently
    if isinstance(lspinfo2,str) and os.path.exists(lspinfo2):
        if verbose:
            LOGINFO('loading LSP info from pickle %s' % lspinfo2)
        if '.gz' in lspinfo2:
            with gzip.open(lspinfo2,'rb') as infd:
                lspinfo2 = pickle.load(infd)
        else:
            with open(lspinfo2,'rb') as infd:
                lspinfo2 = pickle.load(infd)
    # get the things to plot out of the data
    # (both dicts must carry the full period-finder result schema)
    if ('periods' in lspinfo1 and 'periods' in lspinfo2 and
        'lspvals' in lspinfo1 and 'lspvals' in lspinfo2 and
        'bestperiod' in lspinfo1 and 'bestperiod' in lspinfo2):
        bestperiod1 = lspinfo1['bestperiod']
        nbestperiods1 = lspinfo1['nbestperiods']
        lspmethod1 = lspinfo1['method']
        bestperiod2 = lspinfo2['bestperiod']
        nbestperiods2 = lspinfo2['nbestperiods']
        lspmethod2 = lspinfo2['method']
    else:
        LOGERROR('could not understand lspinfo1 or lspinfo2 '
                 'for this object, skipping...')
        return None
    # a NaN/inf best period from either finder means there's nothing to phase
    if (not npisfinite(bestperiod1)) or (not npisfinite(bestperiod2)):
        LOGWARNING('no best period found for this object, skipping...')
        return None
    # initialize the plot
    fig, axes = plt.subplots(3,3)
    # flatten the 3x3 grid so panels can be addressed as axes[0]..axes[8]
    axes = npravel(axes)
    # this is a full page plot
    fig.set_size_inches(30,24)
    ######################################################################
    ## PLOT 1 is the LSP from lspinfo1, including objectinfo and finder ##
    ######################################################################
    _make_periodogram(axes[0], lspinfo1, objectinfo,
                      findercmap, finderconvolve,
                      verbose=verbose,
                      findercachedir=findercachedir)
    #####################################
    ## PLOT 2 is the LSP from lspinfo2 ##
    #####################################
    # objectinfo is None here: the finder/object panel was drawn in PLOT 1
    _make_periodogram(axes[1], lspinfo2, None,
                      findercmap, finderconvolve)
    ##########################################
    ## FIX UP THE MAGS AND REMOVE BAD STUFF ##
    ##########################################
    # sigclip first
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)
    # take care of the normalization
    # (normto=False disables normalization entirely)
    if normto is not False:
        stimes, smags = normalize_magseries(stimes, smags,
                                            normto=normto,
                                            magsarefluxes=magsarefluxes,
                                            mingap=normmingap)
    # make sure we have some lightcurve points to plot after sigclip
    if len(stimes) >= 50:
        ##############################
        ## PLOT 3 is an unphased LC ##
        ##############################
        _make_magseries_plot(axes[2], stimes, smags, serrs,
                             magsarefluxes=magsarefluxes,
                             ms=unphasedms)
        # make the plot for each best period
        lspbestperiods1 = nbestperiods1[::]  # shallow copies of the period
        lspbestperiods2 = nbestperiods2[::]  # lists from each finder
        ##########################################################
        ### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO1 ###
        ##########################################################
        for periodind, varperiod, plotaxes in zip([0,1,2],
                                                  lspbestperiods1[:3],
                                                  [axes[3], axes[4], axes[5]]):
            # make sure the best period phased LC plot stands out
            if periodind == 0 and bestperiodhighlight:
                # matplotlib 2.0 renamed set_axis_bgcolor -> set_facecolor
                if MPLVERSION >= (2,0,0):
                    plotaxes.set_facecolor(bestperiodhighlight)
                else:
                    plotaxes.set_axis_bgcolor(bestperiodhighlight)
            _make_phased_magseries_plot(plotaxes,
                                        periodind,
                                        stimes, smags, serrs,
                                        varperiod, varepoch,
                                        phasewrap, phasesort,
                                        phasebin, minbinelems,
                                        plotxlim, lspmethod1,
                                        lspmethodind=0,
                                        twolspmode=True,
                                        magsarefluxes=magsarefluxes,
                                        xliminsetmode=xliminsetmode,
                                        verbose=verbose,
                                        phasems=phasems,
                                        phasebinms=phasebinms)
        ##########################################################
        ### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO2 ###
        ##########################################################
        for periodind, varperiod, plotaxes in zip([0,1,2],
                                                  lspbestperiods2[:3],
                                                  [axes[6], axes[7], axes[8]]):
            # make sure the best period phased LC plot stands out
            if periodind == 0 and bestperiodhighlight:
                # matplotlib 2.0 renamed set_axis_bgcolor -> set_facecolor
                if MPLVERSION >= (2,0,0):
                    plotaxes.set_facecolor(bestperiodhighlight)
                else:
                    plotaxes.set_axis_bgcolor(bestperiodhighlight)
            _make_phased_magseries_plot(plotaxes,
                                        periodind,
                                        stimes, smags, serrs,
                                        varperiod, varepoch,
                                        phasewrap, phasesort,
                                        phasebin, minbinelems,
                                        plotxlim, lspmethod2,
                                        lspmethodind=1,
                                        twolspmode=True,
                                        magsarefluxes=magsarefluxes,
                                        xliminsetmode=xliminsetmode,
                                        verbose=verbose,
                                        phasems=phasems,
                                        phasebinms=phasebinms)
        # end of plotting for each ax
        # save the plot to disk
        fig.set_tight_layout(True)
        if plotfpath.endswith('.png'):
            fig.savefig(plotfpath,dpi=plotdpi)
        else:
            fig.savefig(plotfpath)
        plt.close()
        if verbose:
            LOGINFO('checkplot done -> %s' % plotfpath)
        return plotfpath
    # otherwise, there's no valid data for this plot
    else:
        LOGWARNING('no good data')
        # NOTE(review): the 3x3 grid has seven LC panels (axes[2]..axes[8]);
        # range(5) only annotates five of them (axes[2]..axes[6]) -- confirm
        # whether range(7) was intended here.
        for periodind in range(5):
            axes[periodind+2].text(
                0.5,0.5,
                ('no best aperture light curve available'),
                horizontalalignment='center',
                verticalalignment='center',
                transform=axes[periodind+2].transAxes
            )
        fig.set_tight_layout(True)
        if plotfpath.endswith('.png'):
            fig.savefig(plotfpath, dpi=plotdpi)
        else:
            fig.savefig(plotfpath)
        plt.close()
        if verbose:
            LOGINFO('checkplot done -> %s' % plotfpath)
        return plotfpath
constant[This makes a checkplot using results from two independent period-finders.
Adapted from Luke Bouma's implementation of a similar function in his
work. This makes a special checkplot that uses two lspinfo dictionaries,
from two independent period-finding methods. For EBs, it's probably best to
use Stellingwerf PDM or Schwarzenberg-Czerny AoV as one of these, and the
Box Least-squared Search method as the other one.
The checkplot layout in this case is::
[ pgram1 + objectinfo ] [ pgram2 ] [ unphased LC ]
[ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
[ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
where:
- pgram1 is the plot for the periodogram in the lspinfo1 dict
- pgram1 P1, P2, and P3 are the best three periods from lspinfo1
- pgram2 is the plot for the periodogram in the lspinfo2 dict
- pgram2 P1, P2, and P3 are the best three periods from lspinfo2
Note that we take the output file name from lspinfo1 if lspinfo1 is a string
filename pointing to a (gzipped) pickle containing the results dict from a
period-finding routine similar to those in periodbase.
Parameters
----------
lspinfo1,lspinfo2 : dict or str
If this is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 3 elements each,
e.g. describing the three 'best' (highest power) peaks in the
periodogram.
If lspinfo is a str, then it must be a path to a pickle file (ending
with the extension '.pkl' or '.pkl.gz') that contains a dict of the form
described above.
times,mags,errs : np.array
The mag/flux time-series arrays to process along with associated errors.
varepoch : 'min' or float or None or list of lists
This sets the time of minimum light finding strategy for the checkplot::
the epoch used for all phased
If `varepoch` is None -> light curve plots will be
`min(times)`.
If `varepoch='min'` -> automatic epoch finding for all
periods using light curve fits.
If varepoch is a single float -> this epoch will be used for all
phased light curve plots
If varepoch is a list of floats each epoch will be applied to
with length = `len(nbestperiods)` -> the phased light curve for each
from period-finder results period specifically
If you use a list for varepoch, it must be of length
`len(lspinfo['nbestperiods'])`.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately/
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
output checkplot PNG image.The `objectinfo` dict must be of the form and
contain at least the keys described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG.
- SDSS mag keys: 'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz'
- 2MASS mag keys: 'jmag', 'hmag', 'kmag'
- Cousins mag keys: 'bmag', 'vmag'
- GAIA specific keys: 'gmag', 'teff'
- proper motion keys: 'pmra', 'pmdecl'
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
findercachedir : str
The directory where the FITS finder images are downloaded and cached.
normto : {'globalmedian', 'zero'} or a float
This sets the LC normalization target::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
phasewrap : bool
If this is True, the phased time-series will be wrapped around phase
0.0.
phasesort : bool
If this is True, the phased time-series will be sorted in phase.
phasebin : float or None
If this is provided, indicates the bin size to use to group together
measurements closer than this amount in phase. This is in units of
phase. The binned phased light curve will be overplotted on top of the
phased light curve. Useful for when one has many measurement points and
needs to pick out a small trend in an otherwise noisy phased light
curve.
minbinelems : int
The minimum number of elements in each phase bin.
plotxlim : sequence of two floats or None
The x-axis limits to use when making the phased light curve plot. By
default, this is (-0.8, 0.8), which places phase 0.0 at the center of
the plot and covers approximately two cycles in phase to make any trends
clear.
unphasedms : float
The marker size to use for the main unphased light curve plot symbols.
phasems : float
The marker size to use for the main phased light curve plot symbols.
phasebinms : float
The marker size to use for the binned phased light curve plot symbols.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
the full phased light curve plot as an smaller inset. Useful for
planetary transit light curves.
bestperiodhighlight : str or None
If not None, this is a str with a matplotlib color specification to use
as the background color to highlight the phased light curve plot of the
'best' period and epoch combination. If None, no highlight will be
applied.
outfile : str or None
The file name of the file to save the checkplot to. If this is None,
will write to a file called 'checkplot.png' in the current working
directory.
plotdpi : int
Sets the resolution in DPI for PNG plots (default = 100).
verbose : bool
If False, turns off many of the informational messages. Useful for
when an external function is driving lots of `checkplot_png` calls.
Returns
-------
str
The file path to the generated checkplot PNG file.
]
if <ast.BoolOp object at 0x7da1affeb4c0> begin[:]
variable[plotfpath] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[lspinfo1]]], binary_operation[constant[twolsp-checkplot-%s.png] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1affe8f10>]]]]]
if <ast.BoolOp object at 0x7da1affe8c70> begin[:]
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[loading LSP info from pickle %s] <ast.Mod object at 0x7da2590d6920> name[lspinfo1]]]]
if compare[constant[.gz] in name[lspinfo1]] begin[:]
with call[name[gzip].open, parameter[name[lspinfo1], constant[rb]]] begin[:]
variable[lspinfo1] assign[=] call[name[pickle].load, parameter[name[infd]]]
if <ast.BoolOp object at 0x7da1affe8280> begin[:]
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[loading LSP info from pickle %s] <ast.Mod object at 0x7da2590d6920> name[lspinfo2]]]]
if compare[constant[.gz] in name[lspinfo2]] begin[:]
with call[name[gzip].open, parameter[name[lspinfo2], constant[rb]]] begin[:]
variable[lspinfo2] assign[=] call[name[pickle].load, parameter[name[infd]]]
if <ast.BoolOp object at 0x7da1b00cb490> begin[:]
variable[bestperiod1] assign[=] call[name[lspinfo1]][constant[bestperiod]]
variable[nbestperiods1] assign[=] call[name[lspinfo1]][constant[nbestperiods]]
variable[lspmethod1] assign[=] call[name[lspinfo1]][constant[method]]
variable[bestperiod2] assign[=] call[name[lspinfo2]][constant[bestperiod]]
variable[nbestperiods2] assign[=] call[name[lspinfo2]][constant[nbestperiods]]
variable[lspmethod2] assign[=] call[name[lspinfo2]][constant[method]]
if <ast.BoolOp object at 0x7da1aff4da20> begin[:]
call[name[LOGWARNING], parameter[constant[no best period found for this object, skipping...]]]
return[constant[None]]
<ast.Tuple object at 0x7da1aff4f0d0> assign[=] call[name[plt].subplots, parameter[constant[3], constant[3]]]
variable[axes] assign[=] call[name[npravel], parameter[name[axes]]]
call[name[fig].set_size_inches, parameter[constant[30], constant[24]]]
call[name[_make_periodogram], parameter[call[name[axes]][constant[0]], name[lspinfo1], name[objectinfo], name[findercmap], name[finderconvolve]]]
call[name[_make_periodogram], parameter[call[name[axes]][constant[1]], name[lspinfo2], constant[None], name[findercmap], name[finderconvolve]]]
<ast.Tuple object at 0x7da1aff4d150> assign[=] call[name[sigclip_magseries], parameter[name[times], name[mags], name[errs]]]
if compare[name[normto] is_not constant[False]] begin[:]
<ast.Tuple object at 0x7da1aff42920> assign[=] call[name[normalize_magseries], parameter[name[stimes], name[smags]]]
if compare[call[name[len], parameter[name[stimes]]] greater_or_equal[>=] constant[50]] begin[:]
call[name[_make_magseries_plot], parameter[call[name[axes]][constant[2]], name[stimes], name[smags], name[serrs]]]
variable[lspbestperiods1] assign[=] call[name[nbestperiods1]][<ast.Slice object at 0x7da1aff43880>]
variable[lspbestperiods2] assign[=] call[name[nbestperiods2]][<ast.Slice object at 0x7da1aff43790>]
for taget[tuple[[<ast.Name object at 0x7da1aff43700>, <ast.Name object at 0x7da1aff436d0>, <ast.Name object at 0x7da1aff436a0>]]] in starred[call[name[zip], parameter[list[[<ast.Constant object at 0x7da1aff435e0>, <ast.Constant object at 0x7da1aff435b0>, <ast.Constant object at 0x7da1aff43580>]], call[name[lspbestperiods1]][<ast.Slice object at 0x7da1aff434f0>], list[[<ast.Subscript object at 0x7da1aff43460>, <ast.Subscript object at 0x7da1aff433d0>, <ast.Subscript object at 0x7da1aff43340>]]]]] begin[:]
if <ast.BoolOp object at 0x7da1aff43280> begin[:]
if compare[name[MPLVERSION] greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da1aff430d0>, <ast.Constant object at 0x7da1aff430a0>, <ast.Constant object at 0x7da1aff43070>]]] begin[:]
call[name[plotaxes].set_facecolor, parameter[name[bestperiodhighlight]]]
call[name[_make_phased_magseries_plot], parameter[name[plotaxes], name[periodind], name[stimes], name[smags], name[serrs], name[varperiod], name[varepoch], name[phasewrap], name[phasesort], name[phasebin], name[minbinelems], name[plotxlim], name[lspmethod1]]]
for taget[tuple[[<ast.Name object at 0x7da1aff42080>, <ast.Name object at 0x7da1aff420b0>, <ast.Name object at 0x7da1aff420e0>]]] in starred[call[name[zip], parameter[list[[<ast.Constant object at 0x7da1aff421a0>, <ast.Constant object at 0x7da1aff421d0>, <ast.Constant object at 0x7da1aff42200>]], call[name[lspbestperiods2]][<ast.Slice object at 0x7da1aff42290>], list[[<ast.Subscript object at 0x7da1aff42320>, <ast.Subscript object at 0x7da1aff423b0>, <ast.Subscript object at 0x7da1aff42440>]]]]] begin[:]
if <ast.BoolOp object at 0x7da1aff42500> begin[:]
if compare[name[MPLVERSION] greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da1aff202e0>, <ast.Constant object at 0x7da1aff20310>, <ast.Constant object at 0x7da1aff20340>]]] begin[:]
call[name[plotaxes].set_facecolor, parameter[name[bestperiodhighlight]]]
call[name[_make_phased_magseries_plot], parameter[name[plotaxes], name[periodind], name[stimes], name[smags], name[serrs], name[varperiod], name[varepoch], name[phasewrap], name[phasesort], name[phasebin], name[minbinelems], name[plotxlim], name[lspmethod2]]]
call[name[fig].set_tight_layout, parameter[constant[True]]]
if call[name[plotfpath].endswith, parameter[constant[.png]]] begin[:]
call[name[fig].savefig, parameter[name[plotfpath]]]
call[name[plt].close, parameter[]]
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[checkplot done -> %s] <ast.Mod object at 0x7da2590d6920> name[plotfpath]]]]
return[name[plotfpath]] | keyword[def] identifier[twolsp_checkplot_png] ( identifier[lspinfo1] ,
identifier[lspinfo2] ,
identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[varepoch] = literal[string] ,
identifier[magsarefluxes] = keyword[False] ,
identifier[objectinfo] = keyword[None] ,
identifier[findercmap] = literal[string] ,
identifier[finderconvolve] = keyword[None] ,
identifier[findercachedir] = literal[string] ,
identifier[normto] = literal[string] ,
identifier[normmingap] = literal[int] ,
identifier[sigclip] = literal[int] ,
identifier[phasewrap] = keyword[True] ,
identifier[phasesort] = keyword[True] ,
identifier[phasebin] = literal[int] ,
identifier[minbinelems] = literal[int] ,
identifier[plotxlim] =(- literal[int] , literal[int] ),
identifier[unphasedms] = literal[int] ,
identifier[phasems] = literal[int] ,
identifier[phasebinms] = literal[int] ,
identifier[xliminsetmode] = keyword[False] ,
identifier[bestperiodhighlight] = keyword[None] ,
identifier[plotdpi] = literal[int] ,
identifier[outfile] = keyword[None] ,
identifier[verbose] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[outfile] keyword[and] identifier[isinstance] ( identifier[lspinfo1] , identifier[str] ):
identifier[plotfpath] = identifier[os] . identifier[path] . identifier[join] (
identifier[os] . identifier[path] . identifier[dirname] ( identifier[lspinfo1] ),
literal[string] %(
identifier[os] . identifier[path] . identifier[basename] ( identifier[lspinfo1] ),
)
)
keyword[elif] identifier[outfile] :
identifier[plotfpath] = identifier[outfile]
keyword[else] :
identifier[plotfpath] = literal[string]
keyword[if] identifier[isinstance] ( identifier[lspinfo1] , identifier[str] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[lspinfo1] ):
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string] % identifier[lspinfo1] )
keyword[if] literal[string] keyword[in] identifier[lspinfo1] :
keyword[with] identifier[gzip] . identifier[open] ( identifier[lspinfo1] , literal[string] ) keyword[as] identifier[infd] :
identifier[lspinfo1] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[else] :
keyword[with] identifier[open] ( identifier[lspinfo1] , literal[string] ) keyword[as] identifier[infd] :
identifier[lspinfo1] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[if] identifier[isinstance] ( identifier[lspinfo2] , identifier[str] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[lspinfo2] ):
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string] % identifier[lspinfo2] )
keyword[if] literal[string] keyword[in] identifier[lspinfo2] :
keyword[with] identifier[gzip] . identifier[open] ( identifier[lspinfo2] , literal[string] ) keyword[as] identifier[infd] :
identifier[lspinfo2] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[else] :
keyword[with] identifier[open] ( identifier[lspinfo2] , literal[string] ) keyword[as] identifier[infd] :
identifier[lspinfo2] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[if] ( literal[string] keyword[in] identifier[lspinfo1] keyword[and] literal[string] keyword[in] identifier[lspinfo2] keyword[and]
literal[string] keyword[in] identifier[lspinfo1] keyword[and] literal[string] keyword[in] identifier[lspinfo2] keyword[and]
literal[string] keyword[in] identifier[lspinfo1] keyword[and] literal[string] keyword[in] identifier[lspinfo2] ):
identifier[bestperiod1] = identifier[lspinfo1] [ literal[string] ]
identifier[nbestperiods1] = identifier[lspinfo1] [ literal[string] ]
identifier[lspmethod1] = identifier[lspinfo1] [ literal[string] ]
identifier[bestperiod2] = identifier[lspinfo2] [ literal[string] ]
identifier[nbestperiods2] = identifier[lspinfo2] [ literal[string] ]
identifier[lspmethod2] = identifier[lspinfo2] [ literal[string] ]
keyword[else] :
identifier[LOGERROR] ( literal[string]
literal[string] )
keyword[return] keyword[None]
keyword[if] ( keyword[not] identifier[npisfinite] ( identifier[bestperiod1] )) keyword[or] ( keyword[not] identifier[npisfinite] ( identifier[bestperiod2] )):
identifier[LOGWARNING] ( literal[string] )
keyword[return] keyword[None]
identifier[fig] , identifier[axes] = identifier[plt] . identifier[subplots] ( literal[int] , literal[int] )
identifier[axes] = identifier[npravel] ( identifier[axes] )
identifier[fig] . identifier[set_size_inches] ( literal[int] , literal[int] )
identifier[_make_periodogram] ( identifier[axes] [ literal[int] ], identifier[lspinfo1] , identifier[objectinfo] ,
identifier[findercmap] , identifier[finderconvolve] ,
identifier[verbose] = identifier[verbose] ,
identifier[findercachedir] = identifier[findercachedir] )
identifier[_make_periodogram] ( identifier[axes] [ literal[int] ], identifier[lspinfo2] , keyword[None] ,
identifier[findercmap] , identifier[finderconvolve] )
identifier[stimes] , identifier[smags] , identifier[serrs] = identifier[sigclip_magseries] ( identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[sigclip] = identifier[sigclip] )
keyword[if] identifier[normto] keyword[is] keyword[not] keyword[False] :
identifier[stimes] , identifier[smags] = identifier[normalize_magseries] ( identifier[stimes] , identifier[smags] ,
identifier[normto] = identifier[normto] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[mingap] = identifier[normmingap] )
keyword[if] identifier[len] ( identifier[stimes] )>= literal[int] :
identifier[_make_magseries_plot] ( identifier[axes] [ literal[int] ], identifier[stimes] , identifier[smags] , identifier[serrs] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[ms] = identifier[unphasedms] )
identifier[lspbestperiods1] = identifier[nbestperiods1] [::]
identifier[lspbestperiods2] = identifier[nbestperiods2] [::]
keyword[for] identifier[periodind] , identifier[varperiod] , identifier[plotaxes] keyword[in] identifier[zip] ([ literal[int] , literal[int] , literal[int] ],
identifier[lspbestperiods1] [: literal[int] ],
[ identifier[axes] [ literal[int] ], identifier[axes] [ literal[int] ], identifier[axes] [ literal[int] ]]):
keyword[if] identifier[periodind] == literal[int] keyword[and] identifier[bestperiodhighlight] :
keyword[if] identifier[MPLVERSION] >=( literal[int] , literal[int] , literal[int] ):
identifier[plotaxes] . identifier[set_facecolor] ( identifier[bestperiodhighlight] )
keyword[else] :
identifier[plotaxes] . identifier[set_axis_bgcolor] ( identifier[bestperiodhighlight] )
identifier[_make_phased_magseries_plot] ( identifier[plotaxes] ,
identifier[periodind] ,
identifier[stimes] , identifier[smags] , identifier[serrs] ,
identifier[varperiod] , identifier[varepoch] ,
identifier[phasewrap] , identifier[phasesort] ,
identifier[phasebin] , identifier[minbinelems] ,
identifier[plotxlim] , identifier[lspmethod1] ,
identifier[lspmethodind] = literal[int] ,
identifier[twolspmode] = keyword[True] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[xliminsetmode] = identifier[xliminsetmode] ,
identifier[verbose] = identifier[verbose] ,
identifier[phasems] = identifier[phasems] ,
identifier[phasebinms] = identifier[phasebinms] )
keyword[for] identifier[periodind] , identifier[varperiod] , identifier[plotaxes] keyword[in] identifier[zip] ([ literal[int] , literal[int] , literal[int] ],
identifier[lspbestperiods2] [: literal[int] ],
[ identifier[axes] [ literal[int] ], identifier[axes] [ literal[int] ], identifier[axes] [ literal[int] ]]):
keyword[if] identifier[periodind] == literal[int] keyword[and] identifier[bestperiodhighlight] :
keyword[if] identifier[MPLVERSION] >=( literal[int] , literal[int] , literal[int] ):
identifier[plotaxes] . identifier[set_facecolor] ( identifier[bestperiodhighlight] )
keyword[else] :
identifier[plotaxes] . identifier[set_axis_bgcolor] ( identifier[bestperiodhighlight] )
identifier[_make_phased_magseries_plot] ( identifier[plotaxes] ,
identifier[periodind] ,
identifier[stimes] , identifier[smags] , identifier[serrs] ,
identifier[varperiod] , identifier[varepoch] ,
identifier[phasewrap] , identifier[phasesort] ,
identifier[phasebin] , identifier[minbinelems] ,
identifier[plotxlim] , identifier[lspmethod2] ,
identifier[lspmethodind] = literal[int] ,
identifier[twolspmode] = keyword[True] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[xliminsetmode] = identifier[xliminsetmode] ,
identifier[verbose] = identifier[verbose] ,
identifier[phasems] = identifier[phasems] ,
identifier[phasebinms] = identifier[phasebinms] )
identifier[fig] . identifier[set_tight_layout] ( keyword[True] )
keyword[if] identifier[plotfpath] . identifier[endswith] ( literal[string] ):
identifier[fig] . identifier[savefig] ( identifier[plotfpath] , identifier[dpi] = identifier[plotdpi] )
keyword[else] :
identifier[fig] . identifier[savefig] ( identifier[plotfpath] )
identifier[plt] . identifier[close] ()
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string] % identifier[plotfpath] )
keyword[return] identifier[plotfpath]
keyword[else] :
identifier[LOGWARNING] ( literal[string] )
keyword[for] identifier[periodind] keyword[in] identifier[range] ( literal[int] ):
identifier[axes] [ identifier[periodind] + literal[int] ]. identifier[text] (
literal[int] , literal[int] ,
( literal[string] ),
identifier[horizontalalignment] = literal[string] ,
identifier[verticalalignment] = literal[string] ,
identifier[transform] = identifier[axes] [ identifier[periodind] + literal[int] ]. identifier[transAxes]
)
identifier[fig] . identifier[set_tight_layout] ( keyword[True] )
keyword[if] identifier[plotfpath] . identifier[endswith] ( literal[string] ):
identifier[fig] . identifier[savefig] ( identifier[plotfpath] , identifier[dpi] = identifier[plotdpi] )
keyword[else] :
identifier[fig] . identifier[savefig] ( identifier[plotfpath] )
identifier[plt] . identifier[close] ()
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string] % identifier[plotfpath] )
keyword[return] identifier[plotfpath] | def twolsp_checkplot_png(lspinfo1, lspinfo2, times, mags, errs, varepoch='min', magsarefluxes=False, objectinfo=None, findercmap='gray_r', finderconvolve=None, findercachedir='~/.astrobase/stamp-cache', normto='globalmedian', normmingap=4.0, sigclip=4.0, phasewrap=True, phasesort=True, phasebin=0.002, minbinelems=7, plotxlim=(-0.8, 0.8), unphasedms=2.0, phasems=2.0, phasebinms=4.0, xliminsetmode=False, bestperiodhighlight=None, plotdpi=100, outfile=None, verbose=True):
"""This makes a checkplot using results from two independent period-finders.
Adapted from Luke Bouma's implementation of a similar function in his
work. This makes a special checkplot that uses two lspinfo dictionaries,
from two independent period-finding methods. For EBs, it's probably best to
use Stellingwerf PDM or Schwarzenberg-Czerny AoV as one of these, and the
Box Least-squared Search method as the other one.
The checkplot layout in this case is::
[ pgram1 + objectinfo ] [ pgram2 ] [ unphased LC ]
[ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
[ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
where:
- pgram1 is the plot for the periodogram in the lspinfo1 dict
- pgram1 P1, P2, and P3 are the best three periods from lspinfo1
- pgram2 is the plot for the periodogram in the lspinfo2 dict
- pgram2 P1, P2, and P3 are the best three periods from lspinfo2
Note that we take the output file name from lspinfo1 if lspinfo1 is a string
filename pointing to a (gzipped) pickle containing the results dict from a
period-finding routine similar to those in periodbase.
Parameters
----------
lspinfo1,lspinfo2 : dict or str
If this is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 3 elements each,
e.g. describing the three 'best' (highest power) peaks in the
periodogram.
If lspinfo is a str, then it must be a path to a pickle file (ending
with the extension '.pkl' or '.pkl.gz') that contains a dict of the form
described above.
times,mags,errs : np.array
The mag/flux time-series arrays to process along with associated errors.
varepoch : 'min' or float or None or list of lists
This sets the time of minimum light finding strategy for the checkplot::
the epoch used for all phased
If `varepoch` is None -> light curve plots will be
`min(times)`.
If `varepoch='min'` -> automatic epoch finding for all
periods using light curve fits.
If varepoch is a single float -> this epoch will be used for all
phased light curve plots
If varepoch is a list of floats each epoch will be applied to
with length = `len(nbestperiods)` -> the phased light curve for each
from period-finder results period specifically
If you use a list for varepoch, it must be of length
`len(lspinfo['nbestperiods'])`.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately/
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
output checkplot PNG image.The `objectinfo` dict must be of the form and
contain at least the keys described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG.
- SDSS mag keys: 'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz'
- 2MASS mag keys: 'jmag', 'hmag', 'kmag'
- Cousins mag keys: 'bmag', 'vmag'
- GAIA specific keys: 'gmag', 'teff'
- proper motion keys: 'pmra', 'pmdecl'
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
findercachedir : str
The directory where the FITS finder images are downloaded and cached.
normto : {'globalmedian', 'zero'} or a float
This sets the LC normalization target::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
phasewrap : bool
If this is True, the phased time-series will be wrapped around phase
0.0.
phasesort : bool
If this is True, the phased time-series will be sorted in phase.
phasebin : float or None
If this is provided, indicates the bin size to use to group together
measurements closer than this amount in phase. This is in units of
phase. The binned phased light curve will be overplotted on top of the
phased light curve. Useful for when one has many measurement points and
needs to pick out a small trend in an otherwise noisy phased light
curve.
minbinelems : int
The minimum number of elements in each phase bin.
plotxlim : sequence of two floats or None
The x-axis limits to use when making the phased light curve plot. By
default, this is (-0.8, 0.8), which places phase 0.0 at the center of
the plot and covers approximately two cycles in phase to make any trends
clear.
unphasedms : float
The marker size to use for the main unphased light curve plot symbols.
phasems : float
The marker size to use for the main phased light curve plot symbols.
phasebinms : float
The marker size to use for the binned phased light curve plot symbols.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
the full phased light curve plot as an smaller inset. Useful for
planetary transit light curves.
bestperiodhighlight : str or None
If not None, this is a str with a matplotlib color specification to use
as the background color to highlight the phased light curve plot of the
'best' period and epoch combination. If None, no highlight will be
applied.
outfile : str or None
The file name of the file to save the checkplot to. If this is None,
will write to a file called 'checkplot.png' in the current working
directory.
plotdpi : int
Sets the resolution in DPI for PNG plots (default = 100).
verbose : bool
If False, turns off many of the informational messages. Useful for
when an external function is driving lots of `checkplot_png` calls.
Returns
-------
str
The file path to the generated checkplot PNG file.
"""
# generate the plot filename
if not outfile and isinstance(lspinfo1, str):
plotfpath = os.path.join(os.path.dirname(lspinfo1), 'twolsp-checkplot-%s.png' % (os.path.basename(lspinfo1),)) # depends on [control=['if'], data=[]]
elif outfile:
plotfpath = outfile # depends on [control=['if'], data=[]]
else:
plotfpath = 'twolsp-checkplot.png'
# get the first LSP from a pickle file transparently
if isinstance(lspinfo1, str) and os.path.exists(lspinfo1):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo1) # depends on [control=['if'], data=[]]
if '.gz' in lspinfo1:
with gzip.open(lspinfo1, 'rb') as infd:
lspinfo1 = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['if'], data=['lspinfo1']]
else:
with open(lspinfo1, 'rb') as infd:
lspinfo1 = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['if'], data=[]]
# get the second LSP from a pickle file transparently
if isinstance(lspinfo2, str) and os.path.exists(lspinfo2):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo2) # depends on [control=['if'], data=[]]
if '.gz' in lspinfo2:
with gzip.open(lspinfo2, 'rb') as infd:
lspinfo2 = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['if'], data=['lspinfo2']]
else:
with open(lspinfo2, 'rb') as infd:
lspinfo2 = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['if'], data=[]]
# get the things to plot out of the data
if 'periods' in lspinfo1 and 'periods' in lspinfo2 and ('lspvals' in lspinfo1) and ('lspvals' in lspinfo2) and ('bestperiod' in lspinfo1) and ('bestperiod' in lspinfo2):
bestperiod1 = lspinfo1['bestperiod']
nbestperiods1 = lspinfo1['nbestperiods']
lspmethod1 = lspinfo1['method']
bestperiod2 = lspinfo2['bestperiod']
nbestperiods2 = lspinfo2['nbestperiods']
lspmethod2 = lspinfo2['method'] # depends on [control=['if'], data=[]]
else:
LOGERROR('could not understand lspinfo1 or lspinfo2 for this object, skipping...')
return None
if not npisfinite(bestperiod1) or not npisfinite(bestperiod2):
LOGWARNING('no best period found for this object, skipping...')
return None # depends on [control=['if'], data=[]]
# initialize the plot
(fig, axes) = plt.subplots(3, 3)
axes = npravel(axes)
# this is a full page plot
fig.set_size_inches(30, 24)
######################################################################
## PLOT 1 is the LSP from lspinfo1, including objectinfo and finder ##
######################################################################
_make_periodogram(axes[0], lspinfo1, objectinfo, findercmap, finderconvolve, verbose=verbose, findercachedir=findercachedir)
#####################################
## PLOT 2 is the LSP from lspinfo2 ##
#####################################
_make_periodogram(axes[1], lspinfo2, None, findercmap, finderconvolve)
##########################################
## FIX UP THE MAGS AND REMOVE BAD STUFF ##
##########################################
# sigclip first
(stimes, smags, serrs) = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=sigclip)
# take care of the normalization
if normto is not False:
(stimes, smags) = normalize_magseries(stimes, smags, normto=normto, magsarefluxes=magsarefluxes, mingap=normmingap) # depends on [control=['if'], data=['normto']]
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) >= 50:
##############################
## PLOT 3 is an unphased LC ##
##############################
_make_magseries_plot(axes[2], stimes, smags, serrs, magsarefluxes=magsarefluxes, ms=unphasedms)
# make the plot for each best period
lspbestperiods1 = nbestperiods1[:]
lspbestperiods2 = nbestperiods2[:]
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO1 ###
##########################################################
for (periodind, varperiod, plotaxes) in zip([0, 1, 2], lspbestperiods1[:3], [axes[3], axes[4], axes[5]]):
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2, 0, 0):
plotaxes.set_facecolor(bestperiodhighlight) # depends on [control=['if'], data=[]]
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight) # depends on [control=['if'], data=[]]
_make_phased_magseries_plot(plotaxes, periodind, stimes, smags, serrs, varperiod, varepoch, phasewrap, phasesort, phasebin, minbinelems, plotxlim, lspmethod1, lspmethodind=0, twolspmode=True, magsarefluxes=magsarefluxes, xliminsetmode=xliminsetmode, verbose=verbose, phasems=phasems, phasebinms=phasebinms) # depends on [control=['for'], data=[]]
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO2 ###
##########################################################
for (periodind, varperiod, plotaxes) in zip([0, 1, 2], lspbestperiods2[:3], [axes[6], axes[7], axes[8]]):
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2, 0, 0):
plotaxes.set_facecolor(bestperiodhighlight) # depends on [control=['if'], data=[]]
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight) # depends on [control=['if'], data=[]]
_make_phased_magseries_plot(plotaxes, periodind, stimes, smags, serrs, varperiod, varepoch, phasewrap, phasesort, phasebin, minbinelems, plotxlim, lspmethod2, lspmethodind=1, twolspmode=True, magsarefluxes=magsarefluxes, xliminsetmode=xliminsetmode, verbose=verbose, phasems=phasems, phasebinms=phasebinms) # depends on [control=['for'], data=[]]
# end of plotting for each ax
# save the plot to disk
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi) # depends on [control=['if'], data=[]]
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath) # depends on [control=['if'], data=[]]
return plotfpath # depends on [control=['if'], data=[]]
else:
# otherwise, there's no valid data for this plot
LOGWARNING('no good data')
for periodind in range(5):
axes[periodind + 2].text(0.5, 0.5, 'no best aperture light curve available', horizontalalignment='center', verticalalignment='center', transform=axes[periodind + 2].transAxes) # depends on [control=['for'], data=['periodind']]
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi) # depends on [control=['if'], data=[]]
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath) # depends on [control=['if'], data=[]]
return plotfpath |
def project(vx, vy, occlusion):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
p = np.zeros(vx.shape)
div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
+ np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
div = make_continuous(div, occlusion)
for k in range(50):
p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
+ np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
p = make_continuous(p, occlusion)
vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))
vx = occlude(vx, occlusion)
vy = occlude(vy, occlusion)
return vx, vy | def function[project, parameter[vx, vy, occlusion]]:
constant[Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel.]
variable[p] assign[=] call[name[np].zeros, parameter[name[vx].shape]]
variable[div] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18c4cf430> * binary_operation[binary_operation[binary_operation[call[name[np].roll, parameter[name[vx], <ast.UnaryOp object at 0x7da18c4cc3d0>]] - call[name[np].roll, parameter[name[vx], constant[1]]]] + call[name[np].roll, parameter[name[vy], <ast.UnaryOp object at 0x7da18c4cf610>]]] - call[name[np].roll, parameter[name[vy], constant[1]]]]]
variable[div] assign[=] call[name[make_continuous], parameter[name[div], name[occlusion]]]
for taget[name[k]] in starred[call[name[range], parameter[constant[50]]]] begin[:]
variable[p] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[div] + call[name[np].roll, parameter[name[p], constant[1]]]] + call[name[np].roll, parameter[name[p], <ast.UnaryOp object at 0x7da18c4ce860>]]] + call[name[np].roll, parameter[name[p], constant[1]]]] + call[name[np].roll, parameter[name[p], <ast.UnaryOp object at 0x7da1b2345e40>]]] / constant[4.0]]
variable[p] assign[=] call[name[make_continuous], parameter[name[p], name[occlusion]]]
variable[vx] assign[=] binary_operation[name[vx] - binary_operation[constant[0.5] * binary_operation[call[name[np].roll, parameter[name[p], <ast.UnaryOp object at 0x7da18c4cf4f0>]] - call[name[np].roll, parameter[name[p], constant[1]]]]]]
variable[vy] assign[=] binary_operation[name[vy] - binary_operation[constant[0.5] * binary_operation[call[name[np].roll, parameter[name[p], <ast.UnaryOp object at 0x7da18c4cc220>]] - call[name[np].roll, parameter[name[p], constant[1]]]]]]
variable[vx] assign[=] call[name[occlude], parameter[name[vx], name[occlusion]]]
variable[vy] assign[=] call[name[occlude], parameter[name[vy], name[occlusion]]]
return[tuple[[<ast.Name object at 0x7da18c4cd360>, <ast.Name object at 0x7da18c4ce0e0>]]] | keyword[def] identifier[project] ( identifier[vx] , identifier[vy] , identifier[occlusion] ):
literal[string]
identifier[p] = identifier[np] . identifier[zeros] ( identifier[vx] . identifier[shape] )
identifier[div] =- literal[int] *( identifier[np] . identifier[roll] ( identifier[vx] ,- literal[int] , identifier[axis] = literal[int] )- identifier[np] . identifier[roll] ( identifier[vx] , literal[int] , identifier[axis] = literal[int] )
+ identifier[np] . identifier[roll] ( identifier[vy] ,- literal[int] , identifier[axis] = literal[int] )- identifier[np] . identifier[roll] ( identifier[vy] , literal[int] , identifier[axis] = literal[int] ))
identifier[div] = identifier[make_continuous] ( identifier[div] , identifier[occlusion] )
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] ):
identifier[p] =( identifier[div] + identifier[np] . identifier[roll] ( identifier[p] , literal[int] , identifier[axis] = literal[int] )+ identifier[np] . identifier[roll] ( identifier[p] ,- literal[int] , identifier[axis] = literal[int] )
+ identifier[np] . identifier[roll] ( identifier[p] , literal[int] , identifier[axis] = literal[int] )+ identifier[np] . identifier[roll] ( identifier[p] ,- literal[int] , identifier[axis] = literal[int] ))/ literal[int]
identifier[p] = identifier[make_continuous] ( identifier[p] , identifier[occlusion] )
identifier[vx] = identifier[vx] - literal[int] *( identifier[np] . identifier[roll] ( identifier[p] ,- literal[int] , identifier[axis] = literal[int] )- identifier[np] . identifier[roll] ( identifier[p] , literal[int] , identifier[axis] = literal[int] ))
identifier[vy] = identifier[vy] - literal[int] *( identifier[np] . identifier[roll] ( identifier[p] ,- literal[int] , identifier[axis] = literal[int] )- identifier[np] . identifier[roll] ( identifier[p] , literal[int] , identifier[axis] = literal[int] ))
identifier[vx] = identifier[occlude] ( identifier[vx] , identifier[occlusion] )
identifier[vy] = identifier[occlude] ( identifier[vy] , identifier[occlusion] )
keyword[return] identifier[vx] , identifier[vy] | def project(vx, vy, occlusion):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
p = np.zeros(vx.shape)
div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1) + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
div = make_continuous(div, occlusion)
for k in range(50):
p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1) + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0)) / 4.0
p = make_continuous(p, occlusion) # depends on [control=['for'], data=[]]
vx = vx - 0.5 * (np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
vy = vy - 0.5 * (np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))
vx = occlude(vx, occlusion)
vy = occlude(vy, occlusion)
return (vx, vy) |
def Load(self):
    """Loads all new events from disk as raw serialized proto bytestrings.

    Calling Load multiple times in a row will not 'drop' events as long as the
    return value is not iterated over.

    Yields:
      All event proto bytestrings in the file that have not been yielded yet.
    """
    logger.debug('Loading events from %s', self._file_path)
    # GetNext() expects a status argument on TF <= 1.7; newer versions raise
    # directly.  Inspect the signature once, up front, to pick the calling
    # convention for this runtime.
    get_next_args = inspect.getargspec(self._reader.GetNext).args  # pylint: disable=deprecated-method
    # First argument is self, so more than one formal parameter means the
    # legacy (status-taking) API.
    legacy_get_next = (len(get_next_args) > 1)
    while True:
        try:
            if legacy_get_next:
                # raise_exception_on_not_ok_status converts a non-OK status
                # filled in by GetNext into a tf.errors exception on exit.
                with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
                    self._reader.GetNext(status)
            else:
                self._reader.GetNext()
        except (tf.errors.DataLossError, tf.errors.OutOfRangeError) as e:
            logger.debug('Cannot read more events: %s', e)
            # We ignore partial read exceptions, because a record may be truncated.
            # PyRecordReader holds the offset prior to the failed read, so retrying
            # will succeed.
            break
        yield self._reader.record()
logger.debug('No more events in %s', self._file_path) | def function[Load, parameter[self]]:
constant[Loads all new events from disk as raw serialized proto bytestrings.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All event proto bytestrings in the file that have not been yielded yet.
]
call[name[logger].debug, parameter[constant[Loading events from %s], name[self]._file_path]]
variable[get_next_args] assign[=] call[name[inspect].getargspec, parameter[name[self]._reader.GetNext]].args
variable[legacy_get_next] assign[=] compare[call[name[len], parameter[name[get_next_args]]] greater[>] constant[1]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b21e9ab0>
<ast.Yield object at 0x7da1b21ccf10>
call[name[logger].debug, parameter[constant[No more events in %s], name[self]._file_path]] | keyword[def] identifier[Load] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[_file_path] )
identifier[get_next_args] = identifier[inspect] . identifier[getargspec] ( identifier[self] . identifier[_reader] . identifier[GetNext] ). identifier[args]
identifier[legacy_get_next] =( identifier[len] ( identifier[get_next_args] )> literal[int] )
keyword[while] keyword[True] :
keyword[try] :
keyword[if] identifier[legacy_get_next] :
keyword[with] identifier[tf] . identifier[compat] . identifier[v1] . identifier[errors] . identifier[raise_exception_on_not_ok_status] () keyword[as] identifier[status] :
identifier[self] . identifier[_reader] . identifier[GetNext] ( identifier[status] )
keyword[else] :
identifier[self] . identifier[_reader] . identifier[GetNext] ()
keyword[except] ( identifier[tf] . identifier[errors] . identifier[DataLossError] , identifier[tf] . identifier[errors] . identifier[OutOfRangeError] ) keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[e] )
keyword[break]
keyword[yield] identifier[self] . identifier[_reader] . identifier[record] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[_file_path] ) | def Load(self):
"""Loads all new events from disk as raw serialized proto bytestrings.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All event proto bytestrings in the file that have not been yielded yet.
"""
logger.debug('Loading events from %s', self._file_path)
# GetNext() expects a status argument on TF <= 1.7.
get_next_args = inspect.getargspec(self._reader.GetNext).args # pylint: disable=deprecated-method
# First argument is self
legacy_get_next = len(get_next_args) > 1
while True:
try:
if legacy_get_next:
with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
self._reader.GetNext(status) # depends on [control=['with'], data=['status']] # depends on [control=['if'], data=[]]
else:
self._reader.GetNext() # depends on [control=['try'], data=[]]
except (tf.errors.DataLossError, tf.errors.OutOfRangeError) as e:
logger.debug('Cannot read more events: %s', e)
# We ignore partial read exceptions, because a record may be truncated.
# PyRecordReader holds the offset prior to the failed read, so retrying
# will succeed.
break # depends on [control=['except'], data=['e']]
yield self._reader.record() # depends on [control=['while'], data=[]]
logger.debug('No more events in %s', self._file_path) |
def main(command_line=True, **kwargs):
"""
NAME
generic_magic.py
DESCRIPTION
converts magnetometer files in generic format to MagIC measurements format
SYNTAX
generic_magic.py [command line options]
OPTIONS
-h
prints the help message and quits.
-usr USER
identify user, default is ""
-f FILE:
specify path to input file, required
-fsa SAMPFILE:
specify the samples file for sample orientation data. default is er_samples.txt
-F FILE
specify output file, default is magic_measurements.txt
-Fsa FILE
specify output file, default is er_samples.txt
-exp EXPERIMENT-TYPE
Demag:
AF and/or Thermal
PI:
paleointenisty thermal experiment (ZI/IZ/IZZI/TT)
ATRM n:
ATRM in n positions (n=6)
AARM n:
AARM in n positions
CR:
cooling rate experiment
The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional)
where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps.
XXX.00 is optional zerofield baseline. XXX.70 is alteration check.
syntax in sio_magic is: -LP CR xxx,yyy,zzz,.....xx -A
where xx, yyy,zzz...xxx are cooling rates in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70
No need to specify the cooling rate for the zerofield
It is important to add to the command line the -A option so the measurements will not be averaged.
But users need to make sure that there are no duplicate meaurements in the file
NLT:
non-linear-TRM experiment
-samp X Y
specimen-sample naming convention.
X determines which kind of convention (initial characters, terminal characters, or delimiter
Y determines how many characters to remove to go from specimen --> sample OR which delimiter to use
X=0 Y=n: specimen is distinguished from sample by n initial characters.
(example: "generic_magic.py -samp 0 4"
if n=4 then and specimen = mgf13a then sample = mgf13)
X=1 Y=n: specimen is distiguished from sample by n terminate characters.
(example: "generic_magic.py -samp 1 1)
if n=1 then and specimen = mgf13a then sample = mgf13)
X=2 Y=c: specimen is distinguishing from sample by a delimiter.
(example: "generic_magic.py -samp 2 -"
if c=- then and specimen = mgf13-a then sample = mgf13)
default: sample is the same as specimen name
-site X Y
sample-site naming convention.
X determines which kind of convention (initial characters, terminal characters, or delimiter
Y determines how many characters to remove to go from sample --> site OR which delimiter to use
X=0 Y=n: sample is distiguished from site by n initial characters.
(example: "generic_magic.py --site 0 3"
if n=3 then and sample = mgf13 then sample = mgf)
X=1 Y=n: sample is distiguished from site by n terminate characters.
(example: "generic_magic.py --site 1 2"
if n=2 and sample = mgf13 then site = mgf)
X=2 Y=c: specimen is distiguishing from sample by a delimiter.
(example: "generic_magic.py -site 2 -"
if c='-' and sample = 'mgf-13' then site = mgf)
default: site name is the same as sample name
-loc LOCNAM
specify location/study name.
-dc B PHI THETA:
B: dc lab field (in micro tesla)
PHI (declination). takes numbers from 0 to 360
THETA (inclination). takes numbers from -90 to 90
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment.
-A: don't average replicate measurements. Take the last measurement from replicate measurements.
-WD working directory
INPUT
A generic file is a tab-delimited file. Each column should have a header.
The file must include the following headers (the order of the columns is not important):
specimen
string specifying specimen name
treatment:
a number with one or two decimal point (X.Y)
coding for thermal demagnetization:
0.0 or 0 is NRM.
X is temperature in celsius
Y is always 0
coding for AF demagnetization:
0.0 or 0 is NRM.
X is AF peak field in mT
Y is always 0
coding for Thellier-type experiment:
0.0 or 0 is NRM
X is temperature in celsius
Y=0: zerofield
Y=1: infield (IZZI, IZ, ZI, and Thellier protocol- first infield)
Y=2: pTRM check
Y=3: pTRM tail check
Y=4: Additivity check
Y=5: Thellier protocol: second infield
coding for ATRM experiment (6 poitions):
X is temperature in celsius
Y=0: zerofield baseline to be subtracted
Y=1: +x
Y=2: -x
Y=3: +y
Y=4: -y
Y=5: +z
Y=6: -z
Y=7: alteration check
coding for NLT experiment:
X is temperature in celsius
Y=0: zerofield baseline to be subtracted
Y!=0: oven field in microT
coding for CR experiment:
see "OPTIONS" list above
treatment_type:
N: NRM
A: AF
T: Thermal
moment:
magnetic moment in emu !!
In addition, at least one of the following headers are required:
dec_s:
declination in specimen coordinate system (0 to 360)
inc_s:
inclination in specimen coordinate system (-90 to 90)
dec_g:
declination in geographic coordinate system (0 to 360)
inc_g:
inclination in geographic coordinate system (-90 to 90)
dec_t:
declination in tilt-corrected coordinate system (0 to 360)
inc_t:
inclination in tilt-corrected coordinate system (-90 to 90)
"""
#--------------------------------------
# functions
#--------------------------------------
def sort_magic_file(path, ignore_lines_n, sort_by_this_name):
    '''
    Read a tab-delimited file with a single header line and return its rows
    keyed by the value of the *sort_by_this_name* column.

    The first *ignore_lines_n* lines are skipped, the next line is taken as
    the header, and every following non-comment line (lines starting with
    '#' are ignored) is parsed into a {header: value} dictionary.

    Returns:
        dict mapping each row's *sort_by_this_name* value to that row's
        dictionary.  If several rows share the same key, the LAST one wins
        (earlier rows are silently overwritten).

    Raises:
        KeyError: if a data row has no value for *sort_by_this_name*.
    '''
    DATA = {}
    # 'with' guarantees the handle is closed even if a row is malformed
    # (the original leaked the handle on any exception before fin.close()).
    with open(path, 'r') as fin:
        # skip preamble lines that precede the header
        for _ in range(ignore_lines_n):
            fin.readline()
        header = fin.readline().strip('\n').split('\t')
        for line in fin.readlines():
            if line.startswith("#"):
                continue
            tmp_line = line.strip('\n').split('\t')
            # zip() pairs each value with its header and drops any surplus
            # columns, matching the original's explicit bounds check.
            tmp_data = dict(zip(header, tmp_line))
            DATA[tmp_data[sort_by_this_name]] = tmp_data
    return DATA
def read_generic_file(path, average_replicates):
    '''
    Read a measurement file in the generic tab-delimited format.

    The first line is the header; every following line becomes a
    {column: value} dictionary appended to Data[specimen].  Consecutive
    lines that repeat the same 'treatment' and 'treatment_type' are treated
    as replicate measurements: if *average_replicates* is True they are
    averaged (via average_duplicates), otherwise only the last replicate of
    the run is kept.

    Returns a dictionary Data[specimen_name] = [dict1, dict2, ...].
    '''
    Data = {}
    # NOTE(review): file handle is never closed explicitly; relies on GC.
    Fin = open(path, 'r')
    header = Fin.readline().strip('\n').split('\t')
    duplicates = []
    for line in Fin.readlines():
        tmp_data = {}
        l = line.strip('\n').split('\t')
        # pair values with header names; surplus columns on either side are dropped
        for i in range(min(len(header), len(l))):
            tmp_data[header[i]] = l[i]
        specimen = tmp_data['specimen']
        if specimen not in list(Data.keys()):
            Data[specimen] = []
        Data[specimen].append(tmp_data)
    # Collapse runs of replicate measurements in place.
    # NOTE(review): the inner while-loop deletes elements of Data[specimen]
    # while the outer for-loop iterates over a range computed up front; the
    # trailing 'break' keeps the index in bounds.  The exact statement order
    # is load-bearing here -- do not reorder.
    for specimen in list(Data.keys()):
        x = len(Data[specimen]) - 1
        new_data = []  # NOTE(review): unused leftover from an older revision
        duplicates = []
        for i in range(1, x):
            # gather every consecutive measurement that repeats the same
            # treatment step as its predecessor
            while i < len(Data[specimen]) and Data[specimen][i]['treatment'] == Data[specimen][i - 1]['treatment'] and Data[specimen][i]['treatment_type'] == Data[specimen][i - 1]['treatment_type']:
                duplicates.append(Data[specimen][i])
                del(Data[specimen][i])
            if len(duplicates) > 0:
                if average_replicates:
                    # average the whole run, including the surviving first member
                    duplicates.append(Data[specimen][i - 1])
                    Data[specimen][i - 1] = average_duplicates(duplicates)
                    print("-W- WARNING: averaging %i duplicates for specimen %s treatmant %s" % (len(duplicates), specimen, duplicates[-1]['treatment']))
                    duplicates = []
                else:
                    # keep only the last replicate of the run
                    Data[specimen][i - 1] = duplicates[-1]
                    print("-W- WARNING: found %i duplicates for specimen %s treatmant %s. Taking the last measurement only" % (len(duplicates), specimen, duplicates[-1]['treatment']))
                    duplicates = []
            if i == len(Data[specimen]) - 1:
                break
    return(Data)
def average_duplicates(duplicates):
    '''
    Average replicate measurements.

    Each element of *duplicates* is a {column: value} measurement dictionary.
    For every coordinate system with directional data present (specimen
    ``_s``, geographic ``_g``, tilt-corrected ``_t``) the directions are
    combined as moment-scaled cartesian vector means; the mean
    declination/inclination per system and a single mean moment are written
    into the returned record.  All other columns are copied from the first
    duplicate.

    Returns:
        dict: one merged measurement record.
    '''
    carts_s, carts_g, carts_t = [], [], []
    for rec in duplicates:
        moment = float(rec['moment'])
        # collect dec/inc pairs per coordinate system when both are present
        # and non-empty, converted to cartesian with the moment as length
        for suffix, bucket in (('s', carts_s), ('g', carts_g), ('t', carts_t)):
            dec_key, inc_key = 'dec_' + suffix, 'inc_' + suffix
            if dec_key in list(rec.keys()) and inc_key in list(rec.keys()):
                if rec[dec_key] != "" and rec[inc_key] != "":
                    bucket.append(pmag.dir2cart([float(rec[dec_key]), float(rec[inc_key]), moment]))

    def _mean_dir(carts):
        # vector-average a list of cartesian triples; returns (dec, inc, R)
        arr = scipy.array(carts)
        return pmag.cart2dir([scipy.mean(arr[:, 0]), scipy.mean(arr[:, 1]), scipy.mean(arr[:, 2])])

    # mean_moment keeps the value from the *last* coordinate system computed
    # (t over g over s), matching the historical behavior.  If no directional
    # data exists at all, fall back to the plain average of the moments -- the
    # original raised a NameError in that case.
    mean_moment = "%10.3e" % (sum(float(rec['moment']) for rec in duplicates) / len(duplicates))
    mean_dec_s = mean_inc_s = ""
    if len(carts_s) > 0:
        mean_dir = _mean_dir(carts_s)
        mean_dec_s = "%.2f" % mean_dir[0]
        mean_inc_s = "%.2f" % mean_dir[1]
        mean_moment = "%10.3e" % mean_dir[2]
    mean_dec_g = mean_inc_g = ""
    if len(carts_g) > 0:
        mean_dir = _mean_dir(carts_g)
        mean_dec_g = "%.2f" % mean_dir[0]
        mean_inc_g = "%.2f" % mean_dir[1]
        mean_moment = "%10.3e" % mean_dir[2]
    mean_dec_t = mean_inc_t = ""
    if len(carts_t) > 0:
        mean_dir = _mean_dir(carts_t)
        mean_dec_t = "%.2f" % mean_dir[0]
        mean_inc_t = "%.2f" % mean_dir[1]
        mean_moment = "%10.3e" % mean_dir[2]
    # copy the remaining (non-directional) columns from the first duplicate
    meanrec = {}
    for key in list(duplicates[0].keys()):
        if key in ['dec_s', 'inc_s', 'dec_g', 'inc_g', 'dec_t', 'inc_t', 'moment']:
            continue
        meanrec[key] = duplicates[0][key]
    meanrec['dec_s'] = mean_dec_s
    meanrec['dec_g'] = mean_dec_g
    meanrec['dec_t'] = mean_dec_t
    meanrec['inc_s'] = mean_inc_s
    meanrec['inc_g'] = mean_inc_g
    meanrec['inc_t'] = mean_inc_t
    meanrec['moment'] = mean_moment
    return meanrec
def get_upper_level_name(name, nc):
    '''
    Derive the parent (sample or site) name from a specimen/sample name
    following the naming convention *nc* = [scheme, argument]:

      scheme 0 -- keep only the first ``argument`` characters of *name*
      scheme 1 -- drop the last ``argument`` characters of *name*
      scheme 2 -- split on the delimiter ``argument`` and drop the last piece
      other    -- the parent name is *name* itself

    For schemes 0 and 1 an ``argument`` of 0 means "no change".
    '''
    scheme = float(nc[0])
    if scheme == 0:
        if float(nc[1]) == 0:
            return name
        return name[:int(nc[1])]
    if scheme == 1:
        if float(nc[1]) == 0:
            return name
        return name[:-int(nc[1])]
    if scheme == 2:
        delim = str(nc[1])
        pieces = name.split(delim)
        if len(pieces) == 1:
            # delimiter absent: the name is already the parent name
            return pieces[0]
        return delim.join(pieces[:-1])
    return name
def merge_pmag_recs(old_recs):
    '''
    Return a deep copy of *old_recs* (a list of dictionaries) in which every
    record carries the union of all keys seen across the list; keys a record
    was missing are filled with the empty string.  The input list is left
    untouched.
    '''
    merged = copy.deepcopy(old_recs)
    # collect the union of column names, preserving first-seen order
    columns = []
    for record in merged:
        for column in record:
            if column not in columns:
                columns.append(column)
    # pad each record with any column it lacks
    for record in merged:
        for column in columns:
            record.setdefault(column, "")
    return merged
# initialize some variables
experiment = ''
sample_nc = [1, 0]
site_nc = [1, 0]
meas_file = "magic_measurements.txt"
labfield = 0
#--------------------------------------
# get command line arguments
#--------------------------------------
if command_line:
args=sys.argv
user=""
if "-h" in args:
print(main.__doc__)
return False
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
else:
user=""
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsa' in args:
ind=args.index("-Fsa")
samp_file=args[ind+1]
else:
samp_file="er_samples.txt"
if '-f' in args:
ind=args.index("-f")
magfile=args[ind+1]
if "-dc" in args:
ind=args.index("-dc")
labfield=float(args[ind+1])*1e-6
labfield_phi=float(args[ind+2])
labfield_theta=float(args[ind+3])
if '-exp' in args:
ind=args.index("-exp")
experiment=args[ind+1]
if "-samp" in args:
ind=args.index("-samp")
sample_nc=[]
sample_nc.append(args[ind+1])
sample_nc.append(args[ind+2])
if "-site" in args:
ind=args.index("-site")
site_nc=[]
site_nc.append(args[ind+1])
site_nc.append(args[ind+2])
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
else:
er_location_name=""
if "-A" in args:
noave=1
else:
noave=0
if "-WD" in args:
ind=args.index("-WD")
WD=args[ind+1]
os.chdir(WD)
# unpack keyword args if using as module
if not command_line:
user = kwargs.get('user', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
magfile = kwargs.get('magfile', '')
labfield = int(kwargs.get('labfield', 0))
if labfield:
labfield *= 1e-6
labfield_phi = int(kwargs.get('labfield_phi', 0))
labfield_theta = int(kwargs.get('labfield_theta', 0))
experiment = kwargs.get('experiment', '')
cooling_times = kwargs.get('cooling_times_list', '')
sample_nc = kwargs.get('sample_nc', [1, 0])
site_nc = kwargs.get('site_nc', [1, 0])
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # 0 is default, means do average
WD = kwargs.get('WD', '.')
#os.chdir(WD)
# format and validate variables
if magfile:
try:
input=open(magfile,'r')
except:
print("bad mag file:",magfile)
return False, "bad mag file"
else:
print("mag_file field is required option")
print(main.__doc__)
return False, "mag_file field is required option"
if not experiment:
print("-exp is required option. Please provide experiment type of: Demag, PI, ATRM n (n of positions), CR (see below for format), NLT")
print(main.__doc__)
return False, "-exp is required option"
if experiment=='ATRM':
if command_line:
ind=args.index("ATRM")
atrm_n_pos=int(args[ind+1])
else:
atrm_n_pos = 6
if experiment=='AARM':
if command_line:
ind=args.index("AARM")
aarm_n_pos=int(args[ind+1])
else:
aarm_n_pos = 6
if experiment=='CR':
if command_line:
ind=args.index("CR")
cooling_times=args[ind+1]
cooling_times_list=cooling_times.split(',')
# if not command line, cooling_times_list is already set
#--------------------------------------
# read data from er_samples.txt
#--------------------------------------
#if "-fsa" in args:
# ind=args.index("-fsa")
# er_sample_file=args[ind+1]
#else:
# er_sample_file="er_samples.txt"
er_sample_data={}
#er_sample_data=sort_magic_file(samp_file,1,'er_sample_name')
try:
er_sample_data=sort_magic_file(samp_file,1,'er_sample_name')
print("-I- Found er_samples.txt")
print('-I- sample information will be appended to existing er_samples.txt file')
except:
print("-I- Cant find file er_samples.txt")
print('-I- sample information will be stored in new er_samples.txt file')
#--------------------------------------
# read data from generic file
#--------------------------------------
if noave:
mag_data=read_generic_file(magfile,False)
else:
mag_data=read_generic_file(magfile,True)
#--------------------------------------
# for each specimen get the data, and translate it to MagIC format
#--------------------------------------
ErSamplesRecs=[]
MagRecs=[]
specimens_list=list(mag_data.keys())
specimens_list.sort()
for specimen in specimens_list:
measurement_running_number=0
this_specimen_treatments=[] # a list of all treatments
MagRecs_this_specimen=[]
LP_this_specimen=[] # a list of all lab protocols
IZ,ZI=0,0 # counter for IZ and ZI steps
for meas_line in mag_data[specimen]:
#------------------
# trivial MagRec data
#------------------
MagRec={}
MagRec['er_citation_names']="This study"
MagRec["er_specimen_name"]=meas_line['specimen']
MagRec["er_sample_name"]=get_upper_level_name(MagRec["er_specimen_name"],sample_nc)
MagRec["er_site_name"]=get_upper_level_name(MagRec["er_sample_name"],site_nc)
MagRec['er_location_name']=er_location_name
MagRec['er_analyst_mail_names']=user
MagRec["magic_instrument_codes"]=""
MagRec["measurement_flag"]='g'
MagRec["measurement_number"]="%i"%measurement_running_number
MagRec["measurement_magn_moment"]='%10.3e'%(float(meas_line["moment"])*1e-3) # in Am^2
MagRec["measurement_temp"]='273.' # room temp in kelvin
#------------------
# decode treatments from treatment column in the generic file
#------------------
treatment=[]
treatment_code=str(meas_line['treatment']).split(".")
treatment.append(float(treatment_code[0]))
if len(treatment_code)==1:
treatment.append(0)
else:
treatment.append(float(treatment_code[1]))
#------------------
# lab field direction
#------------------
if experiment in ['PI','NLT','CR']:
if float(treatment[1])==0:
MagRec["treatment_dc_field"]="0"
MagRec["treatment_dc_field_phi"]="0"
MagRec["treatment_dc_field_theta"]="0"
elif not labfield:
print("-W- WARNING: labfield (-dc) is a required argument for this experiment type")
return False, "labfield (-dc) is a required argument for this experiment type"
else:
MagRec["treatment_dc_field"]='%8.3e'%(float(labfield))
MagRec["treatment_dc_field_phi"]="%.2f"%(float(labfield_phi))
MagRec["treatment_dc_field_theta"]="%.2f"%(float(labfield_theta))
else:
MagRec["treatment_dc_field"]=""
MagRec["treatment_dc_field_phi"]=""
MagRec["treatment_dc_field_theta"]=""
#------------------
# treatment temperature/peak field
#------------------
if experiment == 'Demag':
if meas_line['treatment_type']=='A':
MagRec['treatment_temp']="273."
MagRec["treatment_ac_field"]="%.3e"%(treatment[0]*1e-3)
elif meas_line['treatment_type']=='N':
MagRec['treatment_temp']="273."
MagRec["treatment_ac_field"]=""
else:
MagRec['treatment_temp']="%.2f"%(treatment[0]+273.)
MagRec["treatment_ac_field"]=""
else:
MagRec['treatment_temp']="%.2f"%(treatment[0]+273.)
MagRec["treatment_ac_field"]=""
#---------------------
# Lab treatment
# Lab protocol
#---------------------
#---------------------
# Lab treatment and lab protocoal for NRM:
#---------------------
if float(meas_line['treatment'])==0:
LT="LT-NO"
LP="" # will be filled later after finishing reading all measurements line
#---------------------
# Lab treatment and lab protocoal for paleointensity experiment
#---------------------
elif experiment =='PI':
LP="LP-PI-TRM"
if treatment[1]==0:
LT="LT-T-Z"
elif treatment[1]==1 or treatment[1]==10: # infield
LT="LT-T-I"
elif treatment[1]==2 or treatment[1]==20: # pTRM check
LT="LT-PTRM-I"
LP=LP+":"+"LP-PI-ALT-PTRM"
elif treatment[1]==3 or treatment[1]==30: # Tail check
LT="LT-PTRM-MD"
LP=LP+":"+"LP-PI-BT-MD"
elif treatment[1]==4 or treatment[1]==40: # Additivity check
LT="LT-PTRM-AC"
LP=LP+":"+"LP-PI-BT-MD"
elif treatment[1]==5 or treatment[1]==50: # Thellier protocol, second infield step
LT="LT-T-I"
LP=LP+":"+"LP-PI-II"
# adjust field direction in thellier protocol
MagRec["treatment_dc_field_phi"]="%.2f"%( (float(labfield_phi) +180.0)%360. )
MagRec["treatment_dc_field_theta"]="%.2f"%( float(labfield_theta)*-1 )
else:
print("-E- unknown measurement code specimen %s treatmemt %s"%(meas_line['specimen'],meas_line['treatment']))
MagRec={}
continue
# save all treatment in a list
# we will use this later to distinguidh between ZI / IZ / and IZZI
this_specimen_treatments.append(float(meas_line['treatment']))
if LT=="LT-T-Z":
if float(treatment[0]+0.1) in this_specimen_treatments:
LP=LP+":"+"LP-PI-IZ"
if LT=="LT-T-I":
if float(treatment[0]+0.0) in this_specimen_treatments:
LP=LP+":"+"LP-PI-ZI"
#---------------------
# Lab treatment and lab protocoal for demag experiment
#---------------------
elif "Demag" in experiment:
if meas_line['treatment_type']=='A':
LT="LT-AF-Z"
LP="LP-DIR-AF"
else:
LT="LT-T-Z"
LP="LP-DIR-T"
#---------------------
# Lab treatment and lab protocoal for ATRM experiment
#---------------------
elif experiment in ['ATRM','AARM']:
if experiment=='ATRM':
LP="LP-AN-TRM"
n_pos=atrm_n_pos
if n_pos!=6:
print("the program does not support ATRM in %i position."%n_pos)
continue
if experiment=='AARM':
#MagRec['treatment_temp']="273."
#MagRec["treatment_ac_field"]=""
LP="LP-AN-ARM"
n_pos=aarm_n_pos
if n_pos!=6:
print("the program does not support AARM in %i position."%n_pos)
continue
if treatment[1]==0:
if experiment=='ATRM':
LT="LT-T-Z"
MagRec['treatment_temp']="%.2f"%(treatment[0]+273.)
MagRec["treatment_ac_field"]=""
else:
LT="LT-AF-Z"
MagRec['treatment_temp']="273."
MagRec["treatment_ac_field"]="%.3e"%(treatment[0]*1e-3)
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
else:
if experiment=='ATRM':
if float(treatment[1])==70 or float(treatment[1])==7: # alteration check as final measurement
LT="LT-PTRM-I"
else:
LT="LT-T-I"
else:
LT="LT-AF-I"
MagRec["treatment_dc_field"]='%8.3e'%(float(labfield))
# find the direction of the lab field in two ways:
# (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
tdec=[0,90,0,180,270,0,0,90,0]
tinc=[0,0,90,0,0,-90,0,0,90]
if treatment[1] < 10:
ipos_code=int(treatment[1])-1
else:
ipos_code=int(old_div(treatment[1],10))-1
# (2) using the magnetization
if meas_line["dec_s"]!="":
DEC=float(meas_line["dec_s"])
INC=float(meas_line["inc_s"])
elif meas_line["dec_g"]!="":
DEC=float(meas_line["dec_g"])
INC=float(meas_line["inc_g"])
elif meas_line["dec_t"]!="":
DEC=float(meas_line["dec_t"])
INC=float(meas_line["inc_t"])
if DEC<0 and DEC>-359:
DEC=360.+DEC
if INC < 45 and INC > -45:
if DEC>315 or DEC<45: ipos_guess=0
if DEC>45 and DEC<135: ipos_guess=1
if DEC>135 and DEC<225: ipos_guess=3
if DEC>225 and DEC<315: ipos_guess=4
else:
if INC >45: ipos_guess=2
if INC <-45: ipos_guess=5
# prefer the guess over the code
ipos=ipos_guess
# check it
if treatment[1]!= 7 and treatment[1]!= 70:
if ipos_guess!=ipos_code:
print("-W- WARNING: check specimen %s step %s, anistropy measurements, coding does not match the direction of the lab field"%(specimen,meas_line['treatment']))
MagRec["treatment_dc_field_phi"]='%7.1f' %(tdec[ipos])
MagRec["treatment_dc_field_theta"]='%7.1f'% (tinc[ipos])
#---------------------
# Lab treatment and lab protocoal for cooling rate experiment
#---------------------
elif experiment == "CR":
cooling_times_list
LP="LP-CR-TRM"
MagRec["treatment_temp"]='%8.3e' % (float(treatment[0])+273.) # temp in kelvin
if treatment[1]==0:
LT="LT-T-Z"
MagRec["treatment_dc_field"]="0"
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
else:
if treatment[1]==7: # alteration check as final measurement
LT="LT-PTRM-I"
else:
LT="LT-T-I"
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_dc_field_phi"]='%7.1f' % (labfield_phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (labfield_theta) # labfield theta
indx=int(treatment[1])-1
# alteration check matjed as 0.7 in the measurement file
if indx==6:
cooling_time= cooling_times_list[-1]
else:
cooling_time=cooling_times_list[indx]
MagRec["measurement_description"]="cooling_rate"+":"+cooling_time+":"+"K/min"
#---------------------
# Lab treatment and lab protocoal for NLT experiment
#---------------------
elif 'NLT' in experiment :
print("Dont support yet NLT rate experiment file. Contact rshaar@ucsd.edu")
#---------------------
# magic_method_codes for this measurement only
# LP will be fixed after all measurement lines are read
#---------------------
MagRec["magic_method_codes"]=LT+":"+LP
#---------------------
# Demag experiments only:
# search if orientation data exists in er_samples.txt
# if not: create one and save
#---------------------
# see if core azimuth and tilt-corrected data are in er_samples.txt
sample=MagRec["er_sample_name"]
found_sample_azimuth,found_sample_dip,found_sample_bed_dip_direction,found_sample_bed_dip=False,False,False,False
if sample in list(er_sample_data.keys()):
if "sample_azimuth" in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_azimuth'] !="":
sample_azimuth=float(er_sample_data[sample]['sample_azimuth'])
found_sample_azimuth=True
if "sample_dip" in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_dip']!="":
sample_dip=float(er_sample_data[sample]['sample_dip'])
found_sample_dip=True
if "sample_bed_dip_direction" in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_bed_dip_direction']!="":
sample_bed_dip_direction=float(er_sample_data[sample]['sample_bed_dip_direction'])
found_sample_bed_dip_direction=True
if "sample_bed_dip" in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_bed_dip']!="":
sample_bed_dip=float(er_sample_data[sample]['sample_bed_dip'])
found_sample_bed_dip=True
else:
er_sample_data[sample]={}
#--------------------
# deal with specimen orientation and different coordinate system
#--------------------
found_s,found_geo,found_tilt=False,False,False
if "dec_s" in list(meas_line.keys()) and "inc_s" in list(meas_line.keys()):
if meas_line["dec_s"]!="" and meas_line["inc_s"]!="":
found_s=True
MagRec["measurement_dec"]=meas_line["dec_s"]
MagRec["measurement_inc"]=meas_line["inc_s"]
if "dec_g" in list(meas_line.keys()) and "inc_g" in list(meas_line.keys()):
if meas_line["dec_g"]!="" and meas_line["inc_g"]!="":
found_geo=True
if "dec_t" in list(meas_line.keys()) and "inc_t" in list(meas_line.keys()):
if meas_line["dec_t"]!="" and meas_line["inc_t"]!="":
found_tilt=True
#-----------------------------
# specimen coordinates: no
# geographic coordinates: yes
#-----------------------------
if found_geo and not found_s:
MagRec["measurement_dec"]=meas_line["dec_g"]
MagRec["measurement_inc"]=meas_line["inc_g"]
# core azimuth/plunge is not in er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
er_sample_data[sample]['sample_azimuth']="0"
er_sample_data[sample]['sample_dip']="0"
# core azimuth/plunge is in er_samples.txt
else:
sample_azimuth=float(er_sample_data[sample]['sample_azimuth'])
sample_dip=float(er_sample_data[sample]['sample_dip'])
if sample_azimuth!=0 and sample_dip!=0:
print("-W- WARNING: delete core azimuth/plunge in er_samples.txt\n\
becasue dec_s and inc_s are unavaialable")
#-----------------------------
# specimen coordinates: no
# geographic coordinates: no
#-----------------------------
if not found_geo and not found_s:
print("-E- ERROR: sample %s does not have dec_s/inc_s or dec_g/inc_g. Ignore specimen %s "%(sample,specimen))
break
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: yes
#
# commant: Ron, this need to be tested !!
#-----------------------------
if found_geo and found_s:
cdec,cinc=float(meas_line["dec_s"]),float(meas_line["inc_s"])
gdec,ginc=float(meas_line["dec_g"]),float(meas_line["inc_g"])
az,pl=pmag.get_azpl(cdec,cinc,gdec,ginc)
# core azimuth/plunge is not in er_samples.txt:
# calculate core az/pl and add it to er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
er_sample_data[sample]['sample_azimuth']="%.1f"%az
er_sample_data[sample]['sample_dip']="%.1f"%pl
# core azimuth/plunge is in er_samples.txt
else:
if float(er_sample_data[sample]['sample_azimuth'])!= az:
print("-E- ERROR in sample_azimuth sample %s. Check it! using the value in er_samples.txt"%sample)
if float(er_sample_data[sample]['sample_dip'])!= pl:
print("-E- ERROR in sample_dip sample %s. Check it! using the value in er_samples.txt"%sample)
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: no
#-----------------------------
if not found_geo and found_s:
if found_sample_dip and found_sample_azimuth:
pass
# (nothing to do)
else:
if "Demag" in experiment:
print("-W- WARNING: missing sample_dip or sample_azimuth for sample %s"%sample)
#-----------------------------
# tilt-corrected coordinates: yes
# geographic coordinates: no
#-----------------------------
if found_tilt and not found_geo:
print("-E- ERROR: missing geographic data for sample %s. Ignoring tilt-corrected data "%sample)
if found_tilt and found_geo:
dec_geo,inc_geo=float(meas_line["dec_g"]),float(meas_line["inc_g"])
dec_tilt,inc_tilt=float(meas_line["dec_t"]),float(meas_line["inc_t"])
if dec_geo==dec_tilt and inc_geo==inc_tilt:
DipDir,Dip=0.,0.
else:
DipDir,Dip=pmag.get_tilt(dec_geo,inc_geo,dec_tilt,inc_tilt)
if not found_sample_bed_dip_direction or not found_sample_bed_dip:
print("-I- calculating dip and dip direction used for tilt correction sample %s. results are put in er_samples.txt"%sample)
er_sample_data[sample]['sample_bed_dip_direction']="%.1f"%DipDir
er_sample_data[sample]['sample_bed_dip']="%.1f"%Dip
#-----------------------------
# er_samples method codes
# geographic coordinates: no
#-----------------------------
if found_tilt or found_geo:
er_sample_data[sample]['magic_method_codes']="SO-NO"
#-----------------
# er_samples_data
#-----------------
if sample in list(er_sample_data.keys()):
er_sample_data[sample]['er_sample_name']=sample
er_sample_data[sample]['er_site_name']=MagRec["er_site_name"]
er_sample_data[sample]['er_location_name']=MagRec["er_location_name"]
#MagRec["magic_method_codes"]=LT
MagRecs_this_specimen.append(MagRec)
#if LP!="" and LP not in LP_this_specimen:
# LP_this_specimen.append(LP)
measurement_running_number+=1
#-------
#-------
# after reading all the measurements lines for this specimen
# 1) add magic_experiment_name
# 2) fix magic_method_codes with the correct lab protocol
#-------
LP_this_specimen=[]
for MagRec in MagRecs_this_specimen:
magic_method_codes=MagRec["magic_method_codes"].split(":")
for code in magic_method_codes:
if "LP" in code and code not in LP_this_specimen:
LP_this_specimen.append(code)
# check IZ/ZI/IZZI
if "LP-PI-ZI" in LP_this_specimen and "LP-PI-IZ" in LP_this_specimen:
LP_this_specimen.remove("LP-PI-ZI")
LP_this_specimen.remove("LP-PI-IZ")
LP_this_specimen.append("LP-PI-BT-IZZI")
# add the right LP codes and fix experiment name
for MagRec in MagRecs_this_specimen:
MagRec["magic_experiment_name"]=MagRec["er_specimen_name"]+":"+":".join(LP_this_specimen)
magic_method_codes=MagRec["magic_method_codes"].split(":")
LT=""
for code in magic_method_codes:
if code[:3]=="LT-":
LT=code;
break
MagRec["magic_method_codes"]=LT+":"+":".join(LP_this_specimen)
MagRecs.append(MagRec)
#--
# write magic_measurements.txt
#--
MagRecs_fixed=merge_pmag_recs(MagRecs)
pmag.magic_write(meas_file,MagRecs_fixed,'magic_measurements')
print("-I- MagIC file is saved in %s"%meas_file)
#--
# write er_samples.txt
#--
ErSamplesRecs=[]
samples=list(er_sample_data.keys())
samples.sort()
for sample in samples:
ErSamplesRecs.append(er_sample_data[sample])
ErSamplesRecs_fixed=merge_pmag_recs(ErSamplesRecs)
pmag.magic_write(samp_file,ErSamplesRecs_fixed,'er_samples')
return True, meas_file | def function[main, parameter[command_line]]:
constant[
NAME
generic_magic.py
DESCRIPTION
converts magnetometer files in generic format to MagIC measurements format
SYNTAX
generic_magic.py [command line options]
OPTIONS
-h
prints the help message and quits.
-usr USER
identify user, default is ""
-f FILE:
specify path to input file, required
-fsa SAMPFILE:
specify the samples file for sample orientation data. default is er_samples.txt
-F FILE
specify output file, default is magic_measurements.txt
-Fsa FILE
specify output file, default is er_samples.txt
-exp EXPERIMENT-TYPE
Demag:
AF and/or Thermal
PI:
paleointenisty thermal experiment (ZI/IZ/IZZI/TT)
ATRM n:
ATRM in n positions (n=6)
AARM n:
AARM in n positions
CR:
cooling rate experiment
The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional)
where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps.
XXX.00 is optional zerofield baseline. XXX.70 is alteration check.
syntax in sio_magic is: -LP CR xxx,yyy,zzz,.....xx -A
where xx, yyy,zzz...xxx are cooling rates in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70
No need to specify the cooling rate for the zerofield
It is important to add to the command line the -A option so the measurements will not be averaged.
But users need to make sure that there are no duplicate meaurements in the file
NLT:
non-linear-TRM experiment
-samp X Y
specimen-sample naming convention.
X determines which kind of convention (initial characters, terminal characters, or delimiter
Y determines how many characters to remove to go from specimen --> sample OR which delimiter to use
X=0 Y=n: specimen is distinguished from sample by n initial characters.
(example: "generic_magic.py -samp 0 4"
if n=4 then and specimen = mgf13a then sample = mgf13)
X=1 Y=n: specimen is distiguished from sample by n terminate characters.
(example: "generic_magic.py -samp 1 1)
if n=1 then and specimen = mgf13a then sample = mgf13)
X=2 Y=c: specimen is distinguishing from sample by a delimiter.
(example: "generic_magic.py -samp 2 -"
if c=- then and specimen = mgf13-a then sample = mgf13)
default: sample is the same as specimen name
-site X Y
sample-site naming convention.
X determines which kind of convention (initial characters, terminal characters, or delimiter
Y determines how many characters to remove to go from sample --> site OR which delimiter to use
X=0 Y=n: sample is distiguished from site by n initial characters.
(example: "generic_magic.py --site 0 3"
if n=3 then and sample = mgf13 then sample = mgf)
X=1 Y=n: sample is distiguished from site by n terminate characters.
(example: "generic_magic.py --site 1 2"
if n=2 and sample = mgf13 then site = mgf)
X=2 Y=c: specimen is distiguishing from sample by a delimiter.
(example: "generic_magic.py -site 2 -"
if c='-' and sample = 'mgf-13' then site = mgf)
default: site name is the same as sample name
-loc LOCNAM
specify location/study name.
-dc B PHI THETA:
B: dc lab field (in micro tesla)
PHI (declination). takes numbers from 0 to 360
THETA (inclination). takes numbers from -90 to 90
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment.
-A: don't average replicate measurements. Take the last measurement from replicate measurements.
-WD working directory
INPUT
A generic file is a tab-delimited file. Each column should have a header.
The file must include the following headers (the order of the columns is not important):
specimen
string specifying specimen name
treatment:
a number with one or two decimal point (X.Y)
coding for thermal demagnetization:
0.0 or 0 is NRM.
X is temperature in celsius
Y is always 0
coding for AF demagnetization:
0.0 or 0 is NRM.
X is AF peak field in mT
Y is always 0
coding for Thellier-type experiment:
0.0 or 0 is NRM
X is temperature in celsius
Y=0: zerofield
Y=1: infield (IZZI, IZ, ZI, and Thellier protocol- first infield)
Y=2: pTRM check
Y=3: pTRM tail check
Y=4: Additivity check
Y=5: Thellier protocol: second infield
coding for ATRM experiment (6 poitions):
X is temperature in celsius
Y=0: zerofield baseline to be subtracted
Y=1: +x
Y=2: -x
Y=3: +y
Y=4: -y
Y=5: +z
Y=6: -z
Y=7: alteration check
coding for NLT experiment:
X is temperature in celsius
Y=0: zerofield baseline to be subtracted
Y!=0: oven field in microT
coding for CR experiment:
see "OPTIONS" list above
treatment_type:
N: NRM
A: AF
T: Thermal
moment:
magnetic moment in emu !!
In addition, at least one of the following headers are required:
dec_s:
declination in specimen coordinate system (0 to 360)
inc_s:
inclination in specimen coordinate system (-90 to 90)
dec_g:
declination in geographic coordinate system (0 to 360)
inc_g:
inclination in geographic coordinate system (-90 to 90)
dec_t:
declination in tilt-corrected coordinate system (0 to 360)
inc_t:
inclination in tilt-corrected coordinate system (-90 to 90)
]
def function[sort_magic_file, parameter[path, ignore_lines_n, sort_by_this_name]]:
constant[
reads a file with headers. Each line is stored as a dictionary following the headers.
Lines are sorted in DATA by the sort_by_this_name header
DATA[sort_by_this_name]=[dictionary1,dictionary2,...]
]
variable[DATA] assign[=] dictionary[[], []]
variable[fin] assign[=] call[name[open], parameter[name[path], constant[r]]]
for taget[name[i]] in starred[call[name[range], parameter[name[ignore_lines_n]]]] begin[:]
call[name[fin].readline, parameter[]]
variable[line] assign[=] call[name[fin].readline, parameter[]]
variable[header] assign[=] call[call[name[line].strip, parameter[constant[
]]].split, parameter[constant[ ]]]
for taget[name[line]] in starred[call[name[fin].readlines, parameter[]]] begin[:]
if compare[call[name[line]][constant[0]] equal[==] constant[#]] begin[:]
continue
variable[tmp_data] assign[=] dictionary[[], []]
variable[tmp_line] assign[=] call[call[name[line].strip, parameter[constant[
]]].split, parameter[constant[ ]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[tmp_line]]]]]] begin[:]
if compare[name[i] greater_or_equal[>=] call[name[len], parameter[name[header]]]] begin[:]
continue
call[name[tmp_data]][call[name[header]][name[i]]] assign[=] call[name[tmp_line]][name[i]]
call[name[DATA]][call[name[tmp_data]][name[sort_by_this_name]]] assign[=] name[tmp_data]
call[name[fin].close, parameter[]]
return[name[DATA]]
def function[read_generic_file, parameter[path, average_replicates]]:
constant[
reads a generic file format. If average_replicates==True average replicate measurements.
Rrturns a Data dictionary with measurements line sorted by specimen
Data[specimen_name][dict1,dict2,...]
]
variable[Data] assign[=] dictionary[[], []]
variable[Fin] assign[=] call[name[open], parameter[name[path], constant[r]]]
variable[header] assign[=] call[call[call[name[Fin].readline, parameter[]].strip, parameter[constant[
]]].split, parameter[constant[ ]]]
variable[duplicates] assign[=] list[[]]
for taget[name[line]] in starred[call[name[Fin].readlines, parameter[]]] begin[:]
variable[tmp_data] assign[=] dictionary[[], []]
variable[l] assign[=] call[call[name[line].strip, parameter[constant[
]]].split, parameter[constant[ ]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[min], parameter[call[name[len], parameter[name[header]]], call[name[len], parameter[name[l]]]]]]]] begin[:]
call[name[tmp_data]][call[name[header]][name[i]]] assign[=] call[name[l]][name[i]]
variable[specimen] assign[=] call[name[tmp_data]][constant[specimen]]
if compare[name[specimen] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[Data].keys, parameter[]]]]] begin[:]
call[name[Data]][name[specimen]] assign[=] list[[]]
call[call[name[Data]][name[specimen]].append, parameter[name[tmp_data]]]
for taget[name[specimen]] in starred[call[name[list], parameter[call[name[Data].keys, parameter[]]]]] begin[:]
variable[x] assign[=] binary_operation[call[name[len], parameter[call[name[Data]][name[specimen]]]] - constant[1]]
variable[new_data] assign[=] list[[]]
variable[duplicates] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[x]]]] begin[:]
while <ast.BoolOp object at 0x7da1b0405f60> begin[:]
call[name[duplicates].append, parameter[call[call[name[Data]][name[specimen]]][name[i]]]]
<ast.Delete object at 0x7da1b0407760>
if compare[call[name[len], parameter[name[duplicates]]] greater[>] constant[0]] begin[:]
if name[average_replicates] begin[:]
call[name[duplicates].append, parameter[call[call[name[Data]][name[specimen]]][binary_operation[name[i] - constant[1]]]]]
call[call[name[Data]][name[specimen]]][binary_operation[name[i] - constant[1]]] assign[=] call[name[average_duplicates], parameter[name[duplicates]]]
call[name[print], parameter[binary_operation[constant[-W- WARNING: averaging %i duplicates for specimen %s treatmant %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0407280>, <ast.Name object at 0x7da1b0406ec0>, <ast.Subscript object at 0x7da1b0406e30>]]]]]
variable[duplicates] assign[=] list[[]]
if compare[name[i] equal[==] binary_operation[call[name[len], parameter[call[name[Data]][name[specimen]]]] - constant[1]]] begin[:]
break
return[name[Data]]
def function[average_duplicates, parameter[duplicates]]:
constant[
avarage replicate measurements.
]
<ast.Tuple object at 0x7da1b04a7610> assign[=] tuple[[<ast.List object at 0x7da1b04a7520>, <ast.List object at 0x7da1b04a74f0>, <ast.List object at 0x7da1b04a74c0>]]
for taget[name[rec]] in starred[name[duplicates]] begin[:]
variable[moment] assign[=] call[name[float], parameter[call[name[rec]][constant[moment]]]]
if <ast.BoolOp object at 0x7da1b04a7280> begin[:]
if <ast.BoolOp object at 0x7da1b04a6f80> begin[:]
variable[dec_s] assign[=] call[name[float], parameter[call[name[rec]][constant[dec_s]]]]
variable[inc_s] assign[=] call[name[float], parameter[call[name[rec]][constant[inc_s]]]]
variable[cart_s] assign[=] call[name[pmag].dir2cart, parameter[list[[<ast.Name object at 0x7da1b04a69b0>, <ast.Name object at 0x7da1b04a6980>, <ast.Name object at 0x7da1b04a6950>]]]]
call[name[carts_s].append, parameter[name[cart_s]]]
if <ast.BoolOp object at 0x7da1b04a67d0> begin[:]
if <ast.BoolOp object at 0x7da1b04a64d0> begin[:]
variable[dec_g] assign[=] call[name[float], parameter[call[name[rec]][constant[dec_g]]]]
variable[inc_g] assign[=] call[name[float], parameter[call[name[rec]][constant[inc_g]]]]
variable[cart_g] assign[=] call[name[pmag].dir2cart, parameter[list[[<ast.Name object at 0x7da1b04a5f00>, <ast.Name object at 0x7da1b04a5ed0>, <ast.Name object at 0x7da1b04a5ea0>]]]]
call[name[carts_g].append, parameter[name[cart_g]]]
if <ast.BoolOp object at 0x7da1b04a5d20> begin[:]
if <ast.BoolOp object at 0x7da1b04a5a20> begin[:]
variable[dec_t] assign[=] call[name[float], parameter[call[name[rec]][constant[dec_t]]]]
variable[inc_t] assign[=] call[name[float], parameter[call[name[rec]][constant[inc_t]]]]
variable[cart_t] assign[=] call[name[pmag].dir2cart, parameter[list[[<ast.Name object at 0x7da1b04a5450>, <ast.Name object at 0x7da1b04a5420>, <ast.Name object at 0x7da1b04a53f0>]]]]
call[name[carts_t].append, parameter[name[cart_t]]]
if compare[call[name[len], parameter[name[carts_s]]] greater[>] constant[0]] begin[:]
variable[carts] assign[=] call[name[scipy].array, parameter[name[carts_s]]]
variable[x_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b04a4ee0>, <ast.Constant object at 0x7da1b04a4eb0>]]]]]
variable[y_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b04a4d00>, <ast.Constant object at 0x7da1b04a4cd0>]]]]]
variable[z_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b04a4b20>, <ast.Constant object at 0x7da1b04a4af0>]]]]]
variable[mean_dir] assign[=] call[name[pmag].cart2dir, parameter[list[[<ast.Name object at 0x7da1b04a49a0>, <ast.Name object at 0x7da1b04a4970>, <ast.Name object at 0x7da1b04a4940>]]]]
variable[mean_dec_s] assign[=] binary_operation[constant[%.2f] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[0]]]
variable[mean_inc_s] assign[=] binary_operation[constant[%.2f] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[1]]]
variable[mean_moment] assign[=] binary_operation[constant[%10.3e] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[2]]]
if compare[call[name[len], parameter[name[carts_g]]] greater[>] constant[0]] begin[:]
variable[carts] assign[=] call[name[scipy].array, parameter[name[carts_g]]]
variable[x_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b0473fd0>, <ast.Constant object at 0x7da1b0473fa0>]]]]]
variable[y_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b0473df0>, <ast.Constant object at 0x7da1b0473dc0>]]]]]
variable[z_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b04cb6a0>, <ast.Constant object at 0x7da1b04cb670>]]]]]
variable[mean_dir] assign[=] call[name[pmag].cart2dir, parameter[list[[<ast.Name object at 0x7da1b04cb520>, <ast.Name object at 0x7da1b04cb4f0>, <ast.Name object at 0x7da1b04cb4c0>]]]]
variable[mean_dec_g] assign[=] binary_operation[constant[%.2f] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[0]]]
variable[mean_inc_g] assign[=] binary_operation[constant[%.2f] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[1]]]
variable[mean_moment] assign[=] binary_operation[constant[%10.3e] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[2]]]
if compare[call[name[len], parameter[name[carts_t]]] greater[>] constant[0]] begin[:]
variable[carts] assign[=] call[name[scipy].array, parameter[name[carts_t]]]
variable[x_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b04cab90>, <ast.Constant object at 0x7da1b04cab60>]]]]]
variable[y_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b04ca9b0>, <ast.Constant object at 0x7da1b04ca980>]]]]]
variable[z_mean] assign[=] call[name[scipy].mean, parameter[call[name[carts]][tuple[[<ast.Slice object at 0x7da1b04ca7d0>, <ast.Constant object at 0x7da1b04ca7a0>]]]]]
variable[mean_dir] assign[=] call[name[pmag].cart2dir, parameter[list[[<ast.Name object at 0x7da1b04ca650>, <ast.Name object at 0x7da1b04ca620>, <ast.Name object at 0x7da1b04ca5f0>]]]]
variable[mean_dec_t] assign[=] binary_operation[constant[%.2f] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[0]]]
variable[mean_inc_t] assign[=] binary_operation[constant[%.2f] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[1]]]
variable[mean_moment] assign[=] binary_operation[constant[%10.3e] <ast.Mod object at 0x7da2590d6920> call[name[mean_dir]][constant[2]]]
variable[meanrec] assign[=] dictionary[[], []]
for taget[name[key]] in starred[call[name[list], parameter[call[call[name[duplicates]][constant[0]].keys, parameter[]]]]] begin[:]
if compare[name[key] in list[[<ast.Constant object at 0x7da1b04c9d80>, <ast.Constant object at 0x7da1b04c9d50>, <ast.Constant object at 0x7da1b04c9d20>, <ast.Constant object at 0x7da1b04c9cf0>, <ast.Constant object at 0x7da1b04c9cc0>, <ast.Constant object at 0x7da1b04c9c90>, <ast.Constant object at 0x7da1b04c9c60>]]] begin[:]
continue
call[name[meanrec]][constant[dec_s]] assign[=] name[mean_dec_s]
call[name[meanrec]][constant[dec_g]] assign[=] name[mean_dec_g]
call[name[meanrec]][constant[dec_t]] assign[=] name[mean_dec_t]
call[name[meanrec]][constant[inc_s]] assign[=] name[mean_inc_s]
call[name[meanrec]][constant[inc_g]] assign[=] name[mean_inc_g]
call[name[meanrec]][constant[inc_t]] assign[=] name[mean_inc_t]
call[name[meanrec]][constant[moment]] assign[=] name[mean_moment]
return[name[meanrec]]
def function[get_upper_level_name, parameter[name, nc]]:
constant[
get sample/site name from specimen/sample using naming convention
]
if compare[call[name[float], parameter[call[name[nc]][constant[0]]]] equal[==] constant[0]] begin[:]
if compare[call[name[float], parameter[call[name[nc]][constant[1]]]] not_equal[!=] constant[0]] begin[:]
variable[number_of_char] assign[=] call[name[int], parameter[call[name[nc]][constant[1]]]]
variable[high_name] assign[=] call[name[name]][<ast.Slice object at 0x7da1b04c8ca0>]
return[name[high_name]]
def function[merge_pmag_recs, parameter[old_recs]]:
variable[recs] assign[=] dictionary[[], []]
variable[recs] assign[=] call[name[copy].deepcopy, parameter[name[old_recs]]]
variable[headers] assign[=] list[[]]
for taget[name[rec]] in starred[name[recs]] begin[:]
for taget[name[key]] in starred[call[name[list], parameter[call[name[rec].keys, parameter[]]]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[headers]] begin[:]
call[name[headers].append, parameter[name[key]]]
for taget[name[rec]] in starred[name[recs]] begin[:]
for taget[name[header]] in starred[name[headers]] begin[:]
if compare[name[header] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[rec].keys, parameter[]]]]] begin[:]
call[name[rec]][name[header]] assign[=] constant[]
return[name[recs]]
variable[experiment] assign[=] constant[]
variable[sample_nc] assign[=] list[[<ast.Constant object at 0x7da1b0576470>, <ast.Constant object at 0x7da1b0576440>]]
variable[site_nc] assign[=] list[[<ast.Constant object at 0x7da1b05764a0>, <ast.Constant object at 0x7da1b0576530>]]
variable[meas_file] assign[=] constant[magic_measurements.txt]
variable[labfield] assign[=] constant[0]
if name[command_line] begin[:]
variable[args] assign[=] name[sys].argv
variable[user] assign[=] constant[]
if compare[constant[-h] in name[args]] begin[:]
call[name[print], parameter[name[main].__doc__]]
return[constant[False]]
if compare[constant[-usr] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-usr]]]
variable[user] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-F] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-F]]]
variable[meas_file] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-Fsa] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-Fsa]]]
variable[samp_file] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-f] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-f]]]
variable[magfile] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-dc] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-dc]]]
variable[labfield] assign[=] binary_operation[call[name[float], parameter[call[name[args]][binary_operation[name[ind] + constant[1]]]]] * constant[1e-06]]
variable[labfield_phi] assign[=] call[name[float], parameter[call[name[args]][binary_operation[name[ind] + constant[2]]]]]
variable[labfield_theta] assign[=] call[name[float], parameter[call[name[args]][binary_operation[name[ind] + constant[3]]]]]
if compare[constant[-exp] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-exp]]]
variable[experiment] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-samp] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-samp]]]
variable[sample_nc] assign[=] list[[]]
call[name[sample_nc].append, parameter[call[name[args]][binary_operation[name[ind] + constant[1]]]]]
call[name[sample_nc].append, parameter[call[name[args]][binary_operation[name[ind] + constant[2]]]]]
if compare[constant[-site] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-site]]]
variable[site_nc] assign[=] list[[]]
call[name[site_nc].append, parameter[call[name[args]][binary_operation[name[ind] + constant[1]]]]]
call[name[site_nc].append, parameter[call[name[args]][binary_operation[name[ind] + constant[2]]]]]
if compare[constant[-loc] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-loc]]]
variable[er_location_name] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-A] in name[args]] begin[:]
variable[noave] assign[=] constant[1]
if compare[constant[-WD] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-WD]]]
variable[WD] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
call[name[os].chdir, parameter[name[WD]]]
if <ast.UnaryOp object at 0x7da1b047f190> begin[:]
variable[user] assign[=] call[name[kwargs].get, parameter[constant[user], constant[]]]
variable[meas_file] assign[=] call[name[kwargs].get, parameter[constant[meas_file], constant[magic_measurements.txt]]]
variable[samp_file] assign[=] call[name[kwargs].get, parameter[constant[samp_file], constant[er_samples.txt]]]
variable[magfile] assign[=] call[name[kwargs].get, parameter[constant[magfile], constant[]]]
variable[labfield] assign[=] call[name[int], parameter[call[name[kwargs].get, parameter[constant[labfield], constant[0]]]]]
if name[labfield] begin[:]
<ast.AugAssign object at 0x7da1b047cbe0>
variable[labfield_phi] assign[=] call[name[int], parameter[call[name[kwargs].get, parameter[constant[labfield_phi], constant[0]]]]]
variable[labfield_theta] assign[=] call[name[int], parameter[call[name[kwargs].get, parameter[constant[labfield_theta], constant[0]]]]]
variable[experiment] assign[=] call[name[kwargs].get, parameter[constant[experiment], constant[]]]
variable[cooling_times] assign[=] call[name[kwargs].get, parameter[constant[cooling_times_list], constant[]]]
variable[sample_nc] assign[=] call[name[kwargs].get, parameter[constant[sample_nc], list[[<ast.Constant object at 0x7da1b047e320>, <ast.Constant object at 0x7da1b047e500>]]]]
variable[site_nc] assign[=] call[name[kwargs].get, parameter[constant[site_nc], list[[<ast.Constant object at 0x7da1b047cfd0>, <ast.Constant object at 0x7da1b047c6a0>]]]]
variable[er_location_name] assign[=] call[name[kwargs].get, parameter[constant[er_location_name], constant[]]]
variable[noave] assign[=] call[name[kwargs].get, parameter[constant[noave], constant[0]]]
variable[WD] assign[=] call[name[kwargs].get, parameter[constant[WD], constant[.]]]
if name[magfile] begin[:]
<ast.Try object at 0x7da1b047e6b0>
if <ast.UnaryOp object at 0x7da1b047cfa0> begin[:]
call[name[print], parameter[constant[-exp is required option. Please provide experiment type of: Demag, PI, ATRM n (n of positions), CR (see below for format), NLT]]]
call[name[print], parameter[name[main].__doc__]]
return[tuple[[<ast.Constant object at 0x7da1b047ffd0>, <ast.Constant object at 0x7da1b047dde0>]]]
if compare[name[experiment] equal[==] constant[ATRM]] begin[:]
if name[command_line] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[ATRM]]]
variable[atrm_n_pos] assign[=] call[name[int], parameter[call[name[args]][binary_operation[name[ind] + constant[1]]]]]
if compare[name[experiment] equal[==] constant[AARM]] begin[:]
if name[command_line] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[AARM]]]
variable[aarm_n_pos] assign[=] call[name[int], parameter[call[name[args]][binary_operation[name[ind] + constant[1]]]]]
if compare[name[experiment] equal[==] constant[CR]] begin[:]
if name[command_line] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[CR]]]
variable[cooling_times] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
variable[cooling_times_list] assign[=] call[name[cooling_times].split, parameter[constant[,]]]
variable[er_sample_data] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b01f8490>
if name[noave] begin[:]
variable[mag_data] assign[=] call[name[read_generic_file], parameter[name[magfile], constant[False]]]
variable[ErSamplesRecs] assign[=] list[[]]
variable[MagRecs] assign[=] list[[]]
variable[specimens_list] assign[=] call[name[list], parameter[call[name[mag_data].keys, parameter[]]]]
call[name[specimens_list].sort, parameter[]]
for taget[name[specimen]] in starred[name[specimens_list]] begin[:]
variable[measurement_running_number] assign[=] constant[0]
variable[this_specimen_treatments] assign[=] list[[]]
variable[MagRecs_this_specimen] assign[=] list[[]]
variable[LP_this_specimen] assign[=] list[[]]
<ast.Tuple object at 0x7da1b01e5270> assign[=] tuple[[<ast.Constant object at 0x7da1b01e72e0>, <ast.Constant object at 0x7da1b01e7d00>]]
for taget[name[meas_line]] in starred[call[name[mag_data]][name[specimen]]] begin[:]
variable[MagRec] assign[=] dictionary[[], []]
call[name[MagRec]][constant[er_citation_names]] assign[=] constant[This study]
call[name[MagRec]][constant[er_specimen_name]] assign[=] call[name[meas_line]][constant[specimen]]
call[name[MagRec]][constant[er_sample_name]] assign[=] call[name[get_upper_level_name], parameter[call[name[MagRec]][constant[er_specimen_name]], name[sample_nc]]]
call[name[MagRec]][constant[er_site_name]] assign[=] call[name[get_upper_level_name], parameter[call[name[MagRec]][constant[er_sample_name]], name[site_nc]]]
call[name[MagRec]][constant[er_location_name]] assign[=] name[er_location_name]
call[name[MagRec]][constant[er_analyst_mail_names]] assign[=] name[user]
call[name[MagRec]][constant[magic_instrument_codes]] assign[=] constant[]
call[name[MagRec]][constant[measurement_flag]] assign[=] constant[g]
call[name[MagRec]][constant[measurement_number]] assign[=] binary_operation[constant[%i] <ast.Mod object at 0x7da2590d6920> name[measurement_running_number]]
call[name[MagRec]][constant[measurement_magn_moment]] assign[=] binary_operation[constant[%10.3e] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[float], parameter[call[name[meas_line]][constant[moment]]]] * constant[0.001]]]
call[name[MagRec]][constant[measurement_temp]] assign[=] constant[273.]
variable[treatment] assign[=] list[[]]
variable[treatment_code] assign[=] call[call[name[str], parameter[call[name[meas_line]][constant[treatment]]]].split, parameter[constant[.]]]
call[name[treatment].append, parameter[call[name[float], parameter[call[name[treatment_code]][constant[0]]]]]]
if compare[call[name[len], parameter[name[treatment_code]]] equal[==] constant[1]] begin[:]
call[name[treatment].append, parameter[constant[0]]]
if compare[name[experiment] in list[[<ast.Constant object at 0x7da1b0337370>, <ast.Constant object at 0x7da1b0335de0>, <ast.Constant object at 0x7da1b0336d70>]]] begin[:]
if compare[call[name[float], parameter[call[name[treatment]][constant[1]]]] equal[==] constant[0]] begin[:]
call[name[MagRec]][constant[treatment_dc_field]] assign[=] constant[0]
call[name[MagRec]][constant[treatment_dc_field_phi]] assign[=] constant[0]
call[name[MagRec]][constant[treatment_dc_field_theta]] assign[=] constant[0]
if compare[name[experiment] equal[==] constant[Demag]] begin[:]
if compare[call[name[meas_line]][constant[treatment_type]] equal[==] constant[A]] begin[:]
call[name[MagRec]][constant[treatment_temp]] assign[=] constant[273.]
call[name[MagRec]][constant[treatment_ac_field]] assign[=] binary_operation[constant[%.3e] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[treatment]][constant[0]] * constant[0.001]]]
if compare[call[name[float], parameter[call[name[meas_line]][constant[treatment]]]] equal[==] constant[0]] begin[:]
variable[LT] assign[=] constant[LT-NO]
variable[LP] assign[=] constant[]
call[name[MagRec]][constant[magic_method_codes]] assign[=] binary_operation[binary_operation[name[LT] + constant[:]] + name[LP]]
variable[sample] assign[=] call[name[MagRec]][constant[er_sample_name]]
<ast.Tuple object at 0x7da1b03bf580> assign[=] tuple[[<ast.Constant object at 0x7da1b03bf6a0>, <ast.Constant object at 0x7da1b03bf6d0>, <ast.Constant object at 0x7da1b03bf700>, <ast.Constant object at 0x7da1b03bf730>]]
if compare[name[sample] in call[name[list], parameter[call[name[er_sample_data].keys, parameter[]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b03bf910> begin[:]
variable[sample_azimuth] assign[=] call[name[float], parameter[call[call[name[er_sample_data]][name[sample]]][constant[sample_azimuth]]]]
variable[found_sample_azimuth] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b03bfeb0> begin[:]
variable[sample_dip] assign[=] call[name[float], parameter[call[call[name[er_sample_data]][name[sample]]][constant[sample_dip]]]]
variable[found_sample_dip] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b0398490> begin[:]
variable[sample_bed_dip_direction] assign[=] call[name[float], parameter[call[call[name[er_sample_data]][name[sample]]][constant[sample_bed_dip_direction]]]]
variable[found_sample_bed_dip_direction] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b0398a30> begin[:]
variable[sample_bed_dip] assign[=] call[name[float], parameter[call[call[name[er_sample_data]][name[sample]]][constant[sample_bed_dip]]]]
variable[found_sample_bed_dip] assign[=] constant[True]
<ast.Tuple object at 0x7da1b03990c0> assign[=] tuple[[<ast.Constant object at 0x7da1b03991b0>, <ast.Constant object at 0x7da1b03991e0>, <ast.Constant object at 0x7da1b0399210>]]
if <ast.BoolOp object at 0x7da1b0399270> begin[:]
if <ast.BoolOp object at 0x7da1b0399570> begin[:]
variable[found_s] assign[=] constant[True]
call[name[MagRec]][constant[measurement_dec]] assign[=] call[name[meas_line]][constant[dec_s]]
call[name[MagRec]][constant[measurement_inc]] assign[=] call[name[meas_line]][constant[inc_s]]
if <ast.BoolOp object at 0x7da1b0399ae0> begin[:]
if <ast.BoolOp object at 0x7da1b0399de0> begin[:]
variable[found_geo] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b039a0b0> begin[:]
if <ast.BoolOp object at 0x7da1b039a3b0> begin[:]
variable[found_tilt] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b039a680> begin[:]
call[name[MagRec]][constant[measurement_dec]] assign[=] call[name[meas_line]][constant[dec_g]]
call[name[MagRec]][constant[measurement_inc]] assign[=] call[name[meas_line]][constant[inc_g]]
if <ast.BoolOp object at 0x7da1b039aa10> begin[:]
call[call[name[er_sample_data]][name[sample]]][constant[sample_azimuth]] assign[=] constant[0]
call[call[name[er_sample_data]][name[sample]]][constant[sample_dip]] assign[=] constant[0]
if <ast.BoolOp object at 0x7da1b039b3a0> begin[:]
call[name[print], parameter[binary_operation[constant[-E- ERROR: sample %s does not have dec_s/inc_s or dec_g/inc_g. Ignore specimen %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b039b5b0>, <ast.Name object at 0x7da1b039b5e0>]]]]]
break
if <ast.BoolOp object at 0x7da1b039b6d0> begin[:]
<ast.Tuple object at 0x7da1b039b790> assign[=] tuple[[<ast.Call object at 0x7da1b039b850>, <ast.Call object at 0x7da1b039b940>]]
<ast.Tuple object at 0x7da1b039ba60> assign[=] tuple[[<ast.Call object at 0x7da1b039bb20>, <ast.Call object at 0x7da1b039bc10>]]
<ast.Tuple object at 0x7da1b039bd30> assign[=] call[name[pmag].get_azpl, parameter[name[cdec], name[cinc], name[gdec], name[ginc]]]
if <ast.BoolOp object at 0x7da1b039bf40> begin[:]
call[call[name[er_sample_data]][name[sample]]][constant[sample_azimuth]] assign[=] binary_operation[constant[%.1f] <ast.Mod object at 0x7da2590d6920> name[az]]
call[call[name[er_sample_data]][name[sample]]][constant[sample_dip]] assign[=] binary_operation[constant[%.1f] <ast.Mod object at 0x7da2590d6920> name[pl]]
if <ast.BoolOp object at 0x7da1b03c8a60> begin[:]
if <ast.BoolOp object at 0x7da1b03c8b50> begin[:]
pass
if <ast.BoolOp object at 0x7da1b03c8e80> begin[:]
call[name[print], parameter[binary_operation[constant[-E- ERROR: missing geographic data for sample %s. Ignoring tilt-corrected data ] <ast.Mod object at 0x7da2590d6920> name[sample]]]]
if <ast.BoolOp object at 0x7da1b03c90c0> begin[:]
<ast.Tuple object at 0x7da1b03c9180> assign[=] tuple[[<ast.Call object at 0x7da1b03c9240>, <ast.Call object at 0x7da1b03c9330>]]
<ast.Tuple object at 0x7da1b03c9450> assign[=] tuple[[<ast.Call object at 0x7da1b03c9510>, <ast.Call object at 0x7da1b03c9600>]]
if <ast.BoolOp object at 0x7da1b03c9720> begin[:]
<ast.Tuple object at 0x7da1b03c98a0> assign[=] tuple[[<ast.Constant object at 0x7da1b03c9960>, <ast.Constant object at 0x7da1b03c9990>]]
if <ast.BoolOp object at 0x7da1b03c9c00> begin[:]
call[name[print], parameter[binary_operation[constant[-I- calculating dip and dip direction used for tilt correction sample %s. results are put in er_samples.txt] <ast.Mod object at 0x7da2590d6920> name[sample]]]]
call[call[name[er_sample_data]][name[sample]]][constant[sample_bed_dip_direction]] assign[=] binary_operation[constant[%.1f] <ast.Mod object at 0x7da2590d6920> name[DipDir]]
call[call[name[er_sample_data]][name[sample]]][constant[sample_bed_dip]] assign[=] binary_operation[constant[%.1f] <ast.Mod object at 0x7da2590d6920> name[Dip]]
if <ast.BoolOp object at 0x7da1b03ca1d0> begin[:]
call[call[name[er_sample_data]][name[sample]]][constant[magic_method_codes]] assign[=] constant[SO-NO]
if compare[name[sample] in call[name[list], parameter[call[name[er_sample_data].keys, parameter[]]]]] begin[:]
call[call[name[er_sample_data]][name[sample]]][constant[er_sample_name]] assign[=] name[sample]
call[call[name[er_sample_data]][name[sample]]][constant[er_site_name]] assign[=] call[name[MagRec]][constant[er_site_name]]
call[call[name[er_sample_data]][name[sample]]][constant[er_location_name]] assign[=] call[name[MagRec]][constant[er_location_name]]
call[name[MagRecs_this_specimen].append, parameter[name[MagRec]]]
<ast.AugAssign object at 0x7da1b03cab00>
variable[LP_this_specimen] assign[=] list[[]]
for taget[name[MagRec]] in starred[name[MagRecs_this_specimen]] begin[:]
variable[magic_method_codes] assign[=] call[call[name[MagRec]][constant[magic_method_codes]].split, parameter[constant[:]]]
for taget[name[code]] in starred[name[magic_method_codes]] begin[:]
if <ast.BoolOp object at 0x7da1b03caef0> begin[:]
call[name[LP_this_specimen].append, parameter[name[code]]]
if <ast.BoolOp object at 0x7da1b03cb190> begin[:]
call[name[LP_this_specimen].remove, parameter[constant[LP-PI-ZI]]]
call[name[LP_this_specimen].remove, parameter[constant[LP-PI-IZ]]]
call[name[LP_this_specimen].append, parameter[constant[LP-PI-BT-IZZI]]]
for taget[name[MagRec]] in starred[name[MagRecs_this_specimen]] begin[:]
call[name[MagRec]][constant[magic_experiment_name]] assign[=] binary_operation[binary_operation[call[name[MagRec]][constant[er_specimen_name]] + constant[:]] + call[constant[:].join, parameter[name[LP_this_specimen]]]]
variable[magic_method_codes] assign[=] call[call[name[MagRec]][constant[magic_method_codes]].split, parameter[constant[:]]]
variable[LT] assign[=] constant[]
for taget[name[code]] in starred[name[magic_method_codes]] begin[:]
if compare[call[name[code]][<ast.Slice object at 0x7da1b03cbd00>] equal[==] constant[LT-]] begin[:]
variable[LT] assign[=] name[code]
break
call[name[MagRec]][constant[magic_method_codes]] assign[=] binary_operation[binary_operation[name[LT] + constant[:]] + call[constant[:].join, parameter[name[LP_this_specimen]]]]
call[name[MagRecs].append, parameter[name[MagRec]]]
variable[MagRecs_fixed] assign[=] call[name[merge_pmag_recs], parameter[name[MagRecs]]]
call[name[pmag].magic_write, parameter[name[meas_file], name[MagRecs_fixed], constant[magic_measurements]]]
call[name[print], parameter[binary_operation[constant[-I- MagIC file is saved in %s] <ast.Mod object at 0x7da2590d6920> name[meas_file]]]]
variable[ErSamplesRecs] assign[=] list[[]]
variable[samples] assign[=] call[name[list], parameter[call[name[er_sample_data].keys, parameter[]]]]
call[name[samples].sort, parameter[]]
for taget[name[sample]] in starred[name[samples]] begin[:]
call[name[ErSamplesRecs].append, parameter[call[name[er_sample_data]][name[sample]]]]
variable[ErSamplesRecs_fixed] assign[=] call[name[merge_pmag_recs], parameter[name[ErSamplesRecs]]]
call[name[pmag].magic_write, parameter[name[samp_file], name[ErSamplesRecs_fixed], constant[er_samples]]]
return[tuple[[<ast.Constant object at 0x7da1b023cd90>, <ast.Name object at 0x7da1b023cdc0>]]] | keyword[def] identifier[main] ( identifier[command_line] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[sort_magic_file] ( identifier[path] , identifier[ignore_lines_n] , identifier[sort_by_this_name] ):
literal[string]
identifier[DATA] ={}
identifier[fin] = identifier[open] ( identifier[path] , literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ignore_lines_n] ):
identifier[fin] . identifier[readline] ()
identifier[line] = identifier[fin] . identifier[readline] ()
identifier[header] = identifier[line] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[line] keyword[in] identifier[fin] . identifier[readlines] ():
keyword[if] identifier[line] [ literal[int] ]== literal[string] :
keyword[continue]
identifier[tmp_data] ={}
identifier[tmp_line] = identifier[line] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[tmp_line] )):
keyword[if] identifier[i] >= identifier[len] ( identifier[header] ):
keyword[continue]
identifier[tmp_data] [ identifier[header] [ identifier[i] ]]= identifier[tmp_line] [ identifier[i] ]
identifier[DATA] [ identifier[tmp_data] [ identifier[sort_by_this_name] ]]= identifier[tmp_data]
identifier[fin] . identifier[close] ()
keyword[return] ( identifier[DATA] )
keyword[def] identifier[read_generic_file] ( identifier[path] , identifier[average_replicates] ):
literal[string]
identifier[Data] ={}
identifier[Fin] = identifier[open] ( identifier[path] , literal[string] )
identifier[header] = identifier[Fin] . identifier[readline] (). identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
identifier[duplicates] =[]
keyword[for] identifier[line] keyword[in] identifier[Fin] . identifier[readlines] ():
identifier[tmp_data] ={}
identifier[l] = identifier[line] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[min] ( identifier[len] ( identifier[header] ), identifier[len] ( identifier[l] ))):
identifier[tmp_data] [ identifier[header] [ identifier[i] ]]= identifier[l] [ identifier[i] ]
identifier[specimen] = identifier[tmp_data] [ literal[string] ]
keyword[if] identifier[specimen] keyword[not] keyword[in] identifier[list] ( identifier[Data] . identifier[keys] ()):
identifier[Data] [ identifier[specimen] ]=[]
identifier[Data] [ identifier[specimen] ]. identifier[append] ( identifier[tmp_data] )
keyword[for] identifier[specimen] keyword[in] identifier[list] ( identifier[Data] . identifier[keys] ()):
identifier[x] = identifier[len] ( identifier[Data] [ identifier[specimen] ])- literal[int]
identifier[new_data] =[]
identifier[duplicates] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[x] ):
keyword[while] identifier[i] < identifier[len] ( identifier[Data] [ identifier[specimen] ]) keyword[and] identifier[Data] [ identifier[specimen] ][ identifier[i] ][ literal[string] ]== identifier[Data] [ identifier[specimen] ][ identifier[i] - literal[int] ][ literal[string] ] keyword[and] identifier[Data] [ identifier[specimen] ][ identifier[i] ][ literal[string] ]== identifier[Data] [ identifier[specimen] ][ identifier[i] - literal[int] ][ literal[string] ]:
identifier[duplicates] . identifier[append] ( identifier[Data] [ identifier[specimen] ][ identifier[i] ])
keyword[del] ( identifier[Data] [ identifier[specimen] ][ identifier[i] ])
keyword[if] identifier[len] ( identifier[duplicates] )> literal[int] :
keyword[if] identifier[average_replicates] :
identifier[duplicates] . identifier[append] ( identifier[Data] [ identifier[specimen] ][ identifier[i] - literal[int] ])
identifier[Data] [ identifier[specimen] ][ identifier[i] - literal[int] ]= identifier[average_duplicates] ( identifier[duplicates] )
identifier[print] ( literal[string] %( identifier[len] ( identifier[duplicates] ), identifier[specimen] , identifier[duplicates] [- literal[int] ][ literal[string] ]))
identifier[duplicates] =[]
keyword[else] :
identifier[Data] [ identifier[specimen] ][ identifier[i] - literal[int] ]= identifier[duplicates] [- literal[int] ]
identifier[print] ( literal[string] %( identifier[len] ( identifier[duplicates] ), identifier[specimen] , identifier[duplicates] [- literal[int] ][ literal[string] ]))
identifier[duplicates] =[]
keyword[if] identifier[i] == identifier[len] ( identifier[Data] [ identifier[specimen] ])- literal[int] :
keyword[break]
keyword[return] ( identifier[Data] )
keyword[def] identifier[average_duplicates] ( identifier[duplicates] ):
literal[string]
identifier[carts_s] , identifier[carts_g] , identifier[carts_t] =[],[],[]
keyword[for] identifier[rec] keyword[in] identifier[duplicates] :
identifier[moment] = identifier[float] ( identifier[rec] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()) keyword[and] literal[string] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()):
keyword[if] identifier[rec] [ literal[string] ]!= literal[string] keyword[and] identifier[rec] [ literal[string] ]!= literal[string] :
identifier[dec_s] = identifier[float] ( identifier[rec] [ literal[string] ])
identifier[inc_s] = identifier[float] ( identifier[rec] [ literal[string] ])
identifier[cart_s] = identifier[pmag] . identifier[dir2cart] ([ identifier[dec_s] , identifier[inc_s] , identifier[moment] ])
identifier[carts_s] . identifier[append] ( identifier[cart_s] )
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()) keyword[and] literal[string] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()):
keyword[if] identifier[rec] [ literal[string] ]!= literal[string] keyword[and] identifier[rec] [ literal[string] ]!= literal[string] :
identifier[dec_g] = identifier[float] ( identifier[rec] [ literal[string] ])
identifier[inc_g] = identifier[float] ( identifier[rec] [ literal[string] ])
identifier[cart_g] = identifier[pmag] . identifier[dir2cart] ([ identifier[dec_g] , identifier[inc_g] , identifier[moment] ])
identifier[carts_g] . identifier[append] ( identifier[cart_g] )
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()) keyword[and] literal[string] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()):
keyword[if] identifier[rec] [ literal[string] ]!= literal[string] keyword[and] identifier[rec] [ literal[string] ]!= literal[string] :
identifier[dec_t] = identifier[float] ( identifier[rec] [ literal[string] ])
identifier[inc_t] = identifier[float] ( identifier[rec] [ literal[string] ])
identifier[cart_t] = identifier[pmag] . identifier[dir2cart] ([ identifier[dec_t] , identifier[inc_t] , identifier[moment] ])
identifier[carts_t] . identifier[append] ( identifier[cart_t] )
keyword[if] identifier[len] ( identifier[carts_s] )> literal[int] :
identifier[carts] = identifier[scipy] . identifier[array] ( identifier[carts_s] )
identifier[x_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[y_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[z_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[mean_dir] = identifier[pmag] . identifier[cart2dir] ([ identifier[x_mean] , identifier[y_mean] , identifier[z_mean] ])
identifier[mean_dec_s] = literal[string] % identifier[mean_dir] [ literal[int] ]
identifier[mean_inc_s] = literal[string] % identifier[mean_dir] [ literal[int] ]
identifier[mean_moment] = literal[string] % identifier[mean_dir] [ literal[int] ]
keyword[else] :
identifier[mean_dec_s] , identifier[mean_inc_s] = literal[string] , literal[string]
keyword[if] identifier[len] ( identifier[carts_g] )> literal[int] :
identifier[carts] = identifier[scipy] . identifier[array] ( identifier[carts_g] )
identifier[x_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[y_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[z_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[mean_dir] = identifier[pmag] . identifier[cart2dir] ([ identifier[x_mean] , identifier[y_mean] , identifier[z_mean] ])
identifier[mean_dec_g] = literal[string] % identifier[mean_dir] [ literal[int] ]
identifier[mean_inc_g] = literal[string] % identifier[mean_dir] [ literal[int] ]
identifier[mean_moment] = literal[string] % identifier[mean_dir] [ literal[int] ]
keyword[else] :
identifier[mean_dec_g] , identifier[mean_inc_g] = literal[string] , literal[string]
keyword[if] identifier[len] ( identifier[carts_t] )> literal[int] :
identifier[carts] = identifier[scipy] . identifier[array] ( identifier[carts_t] )
identifier[x_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[y_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[z_mean] = identifier[scipy] . identifier[mean] ( identifier[carts] [:, literal[int] ])
identifier[mean_dir] = identifier[pmag] . identifier[cart2dir] ([ identifier[x_mean] , identifier[y_mean] , identifier[z_mean] ])
identifier[mean_dec_t] = literal[string] % identifier[mean_dir] [ literal[int] ]
identifier[mean_inc_t] = literal[string] % identifier[mean_dir] [ literal[int] ]
identifier[mean_moment] = literal[string] % identifier[mean_dir] [ literal[int] ]
keyword[else] :
identifier[mean_dec_t] , identifier[mean_inc_t] = literal[string] , literal[string]
identifier[meanrec] ={}
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[duplicates] [ literal[int] ]. identifier[keys] ()):
keyword[if] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[continue]
keyword[else] :
identifier[meanrec] [ identifier[key] ]= identifier[duplicates] [ literal[int] ][ identifier[key] ]
identifier[meanrec] [ literal[string] ]= identifier[mean_dec_s]
identifier[meanrec] [ literal[string] ]= identifier[mean_dec_g]
identifier[meanrec] [ literal[string] ]= identifier[mean_dec_t]
identifier[meanrec] [ literal[string] ]= identifier[mean_inc_s]
identifier[meanrec] [ literal[string] ]= identifier[mean_inc_g]
identifier[meanrec] [ literal[string] ]= identifier[mean_inc_t]
identifier[meanrec] [ literal[string] ]= identifier[mean_moment]
keyword[return] identifier[meanrec]
keyword[def] identifier[get_upper_level_name] ( identifier[name] , identifier[nc] ):
literal[string]
keyword[if] identifier[float] ( identifier[nc] [ literal[int] ])== literal[int] :
keyword[if] identifier[float] ( identifier[nc] [ literal[int] ])!= literal[int] :
identifier[number_of_char] = identifier[int] ( identifier[nc] [ literal[int] ])
identifier[high_name] = identifier[name] [: identifier[number_of_char] ]
keyword[else] :
identifier[high_name] = identifier[name]
keyword[elif] identifier[float] ( identifier[nc] [ literal[int] ])== literal[int] :
keyword[if] identifier[float] ( identifier[nc] [ literal[int] ])!= literal[int] :
identifier[number_of_char] = identifier[int] ( identifier[nc] [ literal[int] ])*- literal[int]
identifier[high_name] = identifier[name] [: identifier[number_of_char] ]
keyword[else] :
identifier[high_name] = identifier[name]
keyword[elif] identifier[float] ( identifier[nc] [ literal[int] ])== literal[int] :
identifier[d] = identifier[str] ( identifier[nc] [ literal[int] ])
identifier[name_splitted] = identifier[name] . identifier[split] ( identifier[d] )
keyword[if] identifier[len] ( identifier[name_splitted] )== literal[int] :
identifier[high_name] = identifier[name_splitted] [ literal[int] ]
keyword[else] :
identifier[high_name] = identifier[d] . identifier[join] ( identifier[name_splitted] [:- literal[int] ])
keyword[else] :
identifier[high_name] = identifier[name]
keyword[return] identifier[high_name]
keyword[def] identifier[merge_pmag_recs] ( identifier[old_recs] ):
identifier[recs] ={}
identifier[recs] = identifier[copy] . identifier[deepcopy] ( identifier[old_recs] )
identifier[headers] =[]
keyword[for] identifier[rec] keyword[in] identifier[recs] :
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()):
keyword[if] identifier[key] keyword[not] keyword[in] identifier[headers] :
identifier[headers] . identifier[append] ( identifier[key] )
keyword[for] identifier[rec] keyword[in] identifier[recs] :
keyword[for] identifier[header] keyword[in] identifier[headers] :
keyword[if] identifier[header] keyword[not] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()):
identifier[rec] [ identifier[header] ]= literal[string]
keyword[return] identifier[recs]
identifier[experiment] = literal[string]
identifier[sample_nc] =[ literal[int] , literal[int] ]
identifier[site_nc] =[ literal[int] , literal[int] ]
identifier[meas_file] = literal[string]
identifier[labfield] = literal[int]
keyword[if] identifier[command_line] :
identifier[args] = identifier[sys] . identifier[argv]
identifier[user] = literal[string]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[print] ( identifier[main] . identifier[__doc__] )
keyword[return] keyword[False]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[user] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[else] :
identifier[user] = literal[string]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[meas_file] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[samp_file] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[else] :
identifier[samp_file] = literal[string]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[magfile] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[labfield] = identifier[float] ( identifier[args] [ identifier[ind] + literal[int] ])* literal[int]
identifier[labfield_phi] = identifier[float] ( identifier[args] [ identifier[ind] + literal[int] ])
identifier[labfield_theta] = identifier[float] ( identifier[args] [ identifier[ind] + literal[int] ])
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[experiment] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[sample_nc] =[]
identifier[sample_nc] . identifier[append] ( identifier[args] [ identifier[ind] + literal[int] ])
identifier[sample_nc] . identifier[append] ( identifier[args] [ identifier[ind] + literal[int] ])
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[site_nc] =[]
identifier[site_nc] . identifier[append] ( identifier[args] [ identifier[ind] + literal[int] ])
identifier[site_nc] . identifier[append] ( identifier[args] [ identifier[ind] + literal[int] ])
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[er_location_name] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[else] :
identifier[er_location_name] = literal[string]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[noave] = literal[int]
keyword[else] :
identifier[noave] = literal[int]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[WD] = identifier[args] [ identifier[ind] + literal[int] ]
identifier[os] . identifier[chdir] ( identifier[WD] )
keyword[if] keyword[not] identifier[command_line] :
identifier[user] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[meas_file] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[samp_file] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[magfile] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[labfield] = identifier[int] ( identifier[kwargs] . identifier[get] ( literal[string] , literal[int] ))
keyword[if] identifier[labfield] :
identifier[labfield] *= literal[int]
identifier[labfield_phi] = identifier[int] ( identifier[kwargs] . identifier[get] ( literal[string] , literal[int] ))
identifier[labfield_theta] = identifier[int] ( identifier[kwargs] . identifier[get] ( literal[string] , literal[int] ))
identifier[experiment] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[cooling_times] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[sample_nc] = identifier[kwargs] . identifier[get] ( literal[string] ,[ literal[int] , literal[int] ])
identifier[site_nc] = identifier[kwargs] . identifier[get] ( literal[string] ,[ literal[int] , literal[int] ])
identifier[er_location_name] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[noave] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[WD] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[magfile] :
keyword[try] :
identifier[input] = identifier[open] ( identifier[magfile] , literal[string] )
keyword[except] :
identifier[print] ( literal[string] , identifier[magfile] )
keyword[return] keyword[False] , literal[string]
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[main] . identifier[__doc__] )
keyword[return] keyword[False] , literal[string]
keyword[if] keyword[not] identifier[experiment] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[main] . identifier[__doc__] )
keyword[return] keyword[False] , literal[string]
keyword[if] identifier[experiment] == literal[string] :
keyword[if] identifier[command_line] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[atrm_n_pos] = identifier[int] ( identifier[args] [ identifier[ind] + literal[int] ])
keyword[else] :
identifier[atrm_n_pos] = literal[int]
keyword[if] identifier[experiment] == literal[string] :
keyword[if] identifier[command_line] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[aarm_n_pos] = identifier[int] ( identifier[args] [ identifier[ind] + literal[int] ])
keyword[else] :
identifier[aarm_n_pos] = literal[int]
keyword[if] identifier[experiment] == literal[string] :
keyword[if] identifier[command_line] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[cooling_times] = identifier[args] [ identifier[ind] + literal[int] ]
identifier[cooling_times_list] = identifier[cooling_times] . identifier[split] ( literal[string] )
identifier[er_sample_data] ={}
keyword[try] :
identifier[er_sample_data] = identifier[sort_magic_file] ( identifier[samp_file] , literal[int] , literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[except] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[if] identifier[noave] :
identifier[mag_data] = identifier[read_generic_file] ( identifier[magfile] , keyword[False] )
keyword[else] :
identifier[mag_data] = identifier[read_generic_file] ( identifier[magfile] , keyword[True] )
identifier[ErSamplesRecs] =[]
identifier[MagRecs] =[]
identifier[specimens_list] = identifier[list] ( identifier[mag_data] . identifier[keys] ())
identifier[specimens_list] . identifier[sort] ()
keyword[for] identifier[specimen] keyword[in] identifier[specimens_list] :
identifier[measurement_running_number] = literal[int]
identifier[this_specimen_treatments] =[]
identifier[MagRecs_this_specimen] =[]
identifier[LP_this_specimen] =[]
identifier[IZ] , identifier[ZI] = literal[int] , literal[int]
keyword[for] identifier[meas_line] keyword[in] identifier[mag_data] [ identifier[specimen] ]:
identifier[MagRec] ={}
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= identifier[meas_line] [ literal[string] ]
identifier[MagRec] [ literal[string] ]= identifier[get_upper_level_name] ( identifier[MagRec] [ literal[string] ], identifier[sample_nc] )
identifier[MagRec] [ literal[string] ]= identifier[get_upper_level_name] ( identifier[MagRec] [ literal[string] ], identifier[site_nc] )
identifier[MagRec] [ literal[string] ]= identifier[er_location_name]
identifier[MagRec] [ literal[string] ]= identifier[user]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] % identifier[measurement_running_number]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[meas_line] [ literal[string] ])* literal[int] )
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[treatment] =[]
identifier[treatment_code] = identifier[str] ( identifier[meas_line] [ literal[string] ]). identifier[split] ( literal[string] )
identifier[treatment] . identifier[append] ( identifier[float] ( identifier[treatment_code] [ literal[int] ]))
keyword[if] identifier[len] ( identifier[treatment_code] )== literal[int] :
identifier[treatment] . identifier[append] ( literal[int] )
keyword[else] :
identifier[treatment] . identifier[append] ( identifier[float] ( identifier[treatment_code] [ literal[int] ]))
keyword[if] identifier[experiment] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[float] ( identifier[treatment] [ literal[int] ])== literal[int] :
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[elif] keyword[not] identifier[labfield] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[else] :
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[labfield] ))
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[labfield_phi] ))
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[labfield_theta] ))
keyword[else] :
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[if] identifier[experiment] == literal[string] :
keyword[if] identifier[meas_line] [ literal[string] ]== literal[string] :
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[treatment] [ literal[int] ]* literal[int] )
keyword[elif] identifier[meas_line] [ literal[string] ]== literal[string] :
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[else] :
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[treatment] [ literal[int] ]+ literal[int] )
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[else] :
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[treatment] [ literal[int] ]+ literal[int] )
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[if] identifier[float] ( identifier[meas_line] [ literal[string] ])== literal[int] :
identifier[LT] = literal[string]
identifier[LP] = literal[string]
keyword[elif] identifier[experiment] == literal[string] :
identifier[LP] = literal[string]
keyword[if] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
keyword[elif] identifier[treatment] [ literal[int] ]== literal[int] keyword[or] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
keyword[elif] identifier[treatment] [ literal[int] ]== literal[int] keyword[or] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
identifier[LP] = identifier[LP] + literal[string] + literal[string]
keyword[elif] identifier[treatment] [ literal[int] ]== literal[int] keyword[or] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
identifier[LP] = identifier[LP] + literal[string] + literal[string]
keyword[elif] identifier[treatment] [ literal[int] ]== literal[int] keyword[or] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
identifier[LP] = identifier[LP] + literal[string] + literal[string]
keyword[elif] identifier[treatment] [ literal[int] ]== literal[int] keyword[or] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
identifier[LP] = identifier[LP] + literal[string] + literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] %(( identifier[float] ( identifier[labfield_phi] )+ literal[int] )% literal[int] )
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[labfield_theta] )*- literal[int] )
keyword[else] :
identifier[print] ( literal[string] %( identifier[meas_line] [ literal[string] ], identifier[meas_line] [ literal[string] ]))
identifier[MagRec] ={}
keyword[continue]
identifier[this_specimen_treatments] . identifier[append] ( identifier[float] ( identifier[meas_line] [ literal[string] ]))
keyword[if] identifier[LT] == literal[string] :
keyword[if] identifier[float] ( identifier[treatment] [ literal[int] ]+ literal[int] ) keyword[in] identifier[this_specimen_treatments] :
identifier[LP] = identifier[LP] + literal[string] + literal[string]
keyword[if] identifier[LT] == literal[string] :
keyword[if] identifier[float] ( identifier[treatment] [ literal[int] ]+ literal[int] ) keyword[in] identifier[this_specimen_treatments] :
identifier[LP] = identifier[LP] + literal[string] + literal[string]
keyword[elif] literal[string] keyword[in] identifier[experiment] :
keyword[if] identifier[meas_line] [ literal[string] ]== literal[string] :
identifier[LT] = literal[string]
identifier[LP] = literal[string]
keyword[else] :
identifier[LT] = literal[string]
identifier[LP] = literal[string]
keyword[elif] identifier[experiment] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[experiment] == literal[string] :
identifier[LP] = literal[string]
identifier[n_pos] = identifier[atrm_n_pos]
keyword[if] identifier[n_pos] != literal[int] :
identifier[print] ( literal[string] % identifier[n_pos] )
keyword[continue]
keyword[if] identifier[experiment] == literal[string] :
identifier[LP] = literal[string]
identifier[n_pos] = identifier[aarm_n_pos]
keyword[if] identifier[n_pos] != literal[int] :
identifier[print] ( literal[string] % identifier[n_pos] )
keyword[continue]
keyword[if] identifier[treatment] [ literal[int] ]== literal[int] :
keyword[if] identifier[experiment] == literal[string] :
identifier[LT] = literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[treatment] [ literal[int] ]+ literal[int] )
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[else] :
identifier[LT] = literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[treatment] [ literal[int] ]* literal[int] )
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[else] :
keyword[if] identifier[experiment] == literal[string] :
keyword[if] identifier[float] ( identifier[treatment] [ literal[int] ])== literal[int] keyword[or] identifier[float] ( identifier[treatment] [ literal[int] ])== literal[int] :
identifier[LT] = literal[string]
keyword[else] :
identifier[LT] = literal[string]
keyword[else] :
identifier[LT] = literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[labfield] ))
identifier[tdec] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[tinc] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ,- literal[int] , literal[int] , literal[int] , literal[int] ]
keyword[if] identifier[treatment] [ literal[int] ]< literal[int] :
identifier[ipos_code] = identifier[int] ( identifier[treatment] [ literal[int] ])- literal[int]
keyword[else] :
identifier[ipos_code] = identifier[int] ( identifier[old_div] ( identifier[treatment] [ literal[int] ], literal[int] ))- literal[int]
keyword[if] identifier[meas_line] [ literal[string] ]!= literal[string] :
identifier[DEC] = identifier[float] ( identifier[meas_line] [ literal[string] ])
identifier[INC] = identifier[float] ( identifier[meas_line] [ literal[string] ])
keyword[elif] identifier[meas_line] [ literal[string] ]!= literal[string] :
identifier[DEC] = identifier[float] ( identifier[meas_line] [ literal[string] ])
identifier[INC] = identifier[float] ( identifier[meas_line] [ literal[string] ])
keyword[elif] identifier[meas_line] [ literal[string] ]!= literal[string] :
identifier[DEC] = identifier[float] ( identifier[meas_line] [ literal[string] ])
identifier[INC] = identifier[float] ( identifier[meas_line] [ literal[string] ])
keyword[if] identifier[DEC] < literal[int] keyword[and] identifier[DEC] >- literal[int] :
identifier[DEC] = literal[int] + identifier[DEC]
keyword[if] identifier[INC] < literal[int] keyword[and] identifier[INC] >- literal[int] :
keyword[if] identifier[DEC] > literal[int] keyword[or] identifier[DEC] < literal[int] : identifier[ipos_guess] = literal[int]
keyword[if] identifier[DEC] > literal[int] keyword[and] identifier[DEC] < literal[int] : identifier[ipos_guess] = literal[int]
keyword[if] identifier[DEC] > literal[int] keyword[and] identifier[DEC] < literal[int] : identifier[ipos_guess] = literal[int]
keyword[if] identifier[DEC] > literal[int] keyword[and] identifier[DEC] < literal[int] : identifier[ipos_guess] = literal[int]
keyword[else] :
keyword[if] identifier[INC] > literal[int] : identifier[ipos_guess] = literal[int]
keyword[if] identifier[INC] <- literal[int] : identifier[ipos_guess] = literal[int]
identifier[ipos] = identifier[ipos_guess]
keyword[if] identifier[treatment] [ literal[int] ]!= literal[int] keyword[and] identifier[treatment] [ literal[int] ]!= literal[int] :
keyword[if] identifier[ipos_guess] != identifier[ipos_code] :
identifier[print] ( literal[string] %( identifier[specimen] , identifier[meas_line] [ literal[string] ]))
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[tdec] [ identifier[ipos] ])
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[tinc] [ identifier[ipos] ])
keyword[elif] identifier[experiment] == literal[string] :
identifier[cooling_times_list]
identifier[LP] = literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[treatment] [ literal[int] ])+ literal[int] )
keyword[if] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[else] :
keyword[if] identifier[treatment] [ literal[int] ]== literal[int] :
identifier[LT] = literal[string]
keyword[else] :
identifier[LT] = literal[string]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[labfield] )
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[labfield_phi] )
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[labfield_theta] )
identifier[indx] = identifier[int] ( identifier[treatment] [ literal[int] ])- literal[int]
keyword[if] identifier[indx] == literal[int] :
identifier[cooling_time] = identifier[cooling_times_list] [- literal[int] ]
keyword[else] :
identifier[cooling_time] = identifier[cooling_times_list] [ identifier[indx] ]
identifier[MagRec] [ literal[string] ]= literal[string] + literal[string] + identifier[cooling_time] + literal[string] + literal[string]
keyword[elif] literal[string] keyword[in] identifier[experiment] :
identifier[print] ( literal[string] )
identifier[MagRec] [ literal[string] ]= identifier[LT] + literal[string] + identifier[LP]
identifier[sample] = identifier[MagRec] [ literal[string] ]
identifier[found_sample_azimuth] , identifier[found_sample_dip] , identifier[found_sample_bed_dip_direction] , identifier[found_sample_bed_dip] = keyword[False] , keyword[False] , keyword[False] , keyword[False]
keyword[if] identifier[sample] keyword[in] identifier[list] ( identifier[er_sample_data] . identifier[keys] ()):
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[er_sample_data] [ identifier[sample] ]. identifier[keys] ()) keyword[and] identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]!= literal[string] :
identifier[sample_azimuth] = identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])
identifier[found_sample_azimuth] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[er_sample_data] [ identifier[sample] ]. identifier[keys] ()) keyword[and] identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]!= literal[string] :
identifier[sample_dip] = identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])
identifier[found_sample_dip] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[er_sample_data] [ identifier[sample] ]. identifier[keys] ()) keyword[and] identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]!= literal[string] :
identifier[sample_bed_dip_direction] = identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])
identifier[found_sample_bed_dip_direction] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[er_sample_data] [ identifier[sample] ]. identifier[keys] ()) keyword[and] identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]!= literal[string] :
identifier[sample_bed_dip] = identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])
identifier[found_sample_bed_dip] = keyword[True]
keyword[else] :
identifier[er_sample_data] [ identifier[sample] ]={}
identifier[found_s] , identifier[found_geo] , identifier[found_tilt] = keyword[False] , keyword[False] , keyword[False]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[meas_line] . identifier[keys] ()) keyword[and] literal[string] keyword[in] identifier[list] ( identifier[meas_line] . identifier[keys] ()):
keyword[if] identifier[meas_line] [ literal[string] ]!= literal[string] keyword[and] identifier[meas_line] [ literal[string] ]!= literal[string] :
identifier[found_s] = keyword[True]
identifier[MagRec] [ literal[string] ]= identifier[meas_line] [ literal[string] ]
identifier[MagRec] [ literal[string] ]= identifier[meas_line] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[meas_line] . identifier[keys] ()) keyword[and] literal[string] keyword[in] identifier[list] ( identifier[meas_line] . identifier[keys] ()):
keyword[if] identifier[meas_line] [ literal[string] ]!= literal[string] keyword[and] identifier[meas_line] [ literal[string] ]!= literal[string] :
identifier[found_geo] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[meas_line] . identifier[keys] ()) keyword[and] literal[string] keyword[in] identifier[list] ( identifier[meas_line] . identifier[keys] ()):
keyword[if] identifier[meas_line] [ literal[string] ]!= literal[string] keyword[and] identifier[meas_line] [ literal[string] ]!= literal[string] :
identifier[found_tilt] = keyword[True]
keyword[if] identifier[found_geo] keyword[and] keyword[not] identifier[found_s] :
identifier[MagRec] [ literal[string] ]= identifier[meas_line] [ literal[string] ]
identifier[MagRec] [ literal[string] ]= identifier[meas_line] [ literal[string] ]
keyword[if] keyword[not] identifier[found_sample_dip] keyword[or] keyword[not] identifier[found_sample_azimuth] :
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= literal[string]
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= literal[string]
keyword[else] :
identifier[sample_azimuth] = identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])
identifier[sample_dip] = identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])
keyword[if] identifier[sample_azimuth] != literal[int] keyword[and] identifier[sample_dip] != literal[int] :
identifier[print] ( literal[string] )
keyword[if] keyword[not] identifier[found_geo] keyword[and] keyword[not] identifier[found_s] :
identifier[print] ( literal[string] %( identifier[sample] , identifier[specimen] ))
keyword[break]
keyword[if] identifier[found_geo] keyword[and] identifier[found_s] :
identifier[cdec] , identifier[cinc] = identifier[float] ( identifier[meas_line] [ literal[string] ]), identifier[float] ( identifier[meas_line] [ literal[string] ])
identifier[gdec] , identifier[ginc] = identifier[float] ( identifier[meas_line] [ literal[string] ]), identifier[float] ( identifier[meas_line] [ literal[string] ])
identifier[az] , identifier[pl] = identifier[pmag] . identifier[get_azpl] ( identifier[cdec] , identifier[cinc] , identifier[gdec] , identifier[ginc] )
keyword[if] keyword[not] identifier[found_sample_dip] keyword[or] keyword[not] identifier[found_sample_azimuth] :
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= literal[string] % identifier[az]
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= literal[string] % identifier[pl]
keyword[else] :
keyword[if] identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])!= identifier[az] :
identifier[print] ( literal[string] % identifier[sample] )
keyword[if] identifier[float] ( identifier[er_sample_data] [ identifier[sample] ][ literal[string] ])!= identifier[pl] :
identifier[print] ( literal[string] % identifier[sample] )
keyword[if] keyword[not] identifier[found_geo] keyword[and] identifier[found_s] :
keyword[if] identifier[found_sample_dip] keyword[and] identifier[found_sample_azimuth] :
keyword[pass]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[experiment] :
identifier[print] ( literal[string] % identifier[sample] )
keyword[if] identifier[found_tilt] keyword[and] keyword[not] identifier[found_geo] :
identifier[print] ( literal[string] % identifier[sample] )
keyword[if] identifier[found_tilt] keyword[and] identifier[found_geo] :
identifier[dec_geo] , identifier[inc_geo] = identifier[float] ( identifier[meas_line] [ literal[string] ]), identifier[float] ( identifier[meas_line] [ literal[string] ])
identifier[dec_tilt] , identifier[inc_tilt] = identifier[float] ( identifier[meas_line] [ literal[string] ]), identifier[float] ( identifier[meas_line] [ literal[string] ])
keyword[if] identifier[dec_geo] == identifier[dec_tilt] keyword[and] identifier[inc_geo] == identifier[inc_tilt] :
identifier[DipDir] , identifier[Dip] = literal[int] , literal[int]
keyword[else] :
identifier[DipDir] , identifier[Dip] = identifier[pmag] . identifier[get_tilt] ( identifier[dec_geo] , identifier[inc_geo] , identifier[dec_tilt] , identifier[inc_tilt] )
keyword[if] keyword[not] identifier[found_sample_bed_dip_direction] keyword[or] keyword[not] identifier[found_sample_bed_dip] :
identifier[print] ( literal[string] % identifier[sample] )
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= literal[string] % identifier[DipDir]
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= literal[string] % identifier[Dip]
keyword[if] identifier[found_tilt] keyword[or] identifier[found_geo] :
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= literal[string]
keyword[if] identifier[sample] keyword[in] identifier[list] ( identifier[er_sample_data] . identifier[keys] ()):
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= identifier[sample]
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= identifier[MagRec] [ literal[string] ]
identifier[er_sample_data] [ identifier[sample] ][ literal[string] ]= identifier[MagRec] [ literal[string] ]
identifier[MagRecs_this_specimen] . identifier[append] ( identifier[MagRec] )
identifier[measurement_running_number] += literal[int]
identifier[LP_this_specimen] =[]
keyword[for] identifier[MagRec] keyword[in] identifier[MagRecs_this_specimen] :
identifier[magic_method_codes] = identifier[MagRec] [ literal[string] ]. identifier[split] ( literal[string] )
keyword[for] identifier[code] keyword[in] identifier[magic_method_codes] :
keyword[if] literal[string] keyword[in] identifier[code] keyword[and] identifier[code] keyword[not] keyword[in] identifier[LP_this_specimen] :
identifier[LP_this_specimen] . identifier[append] ( identifier[code] )
keyword[if] literal[string] keyword[in] identifier[LP_this_specimen] keyword[and] literal[string] keyword[in] identifier[LP_this_specimen] :
identifier[LP_this_specimen] . identifier[remove] ( literal[string] )
identifier[LP_this_specimen] . identifier[remove] ( literal[string] )
identifier[LP_this_specimen] . identifier[append] ( literal[string] )
keyword[for] identifier[MagRec] keyword[in] identifier[MagRecs_this_specimen] :
identifier[MagRec] [ literal[string] ]= identifier[MagRec] [ literal[string] ]+ literal[string] + literal[string] . identifier[join] ( identifier[LP_this_specimen] )
identifier[magic_method_codes] = identifier[MagRec] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[LT] = literal[string]
keyword[for] identifier[code] keyword[in] identifier[magic_method_codes] :
keyword[if] identifier[code] [: literal[int] ]== literal[string] :
identifier[LT] = identifier[code] ;
keyword[break]
identifier[MagRec] [ literal[string] ]= identifier[LT] + literal[string] + literal[string] . identifier[join] ( identifier[LP_this_specimen] )
identifier[MagRecs] . identifier[append] ( identifier[MagRec] )
identifier[MagRecs_fixed] = identifier[merge_pmag_recs] ( identifier[MagRecs] )
identifier[pmag] . identifier[magic_write] ( identifier[meas_file] , identifier[MagRecs_fixed] , literal[string] )
identifier[print] ( literal[string] % identifier[meas_file] )
identifier[ErSamplesRecs] =[]
identifier[samples] = identifier[list] ( identifier[er_sample_data] . identifier[keys] ())
identifier[samples] . identifier[sort] ()
keyword[for] identifier[sample] keyword[in] identifier[samples] :
identifier[ErSamplesRecs] . identifier[append] ( identifier[er_sample_data] [ identifier[sample] ])
identifier[ErSamplesRecs_fixed] = identifier[merge_pmag_recs] ( identifier[ErSamplesRecs] )
identifier[pmag] . identifier[magic_write] ( identifier[samp_file] , identifier[ErSamplesRecs_fixed] , literal[string] )
keyword[return] keyword[True] , identifier[meas_file] | def main(command_line=True, **kwargs):
"""
NAME
generic_magic.py
DESCRIPTION
converts magnetometer files in generic format to MagIC measurements format
SYNTAX
generic_magic.py [command line options]
OPTIONS
-h
prints the help message and quits.
-usr USER
identify user, default is ""
-f FILE:
specify path to input file, required
-fsa SAMPFILE:
specify the samples file for sample orientation data. default is er_samples.txt
-F FILE
specify output file, default is magic_measurements.txt
-Fsa FILE
specify output file, default is er_samples.txt
-exp EXPERIMENT-TYPE
Demag:
AF and/or Thermal
PI:
paleointenisty thermal experiment (ZI/IZ/IZZI/TT)
ATRM n:
ATRM in n positions (n=6)
AARM n:
AARM in n positions
CR:
cooling rate experiment
The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional)
where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps.
XXX.00 is optional zerofield baseline. XXX.70 is alteration check.
syntax in sio_magic is: -LP CR xxx,yyy,zzz,.....xx -A
where xx, yyy,zzz...xxx are cooling rates in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70
No need to specify the cooling rate for the zerofield
It is important to add to the command line the -A option so the measurements will not be averaged.
But users need to make sure that there are no duplicate meaurements in the file
NLT:
non-linear-TRM experiment
-samp X Y
specimen-sample naming convention.
X determines which kind of convention (initial characters, terminal characters, or delimiter
Y determines how many characters to remove to go from specimen --> sample OR which delimiter to use
X=0 Y=n: specimen is distinguished from sample by n initial characters.
(example: "generic_magic.py -samp 0 4"
if n=4 then and specimen = mgf13a then sample = mgf13)
X=1 Y=n: specimen is distiguished from sample by n terminate characters.
(example: "generic_magic.py -samp 1 1)
if n=1 then and specimen = mgf13a then sample = mgf13)
X=2 Y=c: specimen is distinguishing from sample by a delimiter.
(example: "generic_magic.py -samp 2 -"
if c=- then and specimen = mgf13-a then sample = mgf13)
default: sample is the same as specimen name
-site X Y
sample-site naming convention.
X determines which kind of convention (initial characters, terminal characters, or delimiter
Y determines how many characters to remove to go from sample --> site OR which delimiter to use
X=0 Y=n: sample is distiguished from site by n initial characters.
(example: "generic_magic.py --site 0 3"
if n=3 then and sample = mgf13 then sample = mgf)
X=1 Y=n: sample is distiguished from site by n terminate characters.
(example: "generic_magic.py --site 1 2"
if n=2 and sample = mgf13 then site = mgf)
X=2 Y=c: specimen is distiguishing from sample by a delimiter.
(example: "generic_magic.py -site 2 -"
if c='-' and sample = 'mgf-13' then site = mgf)
default: site name is the same as sample name
-loc LOCNAM
specify location/study name.
-dc B PHI THETA:
B: dc lab field (in micro tesla)
PHI (declination). takes numbers from 0 to 360
THETA (inclination). takes numbers from -90 to 90
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment.
-A: don't average replicate measurements. Take the last measurement from replicate measurements.
-WD working directory
INPUT
A generic file is a tab-delimited file. Each column should have a header.
The file must include the following headers (the order of the columns is not important):
specimen
string specifying specimen name
treatment:
a number with one or two decimal point (X.Y)
coding for thermal demagnetization:
0.0 or 0 is NRM.
X is temperature in celsius
Y is always 0
coding for AF demagnetization:
0.0 or 0 is NRM.
X is AF peak field in mT
Y is always 0
coding for Thellier-type experiment:
0.0 or 0 is NRM
X is temperature in celsius
Y=0: zerofield
Y=1: infield (IZZI, IZ, ZI, and Thellier protocol- first infield)
Y=2: pTRM check
Y=3: pTRM tail check
Y=4: Additivity check
Y=5: Thellier protocol: second infield
coding for ATRM experiment (6 poitions):
X is temperature in celsius
Y=0: zerofield baseline to be subtracted
Y=1: +x
Y=2: -x
Y=3: +y
Y=4: -y
Y=5: +z
Y=6: -z
Y=7: alteration check
coding for NLT experiment:
X is temperature in celsius
Y=0: zerofield baseline to be subtracted
Y!=0: oven field in microT
coding for CR experiment:
see "OPTIONS" list above
treatment_type:
N: NRM
A: AF
T: Thermal
moment:
magnetic moment in emu !!
In addition, at least one of the following headers are required:
dec_s:
declination in specimen coordinate system (0 to 360)
inc_s:
inclination in specimen coordinate system (-90 to 90)
dec_g:
declination in geographic coordinate system (0 to 360)
inc_g:
inclination in geographic coordinate system (-90 to 90)
dec_t:
declination in tilt-corrected coordinate system (0 to 360)
inc_t:
inclination in tilt-corrected coordinate system (-90 to 90)
"""
#--------------------------------------
# functions
#--------------------------------------
def sort_magic_file(path, ignore_lines_n, sort_by_this_name):
"""
reads a file with headers. Each line is stored as a dictionary following the headers.
Lines are sorted in DATA by the sort_by_this_name header
DATA[sort_by_this_name]=[dictionary1,dictionary2,...]
"""
DATA = {}
fin = open(path, 'r')
#ignore first lines
for i in range(ignore_lines_n):
fin.readline() # depends on [control=['for'], data=[]]
#header
line = fin.readline()
header = line.strip('\n').split('\t')
#print header
for line in fin.readlines():
if line[0] == '#':
continue # depends on [control=['if'], data=[]]
tmp_data = {}
tmp_line = line.strip('\n').split('\t')
#print tmp_line
for i in range(len(tmp_line)):
if i >= len(header):
continue # depends on [control=['if'], data=[]]
tmp_data[header[i]] = tmp_line[i] # depends on [control=['for'], data=['i']]
DATA[tmp_data[sort_by_this_name]] = tmp_data # depends on [control=['for'], data=['line']]
fin.close()
return DATA
def read_generic_file(path, average_replicates):
"""
reads a generic file format. If average_replicates==True average replicate measurements.
Rrturns a Data dictionary with measurements line sorted by specimen
Data[specimen_name][dict1,dict2,...]
"""
Data = {}
Fin = open(path, 'r')
header = Fin.readline().strip('\n').split('\t')
duplicates = []
for line in Fin.readlines():
tmp_data = {}
#found_duplicate=False
l = line.strip('\n').split('\t')
for i in range(min(len(header), len(l))):
tmp_data[header[i]] = l[i] # depends on [control=['for'], data=['i']]
specimen = tmp_data['specimen']
if specimen not in list(Data.keys()):
Data[specimen] = [] # depends on [control=['if'], data=['specimen']]
Data[specimen].append(tmp_data) # depends on [control=['for'], data=['line']]
# search fro duplicates
for specimen in list(Data.keys()):
x = len(Data[specimen]) - 1
new_data = []
duplicates = []
for i in range(1, x):
while i < len(Data[specimen]) and Data[specimen][i]['treatment'] == Data[specimen][i - 1]['treatment'] and (Data[specimen][i]['treatment_type'] == Data[specimen][i - 1]['treatment_type']):
duplicates.append(Data[specimen][i])
del Data[specimen][i] # depends on [control=['while'], data=[]]
if len(duplicates) > 0:
if average_replicates:
duplicates.append(Data[specimen][i - 1])
Data[specimen][i - 1] = average_duplicates(duplicates)
print('-W- WARNING: averaging %i duplicates for specimen %s treatmant %s' % (len(duplicates), specimen, duplicates[-1]['treatment']))
duplicates = [] # depends on [control=['if'], data=[]]
else:
Data[specimen][i - 1] = duplicates[-1]
print('-W- WARNING: found %i duplicates for specimen %s treatmant %s. Taking the last measurement only' % (len(duplicates), specimen, duplicates[-1]['treatment']))
duplicates = [] # depends on [control=['if'], data=[]]
if i == len(Data[specimen]) - 1:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['specimen']]
# if tmp_data['treatment']==Data[specimen][-1]['treatment'] and tmp_data['treatment_type']==Data[specimen][-1]['treatment_type']:
#
## check replicates
#if tmp_data['treatment']==Data[specimen][-1]['treatment'] and tmp_data['treatment_type']==Data[specimen][-1]['treatment_type']:
# #found_duplicate=True
# duplicates.append(Data[specimen][-1])
# duplicates.append(tmp_data)
# del(Data[specimen][-1])
# continue
#else:
# if len(duplicates)>0:
# if average_replicates:
# Data[specimen].append(average_duplicates(duplicates))
# print "-W- WARNING: averaging %i duplicates for specimen %s treatmant %s"%(len(duplicates),specimen,duplicates[-1]['treatment'])
# else:
# Data[specimen].append(duplicates[-1])
# print "-W- WARNING: found %i duplicates for specimen %s treatmant %s. Taking the last measurement only"%(len(duplicates),specimen,duplicates[-1]['treatment'])
# duplicates=[]
# Data[specimen].append(tmp_data)
return Data
def average_duplicates(duplicates):
"""
avarage replicate measurements.
"""
(carts_s, carts_g, carts_t) = ([], [], [])
for rec in duplicates:
moment = float(rec['moment'])
if 'dec_s' in list(rec.keys()) and 'inc_s' in list(rec.keys()):
if rec['dec_s'] != '' and rec['inc_s'] != '':
dec_s = float(rec['dec_s'])
inc_s = float(rec['inc_s'])
cart_s = pmag.dir2cart([dec_s, inc_s, moment])
carts_s.append(cart_s) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'dec_g' in list(rec.keys()) and 'inc_g' in list(rec.keys()):
if rec['dec_g'] != '' and rec['inc_g'] != '':
dec_g = float(rec['dec_g'])
inc_g = float(rec['inc_g'])
cart_g = pmag.dir2cart([dec_g, inc_g, moment])
carts_g.append(cart_g) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'dec_t' in list(rec.keys()) and 'inc_t' in list(rec.keys()):
if rec['dec_t'] != '' and rec['inc_t'] != '':
dec_t = float(rec['dec_t'])
inc_t = float(rec['inc_t'])
cart_t = pmag.dir2cart([dec_t, inc_t, moment])
carts_t.append(cart_t) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rec']]
if len(carts_s) > 0:
carts = scipy.array(carts_s)
x_mean = scipy.mean(carts[:, 0])
y_mean = scipy.mean(carts[:, 1])
z_mean = scipy.mean(carts[:, 2])
mean_dir = pmag.cart2dir([x_mean, y_mean, z_mean])
mean_dec_s = '%.2f' % mean_dir[0]
mean_inc_s = '%.2f' % mean_dir[1]
mean_moment = '%10.3e' % mean_dir[2] # depends on [control=['if'], data=[]]
else:
(mean_dec_s, mean_inc_s) = ('', '')
if len(carts_g) > 0:
carts = scipy.array(carts_g)
x_mean = scipy.mean(carts[:, 0])
y_mean = scipy.mean(carts[:, 1])
z_mean = scipy.mean(carts[:, 2])
mean_dir = pmag.cart2dir([x_mean, y_mean, z_mean])
mean_dec_g = '%.2f' % mean_dir[0]
mean_inc_g = '%.2f' % mean_dir[1]
mean_moment = '%10.3e' % mean_dir[2] # depends on [control=['if'], data=[]]
else:
(mean_dec_g, mean_inc_g) = ('', '')
if len(carts_t) > 0:
carts = scipy.array(carts_t)
x_mean = scipy.mean(carts[:, 0])
y_mean = scipy.mean(carts[:, 1])
z_mean = scipy.mean(carts[:, 2])
mean_dir = pmag.cart2dir([x_mean, y_mean, z_mean])
mean_dec_t = '%.2f' % mean_dir[0]
mean_inc_t = '%.2f' % mean_dir[1]
mean_moment = '%10.3e' % mean_dir[2] # depends on [control=['if'], data=[]]
else:
(mean_dec_t, mean_inc_t) = ('', '')
meanrec = {}
for key in list(duplicates[0].keys()):
if key in ['dec_s', 'inc_s', 'dec_g', 'inc_g', 'dec_t', 'inc_t', 'moment']:
continue # depends on [control=['if'], data=[]]
else:
meanrec[key] = duplicates[0][key] # depends on [control=['for'], data=['key']]
meanrec['dec_s'] = mean_dec_s
meanrec['dec_g'] = mean_dec_g
meanrec['dec_t'] = mean_dec_t
meanrec['inc_s'] = mean_inc_s
meanrec['inc_g'] = mean_inc_g
meanrec['inc_t'] = mean_inc_t
meanrec['moment'] = mean_moment
return meanrec
def get_upper_level_name(name, nc):
"""
get sample/site name from specimen/sample using naming convention
"""
if float(nc[0]) == 0:
if float(nc[1]) != 0:
number_of_char = int(nc[1])
high_name = name[:number_of_char] # depends on [control=['if'], data=[]]
else:
high_name = name # depends on [control=['if'], data=[]]
elif float(nc[0]) == 1:
if float(nc[1]) != 0:
number_of_char = int(nc[1]) * -1
high_name = name[:number_of_char] # depends on [control=['if'], data=[]]
else:
high_name = name # depends on [control=['if'], data=[]]
elif float(nc[0]) == 2:
d = str(nc[1])
name_splitted = name.split(d)
if len(name_splitted) == 1:
high_name = name_splitted[0] # depends on [control=['if'], data=[]]
else:
high_name = d.join(name_splitted[:-1]) # depends on [control=['if'], data=[]]
else:
high_name = name
return high_name
def merge_pmag_recs(old_recs):
recs = {}
recs = copy.deepcopy(old_recs)
headers = []
for rec in recs:
for key in list(rec.keys()):
if key not in headers:
headers.append(key) # depends on [control=['if'], data=['key', 'headers']] # depends on [control=['for'], data=['key']] # depends on [control=['for'], data=['rec']]
for rec in recs:
for header in headers:
if header not in list(rec.keys()):
rec[header] = '' # depends on [control=['if'], data=['header']] # depends on [control=['for'], data=['header']] # depends on [control=['for'], data=['rec']]
return recs
# initialize some variables
experiment = ''
sample_nc = [1, 0]
site_nc = [1, 0]
meas_file = 'magic_measurements.txt'
labfield = 0
#--------------------------------------
# get command line arguments
#--------------------------------------
if command_line:
args = sys.argv
user = ''
if '-h' in args:
print(main.__doc__)
return False # depends on [control=['if'], data=[]]
if '-usr' in args:
ind = args.index('-usr')
user = args[ind + 1] # depends on [control=['if'], data=['args']]
else:
user = ''
if '-F' in args:
ind = args.index('-F')
meas_file = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-Fsa' in args:
ind = args.index('-Fsa')
samp_file = args[ind + 1] # depends on [control=['if'], data=['args']]
else:
samp_file = 'er_samples.txt'
if '-f' in args:
ind = args.index('-f')
magfile = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-dc' in args:
ind = args.index('-dc')
labfield = float(args[ind + 1]) * 1e-06
labfield_phi = float(args[ind + 2])
labfield_theta = float(args[ind + 3]) # depends on [control=['if'], data=['args']]
if '-exp' in args:
ind = args.index('-exp')
experiment = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-samp' in args:
ind = args.index('-samp')
sample_nc = []
sample_nc.append(args[ind + 1])
sample_nc.append(args[ind + 2]) # depends on [control=['if'], data=['args']]
if '-site' in args:
ind = args.index('-site')
site_nc = []
site_nc.append(args[ind + 1])
site_nc.append(args[ind + 2]) # depends on [control=['if'], data=['args']]
if '-loc' in args:
ind = args.index('-loc')
er_location_name = args[ind + 1] # depends on [control=['if'], data=['args']]
else:
er_location_name = ''
if '-A' in args:
noave = 1 # depends on [control=['if'], data=[]]
else:
noave = 0
if '-WD' in args:
ind = args.index('-WD')
WD = args[ind + 1]
os.chdir(WD) # depends on [control=['if'], data=['args']] # depends on [control=['if'], data=[]]
# unpack keyword args if using as module
if not command_line:
user = kwargs.get('user', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
magfile = kwargs.get('magfile', '')
labfield = int(kwargs.get('labfield', 0))
if labfield:
labfield *= 1e-06 # depends on [control=['if'], data=[]]
labfield_phi = int(kwargs.get('labfield_phi', 0))
labfield_theta = int(kwargs.get('labfield_theta', 0))
experiment = kwargs.get('experiment', '')
cooling_times = kwargs.get('cooling_times_list', '')
sample_nc = kwargs.get('sample_nc', [1, 0])
site_nc = kwargs.get('site_nc', [1, 0])
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # 0 is default, means do average
WD = kwargs.get('WD', '.') # depends on [control=['if'], data=[]]
#os.chdir(WD)
# format and validate variables
if magfile:
try:
input = open(magfile, 'r') # depends on [control=['try'], data=[]]
except:
print('bad mag file:', magfile)
return (False, 'bad mag file') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
print('mag_file field is required option')
print(main.__doc__)
return (False, 'mag_file field is required option')
if not experiment:
print('-exp is required option. Please provide experiment type of: Demag, PI, ATRM n (n of positions), CR (see below for format), NLT')
print(main.__doc__)
return (False, '-exp is required option') # depends on [control=['if'], data=[]]
if experiment == 'ATRM':
if command_line:
ind = args.index('ATRM')
atrm_n_pos = int(args[ind + 1]) # depends on [control=['if'], data=[]]
else:
atrm_n_pos = 6 # depends on [control=['if'], data=[]]
if experiment == 'AARM':
if command_line:
ind = args.index('AARM')
aarm_n_pos = int(args[ind + 1]) # depends on [control=['if'], data=[]]
else:
aarm_n_pos = 6 # depends on [control=['if'], data=[]]
if experiment == 'CR':
if command_line:
ind = args.index('CR')
cooling_times = args[ind + 1] # depends on [control=['if'], data=[]]
cooling_times_list = cooling_times.split(',') # depends on [control=['if'], data=[]]
# if not command line, cooling_times_list is already set
#--------------------------------------
# read data from er_samples.txt
#--------------------------------------
#if "-fsa" in args:
# ind=args.index("-fsa")
# er_sample_file=args[ind+1]
#else:
# er_sample_file="er_samples.txt"
er_sample_data = {}
#er_sample_data=sort_magic_file(samp_file,1,'er_sample_name')
try:
er_sample_data = sort_magic_file(samp_file, 1, 'er_sample_name')
print('-I- Found er_samples.txt')
print('-I- sample information will be appended to existing er_samples.txt file') # depends on [control=['try'], data=[]]
except:
print('-I- Cant find file er_samples.txt')
print('-I- sample information will be stored in new er_samples.txt file') # depends on [control=['except'], data=[]]
#--------------------------------------
# read data from generic file
#--------------------------------------
if noave:
mag_data = read_generic_file(magfile, False) # depends on [control=['if'], data=[]]
else:
mag_data = read_generic_file(magfile, True)
#--------------------------------------
# for each specimen get the data, and translate it to MagIC format
#--------------------------------------
ErSamplesRecs = []
MagRecs = []
specimens_list = list(mag_data.keys())
specimens_list.sort()
for specimen in specimens_list:
measurement_running_number = 0
this_specimen_treatments = [] # a list of all treatments
MagRecs_this_specimen = []
LP_this_specimen = [] # a list of all lab protocols
(IZ, ZI) = (0, 0) # counter for IZ and ZI steps
for meas_line in mag_data[specimen]:
#------------------
# trivial MagRec data
#------------------
MagRec = {}
MagRec['er_citation_names'] = 'This study'
MagRec['er_specimen_name'] = meas_line['specimen']
MagRec['er_sample_name'] = get_upper_level_name(MagRec['er_specimen_name'], sample_nc)
MagRec['er_site_name'] = get_upper_level_name(MagRec['er_sample_name'], site_nc)
MagRec['er_location_name'] = er_location_name
MagRec['er_analyst_mail_names'] = user
MagRec['magic_instrument_codes'] = ''
MagRec['measurement_flag'] = 'g'
MagRec['measurement_number'] = '%i' % measurement_running_number
MagRec['measurement_magn_moment'] = '%10.3e' % (float(meas_line['moment']) * 0.001) # in Am^2
MagRec['measurement_temp'] = '273.' # room temp in kelvin
#------------------
# decode treatments from treatment column in the generic file
#------------------
treatment = []
treatment_code = str(meas_line['treatment']).split('.')
treatment.append(float(treatment_code[0]))
if len(treatment_code) == 1:
treatment.append(0) # depends on [control=['if'], data=[]]
else:
treatment.append(float(treatment_code[1]))
#------------------
# lab field direction
#------------------
if experiment in ['PI', 'NLT', 'CR']:
if float(treatment[1]) == 0:
MagRec['treatment_dc_field'] = '0'
MagRec['treatment_dc_field_phi'] = '0'
MagRec['treatment_dc_field_theta'] = '0' # depends on [control=['if'], data=[]]
elif not labfield:
print('-W- WARNING: labfield (-dc) is a required argument for this experiment type')
return (False, 'labfield (-dc) is a required argument for this experiment type') # depends on [control=['if'], data=[]]
else:
MagRec['treatment_dc_field'] = '%8.3e' % float(labfield)
MagRec['treatment_dc_field_phi'] = '%.2f' % float(labfield_phi)
MagRec['treatment_dc_field_theta'] = '%.2f' % float(labfield_theta) # depends on [control=['if'], data=[]]
else:
MagRec['treatment_dc_field'] = ''
MagRec['treatment_dc_field_phi'] = ''
MagRec['treatment_dc_field_theta'] = ''
#------------------
# treatment temperature/peak field
#------------------
if experiment == 'Demag':
if meas_line['treatment_type'] == 'A':
MagRec['treatment_temp'] = '273.'
MagRec['treatment_ac_field'] = '%.3e' % (treatment[0] * 0.001) # depends on [control=['if'], data=[]]
elif meas_line['treatment_type'] == 'N':
MagRec['treatment_temp'] = '273.'
MagRec['treatment_ac_field'] = '' # depends on [control=['if'], data=[]]
else:
MagRec['treatment_temp'] = '%.2f' % (treatment[0] + 273.0)
MagRec['treatment_ac_field'] = '' # depends on [control=['if'], data=[]]
else:
MagRec['treatment_temp'] = '%.2f' % (treatment[0] + 273.0)
MagRec['treatment_ac_field'] = ''
#---------------------
# Lab treatment
# Lab protocol
#---------------------
#---------------------
# Lab treatment and lab protocoal for NRM:
#---------------------
if float(meas_line['treatment']) == 0:
LT = 'LT-NO'
LP = '' # will be filled later after finishing reading all measurements line # depends on [control=['if'], data=[]]
#---------------------
# Lab treatment and lab protocoal for paleointensity experiment
#---------------------
elif experiment == 'PI':
LP = 'LP-PI-TRM'
if treatment[1] == 0:
LT = 'LT-T-Z' # depends on [control=['if'], data=[]]
elif treatment[1] == 1 or treatment[1] == 10: # infield
LT = 'LT-T-I' # depends on [control=['if'], data=[]]
elif treatment[1] == 2 or treatment[1] == 20: # pTRM check
LT = 'LT-PTRM-I'
LP = LP + ':' + 'LP-PI-ALT-PTRM' # depends on [control=['if'], data=[]]
elif treatment[1] == 3 or treatment[1] == 30: # Tail check
LT = 'LT-PTRM-MD'
LP = LP + ':' + 'LP-PI-BT-MD' # depends on [control=['if'], data=[]]
elif treatment[1] == 4 or treatment[1] == 40: # Additivity check
LT = 'LT-PTRM-AC'
LP = LP + ':' + 'LP-PI-BT-MD' # depends on [control=['if'], data=[]]
elif treatment[1] == 5 or treatment[1] == 50: # Thellier protocol, second infield step
LT = 'LT-T-I'
LP = LP + ':' + 'LP-PI-II' # adjust field direction in thellier protocol
MagRec['treatment_dc_field_phi'] = '%.2f' % ((float(labfield_phi) + 180.0) % 360.0)
MagRec['treatment_dc_field_theta'] = '%.2f' % (float(labfield_theta) * -1) # depends on [control=['if'], data=[]]
else:
print('-E- unknown measurement code specimen %s treatmemt %s' % (meas_line['specimen'], meas_line['treatment']))
MagRec = {}
continue
# save all treatment in a list
# we will use this later to distinguidh between ZI / IZ / and IZZI
this_specimen_treatments.append(float(meas_line['treatment']))
if LT == 'LT-T-Z':
if float(treatment[0] + 0.1) in this_specimen_treatments:
LP = LP + ':' + 'LP-PI-IZ' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if LT == 'LT-T-I':
if float(treatment[0] + 0.0) in this_specimen_treatments:
LP = LP + ':' + 'LP-PI-ZI' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#---------------------
# Lab treatment and lab protocoal for demag experiment
#---------------------
elif 'Demag' in experiment:
if meas_line['treatment_type'] == 'A':
LT = 'LT-AF-Z'
LP = 'LP-DIR-AF' # depends on [control=['if'], data=[]]
else:
LT = 'LT-T-Z'
LP = 'LP-DIR-T' # depends on [control=['if'], data=[]]
#---------------------
# Lab treatment and lab protocoal for ATRM experiment
#---------------------
elif experiment in ['ATRM', 'AARM']:
if experiment == 'ATRM':
LP = 'LP-AN-TRM'
n_pos = atrm_n_pos
if n_pos != 6:
print('the program does not support ATRM in %i position.' % n_pos)
continue # depends on [control=['if'], data=['n_pos']] # depends on [control=['if'], data=[]]
if experiment == 'AARM':
#MagRec['treatment_temp']="273."
#MagRec["treatment_ac_field"]=""
LP = 'LP-AN-ARM'
n_pos = aarm_n_pos
if n_pos != 6:
print('the program does not support AARM in %i position.' % n_pos)
continue # depends on [control=['if'], data=['n_pos']] # depends on [control=['if'], data=[]]
if treatment[1] == 0:
if experiment == 'ATRM':
LT = 'LT-T-Z'
MagRec['treatment_temp'] = '%.2f' % (treatment[0] + 273.0)
MagRec['treatment_ac_field'] = '' # depends on [control=['if'], data=[]]
else:
LT = 'LT-AF-Z'
MagRec['treatment_temp'] = '273.'
MagRec['treatment_ac_field'] = '%.3e' % (treatment[0] * 0.001)
MagRec['treatment_dc_field'] = '0'
MagRec['treatment_dc_field_phi'] = '0'
MagRec['treatment_dc_field_theta'] = '0' # depends on [control=['if'], data=[]]
else:
if experiment == 'ATRM':
if float(treatment[1]) == 70 or float(treatment[1]) == 7: # alteration check as final measurement
LT = 'LT-PTRM-I' # depends on [control=['if'], data=[]]
else:
LT = 'LT-T-I' # depends on [control=['if'], data=[]]
else:
LT = 'LT-AF-I'
MagRec['treatment_dc_field'] = '%8.3e' % float(labfield)
# find the direction of the lab field in two ways:
# (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
tdec = [0, 90, 0, 180, 270, 0, 0, 90, 0]
tinc = [0, 0, 90, 0, 0, -90, 0, 0, 90]
if treatment[1] < 10:
ipos_code = int(treatment[1]) - 1 # depends on [control=['if'], data=[]]
else:
ipos_code = int(old_div(treatment[1], 10)) - 1
# (2) using the magnetization
if meas_line['dec_s'] != '':
DEC = float(meas_line['dec_s'])
INC = float(meas_line['inc_s']) # depends on [control=['if'], data=[]]
elif meas_line['dec_g'] != '':
DEC = float(meas_line['dec_g'])
INC = float(meas_line['inc_g']) # depends on [control=['if'], data=[]]
elif meas_line['dec_t'] != '':
DEC = float(meas_line['dec_t'])
INC = float(meas_line['inc_t']) # depends on [control=['if'], data=[]]
if DEC < 0 and DEC > -359:
DEC = 360.0 + DEC # depends on [control=['if'], data=[]]
if INC < 45 and INC > -45:
if DEC > 315 or DEC < 45:
ipos_guess = 0 # depends on [control=['if'], data=[]]
if DEC > 45 and DEC < 135:
ipos_guess = 1 # depends on [control=['if'], data=[]]
if DEC > 135 and DEC < 225:
ipos_guess = 3 # depends on [control=['if'], data=[]]
if DEC > 225 and DEC < 315:
ipos_guess = 4 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if INC > 45:
ipos_guess = 2 # depends on [control=['if'], data=[]]
if INC < -45:
ipos_guess = 5 # depends on [control=['if'], data=[]]
# prefer the guess over the code
ipos = ipos_guess
# check it
if treatment[1] != 7 and treatment[1] != 70:
if ipos_guess != ipos_code:
print('-W- WARNING: check specimen %s step %s, anistropy measurements, coding does not match the direction of the lab field' % (specimen, meas_line['treatment'])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
MagRec['treatment_dc_field_phi'] = '%7.1f' % tdec[ipos]
MagRec['treatment_dc_field_theta'] = '%7.1f' % tinc[ipos] # depends on [control=['if'], data=['experiment']]
#---------------------
# Lab treatment and lab protocoal for cooling rate experiment
#---------------------
elif experiment == 'CR':
cooling_times_list
LP = 'LP-CR-TRM'
MagRec['treatment_temp'] = '%8.3e' % (float(treatment[0]) + 273.0) # temp in kelvin
if treatment[1] == 0:
LT = 'LT-T-Z'
MagRec['treatment_dc_field'] = '0'
MagRec['treatment_dc_field_phi'] = '0'
MagRec['treatment_dc_field_theta'] = '0' # depends on [control=['if'], data=[]]
else:
if treatment[1] == 7: # alteration check as final measurement
LT = 'LT-PTRM-I' # depends on [control=['if'], data=[]]
else:
LT = 'LT-T-I'
MagRec['treatment_dc_field'] = '%8.3e' % labfield
MagRec['treatment_dc_field_phi'] = '%7.1f' % labfield_phi # labfield phi
MagRec['treatment_dc_field_theta'] = '%7.1f' % labfield_theta # labfield theta
indx = int(treatment[1]) - 1
# alteration check matjed as 0.7 in the measurement file
if indx == 6:
cooling_time = cooling_times_list[-1] # depends on [control=['if'], data=[]]
else:
cooling_time = cooling_times_list[indx]
MagRec['measurement_description'] = 'cooling_rate' + ':' + cooling_time + ':' + 'K/min' # depends on [control=['if'], data=[]]
#---------------------
# Lab treatment and lab protocoal for NLT experiment
#---------------------
elif 'NLT' in experiment:
print('Dont support yet NLT rate experiment file. Contact rshaar@ucsd.edu') # depends on [control=['if'], data=[]]
#---------------------
# magic_method_codes for this measurement only
# LP will be fixed after all measurement lines are read
#---------------------
MagRec['magic_method_codes'] = LT + ':' + LP
#---------------------
# Demag experiments only:
# search if orientation data exists in er_samples.txt
# if not: create one and save
#---------------------
# see if core azimuth and tilt-corrected data are in er_samples.txt
sample = MagRec['er_sample_name']
(found_sample_azimuth, found_sample_dip, found_sample_bed_dip_direction, found_sample_bed_dip) = (False, False, False, False)
if sample in list(er_sample_data.keys()):
if 'sample_azimuth' in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_azimuth'] != '':
sample_azimuth = float(er_sample_data[sample]['sample_azimuth'])
found_sample_azimuth = True # depends on [control=['if'], data=[]]
if 'sample_dip' in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_dip'] != '':
sample_dip = float(er_sample_data[sample]['sample_dip'])
found_sample_dip = True # depends on [control=['if'], data=[]]
if 'sample_bed_dip_direction' in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_bed_dip_direction'] != '':
sample_bed_dip_direction = float(er_sample_data[sample]['sample_bed_dip_direction'])
found_sample_bed_dip_direction = True # depends on [control=['if'], data=[]]
if 'sample_bed_dip' in list(er_sample_data[sample].keys()) and er_sample_data[sample]['sample_bed_dip'] != '':
sample_bed_dip = float(er_sample_data[sample]['sample_bed_dip'])
found_sample_bed_dip = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['sample']]
else:
er_sample_data[sample] = {}
#--------------------
# deal with specimen orientation and different coordinate system
#--------------------
(found_s, found_geo, found_tilt) = (False, False, False)
if 'dec_s' in list(meas_line.keys()) and 'inc_s' in list(meas_line.keys()):
if meas_line['dec_s'] != '' and meas_line['inc_s'] != '':
found_s = True # depends on [control=['if'], data=[]]
MagRec['measurement_dec'] = meas_line['dec_s']
MagRec['measurement_inc'] = meas_line['inc_s'] # depends on [control=['if'], data=[]]
if 'dec_g' in list(meas_line.keys()) and 'inc_g' in list(meas_line.keys()):
if meas_line['dec_g'] != '' and meas_line['inc_g'] != '':
found_geo = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'dec_t' in list(meas_line.keys()) and 'inc_t' in list(meas_line.keys()):
if meas_line['dec_t'] != '' and meas_line['inc_t'] != '':
found_tilt = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#-----------------------------
# specimen coordinates: no
# geographic coordinates: yes
#-----------------------------
if found_geo and (not found_s):
MagRec['measurement_dec'] = meas_line['dec_g']
MagRec['measurement_inc'] = meas_line['inc_g']
# core azimuth/plunge is not in er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
er_sample_data[sample]['sample_azimuth'] = '0'
er_sample_data[sample]['sample_dip'] = '0' # depends on [control=['if'], data=[]]
else:
# core azimuth/plunge is in er_samples.txt
sample_azimuth = float(er_sample_data[sample]['sample_azimuth'])
sample_dip = float(er_sample_data[sample]['sample_dip'])
if sample_azimuth != 0 and sample_dip != 0:
print('-W- WARNING: delete core azimuth/plunge in er_samples.txt\n becasue dec_s and inc_s are unavaialable') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#-----------------------------
# specimen coordinates: no
# geographic coordinates: no
#-----------------------------
if not found_geo and (not found_s):
print('-E- ERROR: sample %s does not have dec_s/inc_s or dec_g/inc_g. Ignore specimen %s ' % (sample, specimen))
break # depends on [control=['if'], data=[]]
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: yes
#
# commant: Ron, this need to be tested !!
#-----------------------------
if found_geo and found_s:
(cdec, cinc) = (float(meas_line['dec_s']), float(meas_line['inc_s']))
(gdec, ginc) = (float(meas_line['dec_g']), float(meas_line['inc_g']))
(az, pl) = pmag.get_azpl(cdec, cinc, gdec, ginc)
# core azimuth/plunge is not in er_samples.txt:
# calculate core az/pl and add it to er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
er_sample_data[sample]['sample_azimuth'] = '%.1f' % az
er_sample_data[sample]['sample_dip'] = '%.1f' % pl # depends on [control=['if'], data=[]]
else:
# core azimuth/plunge is in er_samples.txt
if float(er_sample_data[sample]['sample_azimuth']) != az:
print('-E- ERROR in sample_azimuth sample %s. Check it! using the value in er_samples.txt' % sample) # depends on [control=['if'], data=[]]
if float(er_sample_data[sample]['sample_dip']) != pl:
print('-E- ERROR in sample_dip sample %s. Check it! using the value in er_samples.txt' % sample) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: no
#-----------------------------
if not found_geo and found_s:
if found_sample_dip and found_sample_azimuth:
pass # depends on [control=['if'], data=[]]
# (nothing to do)
elif 'Demag' in experiment:
print('-W- WARNING: missing sample_dip or sample_azimuth for sample %s' % sample) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#-----------------------------
# tilt-corrected coordinates: yes
# geographic coordinates: no
#-----------------------------
if found_tilt and (not found_geo):
print('-E- ERROR: missing geographic data for sample %s. Ignoring tilt-corrected data ' % sample) # depends on [control=['if'], data=[]]
if found_tilt and found_geo:
(dec_geo, inc_geo) = (float(meas_line['dec_g']), float(meas_line['inc_g']))
(dec_tilt, inc_tilt) = (float(meas_line['dec_t']), float(meas_line['inc_t']))
if dec_geo == dec_tilt and inc_geo == inc_tilt:
(DipDir, Dip) = (0.0, 0.0) # depends on [control=['if'], data=[]]
else:
(DipDir, Dip) = pmag.get_tilt(dec_geo, inc_geo, dec_tilt, inc_tilt)
if not found_sample_bed_dip_direction or not found_sample_bed_dip:
print('-I- calculating dip and dip direction used for tilt correction sample %s. results are put in er_samples.txt' % sample)
er_sample_data[sample]['sample_bed_dip_direction'] = '%.1f' % DipDir
er_sample_data[sample]['sample_bed_dip'] = '%.1f' % Dip # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#-----------------------------
# er_samples method codes
# geographic coordinates: no
#-----------------------------
if found_tilt or found_geo:
er_sample_data[sample]['magic_method_codes'] = 'SO-NO' # depends on [control=['if'], data=[]]
#-----------------
# er_samples_data
#-----------------
if sample in list(er_sample_data.keys()):
er_sample_data[sample]['er_sample_name'] = sample
er_sample_data[sample]['er_site_name'] = MagRec['er_site_name']
er_sample_data[sample]['er_location_name'] = MagRec['er_location_name'] # depends on [control=['if'], data=['sample']]
#MagRec["magic_method_codes"]=LT
MagRecs_this_specimen.append(MagRec)
#if LP!="" and LP not in LP_this_specimen:
# LP_this_specimen.append(LP)
measurement_running_number += 1 # depends on [control=['for'], data=['meas_line']]
#-------
#-------
# after reading all the measurements lines for this specimen
# 1) add magic_experiment_name
# 2) fix magic_method_codes with the correct lab protocol
#-------
LP_this_specimen = []
for MagRec in MagRecs_this_specimen:
magic_method_codes = MagRec['magic_method_codes'].split(':')
for code in magic_method_codes:
if 'LP' in code and code not in LP_this_specimen:
LP_this_specimen.append(code) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['code']] # depends on [control=['for'], data=['MagRec']]
# check IZ/ZI/IZZI
if 'LP-PI-ZI' in LP_this_specimen and 'LP-PI-IZ' in LP_this_specimen:
LP_this_specimen.remove('LP-PI-ZI')
LP_this_specimen.remove('LP-PI-IZ')
LP_this_specimen.append('LP-PI-BT-IZZI') # depends on [control=['if'], data=[]]
# add the right LP codes and fix experiment name
for MagRec in MagRecs_this_specimen:
MagRec['magic_experiment_name'] = MagRec['er_specimen_name'] + ':' + ':'.join(LP_this_specimen)
magic_method_codes = MagRec['magic_method_codes'].split(':')
LT = ''
for code in magic_method_codes:
if code[:3] == 'LT-':
LT = code
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['code']]
MagRec['magic_method_codes'] = LT + ':' + ':'.join(LP_this_specimen)
MagRecs.append(MagRec) # depends on [control=['for'], data=['MagRec']] # depends on [control=['for'], data=['specimen']]
#--
# write magic_measurements.txt
#--
MagRecs_fixed = merge_pmag_recs(MagRecs)
pmag.magic_write(meas_file, MagRecs_fixed, 'magic_measurements')
print('-I- MagIC file is saved in %s' % meas_file)
#--
# write er_samples.txt
#--
ErSamplesRecs = []
samples = list(er_sample_data.keys())
samples.sort()
for sample in samples:
ErSamplesRecs.append(er_sample_data[sample]) # depends on [control=['for'], data=['sample']]
ErSamplesRecs_fixed = merge_pmag_recs(ErSamplesRecs)
pmag.magic_write(samp_file, ErSamplesRecs_fixed, 'er_samples')
return (True, meas_file) |
def createRandomSequences(self,
numSequences,
sequenceLength):
"""
Creates a set of random sequences, each with sequenceLength elements,
and adds them to the machine.
"""
for _ in xrange(numSequences):
self.addObject(
[numpy.random.randint(0, self.numFeatures)
for _ in xrange(sequenceLength)]
) | def function[createRandomSequences, parameter[self, numSequences, sequenceLength]]:
constant[
Creates a set of random sequences, each with sequenceLength elements,
and adds them to the machine.
]
for taget[name[_]] in starred[call[name[xrange], parameter[name[numSequences]]]] begin[:]
call[name[self].addObject, parameter[<ast.ListComp object at 0x7da18f09f100>]] | keyword[def] identifier[createRandomSequences] ( identifier[self] ,
identifier[numSequences] ,
identifier[sequenceLength] ):
literal[string]
keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[numSequences] ):
identifier[self] . identifier[addObject] (
[ identifier[numpy] . identifier[random] . identifier[randint] ( literal[int] , identifier[self] . identifier[numFeatures] )
keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[sequenceLength] )]
) | def createRandomSequences(self, numSequences, sequenceLength):
"""
Creates a set of random sequences, each with sequenceLength elements,
and adds them to the machine.
"""
for _ in xrange(numSequences):
self.addObject([numpy.random.randint(0, self.numFeatures) for _ in xrange(sequenceLength)]) # depends on [control=['for'], data=['_']] |
def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port,
inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num,
scheduler_cls, threads, get_object=False):
"""
Run Scheduler, only one scheduler is allowed.
"""
g = ctx.obj
Scheduler = load_cls(None, None, scheduler_cls)
kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb,
newtask_queue=g.newtask_queue, status_queue=g.status_queue,
out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data'))
if threads:
kwargs['threads'] = int(threads)
scheduler = Scheduler(**kwargs)
scheduler.INQUEUE_LIMIT = inqueue_limit
scheduler.DELETE_TIME = delete_time
scheduler.ACTIVE_TASKS = active_tasks
scheduler.LOOP_LIMIT = loop_limit
scheduler.FAIL_PAUSE_NUM = fail_pause_num
g.instances.append(scheduler)
if g.get('testing_mode') or get_object:
return scheduler
if xmlrpc:
utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
scheduler.run() | def function[scheduler, parameter[ctx, xmlrpc, xmlrpc_host, xmlrpc_port, inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num, scheduler_cls, threads, get_object]]:
constant[
Run Scheduler, only one scheduler is allowed.
]
variable[g] assign[=] name[ctx].obj
variable[Scheduler] assign[=] call[name[load_cls], parameter[constant[None], constant[None], name[scheduler_cls]]]
variable[kwargs] assign[=] call[name[dict], parameter[]]
if name[threads] begin[:]
call[name[kwargs]][constant[threads]] assign[=] call[name[int], parameter[name[threads]]]
variable[scheduler] assign[=] call[name[Scheduler], parameter[]]
name[scheduler].INQUEUE_LIMIT assign[=] name[inqueue_limit]
name[scheduler].DELETE_TIME assign[=] name[delete_time]
name[scheduler].ACTIVE_TASKS assign[=] name[active_tasks]
name[scheduler].LOOP_LIMIT assign[=] name[loop_limit]
name[scheduler].FAIL_PAUSE_NUM assign[=] name[fail_pause_num]
call[name[g].instances.append, parameter[name[scheduler]]]
if <ast.BoolOp object at 0x7da1b208f0d0> begin[:]
return[name[scheduler]]
if name[xmlrpc] begin[:]
call[name[utils].run_in_thread, parameter[name[scheduler].xmlrpc_run]]
call[name[scheduler].run, parameter[]] | keyword[def] identifier[scheduler] ( identifier[ctx] , identifier[xmlrpc] , identifier[xmlrpc_host] , identifier[xmlrpc_port] ,
identifier[inqueue_limit] , identifier[delete_time] , identifier[active_tasks] , identifier[loop_limit] , identifier[fail_pause_num] ,
identifier[scheduler_cls] , identifier[threads] , identifier[get_object] = keyword[False] ):
literal[string]
identifier[g] = identifier[ctx] . identifier[obj]
identifier[Scheduler] = identifier[load_cls] ( keyword[None] , keyword[None] , identifier[scheduler_cls] )
identifier[kwargs] = identifier[dict] ( identifier[taskdb] = identifier[g] . identifier[taskdb] , identifier[projectdb] = identifier[g] . identifier[projectdb] , identifier[resultdb] = identifier[g] . identifier[resultdb] ,
identifier[newtask_queue] = identifier[g] . identifier[newtask_queue] , identifier[status_queue] = identifier[g] . identifier[status_queue] ,
identifier[out_queue] = identifier[g] . identifier[scheduler2fetcher] , identifier[data_path] = identifier[g] . identifier[get] ( literal[string] , literal[string] ))
keyword[if] identifier[threads] :
identifier[kwargs] [ literal[string] ]= identifier[int] ( identifier[threads] )
identifier[scheduler] = identifier[Scheduler] (** identifier[kwargs] )
identifier[scheduler] . identifier[INQUEUE_LIMIT] = identifier[inqueue_limit]
identifier[scheduler] . identifier[DELETE_TIME] = identifier[delete_time]
identifier[scheduler] . identifier[ACTIVE_TASKS] = identifier[active_tasks]
identifier[scheduler] . identifier[LOOP_LIMIT] = identifier[loop_limit]
identifier[scheduler] . identifier[FAIL_PAUSE_NUM] = identifier[fail_pause_num]
identifier[g] . identifier[instances] . identifier[append] ( identifier[scheduler] )
keyword[if] identifier[g] . identifier[get] ( literal[string] ) keyword[or] identifier[get_object] :
keyword[return] identifier[scheduler]
keyword[if] identifier[xmlrpc] :
identifier[utils] . identifier[run_in_thread] ( identifier[scheduler] . identifier[xmlrpc_run] , identifier[port] = identifier[xmlrpc_port] , identifier[bind] = identifier[xmlrpc_host] )
identifier[scheduler] . identifier[run] () | def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num, scheduler_cls, threads, get_object=False):
"""
Run Scheduler, only one scheduler is allowed.
"""
g = ctx.obj
Scheduler = load_cls(None, None, scheduler_cls)
kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb, newtask_queue=g.newtask_queue, status_queue=g.status_queue, out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data'))
if threads:
kwargs['threads'] = int(threads) # depends on [control=['if'], data=[]]
scheduler = Scheduler(**kwargs)
scheduler.INQUEUE_LIMIT = inqueue_limit
scheduler.DELETE_TIME = delete_time
scheduler.ACTIVE_TASKS = active_tasks
scheduler.LOOP_LIMIT = loop_limit
scheduler.FAIL_PAUSE_NUM = fail_pause_num
g.instances.append(scheduler)
if g.get('testing_mode') or get_object:
return scheduler # depends on [control=['if'], data=[]]
if xmlrpc:
utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host) # depends on [control=['if'], data=[]]
scheduler.run() |
def get_value(self) -> ScalarType:
"""Returns the value of a Scalar node.
Use is_scalar(type) to check which type the node has.
"""
if self.yaml_node.tag == 'tag:yaml.org,2002:str':
return self.yaml_node.value
if self.yaml_node.tag == 'tag:yaml.org,2002:int':
return int(self.yaml_node.value)
if self.yaml_node.tag == 'tag:yaml.org,2002:float':
return float(self.yaml_node.value)
if self.yaml_node.tag == 'tag:yaml.org,2002:bool':
return self.yaml_node.value in ['TRUE', 'True', 'true']
if self.yaml_node.tag == 'tag:yaml.org,2002:null':
return None
raise RuntimeError('This node with tag {} is not of the right type'
' for get_value()'.format(self.yaml_node.tag)) | def function[get_value, parameter[self]]:
constant[Returns the value of a Scalar node.
Use is_scalar(type) to check which type the node has.
]
if compare[name[self].yaml_node.tag equal[==] constant[tag:yaml.org,2002:str]] begin[:]
return[name[self].yaml_node.value]
if compare[name[self].yaml_node.tag equal[==] constant[tag:yaml.org,2002:int]] begin[:]
return[call[name[int], parameter[name[self].yaml_node.value]]]
if compare[name[self].yaml_node.tag equal[==] constant[tag:yaml.org,2002:float]] begin[:]
return[call[name[float], parameter[name[self].yaml_node.value]]]
if compare[name[self].yaml_node.tag equal[==] constant[tag:yaml.org,2002:bool]] begin[:]
return[compare[name[self].yaml_node.value in list[[<ast.Constant object at 0x7da18eb553f0>, <ast.Constant object at 0x7da18eb57c10>, <ast.Constant object at 0x7da18eb56710>]]]]
if compare[name[self].yaml_node.tag equal[==] constant[tag:yaml.org,2002:null]] begin[:]
return[constant[None]]
<ast.Raise object at 0x7da18eb54c10> | keyword[def] identifier[get_value] ( identifier[self] )-> identifier[ScalarType] :
literal[string]
keyword[if] identifier[self] . identifier[yaml_node] . identifier[tag] == literal[string] :
keyword[return] identifier[self] . identifier[yaml_node] . identifier[value]
keyword[if] identifier[self] . identifier[yaml_node] . identifier[tag] == literal[string] :
keyword[return] identifier[int] ( identifier[self] . identifier[yaml_node] . identifier[value] )
keyword[if] identifier[self] . identifier[yaml_node] . identifier[tag] == literal[string] :
keyword[return] identifier[float] ( identifier[self] . identifier[yaml_node] . identifier[value] )
keyword[if] identifier[self] . identifier[yaml_node] . identifier[tag] == literal[string] :
keyword[return] identifier[self] . identifier[yaml_node] . identifier[value] keyword[in] [ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[self] . identifier[yaml_node] . identifier[tag] == literal[string] :
keyword[return] keyword[None]
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[yaml_node] . identifier[tag] )) | def get_value(self) -> ScalarType:
"""Returns the value of a Scalar node.
Use is_scalar(type) to check which type the node has.
"""
if self.yaml_node.tag == 'tag:yaml.org,2002:str':
return self.yaml_node.value # depends on [control=['if'], data=[]]
if self.yaml_node.tag == 'tag:yaml.org,2002:int':
return int(self.yaml_node.value) # depends on [control=['if'], data=[]]
if self.yaml_node.tag == 'tag:yaml.org,2002:float':
return float(self.yaml_node.value) # depends on [control=['if'], data=[]]
if self.yaml_node.tag == 'tag:yaml.org,2002:bool':
return self.yaml_node.value in ['TRUE', 'True', 'true'] # depends on [control=['if'], data=[]]
if self.yaml_node.tag == 'tag:yaml.org,2002:null':
return None # depends on [control=['if'], data=[]]
raise RuntimeError('This node with tag {} is not of the right type for get_value()'.format(self.yaml_node.tag)) |
def is_multi_target(target):
    """
    Report whether a pipeline manager run target names more than one target.
    :param None or str or Sequence of str target: 0, 1, or multiple targets
    :return bool: True only for a sequence containing more than one target
    :raise TypeError: if ``target`` is neither None, a string, nor a Sequence
    """
    # A bare string counts as a single target even though str is a Sequence.
    if isinstance(target, str) or target is None:
        return False
    if not isinstance(target, Sequence):
        raise TypeError("Could not interpret argument as a target: {} ({})".
                        format(target, type(target)))
    return len(target) > 1
format(target, type(target))) | def function[is_multi_target, parameter[target]]:
constant[
Determine if pipeline manager's run target is multiple.
:param None or str or Sequence of str target: 0, 1, or multiple targets
:return bool: Whether there are multiple targets
:raise TypeError: if the argument is neither None nor string nor Sequence
]
if <ast.BoolOp object at 0x7da1b03db610> begin[:]
return[constant[False]] | keyword[def] identifier[is_multi_target] ( identifier[target] ):
literal[string]
keyword[if] identifier[target] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[target] , identifier[str] ):
keyword[return] keyword[False]
keyword[elif] identifier[isinstance] ( identifier[target] , identifier[Sequence] ):
keyword[return] identifier[len] ( identifier[target] )> literal[int]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] .
identifier[format] ( identifier[target] , identifier[type] ( identifier[target] ))) | def is_multi_target(target):
"""
Determine if pipeline manager's run target is multiple.
:param None or str or Sequence of str target: 0, 1, or multiple targets
:return bool: Whether there are multiple targets
:raise TypeError: if the argument is neither None nor string nor Sequence
"""
if target is None or isinstance(target, str):
return False # depends on [control=['if'], data=[]]
elif isinstance(target, Sequence):
return len(target) > 1 # depends on [control=['if'], data=[]]
else:
raise TypeError('Could not interpret argument as a target: {} ({})'.format(target, type(target))) |
def create(self, name=None, prefix=None, pkgs=None, channels=None):
        """Create an environment with a specified set of packages.
        Exactly one of ``name`` or ``prefix`` locates the new environment.
        ``pkgs`` is either a list/tuple of package specs or a path to a
        spec file (forwarded via ``--file``).  Raises ``TypeError`` for
        missing/invalid arguments and ``CondaEnvExistsError`` when the
        target environment already exists.
        """
        logger.debug(str((prefix, pkgs, channels)))
        # TODO: Fix temporal hack
        if not pkgs or not (isinstance(pkgs, (list, tuple)) or
                            is_text_string(pkgs)):
            raise TypeError('must specify a list of one or more packages to '
                            'install into new environment')
        cmd_list = ['create', '--yes', '--json', '--mkdir']
        if name:
            ref = name
            envs_dirs = self.info().communicate()[0]['envs_dirs']
            search = [os.path.join(d, name) for d in envs_dirs]
            cmd_list += ['--name', name]
        elif prefix:
            ref = prefix
            search = [prefix]
            cmd_list += ['--prefix', prefix]
        else:
            raise TypeError('must specify either an environment name or a '
                            'path for new environment')
        # Refuse to clobber an environment that already exists anywhere
        # the new one could live.
        for candidate in search:
            if os.path.exists(candidate):
                raise CondaEnvExistsError('Conda environment {0} already '
                                          'exists'.format(ref))
        # TODO: Fix temporal hack
        if isinstance(pkgs, (list, tuple)):
            cmd_list += pkgs
        elif is_text_string(pkgs):
            cmd_list += ['--file', pkgs]
        # TODO: Check if correct
        if channels:
            cmd_list += ['--override-channels']
            for channel in channels:
                cmd_list += ['--channel', channel]
        return self._call_and_parse(cmd_list)
constant[Create an environment with a specified set of packages.]
call[name[logger].debug, parameter[call[name[str], parameter[tuple[[<ast.Name object at 0x7da1b27387c0>, <ast.Name object at 0x7da1b2738760>, <ast.Name object at 0x7da1b2738af0>]]]]]]
if <ast.BoolOp object at 0x7da1b2738940> begin[:]
<ast.Raise object at 0x7da1b2738580>
variable[cmd_list] assign[=] list[[<ast.Constant object at 0x7da1b27386d0>, <ast.Constant object at 0x7da1b27382b0>, <ast.Constant object at 0x7da1b27384f0>, <ast.Constant object at 0x7da1b2738310>]]
if name[name] begin[:]
variable[ref] assign[=] name[name]
variable[search] assign[=] <ast.ListComp object at 0x7da1b2738160>
call[name[cmd_list].extend, parameter[list[[<ast.Constant object at 0x7da1b27381c0>, <ast.Name object at 0x7da1b27aafe0>]]]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b27781c0>]] begin[:]
<ast.Raise object at 0x7da1b2778ac0>
if call[name[isinstance], parameter[name[pkgs], tuple[[<ast.Name object at 0x7da1b2778a00>, <ast.Name object at 0x7da1b2778490>]]]] begin[:]
call[name[cmd_list].extend, parameter[name[pkgs]]]
if name[channels] begin[:]
call[name[cmd_list].extend, parameter[list[[<ast.Constant object at 0x7da1b2778130>]]]]
for taget[name[channel]] in starred[name[channels]] begin[:]
call[name[cmd_list].extend, parameter[list[[<ast.Constant object at 0x7da1b2778370>]]]]
call[name[cmd_list].extend, parameter[list[[<ast.Name object at 0x7da18bcc9b40>]]]]
return[call[name[self]._call_and_parse, parameter[name[cmd_list]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[name] = keyword[None] , identifier[prefix] = keyword[None] , identifier[pkgs] = keyword[None] , identifier[channels] = keyword[None] ):
literal[string]
identifier[logger] . identifier[debug] ( identifier[str] (( identifier[prefix] , identifier[pkgs] , identifier[channels] )))
keyword[if] ( keyword[not] identifier[pkgs] keyword[or] ( keyword[not] identifier[isinstance] ( identifier[pkgs] ,( identifier[list] , identifier[tuple] )) keyword[and]
keyword[not] identifier[is_text_string] ( identifier[pkgs] ))):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
identifier[cmd_list] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[name] :
identifier[ref] = identifier[name]
identifier[search] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[name] ) keyword[for] identifier[d] keyword[in]
identifier[self] . identifier[info] (). identifier[communicate] ()[ literal[int] ][ literal[string] ]]
identifier[cmd_list] . identifier[extend] ([ literal[string] , identifier[name] ])
keyword[elif] identifier[prefix] :
identifier[ref] = identifier[prefix]
identifier[search] =[ identifier[prefix] ]
identifier[cmd_list] . identifier[extend] ([ literal[string] , identifier[prefix] ])
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[if] identifier[any] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[prefix] ) keyword[for] identifier[prefix] keyword[in] identifier[search] ):
keyword[raise] identifier[CondaEnvExistsError] ( literal[string]
literal[string] . identifier[format] ( identifier[ref] ))
keyword[if] identifier[isinstance] ( identifier[pkgs] ,( identifier[list] , identifier[tuple] )):
identifier[cmd_list] . identifier[extend] ( identifier[pkgs] )
keyword[elif] identifier[is_text_string] ( identifier[pkgs] ):
identifier[cmd_list] . identifier[extend] ([ literal[string] , identifier[pkgs] ])
keyword[if] identifier[channels] :
identifier[cmd_list] . identifier[extend] ([ literal[string] ])
keyword[for] identifier[channel] keyword[in] identifier[channels] :
identifier[cmd_list] . identifier[extend] ([ literal[string] ])
identifier[cmd_list] . identifier[extend] ([ identifier[channel] ])
keyword[return] identifier[self] . identifier[_call_and_parse] ( identifier[cmd_list] ) | def create(self, name=None, prefix=None, pkgs=None, channels=None):
"""Create an environment with a specified set of packages."""
logger.debug(str((prefix, pkgs, channels)))
# TODO: Fix temporal hack
if not pkgs or (not isinstance(pkgs, (list, tuple)) and (not is_text_string(pkgs))):
raise TypeError('must specify a list of one or more packages to install into new environment') # depends on [control=['if'], data=[]]
cmd_list = ['create', '--yes', '--json', '--mkdir']
if name:
ref = name
search = [os.path.join(d, name) for d in self.info().communicate()[0]['envs_dirs']]
cmd_list.extend(['--name', name]) # depends on [control=['if'], data=[]]
elif prefix:
ref = prefix
search = [prefix]
cmd_list.extend(['--prefix', prefix]) # depends on [control=['if'], data=[]]
else:
raise TypeError('must specify either an environment name or a path for new environment')
if any((os.path.exists(prefix) for prefix in search)):
raise CondaEnvExistsError('Conda environment {0} already exists'.format(ref)) # depends on [control=['if'], data=[]]
# TODO: Fix temporal hack
if isinstance(pkgs, (list, tuple)):
cmd_list.extend(pkgs) # depends on [control=['if'], data=[]]
elif is_text_string(pkgs):
cmd_list.extend(['--file', pkgs]) # depends on [control=['if'], data=[]]
# TODO: Check if correct
if channels:
cmd_list.extend(['--override-channels'])
for channel in channels:
cmd_list.extend(['--channel'])
cmd_list.extend([channel]) # depends on [control=['for'], data=['channel']] # depends on [control=['if'], data=[]]
return self._call_and_parse(cmd_list) |
def hilbert_array(xint):
    """Compute the Hilbert index of every point in ``xint``.
    Parameters
    ----------
    xint: (N, d) int numpy.ndarray
        Integer coordinates, one point per row.
    Returns
    -------
    h: (N,) int numpy.ndarray
        Hilbert indices, one per input row.
    """
    num_points = xint.shape[0]
    h = np.zeros(num_points, int64)
    # Iterating a 2-D ndarray yields its rows.
    for row, point in enumerate(xint):
        h[row] = Hilbert_to_int(point)
    return h
constant[Compute Hilbert indices.
Parameters
----------
xint: (N, d) int numpy.ndarray
Returns
-------
h: (N,) int numpy.ndarray
Hilbert indices
]
<ast.Tuple object at 0x7da18bcc8670> assign[=] name[xint].shape
variable[h] assign[=] call[name[np].zeros, parameter[name[N], name[int64]]]
for taget[name[n]] in starred[call[name[range], parameter[name[N]]]] begin[:]
call[name[h]][name[n]] assign[=] call[name[Hilbert_to_int], parameter[call[name[xint]][tuple[[<ast.Name object at 0x7da18f812620>, <ast.Slice object at 0x7da18f811960>]]]]]
return[name[h]] | keyword[def] identifier[hilbert_array] ( identifier[xint] ):
literal[string]
identifier[N] , identifier[d] = identifier[xint] . identifier[shape]
identifier[h] = identifier[np] . identifier[zeros] ( identifier[N] , identifier[int64] )
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[N] ):
identifier[h] [ identifier[n] ]= identifier[Hilbert_to_int] ( identifier[xint] [ identifier[n] ,:])
keyword[return] identifier[h] | def hilbert_array(xint):
"""Compute Hilbert indices.
Parameters
----------
xint: (N, d) int numpy.ndarray
Returns
-------
h: (N,) int numpy.ndarray
Hilbert indices
"""
(N, d) = xint.shape
h = np.zeros(N, int64)
for n in range(N):
h[n] = Hilbert_to_int(xint[n, :]) # depends on [control=['for'], data=['n']]
return h |
def is_holiday(self, day, extra_holidays=None):
        """Return True if ``day`` is a holiday.
        In addition to the regular calendar holidays, ``extra_holidays``
        may list dates that should be treated as holidays even though the
        regular calendar (or weekend rules) would not include them.
        """
        day = cleaned_date(day)
        if extra_holidays:
            cleaned_extras = tuple(cleaned_date(extra)
                                   for extra in extra_holidays)
            if day in cleaned_extras:
                return True
        return day in self.holidays_set(day.year)
constant[Return True if it's an holiday.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
]
variable[day] assign[=] call[name[cleaned_date], parameter[name[day]]]
if name[extra_holidays] begin[:]
variable[extra_holidays] assign[=] call[name[tuple], parameter[call[name[map], parameter[name[cleaned_date], name[extra_holidays]]]]]
if <ast.BoolOp object at 0x7da20c992d10> begin[:]
return[constant[True]]
return[compare[name[day] in call[name[self].holidays_set, parameter[name[day].year]]]] | keyword[def] identifier[is_holiday] ( identifier[self] , identifier[day] , identifier[extra_holidays] = keyword[None] ):
literal[string]
identifier[day] = identifier[cleaned_date] ( identifier[day] )
keyword[if] identifier[extra_holidays] :
identifier[extra_holidays] = identifier[tuple] ( identifier[map] ( identifier[cleaned_date] , identifier[extra_holidays] ))
keyword[if] identifier[extra_holidays] keyword[and] identifier[day] keyword[in] identifier[extra_holidays] :
keyword[return] keyword[True]
keyword[return] identifier[day] keyword[in] identifier[self] . identifier[holidays_set] ( identifier[day] . identifier[year] ) | def is_holiday(self, day, extra_holidays=None):
"""Return True if it's an holiday.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
"""
day = cleaned_date(day)
if extra_holidays:
extra_holidays = tuple(map(cleaned_date, extra_holidays)) # depends on [control=['if'], data=[]]
if extra_holidays and day in extra_holidays:
return True # depends on [control=['if'], data=[]]
return day in self.holidays_set(day.year) |
def cfg(self):
        """Load the application configuration.
        The configuration module name is taken from the environment
        (falling back to ``self.defaults['CONFIG']``); its UPPER_CASE
        attributes are merged over the defaults, then individual options
        are patched from matching environment variables parsed as JSON.
        :return: the merged configuration (an ``LStruct``)
        """
        # Start from the application defaults; the module to load may be
        # overridden via the CONFIGURATION_ENVIRON_VARIABLE env var.
        config = LStruct(self.defaults)
        module = config['CONFIG'] = os.environ.get(
            CONFIGURATION_ENVIRON_VARIABLE, config['CONFIG'])
        if module:
            try:
                # Rebind ``module`` from dotted name to the module object.
                module = import_module(module)
                # Copy every UPPER_CASE, non-underscore-prefixed attribute
                # of the module into the configuration.
                config.update({
                    name: getattr(module, name) for name in dir(module)
                    if name == name.upper() and not name.startswith('_')
                })
            except ImportError as exc:
                # Record the failure but keep running on the defaults.
                config.CONFIG = None
                self.logger.error("Error importing %s: %s", module, exc)
        # Patch configuration from ENV
        # Only UPPER_CASE option names already present in the configuration
        # are considered; private (underscore) names are skipped.
        for name in config:
            if name.startswith('_') or name != name.upper() or name not in os.environ:
                continue
            try:
                # Values must be valid JSON (numbers, booleans, quoted
                # strings, ...); unparseable values are silently ignored.
                config[name] = json.loads(os.environ[name])
            except ValueError:
                pass
        return config
constant[Load the application configuration.
This method loads configuration from python module.
]
variable[config] assign[=] call[name[LStruct], parameter[name[self].defaults]]
variable[module] assign[=] call[name[os].environ.get, parameter[name[CONFIGURATION_ENVIRON_VARIABLE], call[name[config]][constant[CONFIG]]]]
if name[module] begin[:]
<ast.Try object at 0x7da1b23479a0>
for taget[name[name]] in starred[name[config]] begin[:]
if <ast.BoolOp object at 0x7da1b2346fb0> begin[:]
continue
<ast.Try object at 0x7da1b2344850>
return[name[config]] | keyword[def] identifier[cfg] ( identifier[self] ):
literal[string]
identifier[config] = identifier[LStruct] ( identifier[self] . identifier[defaults] )
identifier[module] = identifier[config] [ literal[string] ]= identifier[os] . identifier[environ] . identifier[get] (
identifier[CONFIGURATION_ENVIRON_VARIABLE] , identifier[config] [ literal[string] ])
keyword[if] identifier[module] :
keyword[try] :
identifier[module] = identifier[import_module] ( identifier[module] )
identifier[config] . identifier[update] ({
identifier[name] : identifier[getattr] ( identifier[module] , identifier[name] ) keyword[for] identifier[name] keyword[in] identifier[dir] ( identifier[module] )
keyword[if] identifier[name] == identifier[name] . identifier[upper] () keyword[and] keyword[not] identifier[name] . identifier[startswith] ( literal[string] )
})
keyword[except] identifier[ImportError] keyword[as] identifier[exc] :
identifier[config] . identifier[CONFIG] = keyword[None]
identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[module] , identifier[exc] )
keyword[for] identifier[name] keyword[in] identifier[config] :
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ) keyword[or] identifier[name] != identifier[name] . identifier[upper] () keyword[or] identifier[name] keyword[not] keyword[in] identifier[os] . identifier[environ] :
keyword[continue]
keyword[try] :
identifier[config] [ identifier[name] ]= identifier[json] . identifier[loads] ( identifier[os] . identifier[environ] [ identifier[name] ])
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[config] | def cfg(self):
"""Load the application configuration.
This method loads configuration from python module.
"""
config = LStruct(self.defaults)
module = config['CONFIG'] = os.environ.get(CONFIGURATION_ENVIRON_VARIABLE, config['CONFIG'])
if module:
try:
module = import_module(module)
config.update({name: getattr(module, name) for name in dir(module) if name == name.upper() and (not name.startswith('_'))}) # depends on [control=['try'], data=[]]
except ImportError as exc:
config.CONFIG = None
self.logger.error('Error importing %s: %s', module, exc) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
# Patch configuration from ENV
for name in config:
if name.startswith('_') or name != name.upper() or name not in os.environ:
continue # depends on [control=['if'], data=[]]
try:
config[name] = json.loads(os.environ[name]) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['name']]
return config |
def acl_present(name, id=None, token=None, type="client", rules="", consul_url='http://localhost:8500'):
    '''
    Ensure the ACL is present
    name
        Specifies a human-friendly name for the ACL token.
    id
        Specifies the ID of the ACL.
    type: client
        Specifies the type of ACL token. Valid values are: client and management.
    rules
        Specifies rules for this ACL token.
    consul_url : http://locahost:8500
        consul URL to query
    .. note::
        For more information https://www.consul.io/api/acl.html#create-acl-token, https://www.consul.io/api/acl.html#update-acl-token
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'ACL "{0}" exists and is up to date'.format(name)}
    exists = _acl_exists(name, id, token, consul_url)
    if exists['result']:
        # The ACL already exists: update it only when its recorded state
        # differs from the requested one.
        changes = _acl_changes(name=name, id=exists['id'], token=token,
                               type=type, rules=rules, consul_url=consul_url)
        if changes:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = "the acl exists and will be updated"
                return ret
            update = __salt__['consul.acl_update'](name=name,
                                                   id=exists['id'],
                                                   token=token, type=type,
                                                   rules=rules,
                                                   consul_url=consul_url)
            if update['res']:
                ret['result'] = True
                ret['comment'] = "the acl has been updated"
            else:
                ret['result'] = False
                ret['comment'] = "failed to update the acl"
    else:
        # The ACL does not exist yet: create it (or just report in test mode).
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = "the acl doesn't exist, it will be created"
            return ret
        create = __salt__['consul.acl_create'](name=name, id=id, token=token,
                                               type=type, rules=rules,
                                               consul_url=consul_url)
        if create['res']:
            ret['result'] = True
            ret['comment'] = "the acl has been created"
        else:
            ret['result'] = False
            ret['comment'] = "failed to create the acl"
    return ret
constant[
Ensure the ACL is present
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
type: client
Specifies the type of ACL token. Valid values are: client and management.
rules
Specifies rules for this ACL token.
consul_url : http://locahost:8500
consul URL to query
.. note::
For more information https://www.consul.io/api/acl.html#create-acl-token, https://www.consul.io/api/acl.html#update-acl-token
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21a1a80>, <ast.Constant object at 0x7da1b21a01f0>, <ast.Constant object at 0x7da1b21a3940>, <ast.Constant object at 0x7da1b21a30d0>], [<ast.Name object at 0x7da1b21a12a0>, <ast.Dict object at 0x7da1b21a3190>, <ast.Constant object at 0x7da1b21a2320>, <ast.Call object at 0x7da1b21a02b0>]]
variable[exists] assign[=] call[name[_acl_exists], parameter[name[name], name[id], name[token], name[consul_url]]]
if <ast.UnaryOp object at 0x7da1b21a1e40> begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] constant[the acl doesn't exist, it will be created]
return[name[ret]]
variable[create] assign[=] call[call[name[__salt__]][constant[consul.acl_create]], parameter[]]
if call[name[create]][constant[res]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] constant[the acl has been created]
return[name[ret]] | keyword[def] identifier[acl_present] ( identifier[name] , identifier[id] = keyword[None] , identifier[token] = keyword[None] , identifier[type] = literal[string] , identifier[rules] = literal[string] , identifier[consul_url] = literal[string] ):
literal[string]
identifier[ret] ={
literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] . identifier[format] ( identifier[name] )}
identifier[exists] = identifier[_acl_exists] ( identifier[name] , identifier[id] , identifier[token] , identifier[consul_url] )
keyword[if] keyword[not] identifier[exists] [ literal[string] ]:
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[create] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[id] = identifier[id] , identifier[token] = identifier[token] , identifier[type] = identifier[type] , identifier[rules] = identifier[rules] , identifier[consul_url] = identifier[consul_url] )
keyword[if] identifier[create] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
keyword[elif] keyword[not] identifier[create] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[elif] identifier[exists] [ literal[string] ]:
identifier[changes] = identifier[_acl_changes] ( identifier[name] = identifier[name] , identifier[id] = identifier[exists] [ literal[string] ], identifier[token] = identifier[token] , identifier[type] = identifier[type] , identifier[rules] = identifier[rules] , identifier[consul_url] = identifier[consul_url] )
keyword[if] identifier[changes] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[update] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[id] = identifier[exists] [ literal[string] ], identifier[token] = identifier[token] , identifier[type] = identifier[type] , identifier[rules] = identifier[rules] , identifier[consul_url] = identifier[consul_url] )
keyword[if] identifier[update] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
keyword[elif] keyword[not] identifier[update] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def acl_present(name, id=None, token=None, type='client', rules='', consul_url='http://localhost:8500'):
"""
Ensure the ACL is present
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
type: client
Specifies the type of ACL token. Valid values are: client and management.
rules
Specifies rules for this ACL token.
consul_url : http://locahost:8500
consul URL to query
.. note::
For more information https://www.consul.io/api/acl.html#create-acl-token, https://www.consul.io/api/acl.html#update-acl-token
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'ACL "{0}" exists and is up to date'.format(name)}
exists = _acl_exists(name, id, token, consul_url)
if not exists['result']:
if __opts__['test']:
ret['result'] = None
ret['comment'] = "the acl doesn't exist, it will be created"
return ret # depends on [control=['if'], data=[]]
create = __salt__['consul.acl_create'](name=name, id=id, token=token, type=type, rules=rules, consul_url=consul_url)
if create['res']:
ret['result'] = True
ret['comment'] = 'the acl has been created' # depends on [control=['if'], data=[]]
elif not create['res']:
ret['result'] = False
ret['comment'] = 'failed to create the acl' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif exists['result']:
changes = _acl_changes(name=name, id=exists['id'], token=token, type=type, rules=rules, consul_url=consul_url)
if changes:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'the acl exists and will be updated'
return ret # depends on [control=['if'], data=[]]
update = __salt__['consul.acl_update'](name=name, id=exists['id'], token=token, type=type, rules=rules, consul_url=consul_url)
if update['res']:
ret['result'] = True
ret['comment'] = 'the acl has been updated' # depends on [control=['if'], data=[]]
elif not update['res']:
ret['result'] = False
ret['comment'] = 'failed to update the acl' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return ret |
def to_element(change):
    """
    Build the L{twisted.web.template} element describing a change.
    @param change: An L{txaws.route53.interface.IRRSetChange} provider.
    @return: The element representing the change's action and its
        resource record set.
    """
    rrset = change.rrset
    records = [
        tags.ResourceRecord(tags.Value(record.to_text()))
        for record in sorted(rrset.records)
    ]
    return tags.Change(
        tags.Action(change.action),
        tags.ResourceRecordSet(
            tags.Name(unicode(rrset.label)),
            tags.Type(rrset.type),
            tags.TTL(u"{}".format(rrset.ttl)),
            tags.ResourceRecords(records),
        ),
    )
constant[
@param change: An L{txaws.route53.interface.IRRSetChange} provider.
@return: The L{twisted.web.template} element which describes this
change.
]
return[call[name[tags].Change, parameter[call[name[tags].Action, parameter[name[change].action]], call[name[tags].ResourceRecordSet, parameter[call[name[tags].Name, parameter[call[name[unicode], parameter[name[change].rrset.label]]]], call[name[tags].Type, parameter[name[change].rrset.type]], call[name[tags].TTL, parameter[call[constant[{}].format, parameter[name[change].rrset.ttl]]]], call[name[tags].ResourceRecords, parameter[call[name[list], parameter[<ast.GeneratorExp object at 0x7da2041dae90>]]]]]]]]] | keyword[def] identifier[to_element] ( identifier[change] ):
literal[string]
keyword[return] identifier[tags] . identifier[Change] (
identifier[tags] . identifier[Action] (
identifier[change] . identifier[action] ,
),
identifier[tags] . identifier[ResourceRecordSet] (
identifier[tags] . identifier[Name] (
identifier[unicode] ( identifier[change] . identifier[rrset] . identifier[label] ),
),
identifier[tags] . identifier[Type] (
identifier[change] . identifier[rrset] . identifier[type] ,
),
identifier[tags] . identifier[TTL] (
literal[string] . identifier[format] ( identifier[change] . identifier[rrset] . identifier[ttl] ),
),
identifier[tags] . identifier[ResourceRecords] ( identifier[list] (
identifier[tags] . identifier[ResourceRecord] ( identifier[tags] . identifier[Value] ( identifier[rr] . identifier[to_text] ()))
keyword[for] identifier[rr]
keyword[in] identifier[sorted] ( identifier[change] . identifier[rrset] . identifier[records] )
))
),
) | def to_element(change):
"""
@param change: An L{txaws.route53.interface.IRRSetChange} provider.
@return: The L{twisted.web.template} element which describes this
change.
"""
return tags.Change(tags.Action(change.action), tags.ResourceRecordSet(tags.Name(unicode(change.rrset.label)), tags.Type(change.rrset.type), tags.TTL(u'{}'.format(change.rrset.ttl)), tags.ResourceRecords(list((tags.ResourceRecord(tags.Value(rr.to_text())) for rr in sorted(change.rrset.records)))))) |
async def _do_tp(self, pip, mount) -> top_types.Point:
        """ Execute the work of tip probe.
        This is a separate function so that it can be encapsulated in
        a context manager that ensures the state of the pipette tip tracking
        is reset properly. It should not be called outside of
        :py:meth:`locate_tip_probe_center`.
        :param pip: The pipette to use
        :type pip: opentrons.hardware_control.pipette.Pipette
        :param mount: The mount on which the pipette is attached
        :type mount: opentrons.types.Mount
        :returns: The probed center point, averaged per axis from the
            positions recorded at each hotspot.
        """
        # Clear the old offset during calibration
        pip.update_instrument_offset(top_types.Point())
        # Hotspots based on our expectation of tip length and config
        hotspots = robot_configs.calculate_tip_probe_hotspots(
            pip.current_tip_length, self._config.tip_probe)
        # Accumulators for probed coordinates, one list per gantry axis
        # (Axis.A excluded); each hotspot appends one value to its axis.
        new_pos: Dict[Axis, List[float]] = {
            ax: [] for ax in Axis.gantry_axes() if ax != Axis.A}
        # Height at which lateral travel is safe during the probe sequence.
        safe_z = self._config.tip_probe.z_clearance.crossover + \
            self._config.tip_probe.center[2]
        for hs in hotspots:
            ax_en = Axis[hs.axis.upper()]
            # Once an axis has both of its probe values, use their average
            # as the working center for that axis; otherwise fall back to
            # the configured nominal center.
            overridden_center = {
                ax: sum(vals)/len(vals)
                if len(vals) == 2
                else self._config.tip_probe.center[ax.value]
                for ax, vals in new_pos.items()
            }
            x0 = overridden_center[Axis.X] + hs.x_start_offs
            y0 = overridden_center[Axis.Y] + hs.y_start_offs
            z0 = hs.z_start_abs
            pos = await self.current_position(mount)
            # Move safely to the setup point for the probe: rise to safe_z
            # first, then translate in XY, then descend to the start height.
            await self.move_to(mount,
                               top_types.Point(pos[Axis.X],
                                               pos[Axis.Y],
                                               safe_z))
            await self.move_to(mount,
                               top_types.Point(x0, y0, safe_z))
            await self.move_to(mount,
                               top_types.Point(x0, y0, z0))
            if ax_en == Axis.Z:
                # Vertical hotspots probe along the axis belonging to this
                # mount (Z or A), not the generic Z.
                to_probe = Axis.by_mount(mount)
            else:
                to_probe = ax_en
            # Probe and retrieve the position afterwards
            async with self._motion_lock:
                self._current_position = self._deck_from_smoothie(
                    self._backend.probe(
                        to_probe.name.lower(), hs.probe_distance))
            xyz = await self.gantry_position(mount)
            # Store the updated position.
            self._log.debug(
                "tip probe: hs {}: start: ({} {} {}) status {} will add {}"
                .format(hs, x0, y0, z0, new_pos, xyz[ax_en.value]))
            new_pos[ax_en].append(xyz[ax_en.value])
            # Before moving up, move back to clear the switches: bounce in
            # the direction opposite to the probe travel.
            bounce = self._config.tip_probe.bounce_distance\
                * (-1.0 if hs.probe_distance > 0 else 1.0)
            await self.move_rel(mount,
                                top_types.Point(
                                    **{hs.axis: bounce}))
            await self.move_to(mount, xyz._replace(z=safe_z))
        # Average the collected probe values per axis to get the center.
        to_ret = top_types.Point(**{ax.name.lower(): sum(vals)/len(vals)
                                    for ax, vals in new_pos.items()})
        self._log.info("Tip probe complete with {} {} on {}. "
                       "New position: {} (default {}), averaged from {}"
                       .format(pip.name, pip.pipette_id, mount.name,
                               to_ret, self._config.tip_probe.center,
                               new_pos))
        return to_ret
literal[string]
identifier[pip] . identifier[update_instrument_offset] ( identifier[top_types] . identifier[Point] ())
identifier[hotspots] = identifier[robot_configs] . identifier[calculate_tip_probe_hotspots] (
identifier[pip] . identifier[current_tip_length] , identifier[self] . identifier[_config] . identifier[tip_probe] )
identifier[new_pos] : identifier[Dict] [ identifier[Axis] , identifier[List] [ identifier[float] ]]={
identifier[ax] :[] keyword[for] identifier[ax] keyword[in] identifier[Axis] . identifier[gantry_axes] () keyword[if] identifier[ax] != identifier[Axis] . identifier[A] }
identifier[safe_z] = identifier[self] . identifier[_config] . identifier[tip_probe] . identifier[z_clearance] . identifier[crossover] + identifier[self] . identifier[_config] . identifier[tip_probe] . identifier[center] [ literal[int] ]
keyword[for] identifier[hs] keyword[in] identifier[hotspots] :
identifier[ax_en] = identifier[Axis] [ identifier[hs] . identifier[axis] . identifier[upper] ()]
identifier[overridden_center] ={
identifier[ax] : identifier[sum] ( identifier[vals] )/ identifier[len] ( identifier[vals] )
keyword[if] identifier[len] ( identifier[vals] )== literal[int]
keyword[else] identifier[self] . identifier[_config] . identifier[tip_probe] . identifier[center] [ identifier[ax] . identifier[value] ]
keyword[for] identifier[ax] , identifier[vals] keyword[in] identifier[new_pos] . identifier[items] ()
}
identifier[x0] = identifier[overridden_center] [ identifier[Axis] . identifier[X] ]+ identifier[hs] . identifier[x_start_offs]
identifier[y0] = identifier[overridden_center] [ identifier[Axis] . identifier[Y] ]+ identifier[hs] . identifier[y_start_offs]
identifier[z0] = identifier[hs] . identifier[z_start_abs]
identifier[pos] = keyword[await] identifier[self] . identifier[current_position] ( identifier[mount] )
keyword[await] identifier[self] . identifier[move_to] ( identifier[mount] ,
identifier[top_types] . identifier[Point] ( identifier[pos] [ identifier[Axis] . identifier[X] ],
identifier[pos] [ identifier[Axis] . identifier[Y] ],
identifier[safe_z] ))
keyword[await] identifier[self] . identifier[move_to] ( identifier[mount] ,
identifier[top_types] . identifier[Point] ( identifier[x0] , identifier[y0] , identifier[safe_z] ))
keyword[await] identifier[self] . identifier[move_to] ( identifier[mount] ,
identifier[top_types] . identifier[Point] ( identifier[x0] , identifier[y0] , identifier[z0] ))
keyword[if] identifier[ax_en] == identifier[Axis] . identifier[Z] :
identifier[to_probe] = identifier[Axis] . identifier[by_mount] ( identifier[mount] )
keyword[else] :
identifier[to_probe] = identifier[ax_en]
keyword[async] keyword[with] identifier[self] . identifier[_motion_lock] :
identifier[self] . identifier[_current_position] = identifier[self] . identifier[_deck_from_smoothie] (
identifier[self] . identifier[_backend] . identifier[probe] (
identifier[to_probe] . identifier[name] . identifier[lower] (), identifier[hs] . identifier[probe_distance] ))
identifier[xyz] = keyword[await] identifier[self] . identifier[gantry_position] ( identifier[mount] )
identifier[self] . identifier[_log] . identifier[debug] (
literal[string]
. identifier[format] ( identifier[hs] , identifier[x0] , identifier[y0] , identifier[z0] , identifier[new_pos] , identifier[xyz] [ identifier[ax_en] . identifier[value] ]))
identifier[new_pos] [ identifier[ax_en] ]. identifier[append] ( identifier[xyz] [ identifier[ax_en] . identifier[value] ])
identifier[bounce] = identifier[self] . identifier[_config] . identifier[tip_probe] . identifier[bounce_distance] *(- literal[int] keyword[if] identifier[hs] . identifier[probe_distance] > literal[int] keyword[else] literal[int] )
keyword[await] identifier[self] . identifier[move_rel] ( identifier[mount] ,
identifier[top_types] . identifier[Point] (
**{ identifier[hs] . identifier[axis] : identifier[bounce] }))
keyword[await] identifier[self] . identifier[move_to] ( identifier[mount] , identifier[xyz] . identifier[_replace] ( identifier[z] = identifier[safe_z] ))
identifier[to_ret] = identifier[top_types] . identifier[Point] (**{ identifier[ax] . identifier[name] . identifier[lower] (): identifier[sum] ( identifier[vals] )/ identifier[len] ( identifier[vals] )
keyword[for] identifier[ax] , identifier[vals] keyword[in] identifier[new_pos] . identifier[items] ()})
identifier[self] . identifier[_log] . identifier[info] ( literal[string]
literal[string]
. identifier[format] ( identifier[pip] . identifier[name] , identifier[pip] . identifier[pipette_id] , identifier[mount] . identifier[name] ,
identifier[to_ret] , identifier[self] . identifier[_config] . identifier[tip_probe] . identifier[center] ,
identifier[new_pos] ))
keyword[return] identifier[to_ret] | async def _do_tp(self, pip, mount) -> top_types.Point:
""" Execute the work of tip probe.
This is a separate function so that it can be encapsulated in
a context manager that ensures the state of the pipette tip tracking
is reset properly. It should not be called outside of
:py:meth:`locate_tip_probe_center`.
:param pip: The pipette to use
:type pip: opentrons.hardware_control.pipette.Pipette
:param mount: The mount on which the pipette is attached
:type mount: opentrons.types.Mount
"""
# Clear the old offset during calibration
pip.update_instrument_offset(top_types.Point())
# Hotspots based on our expectation of tip length and config
hotspots = robot_configs.calculate_tip_probe_hotspots(pip.current_tip_length, self._config.tip_probe)
new_pos: Dict[Axis, List[float]] = {ax: [] for ax in Axis.gantry_axes() if ax != Axis.A}
safe_z = self._config.tip_probe.z_clearance.crossover + self._config.tip_probe.center[2]
for hs in hotspots:
ax_en = Axis[hs.axis.upper()]
overridden_center = {ax: sum(vals) / len(vals) if len(vals) == 2 else self._config.tip_probe.center[ax.value] for (ax, vals) in new_pos.items()}
x0 = overridden_center[Axis.X] + hs.x_start_offs
y0 = overridden_center[Axis.Y] + hs.y_start_offs
z0 = hs.z_start_abs
pos = await self.current_position(mount)
# Move safely to the setup point for the probe
await self.move_to(mount, top_types.Point(pos[Axis.X], pos[Axis.Y], safe_z))
await self.move_to(mount, top_types.Point(x0, y0, safe_z))
await self.move_to(mount, top_types.Point(x0, y0, z0))
if ax_en == Axis.Z:
to_probe = Axis.by_mount(mount) # depends on [control=['if'], data=[]]
else:
to_probe = ax_en
# Probe and retrieve the position afterwards
async with self._motion_lock:
self._current_position = self._deck_from_smoothie(self._backend.probe(to_probe.name.lower(), hs.probe_distance))
xyz = await self.gantry_position(mount)
# Store the upated position.
self._log.debug('tip probe: hs {}: start: ({} {} {}) status {} will add {}'.format(hs, x0, y0, z0, new_pos, xyz[ax_en.value]))
new_pos[ax_en].append(xyz[ax_en.value])
# Before moving up, move back to clear the switches
bounce = self._config.tip_probe.bounce_distance * (-1.0 if hs.probe_distance > 0 else 1.0)
await self.move_rel(mount, top_types.Point(**{hs.axis: bounce}))
await self.move_to(mount, xyz._replace(z=safe_z)) # depends on [control=['for'], data=['hs']]
to_ret = top_types.Point(**{ax.name.lower(): sum(vals) / len(vals) for (ax, vals) in new_pos.items()})
self._log.info('Tip probe complete with {} {} on {}. New position: {} (default {}), averaged from {}'.format(pip.name, pip.pipette_id, mount.name, to_ret, self._config.tip_probe.center, new_pos))
return to_ret |
def get_sdk_vc_script(self, host_arch, target_arch):
    """ Return the script to initialize the VC compiler installed by SDK
    """
    if host_arch == 'amd64' and target_arch == 'x86':
        # Compiling 32 bits on a 64 bit machine needs no cross tools.
        host_arch = target_arch
    # Native builds key on the bare target arch; cross builds key on the
    # combined "<host>_<target>" string.
    arch_string = (target_arch if host_arch == target_arch
                   else '%s_%s'%(host_arch,target_arch))
    debug("sdk.py: get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s"%(arch_string,
                                                                                    host_arch,
                                                                                    target_arch))
    script = self.vc_setup_scripts.get(arch_string, None)
    debug("sdk.py: get_sdk_vc_script():file:%s"%script)
    return script
constant[ Return the script to initialize the VC compiler installed by SDK
]
if <ast.BoolOp object at 0x7da18f00fd00> begin[:]
variable[host_arch] assign[=] name[target_arch]
variable[arch_string] assign[=] name[target_arch]
if compare[name[host_arch] not_equal[!=] name[target_arch]] begin[:]
variable[arch_string] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00ddb0>, <ast.Name object at 0x7da18f00c1f0>]]]
call[name[debug], parameter[binary_operation[constant[sdk.py: get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00ccd0>, <ast.Name object at 0x7da18f00ead0>, <ast.Name object at 0x7da18f00ed40>]]]]]
variable[file] assign[=] call[name[self].vc_setup_scripts.get, parameter[name[arch_string], constant[None]]]
call[name[debug], parameter[binary_operation[constant[sdk.py: get_sdk_vc_script():file:%s] <ast.Mod object at 0x7da2590d6920> name[file]]]]
return[name[file]] | keyword[def] identifier[get_sdk_vc_script] ( identifier[self] , identifier[host_arch] , identifier[target_arch] ):
literal[string]
keyword[if] ( identifier[host_arch] == literal[string] keyword[and] identifier[target_arch] == literal[string] ):
identifier[host_arch] = identifier[target_arch]
identifier[arch_string] = identifier[target_arch]
keyword[if] ( identifier[host_arch] != identifier[target_arch] ):
identifier[arch_string] = literal[string] %( identifier[host_arch] , identifier[target_arch] )
identifier[debug] ( literal[string] %( identifier[arch_string] ,
identifier[host_arch] ,
identifier[target_arch] ))
identifier[file] = identifier[self] . identifier[vc_setup_scripts] . identifier[get] ( identifier[arch_string] , keyword[None] )
identifier[debug] ( literal[string] % identifier[file] )
keyword[return] identifier[file] | def get_sdk_vc_script(self, host_arch, target_arch):
""" Return the script to initialize the VC compiler installed by SDK
"""
if host_arch == 'amd64' and target_arch == 'x86':
# No cross tools needed compiling 32 bits on 64 bit machine
host_arch = target_arch # depends on [control=['if'], data=[]]
arch_string = target_arch
if host_arch != target_arch:
arch_string = '%s_%s' % (host_arch, target_arch) # depends on [control=['if'], data=['host_arch', 'target_arch']]
debug('sdk.py: get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s' % (arch_string, host_arch, target_arch))
file = self.vc_setup_scripts.get(arch_string, None)
debug('sdk.py: get_sdk_vc_script():file:%s' % file)
return file |
def kill(container, rm=True):
    '''
    Kill a container
    Args:
    * container: Container name or ID
    * rm=True: Remove the container or not
    Raises:
    * Exception: when no container matches the given name or ID
    '''
    # Keep the caller-supplied name/ID: the lookup below rebinds
    # ``container`` and a failed lookup would otherwise make the error
    # message print the falsy lookup result instead of what was asked for.
    requested = container
    container = get_container(container)
    if not container:
        raise Exception('No such container: %s' % requested)
    unbind_all(container['ip'])  # legacy, only here for backwards compatibility
    sudo('docker kill %s' % container['name'])
    if rm:
        sudo('docker rm %s' % container['name'])
constant[
Kill a container
Args:
* container: Container name or ID
* rm=True: Remove the container or not
]
variable[container] assign[=] call[name[get_container], parameter[name[container]]]
if <ast.UnaryOp object at 0x7da1b0ab9c30> begin[:]
<ast.Raise object at 0x7da1b0ab86d0>
call[name[unbind_all], parameter[call[name[container]][constant[ip]]]]
call[name[sudo], parameter[binary_operation[constant[docker kill %s] <ast.Mod object at 0x7da2590d6920> call[name[container]][constant[name]]]]]
if name[rm] begin[:]
call[name[sudo], parameter[binary_operation[constant[docker rm %s] <ast.Mod object at 0x7da2590d6920> call[name[container]][constant[name]]]]] | keyword[def] identifier[kill] ( identifier[container] , identifier[rm] = keyword[True] ):
literal[string]
identifier[container] = identifier[get_container] ( identifier[container] )
keyword[if] keyword[not] identifier[container] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[container] )
identifier[unbind_all] ( identifier[container] [ literal[string] ])
identifier[sudo] ( literal[string] % identifier[container] [ literal[string] ])
keyword[if] identifier[rm] :
identifier[sudo] ( literal[string] % identifier[container] [ literal[string] ]) | def kill(container, rm=True):
"""
Kill a container
Args:
* container: Container name or ID
* rm=True: Remove the container or not
"""
container = get_container(container)
if not container:
raise Exception('No such container: %s' % container) # depends on [control=['if'], data=[]]
unbind_all(container['ip']) # legacy, only here for backwards compatibility
sudo('docker kill %s' % container['name'])
if rm:
sudo('docker rm %s' % container['name']) # depends on [control=['if'], data=[]] |
def update_confirmation_comment(self, confirmation_comment_id, confirmation_comment_dict):
    """
    Updates a confirmation comment
    :param confirmation_comment_id: the confirmation comment id
    :param confirmation_comment_dict: dict
    :return: dict
    """
    # Delegate to the generic PUT helper against the confirmation
    # comments resource.
    return self._create_put_request(
        resource=CONFIRMATION_COMMENTS,
        send_data=confirmation_comment_dict,
        billomat_id=confirmation_comment_id,
    )
constant[
Updates a confirmation comment
:param confirmation_comment_id: the confirmation comment id
:param confirmation_comment_dict: dict
:return: dict
]
return[call[name[self]._create_put_request, parameter[]]] | keyword[def] identifier[update_confirmation_comment] ( identifier[self] , identifier[confirmation_comment_id] , identifier[confirmation_comment_dict] ):
literal[string]
keyword[return] identifier[self] . identifier[_create_put_request] (
identifier[resource] = identifier[CONFIRMATION_COMMENTS] ,
identifier[billomat_id] = identifier[confirmation_comment_id] ,
identifier[send_data] = identifier[confirmation_comment_dict]
) | def update_confirmation_comment(self, confirmation_comment_id, confirmation_comment_dict):
"""
Updates a confirmation comment
:param confirmation_comment_id: the confirmation comment id
:param confirmation_comment_dict: dict
:return: dict
"""
return self._create_put_request(resource=CONFIRMATION_COMMENTS, billomat_id=confirmation_comment_id, send_data=confirmation_comment_dict) |
def _check_required_fields(self):
    """
    Make sure, that internal dictionary contains all fields, which are
    required by the webform.
    """
    post = self._POST
    assert post["P0501010__a"] != "", "ISBN is required!"
    # export script accepts only czech ISBNs
    for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
        assert PostData._czech_isbn_check(post[isbn_field_name]), \
            "Only czech ISBN is accepted!"
    # Plain non-empty checks, evaluated in the order the webform expects:
    # hidden ISBN, title, place/date/order of publication, the two
    # "Zpracovatel záznamu" fields and the binding/form field.
    required_fields = (
        ("P1601ISB__a", "Hidden ISBN field is required!"),
        ("P07012001_a", "Nazev is required!"),
        ("P0901210__a", "Místo vydání is required!"),
        ("P0903210__d", "Datum vydání is required!"),
        ("P0801205__a", "Pořadí vydání is required!"),
        ("P1501IST1_a", "Zpracovatel is required! (H)"),
        ("P1502IST1_b", "Zpracovatel is required! (V)"),
        ("P0502010__b", "Vazba/forma is required!"),
    )
    for field_name, message in required_fields:
        assert post[field_name] != "", message
    # Formát (pouze pro epublikace)
    if post["P0502010__b"] == FormatEnum.ONLINE:
        assert post["P0503010__x"] != "", "Format is required!"
    assert post["P0902210__c"] != "", "Nakladatel is required!"
    def to_unicode(inp):
        # Decode byte strings so the length check counts characters,
        # not UTF-8 byte-combinations.
        try:
            return unicode(inp)
        except UnicodeDecodeError:
            return unicode(inp, "utf-8")
    annotation_length = (len(to_unicode(post["P1001330__a"]))
                         - len(to_unicode(ANNOTATION_PREFIX)))
    assert annotation_length <= 500, "Annotation is too long (> 500)."
constant[
Make sure, that internal dictionary contains all fields, which are
required by the webform.
]
assert[compare[call[name[self]._POST][constant[P0501010__a]] not_equal[!=] constant[]]]
for taget[name[isbn_field_name]] in starred[tuple[[<ast.Constant object at 0x7da1b13575b0>, <ast.Constant object at 0x7da1b1356cb0>]]] begin[:]
variable[check] assign[=] call[name[PostData]._czech_isbn_check, parameter[call[name[self]._POST][name[isbn_field_name]]]]
assert[name[check]]
assert[compare[call[name[self]._POST][constant[P1601ISB__a]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P07012001_a]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P0901210__a]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P0903210__d]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P0801205__a]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P1501IST1_a]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P1502IST1_b]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P0502010__b]] not_equal[!=] constant[]]]
if compare[call[name[self]._POST][constant[P0502010__b]] equal[==] name[FormatEnum].ONLINE] begin[:]
assert[compare[call[name[self]._POST][constant[P0503010__x]] not_equal[!=] constant[]]]
assert[compare[call[name[self]._POST][constant[P0902210__c]] not_equal[!=] constant[]]]
def function[to_unicode, parameter[inp]]:
<ast.Try object at 0x7da1b133d270>
variable[annotation_length] assign[=] call[name[len], parameter[call[name[to_unicode], parameter[call[name[self]._POST][constant[P1001330__a]]]]]]
<ast.AugAssign object at 0x7da1b133c1c0>
assert[compare[name[annotation_length] less_or_equal[<=] constant[500]]] | keyword[def] identifier[_check_required_fields] ( identifier[self] ):
literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[for] identifier[isbn_field_name] keyword[in] ( literal[string] , literal[string] ):
identifier[check] = identifier[PostData] . identifier[_czech_isbn_check] ( identifier[self] . identifier[_POST] [ identifier[isbn_field_name] ])
keyword[assert] identifier[check] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[if] identifier[self] . identifier[_POST] [ literal[string] ]== identifier[FormatEnum] . identifier[ONLINE] :
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[assert] identifier[self] . identifier[_POST] [ literal[string] ]!= literal[string] , literal[string]
keyword[def] identifier[to_unicode] ( identifier[inp] ):
keyword[try] :
keyword[return] identifier[unicode] ( identifier[inp] )
keyword[except] identifier[UnicodeDecodeError] :
keyword[return] identifier[unicode] ( identifier[inp] , literal[string] )
identifier[annotation_length] = identifier[len] ( identifier[to_unicode] ( identifier[self] . identifier[_POST] [ literal[string] ]))
identifier[annotation_length] -= identifier[len] ( identifier[to_unicode] ( identifier[ANNOTATION_PREFIX] ))
keyword[assert] identifier[annotation_length] <= literal[int] , literal[string] | def _check_required_fields(self):
"""
Make sure, that internal dictionary contains all fields, which are
required by the webform.
"""
assert self._POST['P0501010__a'] != '', 'ISBN is required!'
# export script accepts only czech ISBNs
for isbn_field_name in ('P0501010__a', 'P1601ISB__a'):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, 'Only czech ISBN is accepted!' # depends on [control=['for'], data=['isbn_field_name']]
assert self._POST['P1601ISB__a'] != '', 'Hidden ISBN field is required!'
assert self._POST['P07012001_a'] != '', 'Nazev is required!'
assert self._POST['P0901210__a'] != '', 'Místo vydání is required!'
assert self._POST['P0903210__d'] != '', 'Datum vydání is required!'
assert self._POST['P0801205__a'] != '', 'Pořadí vydání is required!'
# Zpracovatel záznamu
assert self._POST['P1501IST1_a'] != '', 'Zpracovatel is required! (H)'
assert self._POST['P1502IST1_b'] != '', 'Zpracovatel is required! (V)'
# vazba/forma
assert self._POST['P0502010__b'] != '', 'Vazba/forma is required!'
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST['P0502010__b'] == FormatEnum.ONLINE:
assert self._POST['P0503010__x'] != '', 'Format is required!' # depends on [control=['if'], data=[]]
assert self._POST['P0902210__c'] != '', 'Nakladatel is required!'
def to_unicode(inp):
try:
return unicode(inp) # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
return unicode(inp, 'utf-8') # depends on [control=['except'], data=[]]
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST['P1001330__a']))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, 'Annotation is too long (> 500).' |
def print_dot(docgraph):
    """
    converts a document graph into a dot file and returns it as a string.
    If this function call is prepended by %dotstr,
    it will display the given document graph as a dot/graphviz graph
    in the currently running IPython notebook session.
    To use this function, the gvmagic IPython notebook extension
    needs to be installed once::
    %install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
    In order to visualize dot graphs in your currently running
    IPython notebook, run this command once::
    %load_ext gvmagic
    """
    # Strip attributes pydot cannot serialize, then render via networkx.
    cleaned_graph = preprocess_for_pydot(docgraph)
    pydot_graph = nx.drawing.nx_pydot.to_pydot(cleaned_graph)
    return pydot_graph.to_string()
constant[
converts a document graph into a dot file and returns it as a string.
If this function call is prepended by %dotstr,
it will display the given document graph as a dot/graphviz graph
in the currently running IPython notebook session.
To use this function, the gvmagic IPython notebook extension
needs to be installed once::
%install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
In order to visualize dot graphs in your currently running
IPython notebook, run this command once::
%load_ext gvmagic
]
variable[stripped_graph] assign[=] call[name[preprocess_for_pydot], parameter[name[docgraph]]]
return[call[call[name[nx].drawing.nx_pydot.to_pydot, parameter[name[stripped_graph]]].to_string, parameter[]]] | keyword[def] identifier[print_dot] ( identifier[docgraph] ):
literal[string]
identifier[stripped_graph] = identifier[preprocess_for_pydot] ( identifier[docgraph] )
keyword[return] identifier[nx] . identifier[drawing] . identifier[nx_pydot] . identifier[to_pydot] ( identifier[stripped_graph] ). identifier[to_string] () | def print_dot(docgraph):
"""
converts a document graph into a dot file and returns it as a string.
If this function call is prepended by %dotstr,
it will display the given document graph as a dot/graphviz graph
in the currently running IPython notebook session.
To use this function, the gvmagic IPython notebook extension
needs to be installed once::
%install_ext https://raw.github.com/cjdrake/ipython-magic/master/gvmagic.py
In order to visualize dot graphs in your currently running
IPython notebook, run this command once::
%load_ext gvmagic
"""
stripped_graph = preprocess_for_pydot(docgraph)
return nx.drawing.nx_pydot.to_pydot(stripped_graph).to_string() |
def logout(self):
    """ Logout from the remote account
    :return: None
    """
    endpoint = self.address + "/logout"
    auth_header = self.__check_authentication()
    response = requests.get(endpoint, headers=auth_header)
    if response.status_code == 200:
        return
    # Surface the server-side error message alongside the HTTP code.
    raise ValueError("Code {}. {}".format(response.status_code,
                                          response.json().get("error")))
constant[ Logout from the remote account
:return: None
]
variable[url] assign[=] binary_operation[name[self].address + constant[/logout]]
variable[header] assign[=] call[name[self].__check_authentication, parameter[]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[response].status_code not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da1b1b0ebf0> | keyword[def] identifier[logout] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[address] + literal[string]
identifier[header] = identifier[self] . identifier[__check_authentication] ()
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[header] )
keyword[if] identifier[response] . identifier[status_code] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[response] . identifier[status_code] , identifier[response] . identifier[json] (). identifier[get] ( literal[string] ))) | def logout(self):
""" Logout from the remote account
:return: None
"""
url = self.address + '/logout'
header = self.__check_authentication()
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError('Code {}. {}'.format(response.status_code, response.json().get('error'))) # depends on [control=['if'], data=[]] |
def convert_default(self, field, **params):
    """Return raw field."""
    # First mapping entry whose class matches wins; otherwise fall back
    # to a Raw field.
    matches = (ma_field
               for klass, ma_field in self.TYPE_MAPPING
               if isinstance(field, klass))
    factory = next(matches, fields.Raw)
    return factory(**params)
constant[Return raw field.]
for taget[tuple[[<ast.Name object at 0x7da204961630>, <ast.Name object at 0x7da2049638b0>]]] in starred[name[self].TYPE_MAPPING] begin[:]
if call[name[isinstance], parameter[name[field], name[klass]]] begin[:]
return[call[name[ma_field], parameter[]]]
return[call[name[fields].Raw, parameter[]]] | keyword[def] identifier[convert_default] ( identifier[self] , identifier[field] ,** identifier[params] ):
literal[string]
keyword[for] identifier[klass] , identifier[ma_field] keyword[in] identifier[self] . identifier[TYPE_MAPPING] :
keyword[if] identifier[isinstance] ( identifier[field] , identifier[klass] ):
keyword[return] identifier[ma_field] (** identifier[params] )
keyword[return] identifier[fields] . identifier[Raw] (** identifier[params] ) | def convert_default(self, field, **params):
"""Return raw field."""
for (klass, ma_field) in self.TYPE_MAPPING:
if isinstance(field, klass):
return ma_field(**params) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return fields.Raw(**params) |
def slinkChunk(key, lines):
    """
    Parse Super Link (SLINK) Chunk Method
    """
    KEYWORDS = ('SLINK',
                'NODE',
                'PIPE')
    # Field names in card order; index 0 of the split line is the card
    # keyword itself, so values start at index 1.
    NODE_KEYS = ('nodeNumber', 'groundSurfaceElev', 'invertElev',
                 'manholeSA', 'inletCode', 'cellI', 'cellJ',
                 'weirSideLength', 'orificeDiameter')
    PIPE_KEYS = ('pipeNumber', 'xSecType', 'diameterOrHeight', 'width',
                 'slope', 'roughness', 'length', 'conductance',
                 'drainSpacing')
    result = {'slinkNumber': None,
              'numPipes': None,
              'nodes': [],
              'pipes': []}
    # Parse chunks associated with each key
    for card, chunkList in iteritems(pt.chunk(KEYWORDS, lines)):
        # Parse each chunk in the chunk list
        for chunk in chunkList:
            schunk = chunk[0].strip().split()
            if card == 'SLINK':
                # SLINK handler
                result['slinkNumber'] = schunk[1]
                result['numPipes'] = schunk[2]
            elif card == 'NODE':
                # NODE handler
                result['nodes'].append(
                    {name: schunk[i] for i, name in enumerate(NODE_KEYS, 1)})
            elif card == 'PIPE':
                # PIPE handler
                result['pipes'].append(
                    {name: schunk[i] for i, name in enumerate(PIPE_KEYS, 1)})
    return result
constant[
Parse Super Link (SLINK) Chunk Method
]
variable[KEYWORDS] assign[=] tuple[[<ast.Constant object at 0x7da20c6a8640>, <ast.Constant object at 0x7da20c6aae30>, <ast.Constant object at 0x7da20c6abf70>]]
variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a86a0>, <ast.Constant object at 0x7da20c6abfa0>, <ast.Constant object at 0x7da20c6aa590>, <ast.Constant object at 0x7da20c6aaad0>], [<ast.Constant object at 0x7da20c6a9c30>, <ast.Constant object at 0x7da20c6aba90>, <ast.List object at 0x7da20c6ab850>, <ast.List object at 0x7da20c6a82b0>]]
variable[chunks] assign[=] call[name[pt].chunk, parameter[name[KEYWORDS], name[lines]]]
for taget[tuple[[<ast.Name object at 0x7da20c6a9f30>, <ast.Name object at 0x7da20c6a9210>]]] in starred[call[name[iteritems], parameter[name[chunks]]]] begin[:]
for taget[name[chunk]] in starred[name[chunkList]] begin[:]
variable[schunk] assign[=] call[call[call[name[chunk]][constant[0]].strip, parameter[]].split, parameter[]]
if compare[name[card] equal[==] constant[SLINK]] begin[:]
call[name[result]][constant[slinkNumber]] assign[=] call[name[schunk]][constant[1]]
call[name[result]][constant[numPipes]] assign[=] call[name[schunk]][constant[2]]
return[name[result]] | keyword[def] identifier[slinkChunk] ( identifier[key] , identifier[lines] ):
literal[string]
identifier[KEYWORDS] =( literal[string] ,
literal[string] ,
literal[string] )
identifier[result] ={ literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] :[],
literal[string] :[]}
identifier[chunks] = identifier[pt] . identifier[chunk] ( identifier[KEYWORDS] , identifier[lines] )
keyword[for] identifier[card] , identifier[chunkList] keyword[in] identifier[iteritems] ( identifier[chunks] ):
keyword[for] identifier[chunk] keyword[in] identifier[chunkList] :
identifier[schunk] = identifier[chunk] [ literal[int] ]. identifier[strip] (). identifier[split] ()
keyword[if] identifier[card] == literal[string] :
identifier[result] [ literal[string] ]= identifier[schunk] [ literal[int] ]
identifier[result] [ literal[string] ]= identifier[schunk] [ literal[int] ]
keyword[elif] identifier[card] == literal[string] :
identifier[node] ={ literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ]}
identifier[result] [ literal[string] ]. identifier[append] ( identifier[node] )
keyword[elif] identifier[card] == literal[string] :
identifier[pipe] ={ literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ],
literal[string] : identifier[schunk] [ literal[int] ]}
identifier[result] [ literal[string] ]. identifier[append] ( identifier[pipe] )
keyword[return] identifier[result] | def slinkChunk(key, lines):
"""
Parse Super Link (SLINK) Chunk Method
"""
KEYWORDS = ('SLINK', 'NODE', 'PIPE')
result = {'slinkNumber': None, 'numPipes': None, 'nodes': [], 'pipes': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for (card, chunkList) in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if card == 'SLINK':
# SLINK handler
result['slinkNumber'] = schunk[1]
result['numPipes'] = schunk[2] # depends on [control=['if'], data=[]]
elif card == 'NODE':
# NODE handler
node = {'nodeNumber': schunk[1], 'groundSurfaceElev': schunk[2], 'invertElev': schunk[3], 'manholeSA': schunk[4], 'inletCode': schunk[5], 'cellI': schunk[6], 'cellJ': schunk[7], 'weirSideLength': schunk[8], 'orificeDiameter': schunk[9]}
result['nodes'].append(node) # depends on [control=['if'], data=[]]
elif card == 'PIPE':
# PIPE handler
pipe = {'pipeNumber': schunk[1], 'xSecType': schunk[2], 'diameterOrHeight': schunk[3], 'width': schunk[4], 'slope': schunk[5], 'roughness': schunk[6], 'length': schunk[7], 'conductance': schunk[8], 'drainSpacing': schunk[9]}
result['pipes'].append(pipe) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] # depends on [control=['for'], data=[]]
return result |
def _init_map(self, record_types=None, **kwargs):
    """Initialize the form's underlying map.

    Delegates base-map setup to the parent form, then records the
    assigned objective bank and the objective-specific defaults.
    Requires ``kwargs['objective_bank_id']``.
    """
    osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
    objective_defaults = {
        'assignedObjectiveBankIds': [str(kwargs['objective_bank_id'])],
        'cognitiveProcessId': self._cognitive_process_default,
        'assessmentId': self._assessment_default,
        'knowledgeCategoryId': self._knowledge_category_default,
    }
    self._my_map.update(objective_defaults)
constant[Initialize form map]
call[name[osid_objects].OsidObjectForm._init_map, parameter[name[self]]]
call[name[self]._my_map][constant[assignedObjectiveBankIds]] assign[=] list[[<ast.Call object at 0x7da18dc05210>]]
call[name[self]._my_map][constant[cognitiveProcessId]] assign[=] name[self]._cognitive_process_default
call[name[self]._my_map][constant[assessmentId]] assign[=] name[self]._assessment_default
call[name[self]._my_map][constant[knowledgeCategoryId]] assign[=] name[self]._knowledge_category_default | keyword[def] identifier[_init_map] ( identifier[self] , identifier[record_types] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[osid_objects] . identifier[OsidObjectForm] . identifier[_init_map] ( identifier[self] , identifier[record_types] = identifier[record_types] )
identifier[self] . identifier[_my_map] [ literal[string] ]=[ identifier[str] ( identifier[kwargs] [ literal[string] ])]
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[self] . identifier[_cognitive_process_default]
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[self] . identifier[_assessment_default]
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[self] . identifier[_knowledge_category_default] | def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['assignedObjectiveBankIds'] = [str(kwargs['objective_bank_id'])]
self._my_map['cognitiveProcessId'] = self._cognitive_process_default
self._my_map['assessmentId'] = self._assessment_default
self._my_map['knowledgeCategoryId'] = self._knowledge_category_default |
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    # Scalar attributes are copied through unchanged when present and
    # non-None; getattr(..., None) matches the hasattr-and-not-None test.
    for attr in ('classifier_id', 'name', 'owner', 'status',
                 'core_ml_enabled', 'explanation'):
        value = getattr(self, attr, None)
        if value is not None:
            _dict[attr] = value
    # Datetime and nested-model attributes need per-field conversion;
    # insertion order mirrors the serialized layout of the original.
    if getattr(self, 'created', None) is not None:
        _dict['created'] = datetime_to_string(self.created)
    if getattr(self, 'classes', None) is not None:
        _dict['classes'] = [clazz._to_dict() for clazz in self.classes]
    if getattr(self, 'retrained', None) is not None:
        _dict['retrained'] = datetime_to_string(self.retrained)
    if getattr(self, 'updated', None) is not None:
        _dict['updated'] = datetime_to_string(self.updated)
    return _dict
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da18dc054e0> begin[:]
call[name[_dict]][constant[classifier_id]] assign[=] name[self].classifier_id
if <ast.BoolOp object at 0x7da18dc045e0> begin[:]
call[name[_dict]][constant[name]] assign[=] name[self].name
if <ast.BoolOp object at 0x7da18dc07190> begin[:]
call[name[_dict]][constant[owner]] assign[=] name[self].owner
if <ast.BoolOp object at 0x7da18dc07940> begin[:]
call[name[_dict]][constant[status]] assign[=] name[self].status
if <ast.BoolOp object at 0x7da18dc07e80> begin[:]
call[name[_dict]][constant[core_ml_enabled]] assign[=] name[self].core_ml_enabled
if <ast.BoolOp object at 0x7da18dc056f0> begin[:]
call[name[_dict]][constant[explanation]] assign[=] name[self].explanation
if <ast.BoolOp object at 0x7da18dc04dc0> begin[:]
call[name[_dict]][constant[created]] assign[=] call[name[datetime_to_string], parameter[name[self].created]]
if <ast.BoolOp object at 0x7da1b1b44be0> begin[:]
call[name[_dict]][constant[classes]] assign[=] <ast.ListComp object at 0x7da1b1b459c0>
if <ast.BoolOp object at 0x7da1b1b45c00> begin[:]
call[name[_dict]][constant[retrained]] assign[=] call[name[datetime_to_string], parameter[name[self].retrained]]
if <ast.BoolOp object at 0x7da1b1b44400> begin[:]
call[name[_dict]][constant[updated]] assign[=] call[name[datetime_to_string], parameter[name[self].updated]]
return[name[_dict]] | keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[classifier_id] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[classifier_id]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[name]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[owner] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[owner]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[status] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[status]
keyword[if] identifier[hasattr] ( identifier[self] ,
literal[string] ) keyword[and] identifier[self] . identifier[core_ml_enabled] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[core_ml_enabled]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[explanation] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[explanation]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[created] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[datetime_to_string] ( identifier[self] . identifier[created] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[classes] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[classes] ]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[retrained] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[datetime_to_string] ( identifier[self] . identifier[retrained] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[updated] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[datetime_to_string] ( identifier[self] . identifier[updated] )
keyword[return] identifier[_dict] | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'classifier_id') and self.classifier_id is not None:
_dict['classifier_id'] = self.classifier_id # depends on [control=['if'], data=[]]
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name # depends on [control=['if'], data=[]]
if hasattr(self, 'owner') and self.owner is not None:
_dict['owner'] = self.owner # depends on [control=['if'], data=[]]
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status # depends on [control=['if'], data=[]]
if hasattr(self, 'core_ml_enabled') and self.core_ml_enabled is not None:
_dict['core_ml_enabled'] = self.core_ml_enabled # depends on [control=['if'], data=[]]
if hasattr(self, 'explanation') and self.explanation is not None:
_dict['explanation'] = self.explanation # depends on [control=['if'], data=[]]
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created) # depends on [control=['if'], data=[]]
if hasattr(self, 'classes') and self.classes is not None:
_dict['classes'] = [x._to_dict() for x in self.classes] # depends on [control=['if'], data=[]]
if hasattr(self, 'retrained') and self.retrained is not None:
_dict['retrained'] = datetime_to_string(self.retrained) # depends on [control=['if'], data=[]]
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated) # depends on [control=['if'], data=[]]
return _dict |
def get_interfaces_ip(self):
    """Return the configured IP addresses."""
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    # Map Junos address families to napalm's names; unknown families
    # (and entries with no interface name) are skipped below.
    family_map = {
        "inet": "ipv4",
        "inet6": "ipv6"
        # can add more mappings
    }
    fallback_prefixlen = {"inet": 32, "inet6": 128}
    for ip_network, raw_details in interface_table.items():
        ip_address = ip_network.split("/")[0]
        address = napalm.base.helpers.convert(
            napalm.base.helpers.ip, ip_address, ip_address
        )
        try:
            details = dict(raw_details)
            family_raw = details.get("family")
            interface = py23_compat.text_type(details.get("interface"))
        except ValueError:
            continue
        # Missing prefix length falls back to the family maximum.
        prefix = napalm.base.helpers.convert(
            int, ip_network.split("/")[-1], fallback_prefixlen.get(family_raw)
        )
        family = family_map.get(family_raw)
        if not family or not interface:
            continue
        # Build interface -> family -> address nesting lazily.
        entry = interfaces_ip.setdefault(interface, {}) \
                             .setdefault(family, {}) \
                             .setdefault(address, {})
        entry["prefix_length"] = prefix
    return interfaces_ip
constant[Return the configured IP addresses.]
variable[interfaces_ip] assign[=] dictionary[[], []]
variable[interface_table] assign[=] call[name[junos_views].junos_ip_interfaces_table, parameter[name[self].device]]
call[name[interface_table].get, parameter[]]
variable[interface_table_items] assign[=] call[name[interface_table].items, parameter[]]
variable[_FAMILY_VMAP_] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c1bd00>, <ast.Constant object at 0x7da1b1c198d0>], [<ast.Constant object at 0x7da1b1c1b5e0>, <ast.Constant object at 0x7da1b1c183a0>]]
variable[_FAMILY_MAX_PREFIXLEN] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c1bd90>, <ast.Constant object at 0x7da1b1c1b400>], [<ast.Constant object at 0x7da1b1c194e0>, <ast.Constant object at 0x7da1b1c19930>]]
for taget[name[interface_details]] in starred[name[interface_table_items]] begin[:]
variable[ip_network] assign[=] call[name[interface_details]][constant[0]]
variable[ip_address] assign[=] call[call[name[ip_network].split, parameter[constant[/]]]][constant[0]]
variable[address] assign[=] call[name[napalm].base.helpers.convert, parameter[name[napalm].base.helpers.ip, name[ip_address], name[ip_address]]]
<ast.Try object at 0x7da1b1c18040>
variable[prefix] assign[=] call[name[napalm].base.helpers.convert, parameter[name[int], call[call[name[ip_network].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b1c18490>], call[name[_FAMILY_MAX_PREFIXLEN].get, parameter[name[family_raw]]]]]
variable[family] assign[=] call[name[_FAMILY_VMAP_].get, parameter[name[family_raw]]]
if <ast.BoolOp object at 0x7da1b1c1aec0> begin[:]
continue
if compare[name[interface] <ast.NotIn object at 0x7da2590d7190> call[name[interfaces_ip].keys, parameter[]]] begin[:]
call[name[interfaces_ip]][name[interface]] assign[=] dictionary[[], []]
if compare[name[family] <ast.NotIn object at 0x7da2590d7190> call[call[name[interfaces_ip]][name[interface]].keys, parameter[]]] begin[:]
call[call[name[interfaces_ip]][name[interface]]][name[family]] assign[=] dictionary[[], []]
if compare[name[address] <ast.NotIn object at 0x7da2590d7190> call[call[call[name[interfaces_ip]][name[interface]]][name[family]].keys, parameter[]]] begin[:]
call[call[call[name[interfaces_ip]][name[interface]]][name[family]]][name[address]] assign[=] dictionary[[], []]
call[call[call[call[name[interfaces_ip]][name[interface]]][name[family]]][name[address]]][constant[prefix_length]] assign[=] name[prefix]
return[name[interfaces_ip]] | keyword[def] identifier[get_interfaces_ip] ( identifier[self] ):
literal[string]
identifier[interfaces_ip] ={}
identifier[interface_table] = identifier[junos_views] . identifier[junos_ip_interfaces_table] ( identifier[self] . identifier[device] )
identifier[interface_table] . identifier[get] ()
identifier[interface_table_items] = identifier[interface_table] . identifier[items] ()
identifier[_FAMILY_VMAP_] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[_FAMILY_MAX_PREFIXLEN] ={ literal[string] : literal[int] , literal[string] : literal[int] }
keyword[for] identifier[interface_details] keyword[in] identifier[interface_table_items] :
identifier[ip_network] = identifier[interface_details] [ literal[int] ]
identifier[ip_address] = identifier[ip_network] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[address] = identifier[napalm] . identifier[base] . identifier[helpers] . identifier[convert] (
identifier[napalm] . identifier[base] . identifier[helpers] . identifier[ip] , identifier[ip_address] , identifier[ip_address]
)
keyword[try] :
identifier[interface_details_dict] = identifier[dict] ( identifier[interface_details] [ literal[int] ])
identifier[family_raw] = identifier[interface_details_dict] . identifier[get] ( literal[string] )
identifier[interface] = identifier[py23_compat] . identifier[text_type] (
identifier[interface_details_dict] . identifier[get] ( literal[string] )
)
keyword[except] identifier[ValueError] :
keyword[continue]
identifier[prefix] = identifier[napalm] . identifier[base] . identifier[helpers] . identifier[convert] (
identifier[int] , identifier[ip_network] . identifier[split] ( literal[string] )[- literal[int] ], identifier[_FAMILY_MAX_PREFIXLEN] . identifier[get] ( identifier[family_raw] )
)
identifier[family] = identifier[_FAMILY_VMAP_] . identifier[get] ( identifier[family_raw] )
keyword[if] keyword[not] identifier[family] keyword[or] keyword[not] identifier[interface] :
keyword[continue]
keyword[if] identifier[interface] keyword[not] keyword[in] identifier[interfaces_ip] . identifier[keys] ():
identifier[interfaces_ip] [ identifier[interface] ]={}
keyword[if] identifier[family] keyword[not] keyword[in] identifier[interfaces_ip] [ identifier[interface] ]. identifier[keys] ():
identifier[interfaces_ip] [ identifier[interface] ][ identifier[family] ]={}
keyword[if] identifier[address] keyword[not] keyword[in] identifier[interfaces_ip] [ identifier[interface] ][ identifier[family] ]. identifier[keys] ():
identifier[interfaces_ip] [ identifier[interface] ][ identifier[family] ][ identifier[address] ]={}
identifier[interfaces_ip] [ identifier[interface] ][ identifier[family] ][ identifier[address] ][ literal[string] ]= identifier[prefix]
keyword[return] identifier[interfaces_ip] | def get_interfaces_ip(self):
"""Return the configured IP addresses."""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
# can add more mappings
_FAMILY_VMAP_ = {'inet': 'ipv4', 'inet6': 'ipv6'}
_FAMILY_MAX_PREFIXLEN = {'inet': 32, 'inet6': 128}
for interface_details in interface_table_items:
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm.base.helpers.convert(napalm.base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface')) # depends on [control=['try'], data=[]]
except ValueError:
continue # depends on [control=['except'], data=[]]
prefix = napalm.base.helpers.convert(int, ip_network.split('/')[-1], _FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
if not family or not interface:
continue # depends on [control=['if'], data=[]]
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {} # depends on [control=['if'], data=['interface']]
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {} # depends on [control=['if'], data=['family']]
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {} # depends on [control=['if'], data=['address']]
interfaces_ip[interface][family][address]['prefix_length'] = prefix # depends on [control=['for'], data=['interface_details']]
return interfaces_ip |
def parse_reading(val: str) -> Optional[float]:
    """Parse a reading string into a float.

    Returns the numeric value, or ``None`` (after logging a warning)
    when *val* is not a valid number.
    """
    try:
        number = float(val)
    except ValueError:
        logging.warning('Reading of "%s" is not a number', val)
        return None
    return number
constant[ Convert reading value to float (if possible) ]
<ast.Try object at 0x7da1b0fe5090> | keyword[def] identifier[parse_reading] ( identifier[val] : identifier[str] )-> identifier[Optional] [ identifier[float] ]:
literal[string]
keyword[try] :
keyword[return] identifier[float] ( identifier[val] )
keyword[except] identifier[ValueError] :
identifier[logging] . identifier[warning] ( literal[string] , identifier[val] )
keyword[return] keyword[None] | def parse_reading(val: str) -> Optional[float]:
""" Convert reading value to float (if possible) """
try:
return float(val) # depends on [control=['try'], data=[]]
except ValueError:
logging.warning('Reading of "%s" is not a number', val)
return None # depends on [control=['except'], data=[]] |
def report_failures(error=False, display=True, clear=True):
    """ Print details of logged failures in expect function

    If no failures are detected, None is returned by the function and
    nothing is raised, regardless of the ``error`` flag.

    Parameters
    ----------
    error: bool
        If True and failures exist, raise a FailedValidationError with the
        report instead of printing to console.
    display: bool
        If True, print the report to console (failure report, or a success
        message when nothing failed). Ignored when ``error`` triggers a raise.
    clear: bool
        If True, all logged failures are cleared after being reported.

    Returns
    -------
    string
        The string formatted failure report.
    list of dict
        The failed expectations. Each dictionary contains the keys:
        idx - the number of the failed expectation in the list starting at one,
        expression - Code that is evaluated
        file - the file name where the validation function was defined,
        funcname - the name of the validation function,
        line - the line of the validation function that the expression was on
        msg - the error message associated with the expression, if there was one.
    """
    global _failed_expectations
    output = []
    # Copy so the returned list stays valid even after the log is cleared.
    all_failed_expectations = _failed_expectations[:]
    if all_failed_expectations:
        output.append('\nFailed Expectations: %s\n\n' % len(all_failed_expectations))
        for i, failure in enumerate(all_failed_expectations, start=1):
            report_line = '{idx}: File {file}, line {line}, in {funcname}()\n   "{expression}" is not True\n'
            if failure['msg']:
                report_line += '    -- {msg}\n'
            report_line += '\n'
            failure['idx'] = i
            output.append(report_line.format(**failure))
        if clear:
            _failed_expectations = []
        # BUG FIX: only raise when there actually are failures. The original
        # raised FailedValidationError("All expectations met.") when error=True
        # and nothing had failed, contradicting the documented contract.
        if error:
            raise FailedValidationError("\n" + ''.join(output))
        if display:
            print(''.join(output))
        return (''.join(output), all_failed_expectations)
    # Success path: never raise; optionally announce, then return None
    # as documented.
    output.append("All expectations met.")
    if display:
        print(''.join(output))
    return None
constant[ Print details of logged failures in expect function
If no failures are detected, None is returned by the function.
Parameters
----------
error:bool
If true, will raise an Expectation of type 'FaliedValidationError' instead of printing to console
display: bool
If True, will print the failure report to console as well as returning it as a string. If
error = True do nothing.
clear: bool
If True, all logged failured will be cleared after being reported.
Returns
-------
string
The string formated failure report.
list of dict
The failed expectations. Each dictionary contains the keys:
idx - the number of the failed expectation in the list starting at one,
expression - Code that is evaluated
file - the file name where the validation function was defined,
funcname - the name of the validation function,
line - the line of the validation function that the expression was on
msg - the error message associated with the expression, if there was one.
]
<ast.Global object at 0x7da1b26ae170>
variable[output] assign[=] list[[]]
variable[all_failed_expectations] assign[=] call[name[_failed_expectations]][<ast.Slice object at 0x7da1b26ac2b0>]
if name[all_failed_expectations] begin[:]
call[name[output].append, parameter[binary_operation[constant[
Failed Expectations: %s
] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[all_failed_expectations]]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b26ae8c0>, <ast.Name object at 0x7da1b26ade40>]]] in starred[call[name[enumerate], parameter[name[all_failed_expectations]]]] begin[:]
variable[report_line] assign[=] constant[{idx}: File {file}, line {line}, in {funcname}()
"{expression}" is not True
]
if call[name[failure]][constant[msg]] begin[:]
<ast.AugAssign object at 0x7da1b26ae7a0>
<ast.AugAssign object at 0x7da20c6a9990>
call[name[failure]][constant[idx]] assign[=] name[i]
call[name[output].append, parameter[call[name[report_line].format, parameter[]]]]
if name[clear] begin[:]
variable[_failed_expectations] assign[=] list[[]]
if name[error] begin[:]
<ast.Raise object at 0x7da20c6ab730>
if name[all_failed_expectations] begin[:]
return[tuple[[<ast.Call object at 0x7da20c6a9660>, <ast.Name object at 0x7da20c6a9300>]]] | keyword[def] identifier[report_failures] ( identifier[error] = keyword[False] , identifier[display] = keyword[True] , identifier[clear] = keyword[True] ):
literal[string]
keyword[global] identifier[_failed_expectations]
identifier[output] =[]
identifier[all_failed_expectations] = identifier[_failed_expectations] [:]
keyword[if] identifier[all_failed_expectations] :
identifier[output] . identifier[append] ( literal[string] % identifier[len] ( identifier[all_failed_expectations] ))
keyword[for] identifier[i] , identifier[failure] keyword[in] identifier[enumerate] ( identifier[all_failed_expectations] , identifier[start] = literal[int] ):
identifier[report_line] = literal[string]
keyword[if] identifier[failure] [ literal[string] ]:
identifier[report_line] += literal[string]
identifier[report_line] += literal[string]
identifier[failure] [ literal[string] ]= identifier[i]
identifier[output] . identifier[append] ( identifier[report_line] . identifier[format] (** identifier[failure] ))
keyword[if] identifier[clear] :
identifier[_failed_expectations] =[]
keyword[else] :
identifier[output] . identifier[append] ( literal[string] )
keyword[if] identifier[error] :
keyword[raise] identifier[FailedValidationError] ( literal[string] + literal[string] . identifier[join] ( identifier[output] ))
keyword[elif] identifier[display] :
identifier[print] ( literal[string] . identifier[join] ( identifier[output] ))
keyword[if] identifier[all_failed_expectations] :
keyword[return] ( literal[string] . identifier[join] ( identifier[output] ), identifier[all_failed_expectations] )
keyword[else] :
keyword[return] keyword[None] | def report_failures(error=False, display=True, clear=True):
""" Print details of logged failures in expect function
If no failures are detected, None is returned by the function.
Parameters
----------
error:bool
If true, will raise an Expectation of type 'FaliedValidationError' instead of printing to console
display: bool
If True, will print the failure report to console as well as returning it as a string. If
error = True do nothing.
clear: bool
If True, all logged failured will be cleared after being reported.
Returns
-------
string
The string formated failure report.
list of dict
The failed expectations. Each dictionary contains the keys:
idx - the number of the failed expectation in the list starting at one,
expression - Code that is evaluated
file - the file name where the validation function was defined,
funcname - the name of the validation function,
line - the line of the validation function that the expression was on
msg - the error message associated with the expression, if there was one.
"""
global _failed_expectations
output = [] # Copy as failures are returned
all_failed_expectations = _failed_expectations[:]
if all_failed_expectations:
output.append('\nFailed Expectations: %s\n\n' % len(all_failed_expectations))
for (i, failure) in enumerate(all_failed_expectations, start=1):
report_line = '{idx}: File {file}, line {line}, in {funcname}()\n "{expression}" is not True\n'
if failure['msg']:
report_line += ' -- {msg}\n' # depends on [control=['if'], data=[]]
report_line += '\n'
failure['idx'] = i
output.append(report_line.format(**failure)) # depends on [control=['for'], data=[]]
if clear:
_failed_expectations = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
output.append('All expectations met.')
if error:
raise FailedValidationError('\n' + ''.join(output)) # depends on [control=['if'], data=[]]
elif display:
print(''.join(output)) # depends on [control=['if'], data=[]]
if all_failed_expectations:
return (''.join(output), all_failed_expectations) # depends on [control=['if'], data=[]]
else:
return None |
def read_namespaced_lease(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_lease  # noqa: E501

    read the specified Lease  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_lease(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Lease (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported.  Export strips fields that a user can not specify.
    :return: V1beta1Lease
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the
    # *_with_http_info variant and return its result unchanged (the
    # request thread when async_req is set, the deserialized data
    # otherwise), so a single call covers both cases.
    return self.read_namespaced_lease_with_http_info(name, namespace, **kwargs)  # noqa: E501
constant[read_namespaced_lease # noqa: E501
read the specified Lease # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_lease(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Lease (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1Lease
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].read_namespaced_lease_with_http_info, parameter[name[name], name[namespace]]]] | keyword[def] identifier[read_namespaced_lease] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[read_namespaced_lease_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[read_namespaced_lease_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def read_namespaced_lease(self, name, namespace, **kwargs): # noqa: E501
"read_namespaced_lease # noqa: E501\n\n read the specified Lease # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.read_namespaced_lease(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the Lease (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.\n :param bool export: Should this value be exported. Export strips fields that a user can not specify.\n :return: V1beta1Lease\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_lease_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.read_namespaced_lease_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def _create_json(self):
    """Build the security-configuration payload for this user.

    JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Security+Configuration+JSON
    """
    # Return the payload literal directly; key order mirrors the
    # documented configuration layout.
    return {
        'name': self.name,
        'email': self.email,
        'password': self.password,
        'admin': self.admin,
        'profileUpdatable': self.profileUpdatable,
        'internalPasswordDisabled': self.internalPasswordDisabled,
        'groups': self._groups,
    }
constant[
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Security+Configuration+JSON
]
variable[data_json] assign[=] dictionary[[<ast.Constant object at 0x7da1b0980d30>, <ast.Constant object at 0x7da1b0980dc0>, <ast.Constant object at 0x7da1b0980310>, <ast.Constant object at 0x7da1b0980f40>, <ast.Constant object at 0x7da1b0983d60>, <ast.Constant object at 0x7da1b09800a0>, <ast.Constant object at 0x7da1b09805b0>], [<ast.Attribute object at 0x7da1b088ad70>, <ast.Attribute object at 0x7da1b0889630>, <ast.Attribute object at 0x7da1b088ac50>, <ast.Attribute object at 0x7da1b0889090>, <ast.Attribute object at 0x7da1b0926aa0>, <ast.Attribute object at 0x7da1b0927a30>, <ast.Attribute object at 0x7da1b0926500>]]
return[name[data_json]] | keyword[def] identifier[_create_json] ( identifier[self] ):
literal[string]
identifier[data_json] ={
literal[string] : identifier[self] . identifier[name] ,
literal[string] : identifier[self] . identifier[email] ,
literal[string] : identifier[self] . identifier[password] ,
literal[string] : identifier[self] . identifier[admin] ,
literal[string] : identifier[self] . identifier[profileUpdatable] ,
literal[string] : identifier[self] . identifier[internalPasswordDisabled] ,
literal[string] : identifier[self] . identifier[_groups] ,
}
keyword[return] identifier[data_json] | def _create_json(self):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Security+Configuration+JSON
"""
data_json = {'name': self.name, 'email': self.email, 'password': self.password, 'admin': self.admin, 'profileUpdatable': self.profileUpdatable, 'internalPasswordDisabled': self.internalPasswordDisabled, 'groups': self._groups}
return data_json |
def zip_dicts(left, right, prefix=()):
    """
    Walk *left* recursively, pairing each leaf with its counterpart in *right*.

    For every non-dict value found in *left*, yields a 5-tuple of:
    - the nested key path (a tuple of keys),
    - the containing dict and the value on the left side,
    - the containing dict and the value on the right side
      (``None`` when the key is absent on the right).
    """
    for key in left:
        lvalue = left[key]
        path = prefix + (key,)
        rvalue = right.get(key)
        if not isinstance(lvalue, dict):
            yield path, left, lvalue, right, rvalue
        else:
            # Recurse into nested dicts; an absent right side becomes {}.
            yield from zip_dicts(lvalue, rvalue or {}, path)
constant[
Modified zip through two dictionaries.
Iterate through all keys of left dictionary, returning:
- A nested path
- A value and parent for both dictionaries
]
for taget[tuple[[<ast.Name object at 0x7da1b0e16e60>, <ast.Name object at 0x7da1b0e146a0>]]] in starred[call[name[left].items, parameter[]]] begin[:]
variable[path] assign[=] binary_operation[name[prefix] + tuple[[<ast.Name object at 0x7da1b0e14d90>]]]
variable[right_value] assign[=] call[name[right].get, parameter[name[key]]]
if call[name[isinstance], parameter[name[left_value], name[dict]]] begin[:]
<ast.YieldFrom object at 0x7da1b0e173a0> | keyword[def] identifier[zip_dicts] ( identifier[left] , identifier[right] , identifier[prefix] =()):
literal[string]
keyword[for] identifier[key] , identifier[left_value] keyword[in] identifier[left] . identifier[items] ():
identifier[path] = identifier[prefix] +( identifier[key] ,)
identifier[right_value] = identifier[right] . identifier[get] ( identifier[key] )
keyword[if] identifier[isinstance] ( identifier[left_value] , identifier[dict] ):
keyword[yield] keyword[from] identifier[zip_dicts] ( identifier[left_value] , identifier[right_value] keyword[or] {}, identifier[path] )
keyword[else] :
keyword[yield] identifier[path] , identifier[left] , identifier[left_value] , identifier[right] , identifier[right_value] | def zip_dicts(left, right, prefix=()):
"""
Modified zip through two dictionaries.
Iterate through all keys of left dictionary, returning:
- A nested path
- A value and parent for both dictionaries
"""
for (key, left_value) in left.items():
path = prefix + (key,)
right_value = right.get(key)
if isinstance(left_value, dict):
yield from zip_dicts(left_value, right_value or {}, path) # depends on [control=['if'], data=[]]
else:
yield (path, left, left_value, right, right_value) # depends on [control=['for'], data=[]] |
def _ExpandUsersVariablePathSegments(
    cls, path_segments, path_separator, user_accounts):
  """Expands path segments with a users variable, e.g. %%users.homedir%%.

  Args:
    path_segments (list[str]): path segments.
    path_separator (str): path segment separator.
    user_accounts (list[UserAccountArtifact]): user accounts.

  Returns:
    list[str]: paths for which the users variables have been expanded.
  """
  if not path_segments:
    return []

  # Only the first segment can hold a users variable; compare it
  # case-insensitively.
  path_segments_lower = [
      path_segment.lower() for path_segment in path_segments]

  if path_segments_lower[0] in ('%%users.homedir%%', '%%users.userprofile%%'):
    # Home directory variables expand to one path per user account.
    return cls._ExpandUsersHomeDirectoryPathSegments(
        path_segments, path_separator, user_accounts)

  # NOTE(review): this lookup uses the original-case first segment, unlike
  # the lowered comparison above -- presumably the expansion table keys are
  # already lower case; confirm against _PATH_EXPANSIONS_PER_USERS_VARIABLE.
  path_expansions = cls._PATH_EXPANSIONS_PER_USERS_VARIABLE.get(
      path_segments[0], None)
  if path_expansions:
    expanded_paths = []

    for path_expansion in path_expansions:
      # Replace the variable segment with the expansion prefix and recurse,
      # since an expansion can itself start with a users variable.
      expanded_path_segments = list(path_expansion)
      expanded_path_segments.extend(path_segments[1:])

      paths = cls._ExpandUsersVariablePathSegments(
          expanded_path_segments, path_separator, user_accounts)
      expanded_paths.extend(paths)

    return expanded_paths

  if cls._IsWindowsDrivePathSegment(path_segments[0]):
    # Blank out a leading Windows drive designation so the joined path
    # starts with the separator.  Note this mutates the caller's list.
    path_segments[0] = ''

  # TODO: add support for %%users.username%%
  path = path_separator.join(path_segments)
  return [path]
constant[Expands path segments with a users variable, e.g. %%users.homedir%%.
Args:
path_segments (list[str]): path segments.
path_separator (str): path segment separator.
user_accounts (list[UserAccountArtifact]): user accounts.
Returns:
list[str]: paths for which the users variables have been expanded.
]
if <ast.UnaryOp object at 0x7da18ede44c0> begin[:]
return[list[[]]]
variable[path_segments_lower] assign[=] <ast.ListComp object at 0x7da18ede6890>
if compare[call[name[path_segments_lower]][constant[0]] in tuple[[<ast.Constant object at 0x7da18ede5210>, <ast.Constant object at 0x7da18ede4280>]]] begin[:]
return[call[name[cls]._ExpandUsersHomeDirectoryPathSegments, parameter[name[path_segments], name[path_separator], name[user_accounts]]]]
variable[path_expansions] assign[=] call[name[cls]._PATH_EXPANSIONS_PER_USERS_VARIABLE.get, parameter[call[name[path_segments]][constant[0]], constant[None]]]
if name[path_expansions] begin[:]
variable[expanded_paths] assign[=] list[[]]
for taget[name[path_expansion]] in starred[name[path_expansions]] begin[:]
variable[expanded_path_segments] assign[=] call[name[list], parameter[name[path_expansion]]]
call[name[expanded_path_segments].extend, parameter[call[name[path_segments]][<ast.Slice object at 0x7da18ede65f0>]]]
variable[paths] assign[=] call[name[cls]._ExpandUsersVariablePathSegments, parameter[name[expanded_path_segments], name[path_separator], name[user_accounts]]]
call[name[expanded_paths].extend, parameter[name[paths]]]
return[name[expanded_paths]]
if call[name[cls]._IsWindowsDrivePathSegment, parameter[call[name[path_segments]][constant[0]]]] begin[:]
call[name[path_segments]][constant[0]] assign[=] constant[]
variable[path] assign[=] call[name[path_separator].join, parameter[name[path_segments]]]
return[list[[<ast.Name object at 0x7da18c4ce080>]]] | keyword[def] identifier[_ExpandUsersVariablePathSegments] (
identifier[cls] , identifier[path_segments] , identifier[path_separator] , identifier[user_accounts] ):
literal[string]
keyword[if] keyword[not] identifier[path_segments] :
keyword[return] []
identifier[path_segments_lower] =[
identifier[path_segment] . identifier[lower] () keyword[for] identifier[path_segment] keyword[in] identifier[path_segments] ]
keyword[if] identifier[path_segments_lower] [ literal[int] ] keyword[in] ( literal[string] , literal[string] ):
keyword[return] identifier[cls] . identifier[_ExpandUsersHomeDirectoryPathSegments] (
identifier[path_segments] , identifier[path_separator] , identifier[user_accounts] )
identifier[path_expansions] = identifier[cls] . identifier[_PATH_EXPANSIONS_PER_USERS_VARIABLE] . identifier[get] (
identifier[path_segments] [ literal[int] ], keyword[None] )
keyword[if] identifier[path_expansions] :
identifier[expanded_paths] =[]
keyword[for] identifier[path_expansion] keyword[in] identifier[path_expansions] :
identifier[expanded_path_segments] = identifier[list] ( identifier[path_expansion] )
identifier[expanded_path_segments] . identifier[extend] ( identifier[path_segments] [ literal[int] :])
identifier[paths] = identifier[cls] . identifier[_ExpandUsersVariablePathSegments] (
identifier[expanded_path_segments] , identifier[path_separator] , identifier[user_accounts] )
identifier[expanded_paths] . identifier[extend] ( identifier[paths] )
keyword[return] identifier[expanded_paths]
keyword[if] identifier[cls] . identifier[_IsWindowsDrivePathSegment] ( identifier[path_segments] [ literal[int] ]):
identifier[path_segments] [ literal[int] ]= literal[string]
identifier[path] = identifier[path_separator] . identifier[join] ( identifier[path_segments] )
keyword[return] [ identifier[path] ] | def _ExpandUsersVariablePathSegments(cls, path_segments, path_separator, user_accounts):
"""Expands path segments with a users variable, e.g. %%users.homedir%%.
Args:
path_segments (list[str]): path segments.
path_separator (str): path segment separator.
user_accounts (list[UserAccountArtifact]): user accounts.
Returns:
list[str]: paths for which the users variables have been expanded.
"""
if not path_segments:
return [] # depends on [control=['if'], data=[]]
path_segments_lower = [path_segment.lower() for path_segment in path_segments]
if path_segments_lower[0] in ('%%users.homedir%%', '%%users.userprofile%%'):
return cls._ExpandUsersHomeDirectoryPathSegments(path_segments, path_separator, user_accounts) # depends on [control=['if'], data=[]]
path_expansions = cls._PATH_EXPANSIONS_PER_USERS_VARIABLE.get(path_segments[0], None)
if path_expansions:
expanded_paths = []
for path_expansion in path_expansions:
expanded_path_segments = list(path_expansion)
expanded_path_segments.extend(path_segments[1:])
paths = cls._ExpandUsersVariablePathSegments(expanded_path_segments, path_separator, user_accounts)
expanded_paths.extend(paths) # depends on [control=['for'], data=['path_expansion']]
return expanded_paths # depends on [control=['if'], data=[]]
if cls._IsWindowsDrivePathSegment(path_segments[0]):
path_segments[0] = '' # depends on [control=['if'], data=[]]
# TODO: add support for %%users.username%%
path = path_separator.join(path_segments)
return [path] |
def show(self, *args, **kwargs):
    """Display the pourbaix plot.

    Args:
        *args: args to get_pourbaix_plot
        **kwargs: kwargs to get_pourbaix_plot

    Returns:
        None
    """
    self.get_pourbaix_plot(*args, **kwargs).show()
constant[
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
]
variable[plt] assign[=] call[name[self].get_pourbaix_plot, parameter[<ast.Starred object at 0x7da20c6e4190>]]
call[name[plt].show, parameter[]] | keyword[def] identifier[show] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[plt] = identifier[self] . identifier[get_pourbaix_plot] (* identifier[args] ,** identifier[kwargs] )
identifier[plt] . identifier[show] () | def show(self, *args, **kwargs):
"""
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
"""
plt = self.get_pourbaix_plot(*args, **kwargs)
plt.show() |
def request_release_milestone_payment(session, milestone_id):
    """
    Release a milestone payment
    """
    # PUT /api/projects/0.1/milestones/{milestone_id}/?action=release
    endpoint = 'milestones/{}'.format(milestone_id)
    response = make_put_request(
        session, endpoint, params_data={'action': 'request_release'})
    json_data = response.json()
    if response.status_code != 200:
        # Surface the API error details to the caller.
        raise MilestoneNotRequestedReleaseException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id'])
    return json_data['status']
constant[
Release a milestone payment
]
variable[params_data] assign[=] dictionary[[<ast.Constant object at 0x7da1aff55300>], [<ast.Constant object at 0x7da1aff55330>]]
variable[endpoint] assign[=] call[constant[milestones/{}].format, parameter[name[milestone_id]]]
variable[response] assign[=] call[name[make_put_request], parameter[name[session], name[endpoint]]]
variable[json_data] assign[=] call[name[response].json, parameter[]]
if compare[name[response].status_code equal[==] constant[200]] begin[:]
return[call[name[json_data]][constant[status]]] | keyword[def] identifier[request_release_milestone_payment] ( identifier[session] , identifier[milestone_id] ):
literal[string]
identifier[params_data] ={
literal[string] : literal[string] ,
}
identifier[endpoint] = literal[string] . identifier[format] ( identifier[milestone_id] )
identifier[response] = identifier[make_put_request] ( identifier[session] , identifier[endpoint] , identifier[params_data] = identifier[params_data] )
identifier[json_data] = identifier[response] . identifier[json] ()
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[json_data] [ literal[string] ]
keyword[else] :
keyword[raise] identifier[MilestoneNotRequestedReleaseException] (
identifier[message] = identifier[json_data] [ literal[string] ],
identifier[error_code] = identifier[json_data] [ literal[string] ],
identifier[request_id] = identifier[json_data] [ literal[string] ]) | def request_release_milestone_payment(session, milestone_id):
"""
Release a milestone payment
"""
params_data = {'action': 'request_release'}
# PUT /api/projects/0.1/milestones/{milestone_id}/?action=release
endpoint = 'milestones/{}'.format(milestone_id)
response = make_put_request(session, endpoint, params_data=params_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status'] # depends on [control=['if'], data=[]]
else:
raise MilestoneNotRequestedReleaseException(message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id']) |
def events(cls, filters):
    """Retrieve events details from status.gandi.net."""
    include_current = filters.pop('current', False)
    # Sort the remaining filters for a stable query string, then append
    # the 'current' flag last, matching the API's expected ordering.
    params = sorted(filters.items())
    if include_current:
        params.append(('current', 'true'))
    filter_url = uparse.urlencode(params)  # noqa
    return cls.json_get('%s/events?%s' % (cls.api_url, filter_url),
                        empty_key=True, send_key=False)
constant[Retrieve events details from status.gandi.net.]
variable[current] assign[=] call[name[filters].pop, parameter[constant[current], constant[False]]]
variable[current_params] assign[=] list[[]]
if name[current] begin[:]
variable[current_params] assign[=] list[[<ast.Tuple object at 0x7da20e9b0670>]]
variable[filter_url] assign[=] call[name[uparse].urlencode, parameter[binary_operation[call[name[sorted], parameter[call[name[list], parameter[call[name[filters].items, parameter[]]]]]] + name[current_params]]]]
variable[events] assign[=] call[name[cls].json_get, parameter[binary_operation[constant[%s/events?%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18c4cceb0>, <ast.Name object at 0x7da18c4ce590>]]]]]
return[name[events]] | keyword[def] identifier[events] ( identifier[cls] , identifier[filters] ):
literal[string]
identifier[current] = identifier[filters] . identifier[pop] ( literal[string] , keyword[False] )
identifier[current_params] =[]
keyword[if] identifier[current] :
identifier[current_params] =[( literal[string] , literal[string] )]
identifier[filter_url] = identifier[uparse] . identifier[urlencode] ( identifier[sorted] ( identifier[list] ( identifier[filters] . identifier[items] ()))+ identifier[current_params] )
identifier[events] = identifier[cls] . identifier[json_get] ( literal[string] %( identifier[cls] . identifier[api_url] , identifier[filter_url] ),
identifier[empty_key] = keyword[True] , identifier[send_key] = keyword[False] )
keyword[return] identifier[events] | def events(cls, filters):
"""Retrieve events details from status.gandi.net."""
current = filters.pop('current', False)
current_params = []
if current:
current_params = [('current', 'true')] # depends on [control=['if'], data=[]]
filter_url = uparse.urlencode(sorted(list(filters.items())) + current_params) # noqa
events = cls.json_get('%s/events?%s' % (cls.api_url, filter_url), empty_key=True, send_key=False)
return events |
def _move_leadership(self, state):
"""Attempt to move a random partition to a random broker. If the
chosen movement is not possible, None is returned.
:param state: The starting state.
:return: The resulting State object if a leader change is found. None
if no change is found.
"""
partition = random.randint(0, len(self.cluster_topology.partitions) - 1)
# Moving zero weight partitions will not improve balance for any of the
# balance criteria. Disallow these movements here to avoid wasted
# effort.
if state.partition_weights[partition] == 0:
return None
if len(state.replicas[partition]) <= 1:
return None
dest_index = random.randint(1, len(state.replicas[partition]) - 1)
dest = state.replicas[partition][dest_index]
if (self.args.max_leader_changes is not None and
state.leader_movement_count >= self.args.max_leader_changes):
return None
return state.move_leadership(partition, dest) | def function[_move_leadership, parameter[self, state]]:
constant[Attempt to move a random partition to a random broker. If the
chosen movement is not possible, None is returned.
:param state: The starting state.
:return: The resulting State object if a leader change is found. None
if no change is found.
]
variable[partition] assign[=] call[name[random].randint, parameter[constant[0], binary_operation[call[name[len], parameter[name[self].cluster_topology.partitions]] - constant[1]]]]
if compare[call[name[state].partition_weights][name[partition]] equal[==] constant[0]] begin[:]
return[constant[None]]
if compare[call[name[len], parameter[call[name[state].replicas][name[partition]]]] less_or_equal[<=] constant[1]] begin[:]
return[constant[None]]
variable[dest_index] assign[=] call[name[random].randint, parameter[constant[1], binary_operation[call[name[len], parameter[call[name[state].replicas][name[partition]]]] - constant[1]]]]
variable[dest] assign[=] call[call[name[state].replicas][name[partition]]][name[dest_index]]
if <ast.BoolOp object at 0x7da1b07cc850> begin[:]
return[constant[None]]
return[call[name[state].move_leadership, parameter[name[partition], name[dest]]]] | keyword[def] identifier[_move_leadership] ( identifier[self] , identifier[state] ):
literal[string]
identifier[partition] = identifier[random] . identifier[randint] ( literal[int] , identifier[len] ( identifier[self] . identifier[cluster_topology] . identifier[partitions] )- literal[int] )
keyword[if] identifier[state] . identifier[partition_weights] [ identifier[partition] ]== literal[int] :
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[state] . identifier[replicas] [ identifier[partition] ])<= literal[int] :
keyword[return] keyword[None]
identifier[dest_index] = identifier[random] . identifier[randint] ( literal[int] , identifier[len] ( identifier[state] . identifier[replicas] [ identifier[partition] ])- literal[int] )
identifier[dest] = identifier[state] . identifier[replicas] [ identifier[partition] ][ identifier[dest_index] ]
keyword[if] ( identifier[self] . identifier[args] . identifier[max_leader_changes] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[state] . identifier[leader_movement_count] >= identifier[self] . identifier[args] . identifier[max_leader_changes] ):
keyword[return] keyword[None]
keyword[return] identifier[state] . identifier[move_leadership] ( identifier[partition] , identifier[dest] ) | def _move_leadership(self, state):
"""Attempt to move a random partition to a random broker. If the
chosen movement is not possible, None is returned.
:param state: The starting state.
:return: The resulting State object if a leader change is found. None
if no change is found.
"""
partition = random.randint(0, len(self.cluster_topology.partitions) - 1)
# Moving zero weight partitions will not improve balance for any of the
# balance criteria. Disallow these movements here to avoid wasted
# effort.
if state.partition_weights[partition] == 0:
return None # depends on [control=['if'], data=[]]
if len(state.replicas[partition]) <= 1:
return None # depends on [control=['if'], data=[]]
dest_index = random.randint(1, len(state.replicas[partition]) - 1)
dest = state.replicas[partition][dest_index]
if self.args.max_leader_changes is not None and state.leader_movement_count >= self.args.max_leader_changes:
return None # depends on [control=['if'], data=[]]
return state.move_leadership(partition, dest) |
def create_level(self, depth):
    """Create and return a level for the given depth

    The model and root of the level will be automatically set by the browser.

    :param depth: the depth level that the level should handle
    :type depth: int
    :returns: a new level for the given depth
    :rtype: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
    :raises: None
    """
    level = ListLevel(parent=self)
    # Editing starts on double click, a click on an already selected
    # item, or when the current item changes.
    level.setEditTriggers(
        level.DoubleClicked | level.SelectedClicked | level.CurrentChanged)
    level.setResizeMode(level.Adjust)
    self.delegate = CommentDelegate(level)
    level.setItemDelegate(self.delegate)
    level.setVerticalScrollMode(level.ScrollPerPixel)
    return level
return ll | def function[create_level, parameter[self, depth]]:
constant[Create and return a level for the given depth
The model and root of the level will be automatically set by the browser.
:param depth: the depth level that the level should handle
:type depth: int
:returns: a new level for the given depth
:rtype: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
:raises: None
]
variable[ll] assign[=] call[name[ListLevel], parameter[]]
call[name[ll].setEditTriggers, parameter[binary_operation[binary_operation[name[ll].DoubleClicked <ast.BitOr object at 0x7da2590d6aa0> name[ll].SelectedClicked] <ast.BitOr object at 0x7da2590d6aa0> name[ll].CurrentChanged]]]
call[name[ll].setResizeMode, parameter[name[ll].Adjust]]
name[self].delegate assign[=] call[name[CommentDelegate], parameter[name[ll]]]
call[name[ll].setItemDelegate, parameter[name[self].delegate]]
call[name[ll].setVerticalScrollMode, parameter[name[ll].ScrollPerPixel]]
return[name[ll]] | keyword[def] identifier[create_level] ( identifier[self] , identifier[depth] ):
literal[string]
identifier[ll] = identifier[ListLevel] ( identifier[parent] = identifier[self] )
identifier[ll] . identifier[setEditTriggers] ( identifier[ll] . identifier[DoubleClicked] | identifier[ll] . identifier[SelectedClicked] | identifier[ll] . identifier[CurrentChanged] )
identifier[ll] . identifier[setResizeMode] ( identifier[ll] . identifier[Adjust] )
identifier[self] . identifier[delegate] = identifier[CommentDelegate] ( identifier[ll] )
identifier[ll] . identifier[setItemDelegate] ( identifier[self] . identifier[delegate] )
identifier[ll] . identifier[setVerticalScrollMode] ( identifier[ll] . identifier[ScrollPerPixel] )
keyword[return] identifier[ll] | def create_level(self, depth):
"""Create and return a level for the given depth
The model and root of the level will be automatically set by the browser.
:param depth: the depth level that the level should handle
:type depth: int
:returns: a new level for the given depth
:rtype: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
:raises: None
"""
ll = ListLevel(parent=self)
ll.setEditTriggers(ll.DoubleClicked | ll.SelectedClicked | ll.CurrentChanged)
#ll.setSelectionBehavior(ll.SelectRows)
ll.setResizeMode(ll.Adjust)
self.delegate = CommentDelegate(ll)
ll.setItemDelegate(self.delegate)
ll.setVerticalScrollMode(ll.ScrollPerPixel)
return ll |
def path_list(self, sep=os.pathsep):
    """Split on *sep* (default: the OS path separator) and return the
    pieces as :class:`pathlib.Path` objects."""
    from pathlib import Path
    return list(map(Path, self.split(sep)))
constant[ Return list of Path objects. ]
from relative_module[pathlib] import module[Path]
return[<ast.ListComp object at 0x7da1b13ce0b0>] | keyword[def] identifier[path_list] ( identifier[self] , identifier[sep] = identifier[os] . identifier[pathsep] ):
literal[string]
keyword[from] identifier[pathlib] keyword[import] identifier[Path]
keyword[return] [ identifier[Path] ( identifier[pathstr] ) keyword[for] identifier[pathstr] keyword[in] identifier[self] . identifier[split] ( identifier[sep] )] | def path_list(self, sep=os.pathsep):
""" Return list of Path objects. """
from pathlib import Path
return [Path(pathstr) for pathstr in self.split(sep)] |
def process_status(self, helper, sess, check):
    """ get the snmp value, check the status and update the helper"""
    # Map each supported check to its OID table key and status evaluator.
    handlers = {
        'ntp_current_state': ('oid_ntp_current_state_int',
                              self.check_ntp_status),
        'gps_mode': ('oid_gps_mode_int', self.check_gps_status),
    }
    if check not in handlers:
        # Unknown check: nothing to fetch or report.
        return
    oid_key, evaluate = handlers[check]
    raw_value = helper.get_snmp_value(sess, helper, self.oids[oid_key])
    helper.update_status(helper, evaluate(raw_value))
constant[ get the snmp value, check the status and update the helper]
if compare[name[check] equal[==] constant[ntp_current_state]] begin[:]
variable[ntp_status_int] assign[=] call[name[helper].get_snmp_value, parameter[name[sess], name[helper], call[name[self].oids][constant[oid_ntp_current_state_int]]]]
variable[result] assign[=] call[name[self].check_ntp_status, parameter[name[ntp_status_int]]]
call[name[helper].update_status, parameter[name[helper], name[result]]] | keyword[def] identifier[process_status] ( identifier[self] , identifier[helper] , identifier[sess] , identifier[check] ):
literal[string]
keyword[if] identifier[check] == literal[string] :
identifier[ntp_status_int] = identifier[helper] . identifier[get_snmp_value] ( identifier[sess] , identifier[helper] , identifier[self] . identifier[oids] [ literal[string] ])
identifier[result] = identifier[self] . identifier[check_ntp_status] ( identifier[ntp_status_int] )
keyword[elif] identifier[check] == literal[string] :
identifier[gps_status_int] = identifier[helper] . identifier[get_snmp_value] ( identifier[sess] , identifier[helper] , identifier[self] . identifier[oids] [ literal[string] ])
identifier[result] = identifier[self] . identifier[check_gps_status] ( identifier[gps_status_int] )
keyword[else] :
keyword[return]
identifier[helper] . identifier[update_status] ( identifier[helper] , identifier[result] ) | def process_status(self, helper, sess, check):
""" get the snmp value, check the status and update the helper"""
if check == 'ntp_current_state':
ntp_status_int = helper.get_snmp_value(sess, helper, self.oids['oid_ntp_current_state_int'])
result = self.check_ntp_status(ntp_status_int) # depends on [control=['if'], data=[]]
elif check == 'gps_mode':
gps_status_int = helper.get_snmp_value(sess, helper, self.oids['oid_gps_mode_int'])
result = self.check_gps_status(gps_status_int) # depends on [control=['if'], data=[]]
else:
return
helper.update_status(helper, result) |
def adduser(name, username, root=None):
    '''
    Add a user in the group.

    name
        Name of the group to modify

    username
        Username to add to the group

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo bar

    Verifies if a valid username 'bar' as a member of an existing group 'foo',
    if not then adds it.
    '''
    os_family = __grains__.get('os_family')
    os_major = __grains__.get('osmajorrelease')
    if __grains__['kernel'] == 'Linux':
        if os_family == 'RedHat' and os_major == '5':
            # RHEL 5's gpasswd only understands the short option form.
            cmd = ['gpasswd', '-a', username, name]
        elif os_family == 'Suse' and os_major == '11':
            # SLES 11 manages secondary group membership via usermod -A.
            cmd = ['usermod', '-A', name, username]
        else:
            cmd = ['gpasswd', '--add', username, name]
            if root is not None:
                cmd.extend(('--root', root))
    else:
        cmd = ['usermod', '-G', name, username]
        if root is not None:
            cmd.extend(('-R', root))
    # A zero exit status means the user was added successfully.
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
constant[
Add a user in the group.
name
Name of the group to modify
username
Username to add to the group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo bar
Verifies if a valid username 'bar' as a member of an existing group 'foo',
if not then adds it.
]
variable[on_redhat_5] assign[=] <ast.BoolOp object at 0x7da20c7cb3a0>
variable[on_suse_11] assign[=] <ast.BoolOp object at 0x7da20c7c8d30>
if compare[call[name[__grains__]][constant[kernel]] equal[==] constant[Linux]] begin[:]
if name[on_redhat_5] begin[:]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20c7c8b80>, <ast.Constant object at 0x7da20c7cb0a0>, <ast.Name object at 0x7da20c7c89d0>, <ast.Name object at 0x7da20c7c8370>]]
if compare[name[root] is_not constant[None]] begin[:]
call[name[cmd].extend, parameter[tuple[[<ast.Constant object at 0x7da20c7ca710>, <ast.Name object at 0x7da20c7c8bb0>]]]]
variable[retcode] assign[=] call[call[name[__salt__]][constant[cmd.retcode]], parameter[name[cmd]]]
return[<ast.UnaryOp object at 0x7da20c7c9e70>] | keyword[def] identifier[adduser] ( identifier[name] , identifier[username] , identifier[root] = keyword[None] ):
literal[string]
identifier[on_redhat_5] = identifier[__grains__] . identifier[get] ( literal[string] )== literal[string] keyword[and] identifier[__grains__] . identifier[get] ( literal[string] )== literal[string]
identifier[on_suse_11] = identifier[__grains__] . identifier[get] ( literal[string] )== literal[string] keyword[and] identifier[__grains__] . identifier[get] ( literal[string] )== literal[string]
keyword[if] identifier[__grains__] [ literal[string] ]== literal[string] :
keyword[if] identifier[on_redhat_5] :
identifier[cmd] =[ literal[string] , literal[string] , identifier[username] , identifier[name] ]
keyword[elif] identifier[on_suse_11] :
identifier[cmd] =[ literal[string] , literal[string] , identifier[name] , identifier[username] ]
keyword[else] :
identifier[cmd] =[ literal[string] , literal[string] , identifier[username] , identifier[name] ]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
identifier[cmd] . identifier[extend] (( literal[string] , identifier[root] ))
keyword[else] :
identifier[cmd] =[ literal[string] , literal[string] , identifier[name] , identifier[username] ]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
identifier[cmd] . identifier[extend] (( literal[string] , identifier[root] ))
identifier[retcode] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] )
keyword[return] keyword[not] identifier[retcode] | def adduser(name, username, root=None):
"""
Add a user in the group.
name
Name of the group to modify
username
Username to add to the group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo bar
Verifies if a valid username 'bar' as a member of an existing group 'foo',
if not then adds it.
"""
on_redhat_5 = __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '5'
on_suse_11 = __grains__.get('os_family') == 'Suse' and __grains__.get('osmajorrelease') == '11'
if __grains__['kernel'] == 'Linux':
if on_redhat_5:
cmd = ['gpasswd', '-a', username, name] # depends on [control=['if'], data=[]]
elif on_suse_11:
cmd = ['usermod', '-A', name, username] # depends on [control=['if'], data=[]]
else:
cmd = ['gpasswd', '--add', username, name]
if root is not None:
cmd.extend(('--root', root)) # depends on [control=['if'], data=['root']] # depends on [control=['if'], data=[]]
else:
cmd = ['usermod', '-G', name, username]
if root is not None:
cmd.extend(('-R', root)) # depends on [control=['if'], data=['root']]
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
return not retcode |
def _files_preserve(self):
""" create a set of protected files
create a set of files, based on self.files_preserve and
self.stdin, self,stdout and self.stderr, that should not get
closed while daemonizing.
:return: set
"""
result = set()
files = [] if not self.files_preserve else self.files_preserve
files.extend([self.stdin, self.stdout, self.stderr])
for item in files:
if hasattr(item, 'fileno'):
result.add(item.fileno())
if isinstance(item, int):
result.add(item)
return result | def function[_files_preserve, parameter[self]]:
constant[ create a set of protected files
create a set of files, based on self.files_preserve and
self.stdin, self,stdout and self.stderr, that should not get
closed while daemonizing.
:return: set
]
variable[result] assign[=] call[name[set], parameter[]]
variable[files] assign[=] <ast.IfExp object at 0x7da18bcc9ae0>
call[name[files].extend, parameter[list[[<ast.Attribute object at 0x7da18bcc8af0>, <ast.Attribute object at 0x7da18bcc8d00>, <ast.Attribute object at 0x7da18bcc9090>]]]]
for taget[name[item]] in starred[name[files]] begin[:]
if call[name[hasattr], parameter[name[item], constant[fileno]]] begin[:]
call[name[result].add, parameter[call[name[item].fileno, parameter[]]]]
if call[name[isinstance], parameter[name[item], name[int]]] begin[:]
call[name[result].add, parameter[name[item]]]
return[name[result]] | keyword[def] identifier[_files_preserve] ( identifier[self] ):
literal[string]
identifier[result] = identifier[set] ()
identifier[files] =[] keyword[if] keyword[not] identifier[self] . identifier[files_preserve] keyword[else] identifier[self] . identifier[files_preserve]
identifier[files] . identifier[extend] ([ identifier[self] . identifier[stdin] , identifier[self] . identifier[stdout] , identifier[self] . identifier[stderr] ])
keyword[for] identifier[item] keyword[in] identifier[files] :
keyword[if] identifier[hasattr] ( identifier[item] , literal[string] ):
identifier[result] . identifier[add] ( identifier[item] . identifier[fileno] ())
keyword[if] identifier[isinstance] ( identifier[item] , identifier[int] ):
identifier[result] . identifier[add] ( identifier[item] )
keyword[return] identifier[result] | def _files_preserve(self):
""" create a set of protected files
create a set of files, based on self.files_preserve and
self.stdin, self,stdout and self.stderr, that should not get
closed while daemonizing.
:return: set
"""
result = set()
files = [] if not self.files_preserve else self.files_preserve
files.extend([self.stdin, self.stdout, self.stderr])
for item in files:
if hasattr(item, 'fileno'):
result.add(item.fileno()) # depends on [control=['if'], data=[]]
if isinstance(item, int):
result.add(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return result |
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0] | def function[log_message, parameter[self, user, message]]:
constant[Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
]
if call[name[isinstance], parameter[name[user], name[SeshetUser]]] begin[:]
variable[user] assign[=] name[user].nick
variable[time] assign[=] call[name[datetime].utcnow, parameter[]]
call[name[self].message_log.append, parameter[tuple[[<ast.Name object at 0x7da18bc70e20>, <ast.Name object at 0x7da18bc71570>, <ast.Name object at 0x7da18bc73dc0>]]]]
while compare[call[name[len], parameter[name[self].message_log]] greater[>] name[self]._log_size] begin[:]
<ast.Delete object at 0x7da18bc72d10> | keyword[def] identifier[log_message] ( identifier[self] , identifier[user] , identifier[message] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[user] , identifier[SeshetUser] ):
identifier[user] = identifier[user] . identifier[nick]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[user] , identifier[IRCstr] ):
identifier[user] = identifier[IRCstr] ( identifier[user] )
identifier[time] = identifier[datetime] . identifier[utcnow] ()
identifier[self] . identifier[message_log] . identifier[append] (( identifier[time] , identifier[user] , identifier[message] ))
keyword[while] identifier[len] ( identifier[self] . identifier[message_log] )> identifier[self] . identifier[_log_size] :
keyword[del] identifier[self] . identifier[message_log] [ literal[int] ] | def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick # depends on [control=['if'], data=[]]
elif not isinstance(user, IRCstr):
user = IRCstr(user) # depends on [control=['if'], data=[]]
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0] # depends on [control=['while'], data=[]] |
def get_role(self, role_name, mount_point='approle'):
"""GET /auth/<mount_point>/role/<role name>
:param role_name:
:type role_name:
:param mount_point:
:type mount_point:
:return:
:rtype:
"""
return self._adapter.get('/v1/auth/{0}/role/{1}'.format(mount_point, role_name)).json() | def function[get_role, parameter[self, role_name, mount_point]]:
constant[GET /auth/<mount_point>/role/<role name>
:param role_name:
:type role_name:
:param mount_point:
:type mount_point:
:return:
:rtype:
]
return[call[call[name[self]._adapter.get, parameter[call[constant[/v1/auth/{0}/role/{1}].format, parameter[name[mount_point], name[role_name]]]]].json, parameter[]]] | keyword[def] identifier[get_role] ( identifier[self] , identifier[role_name] , identifier[mount_point] = literal[string] ):
literal[string]
keyword[return] identifier[self] . identifier[_adapter] . identifier[get] ( literal[string] . identifier[format] ( identifier[mount_point] , identifier[role_name] )). identifier[json] () | def get_role(self, role_name, mount_point='approle'):
"""GET /auth/<mount_point>/role/<role name>
:param role_name:
:type role_name:
:param mount_point:
:type mount_point:
:return:
:rtype:
"""
return self._adapter.get('/v1/auth/{0}/role/{1}'.format(mount_point, role_name)).json() |
def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):
""" Adds strings pairs from a button xib element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
button(element): The button element from the xib, to extract the string pairs from.
special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)
"""
button_entry_comment = extract_element_internationalized_comment(button)
if button_entry_comment is None:
return
for state in button.getElementsByTagName('state'):
state_name = state.attributes['key'].value
state_entry_comment = button_entry_comment + " - " + state_name + " state of button"
if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):
try:
button_entry_key = state.attributes['title'].value
except KeyError:
try:
button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue
except Exception:
continue
results.append((button_entry_key, state_entry_comment))
warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix) | def function[add_string_pairs_from_button_element, parameter[xib_file, results, button, special_ui_components_prefix]]:
constant[ Adds strings pairs from a button xib element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
button(element): The button element from the xib, to extract the string pairs from.
special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)
]
variable[button_entry_comment] assign[=] call[name[extract_element_internationalized_comment], parameter[name[button]]]
if compare[name[button_entry_comment] is constant[None]] begin[:]
return[None]
for taget[name[state]] in starred[call[name[button].getElementsByTagName, parameter[constant[state]]]] begin[:]
variable[state_name] assign[=] call[name[state].attributes][constant[key]].value
variable[state_entry_comment] assign[=] binary_operation[binary_operation[binary_operation[name[button_entry_comment] + constant[ - ]] + name[state_name]] + constant[ state of button]]
if <ast.UnaryOp object at 0x7da18fe92aa0> begin[:]
<ast.Try object at 0x7da18fe93400>
call[name[results].append, parameter[tuple[[<ast.Name object at 0x7da18fe92d10>, <ast.Name object at 0x7da18fe92860>]]]]
call[name[warn_if_element_not_of_class], parameter[name[button], constant[Button], name[special_ui_components_prefix]]] | keyword[def] identifier[add_string_pairs_from_button_element] ( identifier[xib_file] , identifier[results] , identifier[button] , identifier[special_ui_components_prefix] ):
literal[string]
identifier[button_entry_comment] = identifier[extract_element_internationalized_comment] ( identifier[button] )
keyword[if] identifier[button_entry_comment] keyword[is] keyword[None] :
keyword[return]
keyword[for] identifier[state] keyword[in] identifier[button] . identifier[getElementsByTagName] ( literal[string] ):
identifier[state_name] = identifier[state] . identifier[attributes] [ literal[string] ]. identifier[value]
identifier[state_entry_comment] = identifier[button_entry_comment] + literal[string] + identifier[state_name] + literal[string]
keyword[if] keyword[not] identifier[add_string_pairs_from_attributed_ui_element] ( identifier[results] , identifier[state] , identifier[state_entry_comment] ):
keyword[try] :
identifier[button_entry_key] = identifier[state] . identifier[attributes] [ literal[string] ]. identifier[value]
keyword[except] identifier[KeyError] :
keyword[try] :
identifier[button_entry_key] = identifier[state] . identifier[getElementsByTagName] ( literal[string] )[ literal[int] ]. identifier[firstChild] . identifier[nodeValue]
keyword[except] identifier[Exception] :
keyword[continue]
identifier[results] . identifier[append] (( identifier[button_entry_key] , identifier[state_entry_comment] ))
identifier[warn_if_element_not_of_class] ( identifier[button] , literal[string] , identifier[special_ui_components_prefix] ) | def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):
""" Adds strings pairs from a button xib element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
button(element): The button element from the xib, to extract the string pairs from.
special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)
"""
button_entry_comment = extract_element_internationalized_comment(button)
if button_entry_comment is None:
return # depends on [control=['if'], data=[]]
for state in button.getElementsByTagName('state'):
state_name = state.attributes['key'].value
state_entry_comment = button_entry_comment + ' - ' + state_name + ' state of button'
if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):
try:
button_entry_key = state.attributes['title'].value # depends on [control=['try'], data=[]]
except KeyError:
try:
button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue # depends on [control=['try'], data=[]]
except Exception:
continue # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
results.append((button_entry_key, state_entry_comment)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['state']]
warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix) |
def generate_string_to_file(converter,
input,
output_file,
format='xml',
out_encoding='utf8'):
"""
Like generate(), but reads the input from a string instead of
from a file, and writes the output to the given output file.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input: str
:param input: The string to convert.
:type output_file: str
:param output_file: The output filename.
:type format: str
:param format: The output format.
:type out_encoding: str
:param out_encoding: Character encoding of the output file.
:rtype: str
:return: The resulting output.
"""
with codecs.open(output_file, 'w', encoding=out_encoding) as thefile:
result = generate_string(converter, input, format=format)
thefile.write(result) | def function[generate_string_to_file, parameter[converter, input, output_file, format, out_encoding]]:
constant[
Like generate(), but reads the input from a string instead of
from a file, and writes the output to the given output file.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input: str
:param input: The string to convert.
:type output_file: str
:param output_file: The output filename.
:type format: str
:param format: The output format.
:type out_encoding: str
:param out_encoding: Character encoding of the output file.
:rtype: str
:return: The resulting output.
]
with call[name[codecs].open, parameter[name[output_file], constant[w]]] begin[:]
variable[result] assign[=] call[name[generate_string], parameter[name[converter], name[input]]]
call[name[thefile].write, parameter[name[result]]] | keyword[def] identifier[generate_string_to_file] ( identifier[converter] ,
identifier[input] ,
identifier[output_file] ,
identifier[format] = literal[string] ,
identifier[out_encoding] = literal[string] ):
literal[string]
keyword[with] identifier[codecs] . identifier[open] ( identifier[output_file] , literal[string] , identifier[encoding] = identifier[out_encoding] ) keyword[as] identifier[thefile] :
identifier[result] = identifier[generate_string] ( identifier[converter] , identifier[input] , identifier[format] = identifier[format] )
identifier[thefile] . identifier[write] ( identifier[result] ) | def generate_string_to_file(converter, input, output_file, format='xml', out_encoding='utf8'):
"""
Like generate(), but reads the input from a string instead of
from a file, and writes the output to the given output file.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input: str
:param input: The string to convert.
:type output_file: str
:param output_file: The output filename.
:type format: str
:param format: The output format.
:type out_encoding: str
:param out_encoding: Character encoding of the output file.
:rtype: str
:return: The resulting output.
"""
with codecs.open(output_file, 'w', encoding=out_encoding) as thefile:
result = generate_string(converter, input, format=format)
thefile.write(result) # depends on [control=['with'], data=['thefile']] |
def mont_priv_to_ed_pair(cls, mont_priv):
"""
Derive a Twisted Edwards key pair from given Montgomery private key.
:param mont_priv: A bytes-like object encoding the private key with length
MONT_PRIV_KEY_SIZE.
:returns: A tuple of bytes-like objects encoding the private key with length
ED_PRIV_KEY_SIZE and the public key with length ED_PUB_KEY_SIZE.
"""
if not isinstance(mont_priv, bytes):
raise TypeError("Wrong type passed for the mont_priv parameter.")
if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE:
raise ValueError("Invalid value passed for the mont_priv parameter.")
ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(mont_priv))
return bytes(ed_priv), bytes(ed_pub) | def function[mont_priv_to_ed_pair, parameter[cls, mont_priv]]:
constant[
Derive a Twisted Edwards key pair from given Montgomery private key.
:param mont_priv: A bytes-like object encoding the private key with length
MONT_PRIV_KEY_SIZE.
:returns: A tuple of bytes-like objects encoding the private key with length
ED_PRIV_KEY_SIZE and the public key with length ED_PUB_KEY_SIZE.
]
if <ast.UnaryOp object at 0x7da1b1fa96c0> begin[:]
<ast.Raise object at 0x7da1b1fa8250>
if compare[call[name[len], parameter[name[mont_priv]]] not_equal[!=] name[cls].MONT_PRIV_KEY_SIZE] begin[:]
<ast.Raise object at 0x7da1b1fa9e70>
<ast.Tuple object at 0x7da1b1fa9720> assign[=] call[name[cls]._mont_priv_to_ed_pair, parameter[call[name[bytearray], parameter[name[mont_priv]]]]]
return[tuple[[<ast.Call object at 0x7da1b1faa950>, <ast.Call object at 0x7da1b1fa8df0>]]] | keyword[def] identifier[mont_priv_to_ed_pair] ( identifier[cls] , identifier[mont_priv] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[mont_priv] , identifier[bytes] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[len] ( identifier[mont_priv] )!= identifier[cls] . identifier[MONT_PRIV_KEY_SIZE] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[ed_priv] , identifier[ed_pub] = identifier[cls] . identifier[_mont_priv_to_ed_pair] ( identifier[bytearray] ( identifier[mont_priv] ))
keyword[return] identifier[bytes] ( identifier[ed_priv] ), identifier[bytes] ( identifier[ed_pub] ) | def mont_priv_to_ed_pair(cls, mont_priv):
"""
Derive a Twisted Edwards key pair from given Montgomery private key.
:param mont_priv: A bytes-like object encoding the private key with length
MONT_PRIV_KEY_SIZE.
:returns: A tuple of bytes-like objects encoding the private key with length
ED_PRIV_KEY_SIZE and the public key with length ED_PUB_KEY_SIZE.
"""
if not isinstance(mont_priv, bytes):
raise TypeError('Wrong type passed for the mont_priv parameter.') # depends on [control=['if'], data=[]]
if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE:
raise ValueError('Invalid value passed for the mont_priv parameter.') # depends on [control=['if'], data=[]]
(ed_priv, ed_pub) = cls._mont_priv_to_ed_pair(bytearray(mont_priv))
return (bytes(ed_priv), bytes(ed_pub)) |
def log_scalar(self, metric_name, value, step=None):
"""
Add a new measurement.
The measurement will be processed by the MongoDB observer
during a heartbeat event.
Other observers are not yet supported.
:param metric_name: The name of the metric, e.g. training.loss
:param value: The measured value
:param step: The step number (integer), e.g. the iteration number
If not specified, an internal counter for each metric
is used, incremented by one.
"""
# Method added in change https://github.com/chovanecm/sacred/issues/4
# The same as Experiment.log_scalar (if something changes,
# update the docstring too!)
return self._metrics.log_scalar_metric(metric_name, value, step) | def function[log_scalar, parameter[self, metric_name, value, step]]:
constant[
Add a new measurement.
The measurement will be processed by the MongoDB observer
during a heartbeat event.
Other observers are not yet supported.
:param metric_name: The name of the metric, e.g. training.loss
:param value: The measured value
:param step: The step number (integer), e.g. the iteration number
If not specified, an internal counter for each metric
is used, incremented by one.
]
return[call[name[self]._metrics.log_scalar_metric, parameter[name[metric_name], name[value], name[step]]]] | keyword[def] identifier[log_scalar] ( identifier[self] , identifier[metric_name] , identifier[value] , identifier[step] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_metrics] . identifier[log_scalar_metric] ( identifier[metric_name] , identifier[value] , identifier[step] ) | def log_scalar(self, metric_name, value, step=None):
"""
Add a new measurement.
The measurement will be processed by the MongoDB observer
during a heartbeat event.
Other observers are not yet supported.
:param metric_name: The name of the metric, e.g. training.loss
:param value: The measured value
:param step: The step number (integer), e.g. the iteration number
If not specified, an internal counter for each metric
is used, incremented by one.
"""
# Method added in change https://github.com/chovanecm/sacred/issues/4
# The same as Experiment.log_scalar (if something changes,
# update the docstring too!)
return self._metrics.log_scalar_metric(metric_name, value, step) |
def get(self, sid):
"""
Constructs a TaskContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.autopilot.v1.assistant.task.TaskContext
:rtype: twilio.rest.autopilot.v1.assistant.task.TaskContext
"""
return TaskContext(self._version, assistant_sid=self._solution['assistant_sid'], sid=sid, ) | def function[get, parameter[self, sid]]:
constant[
Constructs a TaskContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.autopilot.v1.assistant.task.TaskContext
:rtype: twilio.rest.autopilot.v1.assistant.task.TaskContext
]
return[call[name[TaskContext], parameter[name[self]._version]]] | keyword[def] identifier[get] ( identifier[self] , identifier[sid] ):
literal[string]
keyword[return] identifier[TaskContext] ( identifier[self] . identifier[_version] , identifier[assistant_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[sid] = identifier[sid] ,) | def get(self, sid):
"""
Constructs a TaskContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.autopilot.v1.assistant.task.TaskContext
:rtype: twilio.rest.autopilot.v1.assistant.task.TaskContext
"""
return TaskContext(self._version, assistant_sid=self._solution['assistant_sid'], sid=sid) |
def add_child(self, id_, child_id):
"""Adds a child to a ``Id``.
arg: id (osid.id.Id): the ``Id`` of the node
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``child_id`` is already a child of
``id``
raise: NotFound - ``id`` or ``child_id`` not found
raise: NullArgument - ``id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if bool(self._rls.get_relationships_by_genus_type_for_peers(id_, child_id, self._relationship_type).available()):
raise errors.AlreadyExists()
rfc = self._ras.get_relationship_form_for_create(id_, child_id, [])
rfc.set_display_name(str(id_) + ' to ' + str(child_id) + ' Parent-Child Relationship')
rfc.set_description(self._relationship_type.get_display_name().get_text() + ' relationship for parent: ' + str(id_) + ' and child: ' + str(child_id))
rfc.set_genus_type(self._relationship_type)
self._ras.create_relationship(rfc) | def function[add_child, parameter[self, id_, child_id]]:
constant[Adds a child to a ``Id``.
arg: id (osid.id.Id): the ``Id`` of the node
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``child_id`` is already a child of
``id``
raise: NotFound - ``id`` or ``child_id`` not found
raise: NullArgument - ``id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if call[name[bool], parameter[call[call[name[self]._rls.get_relationships_by_genus_type_for_peers, parameter[name[id_], name[child_id], name[self]._relationship_type]].available, parameter[]]]] begin[:]
<ast.Raise object at 0x7da204961e10>
variable[rfc] assign[=] call[name[self]._ras.get_relationship_form_for_create, parameter[name[id_], name[child_id], list[[]]]]
call[name[rfc].set_display_name, parameter[binary_operation[binary_operation[binary_operation[call[name[str], parameter[name[id_]]] + constant[ to ]] + call[name[str], parameter[name[child_id]]]] + constant[ Parent-Child Relationship]]]]
call[name[rfc].set_description, parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[call[name[self]._relationship_type.get_display_name, parameter[]].get_text, parameter[]] + constant[ relationship for parent: ]] + call[name[str], parameter[name[id_]]]] + constant[ and child: ]] + call[name[str], parameter[name[child_id]]]]]]
call[name[rfc].set_genus_type, parameter[name[self]._relationship_type]]
call[name[self]._ras.create_relationship, parameter[name[rfc]]] | keyword[def] identifier[add_child] ( identifier[self] , identifier[id_] , identifier[child_id] ):
literal[string]
keyword[if] identifier[bool] ( identifier[self] . identifier[_rls] . identifier[get_relationships_by_genus_type_for_peers] ( identifier[id_] , identifier[child_id] , identifier[self] . identifier[_relationship_type] ). identifier[available] ()):
keyword[raise] identifier[errors] . identifier[AlreadyExists] ()
identifier[rfc] = identifier[self] . identifier[_ras] . identifier[get_relationship_form_for_create] ( identifier[id_] , identifier[child_id] ,[])
identifier[rfc] . identifier[set_display_name] ( identifier[str] ( identifier[id_] )+ literal[string] + identifier[str] ( identifier[child_id] )+ literal[string] )
identifier[rfc] . identifier[set_description] ( identifier[self] . identifier[_relationship_type] . identifier[get_display_name] (). identifier[get_text] ()+ literal[string] + identifier[str] ( identifier[id_] )+ literal[string] + identifier[str] ( identifier[child_id] ))
identifier[rfc] . identifier[set_genus_type] ( identifier[self] . identifier[_relationship_type] )
identifier[self] . identifier[_ras] . identifier[create_relationship] ( identifier[rfc] ) | def add_child(self, id_, child_id):
"""Adds a child to a ``Id``.
arg: id (osid.id.Id): the ``Id`` of the node
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``child_id`` is already a child of
``id``
raise: NotFound - ``id`` or ``child_id`` not found
raise: NullArgument - ``id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if bool(self._rls.get_relationships_by_genus_type_for_peers(id_, child_id, self._relationship_type).available()):
raise errors.AlreadyExists() # depends on [control=['if'], data=[]]
rfc = self._ras.get_relationship_form_for_create(id_, child_id, [])
rfc.set_display_name(str(id_) + ' to ' + str(child_id) + ' Parent-Child Relationship')
rfc.set_description(self._relationship_type.get_display_name().get_text() + ' relationship for parent: ' + str(id_) + ' and child: ' + str(child_id))
rfc.set_genus_type(self._relationship_type)
self._ras.create_relationship(rfc) |
def __interrupt_search(self):
"""
Interrupt the current search.
"""
if self.__search_worker_thread:
self.__search_worker_thread.quit()
self.__search_worker_thread.wait()
self.__container.engine.stop_processing(warning=False) | def function[__interrupt_search, parameter[self]]:
constant[
Interrupt the current search.
]
if name[self].__search_worker_thread begin[:]
call[name[self].__search_worker_thread.quit, parameter[]]
call[name[self].__search_worker_thread.wait, parameter[]]
call[name[self].__container.engine.stop_processing, parameter[]] | keyword[def] identifier[__interrupt_search] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__search_worker_thread] :
identifier[self] . identifier[__search_worker_thread] . identifier[quit] ()
identifier[self] . identifier[__search_worker_thread] . identifier[wait] ()
identifier[self] . identifier[__container] . identifier[engine] . identifier[stop_processing] ( identifier[warning] = keyword[False] ) | def __interrupt_search(self):
"""
Interrupt the current search.
"""
if self.__search_worker_thread:
self.__search_worker_thread.quit()
self.__search_worker_thread.wait()
self.__container.engine.stop_processing(warning=False) # depends on [control=['if'], data=[]] |
def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
"""Initialize Flask-SQLAlchemy extension."""
# Setup SQLAlchemy
app.config.setdefault(
'SQLALCHEMY_DATABASE_URI',
'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')
)
app.config.setdefault('SQLALCHEMY_ECHO', False)
# Initialize Flask-SQLAlchemy extension.
database = kwargs.get('db', db)
database.init_app(app)
# Initialize versioning support.
self.init_versioning(app, database, kwargs.get('versioning_manager'))
# Initialize model bases
if entry_point_group:
for base_entry in pkg_resources.iter_entry_points(
entry_point_group):
base_entry.load()
# All models should be loaded by now.
sa.orm.configure_mappers()
# Ensure that versioning classes have been built.
if app.config['DB_VERSIONING']:
manager = self.versioning_manager
if manager.pending_classes:
if not versioning_models_registered(manager, database.Model):
manager.builder.configure_versioned_classes()
elif 'transaction' not in database.metadata.tables:
manager.declarative_base = database.Model
manager.create_transaction_model()
manager.plugins.after_build_tx_class(manager) | def function[init_db, parameter[self, app, entry_point_group]]:
constant[Initialize Flask-SQLAlchemy extension.]
call[name[app].config.setdefault, parameter[constant[SQLALCHEMY_DATABASE_URI], binary_operation[constant[sqlite:///] + call[name[os].path.join, parameter[name[app].instance_path, binary_operation[name[app].name + constant[.db]]]]]]]
call[name[app].config.setdefault, parameter[constant[SQLALCHEMY_ECHO], constant[False]]]
variable[database] assign[=] call[name[kwargs].get, parameter[constant[db], name[db]]]
call[name[database].init_app, parameter[name[app]]]
call[name[self].init_versioning, parameter[name[app], name[database], call[name[kwargs].get, parameter[constant[versioning_manager]]]]]
if name[entry_point_group] begin[:]
for taget[name[base_entry]] in starred[call[name[pkg_resources].iter_entry_points, parameter[name[entry_point_group]]]] begin[:]
call[name[base_entry].load, parameter[]]
call[name[sa].orm.configure_mappers, parameter[]]
if call[name[app].config][constant[DB_VERSIONING]] begin[:]
variable[manager] assign[=] name[self].versioning_manager
if name[manager].pending_classes begin[:]
if <ast.UnaryOp object at 0x7da1b0feb130> begin[:]
call[name[manager].builder.configure_versioned_classes, parameter[]] | keyword[def] identifier[init_db] ( identifier[self] , identifier[app] , identifier[entry_point_group] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[app] . identifier[config] . identifier[setdefault] (
literal[string] ,
literal[string] + identifier[os] . identifier[path] . identifier[join] ( identifier[app] . identifier[instance_path] , identifier[app] . identifier[name] + literal[string] )
)
identifier[app] . identifier[config] . identifier[setdefault] ( literal[string] , keyword[False] )
identifier[database] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[db] )
identifier[database] . identifier[init_app] ( identifier[app] )
identifier[self] . identifier[init_versioning] ( identifier[app] , identifier[database] , identifier[kwargs] . identifier[get] ( literal[string] ))
keyword[if] identifier[entry_point_group] :
keyword[for] identifier[base_entry] keyword[in] identifier[pkg_resources] . identifier[iter_entry_points] (
identifier[entry_point_group] ):
identifier[base_entry] . identifier[load] ()
identifier[sa] . identifier[orm] . identifier[configure_mappers] ()
keyword[if] identifier[app] . identifier[config] [ literal[string] ]:
identifier[manager] = identifier[self] . identifier[versioning_manager]
keyword[if] identifier[manager] . identifier[pending_classes] :
keyword[if] keyword[not] identifier[versioning_models_registered] ( identifier[manager] , identifier[database] . identifier[Model] ):
identifier[manager] . identifier[builder] . identifier[configure_versioned_classes] ()
keyword[elif] literal[string] keyword[not] keyword[in] identifier[database] . identifier[metadata] . identifier[tables] :
identifier[manager] . identifier[declarative_base] = identifier[database] . identifier[Model]
identifier[manager] . identifier[create_transaction_model] ()
identifier[manager] . identifier[plugins] . identifier[after_build_tx_class] ( identifier[manager] ) | def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
"""Initialize Flask-SQLAlchemy extension."""
# Setup SQLAlchemy
app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///' + os.path.join(app.instance_path, app.name + '.db'))
app.config.setdefault('SQLALCHEMY_ECHO', False)
# Initialize Flask-SQLAlchemy extension.
database = kwargs.get('db', db)
database.init_app(app)
# Initialize versioning support.
self.init_versioning(app, database, kwargs.get('versioning_manager'))
# Initialize model bases
if entry_point_group:
for base_entry in pkg_resources.iter_entry_points(entry_point_group):
base_entry.load() # depends on [control=['for'], data=['base_entry']] # depends on [control=['if'], data=[]]
# All models should be loaded by now.
sa.orm.configure_mappers()
# Ensure that versioning classes have been built.
if app.config['DB_VERSIONING']:
manager = self.versioning_manager
if manager.pending_classes:
if not versioning_models_registered(manager, database.Model):
manager.builder.configure_versioned_classes() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'transaction' not in database.metadata.tables:
manager.declarative_base = database.Model
manager.create_transaction_model()
manager.plugins.after_build_tx_class(manager) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def encode_events(self, duration, events, values, dtype=np.bool):
    '''Encode labeled events as a time-series matrix.

    Parameters
    ----------
    duration : number
        The duration of the track
    events : ndarray, shape=(n,)
        Time index of the events
    values : ndarray, shape=(n, m)
        Values array. Must have the same first index as `events`.
    dtype : numpy data type
        Output dtype; values landing on the same frame are accumulated
        with ``+=``.
        NOTE(review): ``np.bool`` was removed in NumPy 1.24 -- consider
        the builtin ``bool`` or ``np.bool_`` as the default.
    Returns
    -------
    target : ndarray, shape=(n_frames, n_values)
    '''
    # Convert event times to frame indices at this object's sample
    # rate / hop length.
    frames = time_to_frames(events, sr=self.sr,
                            hop_length=self.hop_length)
    # Total number of frames covered by the track duration.
    n_total = int(time_to_frames(duration, sr=self.sr,
                                 hop_length=self.hop_length))
    n_alloc = n_total
    if np.any(frames):
        # Over-allocate so the latest event can be indexed even if it
        # falls past the nominal track duration.
        n_alloc = max(n_total, 1 + int(frames.max()))
    target = np.empty((n_alloc, values.shape[1]),
                      dtype=dtype)
    # Initialize with the dtype-specific fill value before accumulating.
    target.fill(fill_value(dtype))
    values = values.astype(dtype)
    # Accumulate each event's value row into its frame slot.
    for column, event in zip(values, frames):
        target[event] += column
    # Trim any over-allocated rows back to the track duration.
    return target[:n_total] | def function[encode_events, parameter[self, duration, events, values, dtype]]:
constant[Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
]
variable[frames] assign[=] call[name[time_to_frames], parameter[name[events]]]
variable[n_total] assign[=] call[name[int], parameter[call[name[time_to_frames], parameter[name[duration]]]]]
variable[n_alloc] assign[=] name[n_total]
if call[name[np].any, parameter[name[frames]]] begin[:]
variable[n_alloc] assign[=] call[name[max], parameter[name[n_total], binary_operation[constant[1] + call[name[int], parameter[call[name[frames].max, parameter[]]]]]]]
variable[target] assign[=] call[name[np].empty, parameter[tuple[[<ast.Name object at 0x7da1b1077010>, <ast.Subscript object at 0x7da1b10758a0>]]]]
call[name[target].fill, parameter[call[name[fill_value], parameter[name[dtype]]]]]
variable[values] assign[=] call[name[values].astype, parameter[name[dtype]]]
for taget[tuple[[<ast.Name object at 0x7da1b1074220>, <ast.Name object at 0x7da1b1076c50>]]] in starred[call[name[zip], parameter[name[values], name[frames]]]] begin[:]
<ast.AugAssign object at 0x7da1b1074850>
return[call[name[target]][<ast.Slice object at 0x7da1b10749d0>]] | keyword[def] identifier[encode_events] ( identifier[self] , identifier[duration] , identifier[events] , identifier[values] , identifier[dtype] = identifier[np] . identifier[bool] ):
literal[string]
identifier[frames] = identifier[time_to_frames] ( identifier[events] , identifier[sr] = identifier[self] . identifier[sr] ,
identifier[hop_length] = identifier[self] . identifier[hop_length] )
identifier[n_total] = identifier[int] ( identifier[time_to_frames] ( identifier[duration] , identifier[sr] = identifier[self] . identifier[sr] ,
identifier[hop_length] = identifier[self] . identifier[hop_length] ))
identifier[n_alloc] = identifier[n_total]
keyword[if] identifier[np] . identifier[any] ( identifier[frames] ):
identifier[n_alloc] = identifier[max] ( identifier[n_total] , literal[int] + identifier[int] ( identifier[frames] . identifier[max] ()))
identifier[target] = identifier[np] . identifier[empty] (( identifier[n_alloc] , identifier[values] . identifier[shape] [ literal[int] ]),
identifier[dtype] = identifier[dtype] )
identifier[target] . identifier[fill] ( identifier[fill_value] ( identifier[dtype] ))
identifier[values] = identifier[values] . identifier[astype] ( identifier[dtype] )
keyword[for] identifier[column] , identifier[event] keyword[in] identifier[zip] ( identifier[values] , identifier[frames] ):
identifier[target] [ identifier[event] ]+= identifier[column]
keyword[return] identifier[target] [: identifier[n_total] ] | def encode_events(self, duration, events, values, dtype=np.bool):
"""Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
"""
frames = time_to_frames(events, sr=self.sr, hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length))
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max())) # depends on [control=['if'], data=[]]
target = np.empty((n_alloc, values.shape[1]), dtype=dtype)
target.fill(fill_value(dtype))
values = values.astype(dtype)
for (column, event) in zip(values, frames):
target[event] += column # depends on [control=['for'], data=[]]
return target[:n_total] |
def draw_triangle(setter, x0, y0, x1, y1, x2, y2, color=None, aa=False):
    """Draw triangle with points x0,y0 - x1,y1 - x2,y2.

    Only the outline is rendered (three edges); the interior is not
    filled.  ``color`` and ``aa`` (anti-aliasing flag) are passed
    through to ``draw_line`` unchanged.
    """
    draw_line(setter, x0, y0, x1, y1, color, aa)
    draw_line(setter, x1, y1, x2, y2, color, aa)
    draw_line(setter, x2, y2, x0, y0, color, aa) | def function[draw_triangle, parameter[setter, x0, y0, x1, y1, x2, y2, color, aa]]:
constant[Draw triangle with points x0,y0 - x1,y1 - x2,y2]
call[name[draw_line], parameter[name[setter], name[x0], name[y0], name[x1], name[y1], name[color], name[aa]]]
call[name[draw_line], parameter[name[setter], name[x1], name[y1], name[x2], name[y2], name[color], name[aa]]]
call[name[draw_line], parameter[name[setter], name[x2], name[y2], name[x0], name[y0], name[color], name[aa]]] | keyword[def] identifier[draw_triangle] ( identifier[setter] , identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] , identifier[color] = keyword[None] , identifier[aa] = keyword[False] ):
literal[string]
identifier[draw_line] ( identifier[setter] , identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] , identifier[color] , identifier[aa] )
identifier[draw_line] ( identifier[setter] , identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] , identifier[color] , identifier[aa] )
identifier[draw_line] ( identifier[setter] , identifier[x2] , identifier[y2] , identifier[x0] , identifier[y0] , identifier[color] , identifier[aa] ) | def draw_triangle(setter, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""Draw triangle with points x0,y0 - x1,y1 - x2,y2"""
draw_line(setter, x0, y0, x1, y1, color, aa)
draw_line(setter, x1, y1, x2, y2, color, aa)
draw_line(setter, x2, y2, x0, y0, color, aa) |
def create_alarm(deployment_id, metric_name, data, api_key=None, profile="telemetry"):
    '''
    Create a telemetry alarm for ``metric_name`` on the given deployment.

    data is a dict of alert configuration data (reads the ``filter``,
    ``escalate_to``, ``max`` and ``min`` keys).
    Returns (bool success, parsed JSON response body) tuple.
    CLI Example:
    salt myminion telemetry.create_alarm rs-ds033197 {} profile=telemetry
    '''
    auth = _auth(api_key, profile)
    request_uri = _get_telemetry_base(profile) + "/alerts"
    # NOTE(review): ``key`` is computed but never used in this function.
    key = "telemetry.{0}.alerts".format(deployment_id)
    # set the notification channels if not already set
    post_body = {
        "deployment": deployment_id,
        "filter": data.get('filter'),
        "notificationChannel": get_notification_channel_id(data.get('escalate_to')).split(),
        "condition": {
            "metric": metric_name,
            "max": data.get('max'),
            "min": data.get('min')
        }
    }
    try:
        response = requests.post(request_uri, data=salt.utils.json.dumps(post_body), headers=auth)
    except requests.exceptions.RequestException as e:
        # TODO: May be we should retry?
        # NOTE(review): on a request exception, ``response`` is never
        # bound, so the status-code check below raises UnboundLocalError;
        # this handler should return or re-raise.
        log.error(six.text_type(e))
    if response.status_code >= 200 and response.status_code < 300:
        # update cache
        log.info('Created alarm on metric: %s in deployment: %s', metric_name, deployment_id)
        log.debug('Updating cache for metric %s in deployment %s: %s',
                  metric_name, deployment_id, response.json())
        _update_cache(deployment_id, metric_name, response.json())
    else:
        log.error(
            'Failed to create alarm on metric: %s in '
            'deployment %s: payload: %s',
            metric_name, deployment_id, salt.utils.json.dumps(post_body)
        )
    return response.status_code >= 200 and response.status_code < 300, response.json() | def function[create_alarm, parameter[deployment_id, metric_name, data, api_key, profile]]:
constant[
create an telemetry alarms.
data is a dict of alert configuration data.
Returns (bool success, str message) tuple.
CLI Example:
salt myminion telemetry.create_alarm rs-ds033197 {} profile=telemetry
]
variable[auth] assign[=] call[name[_auth], parameter[name[api_key], name[profile]]]
variable[request_uri] assign[=] binary_operation[call[name[_get_telemetry_base], parameter[name[profile]]] + constant[/alerts]]
variable[key] assign[=] call[constant[telemetry.{0}.alerts].format, parameter[name[deployment_id]]]
variable[post_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b219f550>, <ast.Constant object at 0x7da1b219ca30>, <ast.Constant object at 0x7da1b219fe50>, <ast.Constant object at 0x7da1b219fb20>], [<ast.Name object at 0x7da1b219ee30>, <ast.Call object at 0x7da1b219f5e0>, <ast.Call object at 0x7da1b219e620>, <ast.Dict object at 0x7da1b219cee0>]]
<ast.Try object at 0x7da1b219d960>
if <ast.BoolOp object at 0x7da1b21950f0> begin[:]
call[name[log].info, parameter[constant[Created alarm on metric: %s in deployment: %s], name[metric_name], name[deployment_id]]]
call[name[log].debug, parameter[constant[Updating cache for metric %s in deployment %s: %s], name[metric_name], name[deployment_id], call[name[response].json, parameter[]]]]
call[name[_update_cache], parameter[name[deployment_id], name[metric_name], call[name[response].json, parameter[]]]]
return[tuple[[<ast.BoolOp object at 0x7da1b2195e10>, <ast.Call object at 0x7da1b2196260>]]] | keyword[def] identifier[create_alarm] ( identifier[deployment_id] , identifier[metric_name] , identifier[data] , identifier[api_key] = keyword[None] , identifier[profile] = literal[string] ):
literal[string]
identifier[auth] = identifier[_auth] ( identifier[api_key] , identifier[profile] )
identifier[request_uri] = identifier[_get_telemetry_base] ( identifier[profile] )+ literal[string]
identifier[key] = literal[string] . identifier[format] ( identifier[deployment_id] )
identifier[post_body] ={
literal[string] : identifier[deployment_id] ,
literal[string] : identifier[data] . identifier[get] ( literal[string] ),
literal[string] : identifier[get_notification_channel_id] ( identifier[data] . identifier[get] ( literal[string] )). identifier[split] (),
literal[string] :{
literal[string] : identifier[metric_name] ,
literal[string] : identifier[data] . identifier[get] ( literal[string] ),
literal[string] : identifier[data] . identifier[get] ( literal[string] )
}
}
keyword[try] :
identifier[response] = identifier[requests] . identifier[post] ( identifier[request_uri] , identifier[data] = identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[post_body] ), identifier[headers] = identifier[auth] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( identifier[six] . identifier[text_type] ( identifier[e] ))
keyword[if] identifier[response] . identifier[status_code] >= literal[int] keyword[and] identifier[response] . identifier[status_code] < literal[int] :
identifier[log] . identifier[info] ( literal[string] , identifier[metric_name] , identifier[deployment_id] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[metric_name] , identifier[deployment_id] , identifier[response] . identifier[json] ())
identifier[_update_cache] ( identifier[deployment_id] , identifier[metric_name] , identifier[response] . identifier[json] ())
keyword[else] :
identifier[log] . identifier[error] (
literal[string]
literal[string] ,
identifier[metric_name] , identifier[deployment_id] , identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[post_body] )
)
keyword[return] identifier[response] . identifier[status_code] >= literal[int] keyword[and] identifier[response] . identifier[status_code] < literal[int] , identifier[response] . identifier[json] () | def create_alarm(deployment_id, metric_name, data, api_key=None, profile='telemetry'):
"""
create an telemetry alarms.
data is a dict of alert configuration data.
Returns (bool success, str message) tuple.
CLI Example:
salt myminion telemetry.create_alarm rs-ds033197 {} profile=telemetry
"""
auth = _auth(api_key, profile)
request_uri = _get_telemetry_base(profile) + '/alerts'
key = 'telemetry.{0}.alerts'.format(deployment_id)
# set the notification channels if not already set
post_body = {'deployment': deployment_id, 'filter': data.get('filter'), 'notificationChannel': get_notification_channel_id(data.get('escalate_to')).split(), 'condition': {'metric': metric_name, 'max': data.get('max'), 'min': data.get('min')}}
try:
response = requests.post(request_uri, data=salt.utils.json.dumps(post_body), headers=auth) # depends on [control=['try'], data=[]]
except requests.exceptions.RequestException as e:
# TODO: May be we should retry?
log.error(six.text_type(e)) # depends on [control=['except'], data=['e']]
if response.status_code >= 200 and response.status_code < 300:
# update cache
log.info('Created alarm on metric: %s in deployment: %s', metric_name, deployment_id)
log.debug('Updating cache for metric %s in deployment %s: %s', metric_name, deployment_id, response.json())
_update_cache(deployment_id, metric_name, response.json()) # depends on [control=['if'], data=[]]
else:
log.error('Failed to create alarm on metric: %s in deployment %s: payload: %s', metric_name, deployment_id, salt.utils.json.dumps(post_body))
return (response.status_code >= 200 and response.status_code < 300, response.json()) |
def list_equivalencies(self):
    """Lists the possible equivalencies associated with this unit object
    Examples
    --------
    >>> from unyt import km
    >>> km.units.list_equivalencies()
    spectral: length <-> spatial_frequency <-> frequency <-> energy
    schwarzschild: mass <-> length
    compton: mass <-> length
    """
    # Imported locally, presumably to avoid a circular import at module
    # load time -- confirm.
    from unyt.equivalencies import equivalence_registry
    # Print every registered equivalence that applies to this unit.
    for k, v in equivalence_registry.items():
        if self.has_equivalent(k):
            print(v()) | def function[list_equivalencies, parameter[self]]:
constant[Lists the possible equivalencies associated with this unit object
Examples
--------
>>> from unyt import km
>>> km.units.list_equivalencies()
spectral: length <-> spatial_frequency <-> frequency <-> energy
schwarzschild: mass <-> length
compton: mass <-> length
]
from relative_module[unyt.equivalencies] import module[equivalence_registry]
for taget[tuple[[<ast.Name object at 0x7da1b1193790>, <ast.Name object at 0x7da1b1191600>]]] in starred[call[name[equivalence_registry].items, parameter[]]] begin[:]
if call[name[self].has_equivalent, parameter[name[k]]] begin[:]
call[name[print], parameter[call[name[v], parameter[]]]] | keyword[def] identifier[list_equivalencies] ( identifier[self] ):
literal[string]
keyword[from] identifier[unyt] . identifier[equivalencies] keyword[import] identifier[equivalence_registry]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[equivalence_registry] . identifier[items] ():
keyword[if] identifier[self] . identifier[has_equivalent] ( identifier[k] ):
identifier[print] ( identifier[v] ()) | def list_equivalencies(self):
"""Lists the possible equivalencies associated with this unit object
Examples
--------
>>> from unyt import km
>>> km.units.list_equivalencies()
spectral: length <-> spatial_frequency <-> frequency <-> energy
schwarzschild: mass <-> length
compton: mass <-> length
"""
from unyt.equivalencies import equivalence_registry
for (k, v) in equivalence_registry.items():
if self.has_equivalent(k):
print(v()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def keyboard_field(value, args=None):
    """
    Format keyboard /command field.

    ``args`` is a query-string (e.g. ``"command=start&field=slug&per_line=2"``)
    parsed with QueryDict; each element of ``value`` is rendered as
    ``"/<command> <getattr(element, field)>"`` and the results are grouped
    ``per_line`` items per row.  Returns the ``str()`` of the nested list,
    encoded as UTF-8 bytes.
    """
    qs = QueryDict(args)
    # per_line arrives as a string from the query dict; cast below.
    per_line = qs.get('per_line', 1)
    field = qs.get("field", "slug")
    command = qs.get("command")
    # Render one element as a "/command value" string.
    convert = lambda element: "/" + command + " " + str(getattr(element, field))
    # Chunk a flat list into rows of `size` items.
    group = lambda flat, size: [flat[i:i+size] for i in range(0, len(flat), size)]
    grouped = group(value, int(per_line))
    new_list = []
    for line in grouped:
        new_list.append([convert(e) for e in line])
    return str(new_list).encode('utf-8') | def function[keyboard_field, parameter[value, args]]:
constant[
Format keyboard /command field.
]
variable[qs] assign[=] call[name[QueryDict], parameter[name[args]]]
variable[per_line] assign[=] call[name[qs].get, parameter[constant[per_line], constant[1]]]
variable[field] assign[=] call[name[qs].get, parameter[constant[field], constant[slug]]]
variable[command] assign[=] call[name[qs].get, parameter[constant[command]]]
variable[convert] assign[=] <ast.Lambda object at 0x7da20e9b0730>
variable[group] assign[=] <ast.Lambda object at 0x7da20e9b1750>
variable[grouped] assign[=] call[name[group], parameter[name[value], call[name[int], parameter[name[per_line]]]]]
variable[new_list] assign[=] list[[]]
for taget[name[line]] in starred[name[grouped]] begin[:]
call[name[new_list].append, parameter[<ast.ListComp object at 0x7da18dc9be80>]]
return[call[call[name[str], parameter[name[new_list]]].encode, parameter[constant[utf-8]]]] | keyword[def] identifier[keyboard_field] ( identifier[value] , identifier[args] = keyword[None] ):
literal[string]
identifier[qs] = identifier[QueryDict] ( identifier[args] )
identifier[per_line] = identifier[qs] . identifier[get] ( literal[string] , literal[int] )
identifier[field] = identifier[qs] . identifier[get] ( literal[string] , literal[string] )
identifier[command] = identifier[qs] . identifier[get] ( literal[string] )
identifier[convert] = keyword[lambda] identifier[element] : literal[string] + identifier[command] + literal[string] + identifier[str] ( identifier[getattr] ( identifier[element] , identifier[field] ))
identifier[group] = keyword[lambda] identifier[flat] , identifier[size] :[ identifier[flat] [ identifier[i] : identifier[i] + identifier[size] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[flat] ), identifier[size] )]
identifier[grouped] = identifier[group] ( identifier[value] , identifier[int] ( identifier[per_line] ))
identifier[new_list] =[]
keyword[for] identifier[line] keyword[in] identifier[grouped] :
identifier[new_list] . identifier[append] ([ identifier[convert] ( identifier[e] ) keyword[for] identifier[e] keyword[in] identifier[line] ])
keyword[return] identifier[str] ( identifier[new_list] ). identifier[encode] ( literal[string] ) | def keyboard_field(value, args=None):
"""
Format keyboard /command field.
"""
qs = QueryDict(args)
per_line = qs.get('per_line', 1)
field = qs.get('field', 'slug')
command = qs.get('command')
convert = lambda element: '/' + command + ' ' + str(getattr(element, field))
group = lambda flat, size: [flat[i:i + size] for i in range(0, len(flat), size)]
grouped = group(value, int(per_line))
new_list = []
for line in grouped:
new_list.append([convert(e) for e in line]) # depends on [control=['for'], data=['line']]
return str(new_list).encode('utf-8') |
def document(self, document_id, **kwargs):
    """Requests for a document by the document id.
    Normally the response.content can be saved as a pdf file
    Args:
        document_id (str): The id of the document retrieved.
        kwargs (dict): additional keywords passed into
            requests.session.get *params* keyword.
    Returns:
        The raw response object (after HTTP error handling).
    """
    # Build .../document/<id>/content under the service's document URI.
    baseuri = '{}document/{}/content'.format(self._DOCUMENT_URI,
                                             document_id)
    res = self.session.get(baseuri, params=kwargs)
    # Raises via the client's error handler on non-success statuses.
    self.handle_http_error(res)
    return res | def function[document, parameter[self, document_id]]:
constant[Requests for a document by the document id.
Normally the response.content can be saved as a pdf file
Args:
document_id (str): The id of the document retrieved.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
]
variable[baseuri] assign[=] call[constant[{}document/{}/content].format, parameter[name[self]._DOCUMENT_URI, name[document_id]]]
variable[res] assign[=] call[name[self].session.get, parameter[name[baseuri]]]
call[name[self].handle_http_error, parameter[name[res]]]
return[name[res]] | keyword[def] identifier[document] ( identifier[self] , identifier[document_id] ,** identifier[kwargs] ):
literal[string]
identifier[baseuri] = literal[string] . identifier[format] ( identifier[self] . identifier[_DOCUMENT_URI] ,
identifier[document_id] )
identifier[res] = identifier[self] . identifier[session] . identifier[get] ( identifier[baseuri] , identifier[params] = identifier[kwargs] )
identifier[self] . identifier[handle_http_error] ( identifier[res] )
keyword[return] identifier[res] | def document(self, document_id, **kwargs):
"""Requests for a document by the document id.
Normally the response.content can be saved as a pdf file
Args:
document_id (str): The id of the document retrieved.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = '{}document/{}/content'.format(self._DOCUMENT_URI, document_id)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res |
def convert_concat(node, **kwargs):
    """Map MXNet's Concat operator attributes to onnx's Concat operator
    and return the created node.

    MXNet's ``dim`` attribute (default 1) maps to ONNX's ``axis``.
    Returns a single-element list containing the ONNX node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    # MXNet calls the concat axis "dim"; ONNX calls it "axis".
    axis = int(attrs.get("dim", 1))
    concat_node = onnx.helper.make_node(
        "Concat",
        input_nodes,
        [name],
        axis=axis,
        name=name
    )
    return [concat_node] | def function[convert_concat, parameter[node]]:
constant[Map MXNet's Concat operator attributes to onnx's Concat operator
and return the created node.
]
<ast.Tuple object at 0x7da1b204e410> assign[=] call[name[get_inputs], parameter[name[node], name[kwargs]]]
variable[axis] assign[=] call[name[int], parameter[call[name[attrs].get, parameter[constant[dim], constant[1]]]]]
variable[concat_node] assign[=] call[name[onnx].helper.make_node, parameter[constant[Concat], name[input_nodes], list[[<ast.Name object at 0x7da1b208ae90>]]]]
return[list[[<ast.Name object at 0x7da1b204cb20>]]] | keyword[def] identifier[convert_concat] ( identifier[node] ,** identifier[kwargs] ):
literal[string]
identifier[name] , identifier[input_nodes] , identifier[attrs] = identifier[get_inputs] ( identifier[node] , identifier[kwargs] )
identifier[axis] = identifier[int] ( identifier[attrs] . identifier[get] ( literal[string] , literal[int] ))
identifier[concat_node] = identifier[onnx] . identifier[helper] . identifier[make_node] (
literal[string] ,
identifier[input_nodes] ,
[ identifier[name] ],
identifier[axis] = identifier[axis] ,
identifier[name] = identifier[name]
)
keyword[return] [ identifier[concat_node] ] | def convert_concat(node, **kwargs):
"""Map MXNet's Concat operator attributes to onnx's Concat operator
and return the created node.
"""
(name, input_nodes, attrs) = get_inputs(node, kwargs)
axis = int(attrs.get('dim', 1))
concat_node = onnx.helper.make_node('Concat', input_nodes, [name], axis=axis, name=name)
return [concat_node] |
def _to_dict(self):
    """Return a json dictionary representing this model.

    Only attributes that exist and are non-None are included, so the
    resulting dict is safe to serialize directly.
    """
    _dict = {}
    if hasattr(self, 'document_id') and self.document_id is not None:
        _dict['document_id'] = self.document_id
    if hasattr(self,
               'cross_reference') and self.cross_reference is not None:
        _dict['cross_reference'] = self.cross_reference
    if hasattr(self, 'relevance') and self.relevance is not None:
        _dict['relevance'] = self.relevance
    return _dict | def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da18fe90430> begin[:]
call[name[_dict]][constant[document_id]] assign[=] name[self].document_id
if <ast.BoolOp object at 0x7da18fe92020> begin[:]
call[name[_dict]][constant[cross_reference]] assign[=] name[self].cross_reference
if <ast.BoolOp object at 0x7da18fe93790> begin[:]
call[name[_dict]][constant[relevance]] assign[=] name[self].relevance
return[name[_dict]] | keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[document_id] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[document_id]
keyword[if] identifier[hasattr] ( identifier[self] ,
literal[string] ) keyword[and] identifier[self] . identifier[cross_reference] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[cross_reference]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[relevance] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[relevance]
keyword[return] identifier[_dict] | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_id') and self.document_id is not None:
_dict['document_id'] = self.document_id # depends on [control=['if'], data=[]]
if hasattr(self, 'cross_reference') and self.cross_reference is not None:
_dict['cross_reference'] = self.cross_reference # depends on [control=['if'], data=[]]
if hasattr(self, 'relevance') and self.relevance is not None:
_dict['relevance'] = self.relevance # depends on [control=['if'], data=[]]
return _dict |
def parse_code(url):
    """
    Parse the code parameter from the a URL
    :param str url: URL to parse
    :return: code query parameter values; note ``parse_qs`` maps each
        key to a *list* of strings, so a list is returned (and KeyError
        is raised when no ``code`` parameter is present)
    :rtype: list
    """
    result = urlparse(url)
    query = parse_qs(result.query)
    return query['code'] | def function[parse_code, parameter[url]]:
constant[
Parse the code parameter from the a URL
:param str url: URL to parse
:return: code query parameter
:rtype: str
]
variable[result] assign[=] call[name[urlparse], parameter[name[url]]]
variable[query] assign[=] call[name[parse_qs], parameter[name[result].query]]
return[call[name[query]][constant[code]]] | keyword[def] identifier[parse_code] ( identifier[url] ):
literal[string]
identifier[result] = identifier[urlparse] ( identifier[url] )
identifier[query] = identifier[parse_qs] ( identifier[result] . identifier[query] )
keyword[return] identifier[query] [ literal[string] ] | def parse_code(url):
"""
Parse the code parameter from the a URL
:param str url: URL to parse
:return: code query parameter
:rtype: str
"""
result = urlparse(url)
query = parse_qs(result.query)
return query['code'] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.