code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _get_singlekws(skw_matches, spires=False):
"""Get single keywords.
:var skw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: list of formatted keywords
"""
output = {}
for single_keyword, info in skw_matches:
output[single_keyword.output(spires)] = len(info[0])
output = [{'keyword': key, 'number': value}
for key, value in output.iteritems()]
return sorted(output, key=lambda x: x['number'], reverse=True) | def function[_get_singlekws, parameter[skw_matches, spires]]:
constant[Get single keywords.
:var skw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: list of formatted keywords
]
variable[output] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18f8105e0>, <ast.Name object at 0x7da18f812d70>]]] in starred[name[skw_matches]] begin[:]
call[name[output]][call[name[single_keyword].output, parameter[name[spires]]]] assign[=] call[name[len], parameter[call[name[info]][constant[0]]]]
variable[output] assign[=] <ast.ListComp object at 0x7da18f813d00>
return[call[name[sorted], parameter[name[output]]]] | keyword[def] identifier[_get_singlekws] ( identifier[skw_matches] , identifier[spires] = keyword[False] ):
literal[string]
identifier[output] ={}
keyword[for] identifier[single_keyword] , identifier[info] keyword[in] identifier[skw_matches] :
identifier[output] [ identifier[single_keyword] . identifier[output] ( identifier[spires] )]= identifier[len] ( identifier[info] [ literal[int] ])
identifier[output] =[{ literal[string] : identifier[key] , literal[string] : identifier[value] }
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[output] . identifier[iteritems] ()]
keyword[return] identifier[sorted] ( identifier[output] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[string] ], identifier[reverse] = keyword[True] ) | def _get_singlekws(skw_matches, spires=False):
"""Get single keywords.
:var skw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: list of formatted keywords
"""
output = {}
for (single_keyword, info) in skw_matches:
output[single_keyword.output(spires)] = len(info[0]) # depends on [control=['for'], data=[]]
output = [{'keyword': key, 'number': value} for (key, value) in output.iteritems()]
return sorted(output, key=lambda x: x['number'], reverse=True) |
def gevent_start(self):
"""
Helper method to start the node for gevent-based applications.
"""
import gevent
import gevent.select
self._poller_greenlet = gevent.spawn(self.poll)
self._select = gevent.select.select
self.heartbeat()
self.update() | def function[gevent_start, parameter[self]]:
constant[
Helper method to start the node for gevent-based applications.
]
import module[gevent]
import module[gevent.select]
name[self]._poller_greenlet assign[=] call[name[gevent].spawn, parameter[name[self].poll]]
name[self]._select assign[=] name[gevent].select.select
call[name[self].heartbeat, parameter[]]
call[name[self].update, parameter[]] | keyword[def] identifier[gevent_start] ( identifier[self] ):
literal[string]
keyword[import] identifier[gevent]
keyword[import] identifier[gevent] . identifier[select]
identifier[self] . identifier[_poller_greenlet] = identifier[gevent] . identifier[spawn] ( identifier[self] . identifier[poll] )
identifier[self] . identifier[_select] = identifier[gevent] . identifier[select] . identifier[select]
identifier[self] . identifier[heartbeat] ()
identifier[self] . identifier[update] () | def gevent_start(self):
"""
Helper method to start the node for gevent-based applications.
"""
import gevent
import gevent.select
self._poller_greenlet = gevent.spawn(self.poll)
self._select = gevent.select.select
self.heartbeat()
self.update() |
def follow_tail(self):
"""
Read (tail and follow) the log file, parse entries and send messages
to Sentry using Raven.
"""
try:
follower = tailhead.follow_path(self.filepath)
except (FileNotFoundError, PermissionError) as err:
raise SystemExit("Error: Can't read logfile %s (%s)" %
(self.filepath, err))
for line in follower:
self.message = None
self.params = None
self.site = None
if line is not None:
self.parse(line)
send_message(self.message,
self.params,
self.site,
self.logger) | def function[follow_tail, parameter[self]]:
constant[
Read (tail and follow) the log file, parse entries and send messages
to Sentry using Raven.
]
<ast.Try object at 0x7da1b0686770>
for taget[name[line]] in starred[name[follower]] begin[:]
name[self].message assign[=] constant[None]
name[self].params assign[=] constant[None]
name[self].site assign[=] constant[None]
if compare[name[line] is_not constant[None]] begin[:]
call[name[self].parse, parameter[name[line]]]
call[name[send_message], parameter[name[self].message, name[self].params, name[self].site, name[self].logger]] | keyword[def] identifier[follow_tail] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[follower] = identifier[tailhead] . identifier[follow_path] ( identifier[self] . identifier[filepath] )
keyword[except] ( identifier[FileNotFoundError] , identifier[PermissionError] ) keyword[as] identifier[err] :
keyword[raise] identifier[SystemExit] ( literal[string] %
( identifier[self] . identifier[filepath] , identifier[err] ))
keyword[for] identifier[line] keyword[in] identifier[follower] :
identifier[self] . identifier[message] = keyword[None]
identifier[self] . identifier[params] = keyword[None]
identifier[self] . identifier[site] = keyword[None]
keyword[if] identifier[line] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[parse] ( identifier[line] )
identifier[send_message] ( identifier[self] . identifier[message] ,
identifier[self] . identifier[params] ,
identifier[self] . identifier[site] ,
identifier[self] . identifier[logger] ) | def follow_tail(self):
"""
Read (tail and follow) the log file, parse entries and send messages
to Sentry using Raven.
"""
try:
follower = tailhead.follow_path(self.filepath) # depends on [control=['try'], data=[]]
except (FileNotFoundError, PermissionError) as err:
raise SystemExit("Error: Can't read logfile %s (%s)" % (self.filepath, err)) # depends on [control=['except'], data=['err']]
for line in follower:
self.message = None
self.params = None
self.site = None
if line is not None:
self.parse(line)
send_message(self.message, self.params, self.site, self.logger) # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']] |
def extract_attrs(x, n):
"""Extracts attributes from element list 'x' beginning at index 'n'.
The elements encapsulating the attributes (typically a series of Str and
Space elements) are removed from 'x'. Items before index 'n' are left
unchanged.
Returns the attributes in pandoc format. A ValueError is raised if
attributes aren't found. An IndexError is raised if the index 'n' is out
of range."""
# Check for the start of the attributes string
if not (x[n]['t'] == 'Str' and x[n]['c'].startswith('{')):
raise ValueError('Attributes not found.')
# It starts with {, so this *may* be an attributes list. Search for where
# the attributes end. Do not consider } in quoted elements.
seq = [] # A sequence of saved values
quotechar = None # Used to keep track of quotes in strings
flag = False # Flags that an attributes list was found
i = 0 # Initialization
for i, v in enumerate(x[n:]): # Scan through the list
if v and v['t'] == 'Str':
# Scan for } outside of a quote
for j, c in enumerate(v['c']):
if c == quotechar: # This is an end quote
quotechar = None
elif c in ['"', "'"]: # This is an open quote
quotechar = c
elif c == '}' and quotechar is None: # The attributes end here
# Split the string at the } and save the pieces
head, tail = v['c'][:j+1], v['c'][j+1:]
x[n+i] = copy.deepcopy(v)
x[n+i]['c'] = tail
v['c'] = head
flag = True
break
seq.append(v)
if flag:
break
if flag: # Attributes string was found, so process it
# Delete empty and extracted elements
if x[n+i]['t'] == 'Str' and not x[n+i]['c']:
del x[n+i]
del x[n:n+i]
# Process the attrs
attrstr = stringify(dollarfy(quotify(seq))).strip()
attrs = PandocAttributes(attrstr, 'markdown').to_pandoc()
# Remove extranneous quotes from kvs
for i, (k, v) in enumerate(attrs[2]): # pylint: disable=unused-variable
if v[0] == v[-1] == '"' or v[0] == "'" == v[-1] == "'":
attrs[2][i][1] = attrs[2][i][1][1:-1]
# We're done
return attrs
# Attributes not found
raise ValueError('Attributes not found.') | def function[extract_attrs, parameter[x, n]]:
constant[Extracts attributes from element list 'x' beginning at index 'n'.
The elements encapsulating the attributes (typically a series of Str and
Space elements) are removed from 'x'. Items before index 'n' are left
unchanged.
Returns the attributes in pandoc format. A ValueError is raised if
attributes aren't found. An IndexError is raised if the index 'n' is out
of range.]
if <ast.UnaryOp object at 0x7da204344a60> begin[:]
<ast.Raise object at 0x7da2043441f0>
variable[seq] assign[=] list[[]]
variable[quotechar] assign[=] constant[None]
variable[flag] assign[=] constant[False]
variable[i] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da204344160>, <ast.Name object at 0x7da204347a90>]]] in starred[call[name[enumerate], parameter[call[name[x]][<ast.Slice object at 0x7da204347820>]]]] begin[:]
if <ast.BoolOp object at 0x7da2043441c0> begin[:]
for taget[tuple[[<ast.Name object at 0x7da204346410>, <ast.Name object at 0x7da204347b20>]]] in starred[call[name[enumerate], parameter[call[name[v]][constant[c]]]]] begin[:]
if compare[name[c] equal[==] name[quotechar]] begin[:]
variable[quotechar] assign[=] constant[None]
call[name[seq].append, parameter[name[v]]]
if name[flag] begin[:]
break
if name[flag] begin[:]
if <ast.BoolOp object at 0x7da2043461a0> begin[:]
<ast.Delete object at 0x7da204345090>
<ast.Delete object at 0x7da204347c40>
variable[attrstr] assign[=] call[call[name[stringify], parameter[call[name[dollarfy], parameter[call[name[quotify], parameter[name[seq]]]]]]].strip, parameter[]]
variable[attrs] assign[=] call[call[name[PandocAttributes], parameter[name[attrstr], constant[markdown]]].to_pandoc, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da204347a60>, <ast.Tuple object at 0x7da2043460e0>]]] in starred[call[name[enumerate], parameter[call[name[attrs]][constant[2]]]]] begin[:]
if <ast.BoolOp object at 0x7da204344730> begin[:]
call[call[call[name[attrs]][constant[2]]][name[i]]][constant[1]] assign[=] call[call[call[call[name[attrs]][constant[2]]][name[i]]][constant[1]]][<ast.Slice object at 0x7da18f09e8c0>]
return[name[attrs]]
<ast.Raise object at 0x7da18f09c9a0> | keyword[def] identifier[extract_attrs] ( identifier[x] , identifier[n] ):
literal[string]
keyword[if] keyword[not] ( identifier[x] [ identifier[n] ][ literal[string] ]== literal[string] keyword[and] identifier[x] [ identifier[n] ][ literal[string] ]. identifier[startswith] ( literal[string] )):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[seq] =[]
identifier[quotechar] = keyword[None]
identifier[flag] = keyword[False]
identifier[i] = literal[int]
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[x] [ identifier[n] :]):
keyword[if] identifier[v] keyword[and] identifier[v] [ literal[string] ]== literal[string] :
keyword[for] identifier[j] , identifier[c] keyword[in] identifier[enumerate] ( identifier[v] [ literal[string] ]):
keyword[if] identifier[c] == identifier[quotechar] :
identifier[quotechar] = keyword[None]
keyword[elif] identifier[c] keyword[in] [ literal[string] , literal[string] ]:
identifier[quotechar] = identifier[c]
keyword[elif] identifier[c] == literal[string] keyword[and] identifier[quotechar] keyword[is] keyword[None] :
identifier[head] , identifier[tail] = identifier[v] [ literal[string] ][: identifier[j] + literal[int] ], identifier[v] [ literal[string] ][ identifier[j] + literal[int] :]
identifier[x] [ identifier[n] + identifier[i] ]= identifier[copy] . identifier[deepcopy] ( identifier[v] )
identifier[x] [ identifier[n] + identifier[i] ][ literal[string] ]= identifier[tail]
identifier[v] [ literal[string] ]= identifier[head]
identifier[flag] = keyword[True]
keyword[break]
identifier[seq] . identifier[append] ( identifier[v] )
keyword[if] identifier[flag] :
keyword[break]
keyword[if] identifier[flag] :
keyword[if] identifier[x] [ identifier[n] + identifier[i] ][ literal[string] ]== literal[string] keyword[and] keyword[not] identifier[x] [ identifier[n] + identifier[i] ][ literal[string] ]:
keyword[del] identifier[x] [ identifier[n] + identifier[i] ]
keyword[del] identifier[x] [ identifier[n] : identifier[n] + identifier[i] ]
identifier[attrstr] = identifier[stringify] ( identifier[dollarfy] ( identifier[quotify] ( identifier[seq] ))). identifier[strip] ()
identifier[attrs] = identifier[PandocAttributes] ( identifier[attrstr] , literal[string] ). identifier[to_pandoc] ()
keyword[for] identifier[i] ,( identifier[k] , identifier[v] ) keyword[in] identifier[enumerate] ( identifier[attrs] [ literal[int] ]):
keyword[if] identifier[v] [ literal[int] ]== identifier[v] [- literal[int] ]== literal[string] keyword[or] identifier[v] [ literal[int] ]== literal[string] == identifier[v] [- literal[int] ]== literal[string] :
identifier[attrs] [ literal[int] ][ identifier[i] ][ literal[int] ]= identifier[attrs] [ literal[int] ][ identifier[i] ][ literal[int] ][ literal[int] :- literal[int] ]
keyword[return] identifier[attrs]
keyword[raise] identifier[ValueError] ( literal[string] ) | def extract_attrs(x, n):
"""Extracts attributes from element list 'x' beginning at index 'n'.
The elements encapsulating the attributes (typically a series of Str and
Space elements) are removed from 'x'. Items before index 'n' are left
unchanged.
Returns the attributes in pandoc format. A ValueError is raised if
attributes aren't found. An IndexError is raised if the index 'n' is out
of range."""
# Check for the start of the attributes string
if not (x[n]['t'] == 'Str' and x[n]['c'].startswith('{')):
raise ValueError('Attributes not found.') # depends on [control=['if'], data=[]]
# It starts with {, so this *may* be an attributes list. Search for where
# the attributes end. Do not consider } in quoted elements.
seq = [] # A sequence of saved values
quotechar = None # Used to keep track of quotes in strings
flag = False # Flags that an attributes list was found
i = 0 # Initialization
for (i, v) in enumerate(x[n:]): # Scan through the list
if v and v['t'] == 'Str':
# Scan for } outside of a quote
for (j, c) in enumerate(v['c']):
if c == quotechar: # This is an end quote
quotechar = None # depends on [control=['if'], data=['quotechar']]
elif c in ['"', "'"]: # This is an open quote
quotechar = c # depends on [control=['if'], data=['c']]
elif c == '}' and quotechar is None: # The attributes end here
# Split the string at the } and save the pieces
(head, tail) = (v['c'][:j + 1], v['c'][j + 1:])
x[n + i] = copy.deepcopy(v)
x[n + i]['c'] = tail
v['c'] = head
flag = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
seq.append(v)
if flag:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if flag: # Attributes string was found, so process it
# Delete empty and extracted elements
if x[n + i]['t'] == 'Str' and (not x[n + i]['c']):
del x[n + i] # depends on [control=['if'], data=[]]
del x[n:n + i]
# Process the attrs
attrstr = stringify(dollarfy(quotify(seq))).strip()
attrs = PandocAttributes(attrstr, 'markdown').to_pandoc()
# Remove extranneous quotes from kvs
for (i, (k, v)) in enumerate(attrs[2]): # pylint: disable=unused-variable
if v[0] == v[-1] == '"' or v[0] == "'" == v[-1] == "'":
attrs[2][i][1] = attrs[2][i][1][1:-1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# We're done
return attrs # depends on [control=['if'], data=[]]
# Attributes not found
raise ValueError('Attributes not found.') |
def calibrate(self, data, calibration):
"""Calibrate the data."""
tic = datetime.now()
channel_name = self.channel_name
if calibration == 'counts':
res = data
elif calibration in ['radiance', 'reflectance', 'brightness_temperature']:
# Choose calibration coefficients
# a) Internal: Nominal or GSICS?
band_idx = self.mda['spectral_channel_id'] - 1
if self.calib_mode != 'GSICS' or self.channel_name in VIS_CHANNELS:
# you cant apply GSICS values to the VIS channels
coefs = self.prologue["RadiometricProcessing"]["Level15ImageCalibration"]
int_gain = coefs['CalSlope'][band_idx]
int_offset = coefs['CalOffset'][band_idx]
else:
coefs = self.prologue["RadiometricProcessing"]['MPEFCalFeedback']
int_gain = coefs['GSICSCalCoeff'][band_idx]
int_offset = coefs['GSICSOffsetCount'][band_idx]
# b) Internal or external? External takes precedence.
gain = self.ext_calib_coefs.get(self.channel_name, {}).get('gain', int_gain)
offset = self.ext_calib_coefs.get(self.channel_name, {}).get('offset', int_offset)
# Convert to radiance
data = data.where(data > 0)
res = self._convert_to_radiance(data.astype(np.float32), gain, offset)
line_mask = self.mda['image_segment_line_quality']['line_validity'] >= 2
line_mask &= self.mda['image_segment_line_quality']['line_validity'] <= 3
line_mask &= self.mda['image_segment_line_quality']['line_radiometric_quality'] == 4
line_mask &= self.mda['image_segment_line_quality']['line_geometric_quality'] == 4
res *= np.choose(line_mask, [1, np.nan])[:, np.newaxis].astype(np.float32)
if calibration == 'reflectance':
solar_irradiance = CALIB[self.platform_id][channel_name]["F"]
res = self._vis_calibrate(res, solar_irradiance)
elif calibration == 'brightness_temperature':
cal_type = self.prologue['ImageDescription'][
'Level15ImageProduction']['PlannedChanProcessing'][self.mda['spectral_channel_id']]
res = self._ir_calibrate(res, channel_name, cal_type)
logger.debug("Calibration time " + str(datetime.now() - tic))
return res | def function[calibrate, parameter[self, data, calibration]]:
constant[Calibrate the data.]
variable[tic] assign[=] call[name[datetime].now, parameter[]]
variable[channel_name] assign[=] name[self].channel_name
if compare[name[calibration] equal[==] constant[counts]] begin[:]
variable[res] assign[=] name[data]
if compare[name[calibration] equal[==] constant[reflectance]] begin[:]
variable[solar_irradiance] assign[=] call[call[call[name[CALIB]][name[self].platform_id]][name[channel_name]]][constant[F]]
variable[res] assign[=] call[name[self]._vis_calibrate, parameter[name[res], name[solar_irradiance]]]
call[name[logger].debug, parameter[binary_operation[constant[Calibration time ] + call[name[str], parameter[binary_operation[call[name[datetime].now, parameter[]] - name[tic]]]]]]]
return[name[res]] | keyword[def] identifier[calibrate] ( identifier[self] , identifier[data] , identifier[calibration] ):
literal[string]
identifier[tic] = identifier[datetime] . identifier[now] ()
identifier[channel_name] = identifier[self] . identifier[channel_name]
keyword[if] identifier[calibration] == literal[string] :
identifier[res] = identifier[data]
keyword[elif] identifier[calibration] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[band_idx] = identifier[self] . identifier[mda] [ literal[string] ]- literal[int]
keyword[if] identifier[self] . identifier[calib_mode] != literal[string] keyword[or] identifier[self] . identifier[channel_name] keyword[in] identifier[VIS_CHANNELS] :
identifier[coefs] = identifier[self] . identifier[prologue] [ literal[string] ][ literal[string] ]
identifier[int_gain] = identifier[coefs] [ literal[string] ][ identifier[band_idx] ]
identifier[int_offset] = identifier[coefs] [ literal[string] ][ identifier[band_idx] ]
keyword[else] :
identifier[coefs] = identifier[self] . identifier[prologue] [ literal[string] ][ literal[string] ]
identifier[int_gain] = identifier[coefs] [ literal[string] ][ identifier[band_idx] ]
identifier[int_offset] = identifier[coefs] [ literal[string] ][ identifier[band_idx] ]
identifier[gain] = identifier[self] . identifier[ext_calib_coefs] . identifier[get] ( identifier[self] . identifier[channel_name] ,{}). identifier[get] ( literal[string] , identifier[int_gain] )
identifier[offset] = identifier[self] . identifier[ext_calib_coefs] . identifier[get] ( identifier[self] . identifier[channel_name] ,{}). identifier[get] ( literal[string] , identifier[int_offset] )
identifier[data] = identifier[data] . identifier[where] ( identifier[data] > literal[int] )
identifier[res] = identifier[self] . identifier[_convert_to_radiance] ( identifier[data] . identifier[astype] ( identifier[np] . identifier[float32] ), identifier[gain] , identifier[offset] )
identifier[line_mask] = identifier[self] . identifier[mda] [ literal[string] ][ literal[string] ]>= literal[int]
identifier[line_mask] &= identifier[self] . identifier[mda] [ literal[string] ][ literal[string] ]<= literal[int]
identifier[line_mask] &= identifier[self] . identifier[mda] [ literal[string] ][ literal[string] ]== literal[int]
identifier[line_mask] &= identifier[self] . identifier[mda] [ literal[string] ][ literal[string] ]== literal[int]
identifier[res] *= identifier[np] . identifier[choose] ( identifier[line_mask] ,[ literal[int] , identifier[np] . identifier[nan] ])[:, identifier[np] . identifier[newaxis] ]. identifier[astype] ( identifier[np] . identifier[float32] )
keyword[if] identifier[calibration] == literal[string] :
identifier[solar_irradiance] = identifier[CALIB] [ identifier[self] . identifier[platform_id] ][ identifier[channel_name] ][ literal[string] ]
identifier[res] = identifier[self] . identifier[_vis_calibrate] ( identifier[res] , identifier[solar_irradiance] )
keyword[elif] identifier[calibration] == literal[string] :
identifier[cal_type] = identifier[self] . identifier[prologue] [ literal[string] ][
literal[string] ][ literal[string] ][ identifier[self] . identifier[mda] [ literal[string] ]]
identifier[res] = identifier[self] . identifier[_ir_calibrate] ( identifier[res] , identifier[channel_name] , identifier[cal_type] )
identifier[logger] . identifier[debug] ( literal[string] + identifier[str] ( identifier[datetime] . identifier[now] ()- identifier[tic] ))
keyword[return] identifier[res] | def calibrate(self, data, calibration):
"""Calibrate the data."""
tic = datetime.now()
channel_name = self.channel_name
if calibration == 'counts':
res = data # depends on [control=['if'], data=[]]
elif calibration in ['radiance', 'reflectance', 'brightness_temperature']:
# Choose calibration coefficients
# a) Internal: Nominal or GSICS?
band_idx = self.mda['spectral_channel_id'] - 1
if self.calib_mode != 'GSICS' or self.channel_name in VIS_CHANNELS:
# you cant apply GSICS values to the VIS channels
coefs = self.prologue['RadiometricProcessing']['Level15ImageCalibration']
int_gain = coefs['CalSlope'][band_idx]
int_offset = coefs['CalOffset'][band_idx] # depends on [control=['if'], data=[]]
else:
coefs = self.prologue['RadiometricProcessing']['MPEFCalFeedback']
int_gain = coefs['GSICSCalCoeff'][band_idx]
int_offset = coefs['GSICSOffsetCount'][band_idx]
# b) Internal or external? External takes precedence.
gain = self.ext_calib_coefs.get(self.channel_name, {}).get('gain', int_gain)
offset = self.ext_calib_coefs.get(self.channel_name, {}).get('offset', int_offset)
# Convert to radiance
data = data.where(data > 0)
res = self._convert_to_radiance(data.astype(np.float32), gain, offset)
line_mask = self.mda['image_segment_line_quality']['line_validity'] >= 2
line_mask &= self.mda['image_segment_line_quality']['line_validity'] <= 3
line_mask &= self.mda['image_segment_line_quality']['line_radiometric_quality'] == 4
line_mask &= self.mda['image_segment_line_quality']['line_geometric_quality'] == 4
res *= np.choose(line_mask, [1, np.nan])[:, np.newaxis].astype(np.float32) # depends on [control=['if'], data=[]]
if calibration == 'reflectance':
solar_irradiance = CALIB[self.platform_id][channel_name]['F']
res = self._vis_calibrate(res, solar_irradiance) # depends on [control=['if'], data=[]]
elif calibration == 'brightness_temperature':
cal_type = self.prologue['ImageDescription']['Level15ImageProduction']['PlannedChanProcessing'][self.mda['spectral_channel_id']]
res = self._ir_calibrate(res, channel_name, cal_type) # depends on [control=['if'], data=[]]
logger.debug('Calibration time ' + str(datetime.now() - tic))
return res |
def _fulfill(self, *args, **kwargs):
"""
Fulfill the condition.
:param args:
:param kwargs:
:return: true if the condition was successfully fulfilled, bool
"""
tx_hash = self.send_transaction('fulfill', args, **kwargs)
receipt = self.get_tx_receipt(tx_hash)
return receipt.status == 1 | def function[_fulfill, parameter[self]]:
constant[
Fulfill the condition.
:param args:
:param kwargs:
:return: true if the condition was successfully fulfilled, bool
]
variable[tx_hash] assign[=] call[name[self].send_transaction, parameter[constant[fulfill], name[args]]]
variable[receipt] assign[=] call[name[self].get_tx_receipt, parameter[name[tx_hash]]]
return[compare[name[receipt].status equal[==] constant[1]]] | keyword[def] identifier[_fulfill] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[tx_hash] = identifier[self] . identifier[send_transaction] ( literal[string] , identifier[args] ,** identifier[kwargs] )
identifier[receipt] = identifier[self] . identifier[get_tx_receipt] ( identifier[tx_hash] )
keyword[return] identifier[receipt] . identifier[status] == literal[int] | def _fulfill(self, *args, **kwargs):
"""
Fulfill the condition.
:param args:
:param kwargs:
:return: true if the condition was successfully fulfilled, bool
"""
tx_hash = self.send_transaction('fulfill', args, **kwargs)
receipt = self.get_tx_receipt(tx_hash)
return receipt.status == 1 |
def p_block_open(self, p):
""" block_open : identifier brace_open
"""
try:
p[1].parse(self.scope)
except SyntaxError:
pass
p[0] = p[1]
self.scope.current = p[1] | def function[p_block_open, parameter[self, p]]:
constant[ block_open : identifier brace_open
]
<ast.Try object at 0x7da1aff8f7f0>
call[name[p]][constant[0]] assign[=] call[name[p]][constant[1]]
name[self].scope.current assign[=] call[name[p]][constant[1]] | keyword[def] identifier[p_block_open] ( identifier[self] , identifier[p] ):
literal[string]
keyword[try] :
identifier[p] [ literal[int] ]. identifier[parse] ( identifier[self] . identifier[scope] )
keyword[except] identifier[SyntaxError] :
keyword[pass]
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]
identifier[self] . identifier[scope] . identifier[current] = identifier[p] [ literal[int] ] | def p_block_open(self, p):
""" block_open : identifier brace_open
"""
try:
p[1].parse(self.scope) # depends on [control=['try'], data=[]]
except SyntaxError:
pass # depends on [control=['except'], data=[]]
p[0] = p[1]
self.scope.current = p[1] |
def writeFromDict(dataDict, headers, csvFile):
"""
Write dictionary to a CSV, where keys are row numbers and values are a list.
"""
with open(csvFile, "wb") as f:
writer = csv.writer(f, delimiter=",")
writer.writerow(headers)
for row in sorted(dataDict.keys()):
writer.writerow(dataDict[row]) | def function[writeFromDict, parameter[dataDict, headers, csvFile]]:
constant[
Write dictionary to a CSV, where keys are row numbers and values are a list.
]
with call[name[open], parameter[name[csvFile], constant[wb]]] begin[:]
variable[writer] assign[=] call[name[csv].writer, parameter[name[f]]]
call[name[writer].writerow, parameter[name[headers]]]
for taget[name[row]] in starred[call[name[sorted], parameter[call[name[dataDict].keys, parameter[]]]]] begin[:]
call[name[writer].writerow, parameter[call[name[dataDict]][name[row]]]] | keyword[def] identifier[writeFromDict] ( identifier[dataDict] , identifier[headers] , identifier[csvFile] ):
literal[string]
keyword[with] identifier[open] ( identifier[csvFile] , literal[string] ) keyword[as] identifier[f] :
identifier[writer] = identifier[csv] . identifier[writer] ( identifier[f] , identifier[delimiter] = literal[string] )
identifier[writer] . identifier[writerow] ( identifier[headers] )
keyword[for] identifier[row] keyword[in] identifier[sorted] ( identifier[dataDict] . identifier[keys] ()):
identifier[writer] . identifier[writerow] ( identifier[dataDict] [ identifier[row] ]) | def writeFromDict(dataDict, headers, csvFile):
"""
Write dictionary to a CSV, where keys are row numbers and values are a list.
"""
with open(csvFile, 'wb') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(headers)
for row in sorted(dataDict.keys()):
writer.writerow(dataDict[row]) # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['f']] |
def _is_valid_imaging_dicom(dicom_header):
"""
Function will do some basic checks to see if this is a valid imaging dicom
"""
# if it is philips and multiframe dicom then we assume it is ok
try:
if common.is_philips([dicom_header]):
if common.is_multiframe_dicom([dicom_header]):
return True
if "SeriesInstanceUID" not in dicom_header:
return False
if "InstanceNumber" not in dicom_header:
return False
if "ImageOrientationPatient" not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False
if "ImagePositionPatient" not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False
# for all others if there is image position patient we assume it is ok
if Tag(0x0020, 0x0037) not in dicom_header:
return False
return True
except (KeyError, AttributeError):
return False | def function[_is_valid_imaging_dicom, parameter[dicom_header]]:
constant[
Function will do some basic checks to see if this is a valid imaging dicom
]
<ast.Try object at 0x7da1b15ac580> | keyword[def] identifier[_is_valid_imaging_dicom] ( identifier[dicom_header] ):
literal[string]
keyword[try] :
keyword[if] identifier[common] . identifier[is_philips] ([ identifier[dicom_header] ]):
keyword[if] identifier[common] . identifier[is_multiframe_dicom] ([ identifier[dicom_header] ]):
keyword[return] keyword[True]
keyword[if] literal[string] keyword[not] keyword[in] identifier[dicom_header] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[dicom_header] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[dicom_header] keyword[or] identifier[len] ( identifier[dicom_header] . identifier[ImageOrientationPatient] )< literal[int] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[dicom_header] keyword[or] identifier[len] ( identifier[dicom_header] . identifier[ImagePositionPatient] )< literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[Tag] ( literal[int] , literal[int] ) keyword[not] keyword[in] identifier[dicom_header] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[except] ( identifier[KeyError] , identifier[AttributeError] ):
keyword[return] keyword[False] | def _is_valid_imaging_dicom(dicom_header):
"""
Function will do some basic checks to see if this is a valid imaging dicom
"""
# if it is philips and multiframe dicom then we assume it is ok
try:
if common.is_philips([dicom_header]):
if common.is_multiframe_dicom([dicom_header]):
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'SeriesInstanceUID' not in dicom_header:
return False # depends on [control=['if'], data=[]]
if 'InstanceNumber' not in dicom_header:
return False # depends on [control=['if'], data=[]]
if 'ImageOrientationPatient' not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False # depends on [control=['if'], data=[]]
if 'ImagePositionPatient' not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False # depends on [control=['if'], data=[]]
# for all others if there is image position patient we assume it is ok
if Tag(32, 55) not in dicom_header:
return False # depends on [control=['if'], data=[]]
return True # depends on [control=['try'], data=[]]
except (KeyError, AttributeError):
return False # depends on [control=['except'], data=[]] |
def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
    '''Check that a neuron has at least ``min_number`` apical dendrites.

    Arguments:
        neuron(Neuron): The neuron object to test
        min_number: minimum number of apical dendrites required
        treefun: Optional function to calculate the tree type of neuron's
        neurites

    Returns:
        CheckResult with result
    '''
    # Classify each neurite and count how many come out as apical dendrites.
    n_apical = sum(1 for neurite in neuron.neurites
                   if treefun(neurite) == NeuriteType.apical_dendrite)
    return CheckResult(n_apical >= min_number)
constant[Check if a neuron has apical dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of apical dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
]
variable[types] assign[=] <ast.ListComp object at 0x7da204623730>
return[call[name[CheckResult], parameter[compare[call[name[types].count, parameter[name[NeuriteType].apical_dendrite]] greater_or_equal[>=] name[min_number]]]]] | keyword[def] identifier[has_apical_dendrite] ( identifier[neuron] , identifier[min_number] = literal[int] , identifier[treefun] = identifier[_read_neurite_type] ):
literal[string]
identifier[types] =[ identifier[treefun] ( identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[neuron] . identifier[neurites] ]
keyword[return] identifier[CheckResult] ( identifier[types] . identifier[count] ( identifier[NeuriteType] . identifier[apical_dendrite] )>= identifier[min_number] ) | def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
"""Check if a neuron has apical dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of apical dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
"""
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.apical_dendrite) >= min_number) |
def oqi(ql, qs, ns=None, rc=None, ot=None, coe=None, moc=None):
    """
    Wrapper for :meth:`~pywbem.WBEMConnection.OpenQueryInstances`.

    Opens an enumeration session that executes a query in a namespace and
    retrieves the instances representing the query result.  Use the
    :func:`~wbemcli.pi` function to retrieve the next set of instances, or
    the :func:`~wbcmeli.ce` function to close the enumeration session
    before it is complete.

    Parameters:

      ql (:term:`string`):
        Query language used for the query in the `qs` parameter, e.g.
        "DMTF:CQL" for CIM Query Language or "WQL" for WBEM Query Language.
        Because this is not a filter query, "DMTF:FQL" is not a valid query
        language for this request.

      qs (:term:`string`):
        Query to execute, expressed in the language named by `ql`.

      ns (:term:`string`):
        Name of the CIM namespace to be used (case independent).
        If `None`, defaults to the default namespace of the connection.

      rc (:class:`py:bool`):
        Controls whether a class definition describing the properties of
        the returned instances will be returned.
        `None` will cause the server to use its default of `False`.

      ot (:class:`~pywbem.Uint32`):
        Operation timeout in seconds — the minimum time the WBEM server
        must keep the enumeration session open between requests on that
        session.  A value of 0 indicates that the server should never time
        out.  The server may reject the proposed value.
        `None` will cause the server to use its default timeout.

      coe (:class:`py:bool`):
        Continue on error flag.
        `None` will cause the server to use its default of `False`.

      moc (:class:`~pywbem.Uint32`):
        Maximum number of objects to return for this operation.
        `None` will cause the server to use its default of 0.

    Returns:

      A :func:`~py:collections.namedtuple` object containing the following
      named items:

      * **instances** (list of :class:`~pywbem.CIMInstance`):
        The retrieved instances.

      * **eos** (:class:`py:bool`):
        `True` if the enumeration session is exhausted after this
        operation.  Otherwise `eos` is `False` and the `context` item is
        the context object for the next operation on the session.

      * **context** (:func:`py:tuple` of server_context, namespace):
        A context object identifying the open enumeration session and its
        current state; must be supplied with the next pull or close
        operation for this session.

      * **query_result_class** (:class:`~pywbem.CIMClass`):
        Result class definition describing the properties of the returned
        instances if requested, or otherwise `None`.
    """
    # Map the terse CLI parameter names onto pywbem's keyword arguments.
    call_args = dict(FilterQueryLanguage=ql,
                     FilterQuery=qs,
                     namespace=ns,
                     ReturnQueryResultClass=rc,
                     OperationTimeout=ot,
                     ContinueOnError=coe,
                     MaxObjectCount=moc)
    return CONN.OpenQueryInstances(**call_args)
constant[
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.OpenQueryInstances`.
Open an enumeration session to execute a query in a namespace and to
retrieve the instances representing the query result.
Use the :func:`~wbemcli.pi` function to retrieve the next set of
instances or the :func:`~wbcmeli.ce` function to close the enumeration
session before it is complete.
Parameters:
ql (:term:`string`):
Filter query language to be used for the filter defined in the `q`
parameter, e.g. "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM
Query Language. Because this is not a filter query, "DMTF:FQL" is not
a valid query language for this request.
qs (:term:`string`):
Filter to apply to objects to be returned. Based on filter query
language defined by the `ql` parameter.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the default namespace of the connection.
rc (:class:`py:bool`):
Controls whether a class definition describing the properties of the
returned instances will be returned.
`None` will cause the server to use its default of `False`.
ot (:class:`~pywbem.Uint32`):
Operation timeout in seconds. This is the minimum time the WBEM server
must keep the enumeration session open between requests on that
session.
A value of 0 indicates that the server should never time out.
The server may reject the proposed value.
`None` will cause the server to use its default timeout.
coe (:class:`py:bool`):
Continue on error flag.
`None` will cause the server to use its default of `False`.
moc (:class:`~pywbem.Uint32`):
Maximum number of objects to return for this operation.
`None` will cause the server to use its default of 0.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **instances** (list of :class:`~pywbem.CIMInstance`):
The retrieved instances.
* **eos** (:class:`py:bool`):
`True` if the enumeration session is exhausted after this operation.
Otherwise `eos` is `False` and the `context` item is the context
object for the next operation on the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session, including
its current enumeration state, and the namespace. This object must be
supplied with the next pull or close operation for this enumeration
session.
* **query_result_class** (:class:`~pywbem.CIMClass`):
Result class definition describing the properties of the returned
instances if requested, or otherwise `None`.
]
return[call[name[CONN].OpenQueryInstances, parameter[]]] | keyword[def] identifier[oqi] ( identifier[ql] , identifier[qs] , identifier[ns] = keyword[None] , identifier[rc] = keyword[None] , identifier[ot] = keyword[None] , identifier[coe] = keyword[None] , identifier[moc] = keyword[None] ):
literal[string]
keyword[return] identifier[CONN] . identifier[OpenQueryInstances] ( identifier[FilterQueryLanguage] = identifier[ql] ,
identifier[FilterQuery] = identifier[qs] ,
identifier[namespace] = identifier[ns] ,
identifier[ReturnQueryResultClass] = identifier[rc] ,
identifier[OperationTimeout] = identifier[ot] ,
identifier[ContinueOnError] = identifier[coe] ,
identifier[MaxObjectCount] = identifier[moc] ) | def oqi(ql, qs, ns=None, rc=None, ot=None, coe=None, moc=None):
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.OpenQueryInstances`.
Open an enumeration session to execute a query in a namespace and to
retrieve the instances representing the query result.
Use the :func:`~wbemcli.pi` function to retrieve the next set of
instances or the :func:`~wbcmeli.ce` function to close the enumeration
session before it is complete.
Parameters:
ql (:term:`string`):
Filter query language to be used for the filter defined in the `q`
parameter, e.g. "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM
Query Language. Because this is not a filter query, "DMTF:FQL" is not
a valid query language for this request.
qs (:term:`string`):
Filter to apply to objects to be returned. Based on filter query
language defined by the `ql` parameter.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the default namespace of the connection.
rc (:class:`py:bool`):
Controls whether a class definition describing the properties of the
returned instances will be returned.
`None` will cause the server to use its default of `False`.
ot (:class:`~pywbem.Uint32`):
Operation timeout in seconds. This is the minimum time the WBEM server
must keep the enumeration session open between requests on that
session.
A value of 0 indicates that the server should never time out.
The server may reject the proposed value.
`None` will cause the server to use its default timeout.
coe (:class:`py:bool`):
Continue on error flag.
`None` will cause the server to use its default of `False`.
moc (:class:`~pywbem.Uint32`):
Maximum number of objects to return for this operation.
`None` will cause the server to use its default of 0.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **instances** (list of :class:`~pywbem.CIMInstance`):
The retrieved instances.
* **eos** (:class:`py:bool`):
`True` if the enumeration session is exhausted after this operation.
Otherwise `eos` is `False` and the `context` item is the context
object for the next operation on the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session, including
its current enumeration state, and the namespace. This object must be
supplied with the next pull or close operation for this enumeration
session.
* **query_result_class** (:class:`~pywbem.CIMClass`):
Result class definition describing the properties of the returned
instances if requested, or otherwise `None`.
"""
return CONN.OpenQueryInstances(FilterQueryLanguage=ql, FilterQuery=qs, namespace=ns, ReturnQueryResultClass=rc, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc) |
def entrance_rounded(Di, rc, method='Rennels'):
    r'''Return the loss coefficient for a rounded entrance to a pipe flush
    with the wall of a reservoir.

    Six correlations are implemented: 'Rennels' (default and most
    conservative), 'Swamee', 'Crane', 'Miller', 'Idelchik', and 'Harris'.
    The Rennels [1]_ formulas are:

    .. math::
        K = 0.0696\left(1 - 0.569\frac{r}{d}\right)\lambda^2 + (\lambda-1)^2

    .. math::
        \lambda = 1 + 0.622\left(1 - 0.30\sqrt{\frac{r}{d}}
        - 0.70\frac{r}{d}\right)^4

    The Swamee [5]_ formula is:

    .. math::
        K = 0.5\left[1 + 36\left(\frac{r}{D}\right)^{1.2}\right]^{-1}

    Parameters
    ----------
    Di : float
        Inside diameter of pipe, [m]
    rc : float
        Radius of curvature of the entrance, [m]
    method : str, optional
        One of 'Rennels', 'Crane', 'Miller', 'Idelchik', 'Harris', or 'Swamee'.

    Returns
    -------
    K : float
        Loss coefficient [-]

    Notes
    -----
    For a generously rounded entrance (rc/Di >= 1), the Rennels loss
    coefficient converges to 0.03.  The Rennels formulation was derived
    primarily from data and theoretical analysis of flow scenarios other
    than a rounded pipe entrance; the Hamilton data set [2]_ is quite old.

    Examples
    --------
    Point from Diagram 9.2 in [1]_, which was used to confirm the Rennels
    model implementation:

    >>> entrance_rounded(Di=0.1, rc=0.0235)
    0.09839534618360923

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Hamilton, James Baker. Suppression of Pipe Intake Losses by Various
       Degrees of Rounding. Seattle: Published by the University of Washington,
       1929. https://search.library.wisc.edu/catalog/999823652202121.
    .. [3] Miller, Donald S. Internal Flow Systems: Design and Performance
       Prediction. Gulf Publishing Company, 1990.
    .. [4] Harris, Charles William. Elimination of Hydraulic Eddy Current Loss
       at Intake, Agreement of Theory and Experiment. University of Washington,
       1930.
    .. [5] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
       Pipe Networks. John Wiley & Sons, 2008.
    .. [6] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
       2009.
    .. [7] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
       Local Resistance and of Friction. National technical information
       Service, 1966.
    '''
    if method is None:
        method = 'Rennels'
    # Every correlation is parameterized by the curvature-to-diameter ratio.
    ratio = rc/Di
    if method == 'Rennels':
        if ratio > 1.0:
            # Generously rounded entrance; the coefficient has converged.
            return 0.03
        lbd = 1.0 + 0.622*(1.0 - 0.30*ratio**0.5 - 0.70*ratio)**4.0
        return 0.0696*(1.0 - 0.569*rc/Di)*lbd**2.0 + (lbd - 1.0)**2
    if method == 'Swamee':
        return 0.5/(1.0 + 36.0*ratio**1.2)
    if method == 'Crane':
        if ratio < 0:
            # Sharp (or invalid negative) rounding: sharp-entrance value.
            return 0.5
        if ratio > 0.15:
            # Beyond the tabulated range the Crane value is constant.
            return 0.04
        return interp(ratio, entrance_rounded_ratios_Crane,
                      entrance_rounded_Ks_Crane)
    if method == 'Miller':
        # The Miller polynomial fit is only valid up to rc/Di = 0.3.
        clamped = 0.3 if ratio > 0.3 else ratio
        return horner(entrance_rounded_Miller_coeffs, 20.0/3.0*(clamped - 0.15))
    if method == 'Harris':
        if ratio > .16:
            # Outside the Harris data range the loss is taken as zero.
            return 0.0
        return float(entrance_rounded_Harris(ratio))
    if method == 'Idelchik':
        if ratio > .2:
            # Hold the last tabulated Idelchik value beyond the data range.
            return entrance_rounded_Ks_Idelchik[-1]
        return float(entrance_rounded_Idelchik(ratio))
    raise ValueError('Specified method not recognized; methods are %s'
                     %(entrance_rounded_methods))
constant[Returns loss coefficient for a rounded entrance to a pipe
flush with the wall of a reservoir. This calculation has six methods
available.
The most conservative formulation is that of Rennels; with the Swammee
correlation being 0.02-0.07 lower. They were published in 2012 and 2008
respectively, and for this reason could be regarded as more reliable.
The Idel'chik correlation appears based on the Hamilton data; and the
Miller correlation as well, except a little more conservative. The Crane
model trends similarly but only has a few points. The Harris data set is
the lowest.
The Rennels [1]_ formulas are:
.. math::
K = 0.0696\left(1 - 0.569\frac{r}{d}\right)\lambda^2 + (\lambda-1)^2
.. math::
\lambda = 1 + 0.622\left(1 - 0.30\sqrt{\frac{r}{d}}
- 0.70\frac{r}{d}\right)^4
The Swamee [5]_ formula is:
.. math::
K = 0.5\left[1 + 36\left(\frac{r}{D}\right)^{1.2}\right]^{-1}
.. figure:: fittings/flush_mounted_rounded_entrance.png
:scale: 30 %
:alt: rounded entrace mounted straight and flush; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
rc : float
Radius of curvature of the entrance, [m]
method : str, optional
One of 'Rennels', 'Crane', 'Miller', 'Idelchik', 'Harris', or 'Swamee'.
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
For generously rounded entrance (rc/Di >= 1), the loss coefficient
converges to 0.03 in the Rennels method.
The Rennels formulation was derived primarily from data and theoretical
analysis from different flow scenarios than a rounded pipe entrance; the
only available data in [2]_ is quite old and [1]_ casts doubt on it.
The Hamilton data set is available in [1]_ and [6]_.
.. plot:: plots/entrance_rounded.py
Examples
--------
Point from Diagram 9.2 in [1]_, which was used to confirm the Rennels
model implementation:
>>> entrance_rounded(Di=0.1, rc=0.0235)
0.09839534618360923
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Hamilton, James Baker. Suppression of Pipe Intake Losses by Various
Degrees of Rounding. Seattle: Published by the University of Washington,
1929. https://search.library.wisc.edu/catalog/999823652202121.
.. [3] Miller, Donald S. Internal Flow Systems: Design and Performance
Prediction. Gulf Publishing Company, 1990.
.. [4] Harris, Charles William. Elimination of Hydraulic Eddy Current Loss
at Intake, Agreement of Theory and Experiment. University of Washington,
1930.
.. [5] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
Pipe Networks. John Wiley & Sons, 2008.
.. [6] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
.. [7] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
Van Nostrand Reinhold Co., 1984.
.. [8] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
Treniya). National technical information Service, 1966.
]
if compare[name[method] is constant[None]] begin[:]
variable[method] assign[=] constant[Rennels]
if compare[name[method] equal[==] constant[Rennels]] begin[:]
if compare[binary_operation[name[rc] / name[Di]] greater[>] constant[1.0]] begin[:]
return[constant[0.03]]
variable[lbd] assign[=] binary_operation[constant[1.0] + binary_operation[constant[0.622] * binary_operation[binary_operation[binary_operation[constant[1.0] - binary_operation[constant[0.3] * binary_operation[binary_operation[name[rc] / name[Di]] ** constant[0.5]]]] - binary_operation[constant[0.7] * binary_operation[name[rc] / name[Di]]]] ** constant[4.0]]]]
return[binary_operation[binary_operation[binary_operation[constant[0.0696] * binary_operation[constant[1.0] - binary_operation[binary_operation[constant[0.569] * name[rc]] / name[Di]]]] * binary_operation[name[lbd] ** constant[2.0]]] + binary_operation[binary_operation[name[lbd] - constant[1.0]] ** constant[2]]]] | keyword[def] identifier[entrance_rounded] ( identifier[Di] , identifier[rc] , identifier[method] = literal[string] ):
literal[string]
keyword[if] identifier[method] keyword[is] keyword[None] :
identifier[method] = literal[string]
keyword[if] identifier[method] == literal[string] :
keyword[if] identifier[rc] / identifier[Di] > literal[int] :
keyword[return] literal[int]
identifier[lbd] = literal[int] + literal[int] *( literal[int] - literal[int] *( identifier[rc] / identifier[Di] )** literal[int] - literal[int] *( identifier[rc] / identifier[Di] ))** literal[int]
keyword[return] literal[int] *( literal[int] - literal[int] * identifier[rc] / identifier[Di] )* identifier[lbd] ** literal[int] +( identifier[lbd] - literal[int] )** literal[int]
keyword[elif] identifier[method] == literal[string] :
keyword[return] literal[int] /( literal[int] + literal[int] *( identifier[rc] / identifier[Di] )** literal[int] )
keyword[elif] identifier[method] == literal[string] :
identifier[ratio] = identifier[rc] / identifier[Di]
keyword[if] identifier[ratio] < literal[int] :
keyword[return] literal[int]
keyword[elif] identifier[ratio] > literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[interp] ( identifier[ratio] , identifier[entrance_rounded_ratios_Crane] ,
identifier[entrance_rounded_Ks_Crane] )
keyword[elif] identifier[method] == literal[string] :
identifier[rc_Di] = identifier[rc] / identifier[Di]
keyword[if] identifier[rc_Di] > literal[int] :
identifier[rc_Di] = literal[int]
keyword[return] identifier[horner] ( identifier[entrance_rounded_Miller_coeffs] , literal[int] / literal[int] *( identifier[rc_Di] - literal[int] ))
keyword[elif] identifier[method] == literal[string] :
identifier[ratio] = identifier[rc] / identifier[Di]
keyword[if] identifier[ratio] > literal[int] :
keyword[return] literal[int]
keyword[return] identifier[float] ( identifier[entrance_rounded_Harris] ( identifier[ratio] ))
keyword[elif] identifier[method] == literal[string] :
identifier[ratio] = identifier[rc] / identifier[Di]
keyword[if] identifier[ratio] > literal[int] :
keyword[return] identifier[entrance_rounded_Ks_Idelchik] [- literal[int] ]
keyword[return] identifier[float] ( identifier[entrance_rounded_Idelchik] ( identifier[ratio] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
%( identifier[entrance_rounded_methods] )) | def entrance_rounded(Di, rc, method='Rennels'):
"""Returns loss coefficient for a rounded entrance to a pipe
flush with the wall of a reservoir. This calculation has six methods
available.
The most conservative formulation is that of Rennels; with the Swammee
correlation being 0.02-0.07 lower. They were published in 2012 and 2008
respectively, and for this reason could be regarded as more reliable.
The Idel'chik correlation appears based on the Hamilton data; and the
Miller correlation as well, except a little more conservative. The Crane
model trends similarly but only has a few points. The Harris data set is
the lowest.
The Rennels [1]_ formulas are:
.. math::
K = 0.0696\\left(1 - 0.569\\frac{r}{d}\\right)\\lambda^2 + (\\lambda-1)^2
.. math::
\\lambda = 1 + 0.622\\left(1 - 0.30\\sqrt{\\frac{r}{d}}
- 0.70\\frac{r}{d}\\right)^4
The Swamee [5]_ formula is:
.. math::
K = 0.5\\left[1 + 36\\left(\\frac{r}{D}\\right)^{1.2}\\right]^{-1}
.. figure:: fittings/flush_mounted_rounded_entrance.png
:scale: 30 %
:alt: rounded entrace mounted straight and flush; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
rc : float
Radius of curvature of the entrance, [m]
method : str, optional
One of 'Rennels', 'Crane', 'Miller', 'Idelchik', 'Harris', or 'Swamee'.
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
For generously rounded entrance (rc/Di >= 1), the loss coefficient
converges to 0.03 in the Rennels method.
The Rennels formulation was derived primarily from data and theoretical
analysis from different flow scenarios than a rounded pipe entrance; the
only available data in [2]_ is quite old and [1]_ casts doubt on it.
The Hamilton data set is available in [1]_ and [6]_.
.. plot:: plots/entrance_rounded.py
Examples
--------
Point from Diagram 9.2 in [1]_, which was used to confirm the Rennels
model implementation:
>>> entrance_rounded(Di=0.1, rc=0.0235)
0.09839534618360923
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Hamilton, James Baker. Suppression of Pipe Intake Losses by Various
Degrees of Rounding. Seattle: Published by the University of Washington,
1929. https://search.library.wisc.edu/catalog/999823652202121.
.. [3] Miller, Donald S. Internal Flow Systems: Design and Performance
Prediction. Gulf Publishing Company, 1990.
.. [4] Harris, Charles William. Elimination of Hydraulic Eddy Current Loss
at Intake, Agreement of Theory and Experiment. University of Washington,
1930.
.. [5] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
Pipe Networks. John Wiley & Sons, 2008.
.. [6] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
.. [7] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
Van Nostrand Reinhold Co., 1984.
.. [8] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
Treniya). National technical information Service, 1966.
"""
if method is None:
method = 'Rennels' # depends on [control=['if'], data=['method']]
if method == 'Rennels':
if rc / Di > 1.0:
return 0.03 # depends on [control=['if'], data=[]]
lbd = 1.0 + 0.622 * (1.0 - 0.3 * (rc / Di) ** 0.5 - 0.7 * (rc / Di)) ** 4.0
return 0.0696 * (1.0 - 0.569 * rc / Di) * lbd ** 2.0 + (lbd - 1.0) ** 2 # depends on [control=['if'], data=[]]
elif method == 'Swamee':
return 0.5 / (1.0 + 36.0 * (rc / Di) ** 1.2) # depends on [control=['if'], data=[]]
elif method == 'Crane':
ratio = rc / Di
if ratio < 0:
return 0.5 # depends on [control=['if'], data=[]]
elif ratio > 0.15:
return 0.04 # depends on [control=['if'], data=[]]
else:
return interp(ratio, entrance_rounded_ratios_Crane, entrance_rounded_Ks_Crane) # depends on [control=['if'], data=[]]
elif method == 'Miller':
rc_Di = rc / Di
if rc_Di > 0.3:
rc_Di = 0.3 # depends on [control=['if'], data=['rc_Di']]
return horner(entrance_rounded_Miller_coeffs, 20.0 / 3.0 * (rc_Di - 0.15)) # depends on [control=['if'], data=[]]
elif method == 'Harris':
ratio = rc / Di
if ratio > 0.16:
return 0.0 # depends on [control=['if'], data=[]]
return float(entrance_rounded_Harris(ratio)) # depends on [control=['if'], data=[]]
elif method == 'Idelchik':
ratio = rc / Di
if ratio > 0.2:
return entrance_rounded_Ks_Idelchik[-1] # depends on [control=['if'], data=[]]
return float(entrance_rounded_Idelchik(ratio)) # depends on [control=['if'], data=[]]
else:
raise ValueError('Specified method not recognized; methods are %s' % entrance_rounded_methods) |
def fir_zero_filter(coeff, timeseries):
    """Apply an FIR filter to a time series and zero its corrupted edges.

    Parameters
    ----------
    coeff: numpy.ndarray
        FIR coefficients. Should be an odd length and symmetric.
    timeseries: pycbc.types.TimeSeries
        Time series to be filtered.

    Returns
    -------
    numpy.ndarray
        The filtered samples, shifted back to undo the FIR filter delay,
        with the corrupted regions at both ends set to zero.
    """
    # Run the FIR filter over the raw sample array.
    filtered = lfilter(coeff, timeseries.numpy())
    # Half the filter length is corrupted at each edge.  With an odd number
    # of taps the central point *should* be kept, so only a total of
    # len(coeff) - 1 samples are zeroed out.
    half = len(coeff) // 2
    output = numpy.zeros(len(timeseries))
    output[half:len(output) - half] = filtered[2*half:]
    return output
constant[Filter the timeseries with a set of FIR coefficients
Parameters
----------
coeff: numpy.ndarray
FIR coefficients. Should be and odd length and symmetric.
timeseries: pycbc.types.TimeSeries
Time series to be filtered.
Returns
-------
filtered_series: pycbc.types.TimeSeries
Return the filtered timeseries, which has been properly shifted to account
for the FIR filter delay and the corrupted regions zeroed out.
]
variable[series] assign[=] call[name[lfilter], parameter[name[coeff], call[name[timeseries].numpy, parameter[]]]]
variable[data] assign[=] call[name[numpy].zeros, parameter[call[name[len], parameter[name[timeseries]]]]]
call[name[data]][<ast.Slice object at 0x7da2044c0700>] assign[=] call[name[series]][<ast.Slice object at 0x7da2044c29b0>]
return[name[data]] | keyword[def] identifier[fir_zero_filter] ( identifier[coeff] , identifier[timeseries] ):
literal[string]
identifier[series] = identifier[lfilter] ( identifier[coeff] , identifier[timeseries] . identifier[numpy] ())
identifier[data] = identifier[numpy] . identifier[zeros] ( identifier[len] ( identifier[timeseries] ))
identifier[data] [ identifier[len] ( identifier[coeff] )// literal[int] : identifier[len] ( identifier[data] )- identifier[len] ( identifier[coeff] )// literal[int] ]= identifier[series] [( identifier[len] ( identifier[coeff] )// literal[int] )* literal[int] :]
keyword[return] identifier[data] | def fir_zero_filter(coeff, timeseries):
"""Filter the timeseries with a set of FIR coefficients
Parameters
----------
coeff: numpy.ndarray
FIR coefficients. Should be and odd length and symmetric.
timeseries: pycbc.types.TimeSeries
Time series to be filtered.
Returns
-------
filtered_series: pycbc.types.TimeSeries
Return the filtered timeseries, which has been properly shifted to account
for the FIR filter delay and the corrupted regions zeroed out.
"""
# apply the filter
series = lfilter(coeff, timeseries.numpy())
# reverse the time shift caused by the filter,
# corruption regions contain zeros
# If the number of filter coefficients is odd, the central point *should*
# be included in the output so we only zero out a region of len(coeff) - 1
data = numpy.zeros(len(timeseries))
data[len(coeff) // 2:len(data) - len(coeff) // 2] = series[len(coeff) // 2 * 2:]
return data |
def probability_of_n_purchases_up_to_time(self, t, n):
    r"""
    Compute the probability of exactly n repeat purchases in t units of
    time:

    .. math:: P( N(t) = n | \text{model} )

    where N(t) is the number of repeat purchases a customer makes in t
    units of time.

    Parameters
    ----------
    t: float
        number units of time
    n: int
        number of purchases

    Returns
    -------
    float:
        Probability to have n purchases up to t units of time
    """
    r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
    # The two probability ratios that recur throughout the formula.
    frac_t = t / (alpha + t)
    frac_alpha = alpha / (alpha + t)
    # First component of the probability mass function.
    first_component = (
        beta(a, b + n)
        / beta(a, b)
        * gamma(r + n)
        / gamma(r)
        / gamma(n + 1)
        * frac_alpha ** r
        * frac_t ** n
    )
    # Second component: a finite correction sum over purchase counts
    # 0 .. n-1; it only contributes for n > 0.
    second_component = 0
    if n > 0:
        j = np.arange(0, n)
        partial_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) * frac_t ** j).sum()
        second_component = beta(a + 1, b + n - 1) / beta(a, b) * (1 - frac_alpha ** r * partial_sum)
    return first_component + second_component
constant[
Compute the probability of n purchases.
.. math:: P( N(t) = n | \text{model} )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
Parameters
----------
t: float
number units of time
n: int
number of purchases
Returns
-------
float:
Probability to have n purchases up to t units of time
]
<ast.Tuple object at 0x7da1b1d34520> assign[=] call[name[self]._unload_params, parameter[constant[r], constant[alpha], constant[a], constant[b]]]
variable[first_term] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[beta], parameter[name[a], binary_operation[name[b] + name[n]]]] / call[name[beta], parameter[name[a], name[b]]]] * call[name[gamma], parameter[binary_operation[name[r] + name[n]]]]] / call[name[gamma], parameter[name[r]]]] / call[name[gamma], parameter[binary_operation[name[n] + constant[1]]]]] * binary_operation[binary_operation[name[alpha] / binary_operation[name[alpha] + name[t]]] ** name[r]]] * binary_operation[binary_operation[name[t] / binary_operation[name[alpha] + name[t]]] ** name[n]]]
if compare[name[n] greater[>] constant[0]] begin[:]
variable[j] assign[=] call[name[np].arange, parameter[constant[0], name[n]]]
variable[finite_sum] assign[=] call[binary_operation[binary_operation[binary_operation[call[name[gamma], parameter[binary_operation[name[r] + name[j]]]] / call[name[gamma], parameter[name[r]]]] / call[name[gamma], parameter[binary_operation[name[j] + constant[1]]]]] * binary_operation[binary_operation[name[t] / binary_operation[name[alpha] + name[t]]] ** name[j]]].sum, parameter[]]
variable[second_term] assign[=] binary_operation[binary_operation[call[name[beta], parameter[binary_operation[name[a] + constant[1]], binary_operation[binary_operation[name[b] + name[n]] - constant[1]]]] / call[name[beta], parameter[name[a], name[b]]]] * binary_operation[constant[1] - binary_operation[binary_operation[binary_operation[name[alpha] / binary_operation[name[alpha] + name[t]]] ** name[r]] * name[finite_sum]]]]
return[binary_operation[name[first_term] + name[second_term]]] | keyword[def] identifier[probability_of_n_purchases_up_to_time] ( identifier[self] , identifier[t] , identifier[n] ):
literal[string]
identifier[r] , identifier[alpha] , identifier[a] , identifier[b] = identifier[self] . identifier[_unload_params] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[first_term] =(
identifier[beta] ( identifier[a] , identifier[b] + identifier[n] )
/ identifier[beta] ( identifier[a] , identifier[b] )
* identifier[gamma] ( identifier[r] + identifier[n] )
/ identifier[gamma] ( identifier[r] )
/ identifier[gamma] ( identifier[n] + literal[int] )
*( identifier[alpha] /( identifier[alpha] + identifier[t] ))** identifier[r]
*( identifier[t] /( identifier[alpha] + identifier[t] ))** identifier[n]
)
keyword[if] identifier[n] > literal[int] :
identifier[j] = identifier[np] . identifier[arange] ( literal[int] , identifier[n] )
identifier[finite_sum] =( identifier[gamma] ( identifier[r] + identifier[j] )/ identifier[gamma] ( identifier[r] )/ identifier[gamma] ( identifier[j] + literal[int] )*( identifier[t] /( identifier[alpha] + identifier[t] ))** identifier[j] ). identifier[sum] ()
identifier[second_term] = identifier[beta] ( identifier[a] + literal[int] , identifier[b] + identifier[n] - literal[int] )/ identifier[beta] ( identifier[a] , identifier[b] )*( literal[int] -( identifier[alpha] /( identifier[alpha] + identifier[t] ))** identifier[r] * identifier[finite_sum] )
keyword[else] :
identifier[second_term] = literal[int]
keyword[return] identifier[first_term] + identifier[second_term] | def probability_of_n_purchases_up_to_time(self, t, n):
"""
Compute the probability of n purchases.
.. math:: P( N(t) = n | \\text{model} )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
Parameters
----------
t: float
number units of time
n: int
number of purchases
Returns
-------
float:
Probability to have n purchases up to t units of time
"""
(r, alpha, a, b) = self._unload_params('r', 'alpha', 'a', 'b')
first_term = beta(a, b + n) / beta(a, b) * gamma(r + n) / gamma(r) / gamma(n + 1) * (alpha / (alpha + t)) ** r * (t / (alpha + t)) ** n
if n > 0:
j = np.arange(0, n)
finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) * (t / (alpha + t)) ** j).sum()
second_term = beta(a + 1, b + n - 1) / beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum) # depends on [control=['if'], data=['n']]
else:
second_term = 0
return first_term + second_term |
def prepare_site_db_and_overrides():
'''Prepare overrides and create _SITE_DB
_SITE_DB.keys() need to be ready for filter_translations
'''
_SITE_DB.clear()
_SITE_DB[_MAIN_LANG] = _MAIN_SITEURL
# make sure it works for both root-relative and absolute
main_siteurl = '/' if _MAIN_SITEURL == '' else _MAIN_SITEURL
for lang, overrides in _SUBSITE_QUEUE.items():
if 'SITEURL' not in overrides:
overrides['SITEURL'] = posixpath.join(main_siteurl, lang)
_SITE_DB[lang] = overrides['SITEURL']
# default subsite hierarchy
if 'OUTPUT_PATH' not in overrides:
overrides['OUTPUT_PATH'] = os.path.join(
_MAIN_SETTINGS['OUTPUT_PATH'], lang)
if 'CACHE_PATH' not in overrides:
overrides['CACHE_PATH'] = os.path.join(
_MAIN_SETTINGS['CACHE_PATH'], lang)
if 'STATIC_PATHS' not in overrides:
overrides['STATIC_PATHS'] = []
if ('THEME' not in overrides and 'THEME_STATIC_DIR' not in overrides and
'THEME_STATIC_PATHS' not in overrides):
relpath = relpath_to_site(lang, _MAIN_LANG)
overrides['THEME_STATIC_DIR'] = posixpath.join(
relpath, _MAIN_SETTINGS['THEME_STATIC_DIR'])
overrides['THEME_STATIC_PATHS'] = []
# to change what is perceived as translations
overrides['DEFAULT_LANG'] = lang | def function[prepare_site_db_and_overrides, parameter[]]:
constant[Prepare overrides and create _SITE_DB
_SITE_DB.keys() need to be ready for filter_translations
]
call[name[_SITE_DB].clear, parameter[]]
call[name[_SITE_DB]][name[_MAIN_LANG]] assign[=] name[_MAIN_SITEURL]
variable[main_siteurl] assign[=] <ast.IfExp object at 0x7da1b1d22890>
for taget[tuple[[<ast.Name object at 0x7da1b1d232b0>, <ast.Name object at 0x7da1b1d23910>]]] in starred[call[name[_SUBSITE_QUEUE].items, parameter[]]] begin[:]
if compare[constant[SITEURL] <ast.NotIn object at 0x7da2590d7190> name[overrides]] begin[:]
call[name[overrides]][constant[SITEURL]] assign[=] call[name[posixpath].join, parameter[name[main_siteurl], name[lang]]]
call[name[_SITE_DB]][name[lang]] assign[=] call[name[overrides]][constant[SITEURL]]
if compare[constant[OUTPUT_PATH] <ast.NotIn object at 0x7da2590d7190> name[overrides]] begin[:]
call[name[overrides]][constant[OUTPUT_PATH]] assign[=] call[name[os].path.join, parameter[call[name[_MAIN_SETTINGS]][constant[OUTPUT_PATH]], name[lang]]]
if compare[constant[CACHE_PATH] <ast.NotIn object at 0x7da2590d7190> name[overrides]] begin[:]
call[name[overrides]][constant[CACHE_PATH]] assign[=] call[name[os].path.join, parameter[call[name[_MAIN_SETTINGS]][constant[CACHE_PATH]], name[lang]]]
if compare[constant[STATIC_PATHS] <ast.NotIn object at 0x7da2590d7190> name[overrides]] begin[:]
call[name[overrides]][constant[STATIC_PATHS]] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b1d06f20> begin[:]
variable[relpath] assign[=] call[name[relpath_to_site], parameter[name[lang], name[_MAIN_LANG]]]
call[name[overrides]][constant[THEME_STATIC_DIR]] assign[=] call[name[posixpath].join, parameter[name[relpath], call[name[_MAIN_SETTINGS]][constant[THEME_STATIC_DIR]]]]
call[name[overrides]][constant[THEME_STATIC_PATHS]] assign[=] list[[]]
call[name[overrides]][constant[DEFAULT_LANG]] assign[=] name[lang] | keyword[def] identifier[prepare_site_db_and_overrides] ():
literal[string]
identifier[_SITE_DB] . identifier[clear] ()
identifier[_SITE_DB] [ identifier[_MAIN_LANG] ]= identifier[_MAIN_SITEURL]
identifier[main_siteurl] = literal[string] keyword[if] identifier[_MAIN_SITEURL] == literal[string] keyword[else] identifier[_MAIN_SITEURL]
keyword[for] identifier[lang] , identifier[overrides] keyword[in] identifier[_SUBSITE_QUEUE] . identifier[items] ():
keyword[if] literal[string] keyword[not] keyword[in] identifier[overrides] :
identifier[overrides] [ literal[string] ]= identifier[posixpath] . identifier[join] ( identifier[main_siteurl] , identifier[lang] )
identifier[_SITE_DB] [ identifier[lang] ]= identifier[overrides] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[overrides] :
identifier[overrides] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] (
identifier[_MAIN_SETTINGS] [ literal[string] ], identifier[lang] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[overrides] :
identifier[overrides] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] (
identifier[_MAIN_SETTINGS] [ literal[string] ], identifier[lang] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[overrides] :
identifier[overrides] [ literal[string] ]=[]
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[overrides] keyword[and] literal[string] keyword[not] keyword[in] identifier[overrides] keyword[and]
literal[string] keyword[not] keyword[in] identifier[overrides] ):
identifier[relpath] = identifier[relpath_to_site] ( identifier[lang] , identifier[_MAIN_LANG] )
identifier[overrides] [ literal[string] ]= identifier[posixpath] . identifier[join] (
identifier[relpath] , identifier[_MAIN_SETTINGS] [ literal[string] ])
identifier[overrides] [ literal[string] ]=[]
identifier[overrides] [ literal[string] ]= identifier[lang] | def prepare_site_db_and_overrides():
"""Prepare overrides and create _SITE_DB
_SITE_DB.keys() need to be ready for filter_translations
"""
_SITE_DB.clear()
_SITE_DB[_MAIN_LANG] = _MAIN_SITEURL
# make sure it works for both root-relative and absolute
main_siteurl = '/' if _MAIN_SITEURL == '' else _MAIN_SITEURL
for (lang, overrides) in _SUBSITE_QUEUE.items():
if 'SITEURL' not in overrides:
overrides['SITEURL'] = posixpath.join(main_siteurl, lang) # depends on [control=['if'], data=['overrides']]
_SITE_DB[lang] = overrides['SITEURL']
# default subsite hierarchy
if 'OUTPUT_PATH' not in overrides:
overrides['OUTPUT_PATH'] = os.path.join(_MAIN_SETTINGS['OUTPUT_PATH'], lang) # depends on [control=['if'], data=['overrides']]
if 'CACHE_PATH' not in overrides:
overrides['CACHE_PATH'] = os.path.join(_MAIN_SETTINGS['CACHE_PATH'], lang) # depends on [control=['if'], data=['overrides']]
if 'STATIC_PATHS' not in overrides:
overrides['STATIC_PATHS'] = [] # depends on [control=['if'], data=['overrides']]
if 'THEME' not in overrides and 'THEME_STATIC_DIR' not in overrides and ('THEME_STATIC_PATHS' not in overrides):
relpath = relpath_to_site(lang, _MAIN_LANG)
overrides['THEME_STATIC_DIR'] = posixpath.join(relpath, _MAIN_SETTINGS['THEME_STATIC_DIR'])
overrides['THEME_STATIC_PATHS'] = [] # depends on [control=['if'], data=[]]
# to change what is perceived as translations
overrides['DEFAULT_LANG'] = lang # depends on [control=['for'], data=[]] |
def excepthook(type, value, traceback): # pylint: disable=unused-argument
"""Log exceptions instead of printing a traceback to stderr."""
try:
six.reraise(type, value, traceback)
except type:
_LOGGER.exception(str(value))
if isinstance(value, KeyboardInterrupt):
message = "Cancelling at the user's request."
else:
message = handle_unexpected_exception(value)
print(message, file=sys.stderr) | def function[excepthook, parameter[type, value, traceback]]:
constant[Log exceptions instead of printing a traceback to stderr.]
<ast.Try object at 0x7da20c76ff40>
if call[name[isinstance], parameter[name[value], name[KeyboardInterrupt]]] begin[:]
variable[message] assign[=] constant[Cancelling at the user's request.]
call[name[print], parameter[name[message]]] | keyword[def] identifier[excepthook] ( identifier[type] , identifier[value] , identifier[traceback] ):
literal[string]
keyword[try] :
identifier[six] . identifier[reraise] ( identifier[type] , identifier[value] , identifier[traceback] )
keyword[except] identifier[type] :
identifier[_LOGGER] . identifier[exception] ( identifier[str] ( identifier[value] ))
keyword[if] identifier[isinstance] ( identifier[value] , identifier[KeyboardInterrupt] ):
identifier[message] = literal[string]
keyword[else] :
identifier[message] = identifier[handle_unexpected_exception] ( identifier[value] )
identifier[print] ( identifier[message] , identifier[file] = identifier[sys] . identifier[stderr] ) | def excepthook(type, value, traceback): # pylint: disable=unused-argument
'Log exceptions instead of printing a traceback to stderr.'
try:
six.reraise(type, value, traceback) # depends on [control=['try'], data=[]]
except type:
_LOGGER.exception(str(value)) # depends on [control=['except'], data=[]]
if isinstance(value, KeyboardInterrupt):
message = "Cancelling at the user's request." # depends on [control=['if'], data=[]]
else:
message = handle_unexpected_exception(value)
print(message, file=sys.stderr) |
def start(self, proxy=None, cookie_db=None, disk_cache_dir=None,
disk_cache_size=None):
'''
Starts chrome/chromium process.
Args:
proxy: http proxy 'host:port' (default None)
cookie_db: raw bytes of chrome/chromium sqlite3 cookies database,
which, if supplied, will be written to
{chrome_user_data_dir}/Default/Cookies before running the
browser (default None)
disk_cache_dir: use directory for disk cache. The default location
is inside `self._home_tmpdir` (default None).
disk_cache_size: Forces the maximum disk space to be used by the disk
cache, in bytes. (default None)
Returns:
websocket url to chrome window with about:blank loaded
'''
# these can raise exceptions
self._home_tmpdir = tempfile.TemporaryDirectory()
self._chrome_user_data_dir = os.path.join(
self._home_tmpdir.name, 'chrome-user-data')
if cookie_db:
self._init_cookie_db(cookie_db)
self._shutdown.clear()
new_env = os.environ.copy()
new_env['HOME'] = self._home_tmpdir.name
chrome_args = [
self.chrome_exe,
'--remote-debugging-port=%s' % self.port,
'--use-mock-keychain', # mac thing
'--user-data-dir=%s' % self._chrome_user_data_dir,
'--disable-background-networking',
'--disable-renderer-backgrounding', '--disable-hang-monitor',
'--disable-background-timer-throttling', '--mute-audio',
'--disable-web-sockets',
'--window-size=1100,900', '--no-default-browser-check',
'--disable-first-run-ui', '--no-first-run',
'--homepage=about:blank', '--disable-direct-npapi-requests',
'--disable-web-security', '--disable-notifications',
'--disable-extensions', '--disable-save-password-bubble']
if disk_cache_dir:
chrome_args.append('--disk-cache-dir=%s' % disk_cache_dir)
if disk_cache_size:
chrome_args.append('--disk-cache-size=%s' % disk_cache_size)
if self.ignore_cert_errors:
chrome_args.append('--ignore-certificate-errors')
if proxy:
chrome_args.append('--proxy-server=%s' % proxy)
chrome_args.append('about:blank')
self.logger.info('running: %r', subprocess.list2cmdline(chrome_args))
# start_new_session - new process group so we can kill the whole group
self.chrome_process = subprocess.Popen(
chrome_args, env=new_env, start_new_session=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
self._out_reader_thread = threading.Thread(
target=self._read_stderr_stdout,
name='ChromeOutReaderThread:%s' % self.port, daemon=True)
self._out_reader_thread.start()
self.logger.info('chrome running, pid %s' % self.chrome_process.pid)
return self._websocket_url() | def function[start, parameter[self, proxy, cookie_db, disk_cache_dir, disk_cache_size]]:
constant[
Starts chrome/chromium process.
Args:
proxy: http proxy 'host:port' (default None)
cookie_db: raw bytes of chrome/chromium sqlite3 cookies database,
which, if supplied, will be written to
{chrome_user_data_dir}/Default/Cookies before running the
browser (default None)
disk_cache_dir: use directory for disk cache. The default location
is inside `self._home_tmpdir` (default None).
disk_cache_size: Forces the maximum disk space to be used by the disk
cache, in bytes. (default None)
Returns:
websocket url to chrome window with about:blank loaded
]
name[self]._home_tmpdir assign[=] call[name[tempfile].TemporaryDirectory, parameter[]]
name[self]._chrome_user_data_dir assign[=] call[name[os].path.join, parameter[name[self]._home_tmpdir.name, constant[chrome-user-data]]]
if name[cookie_db] begin[:]
call[name[self]._init_cookie_db, parameter[name[cookie_db]]]
call[name[self]._shutdown.clear, parameter[]]
variable[new_env] assign[=] call[name[os].environ.copy, parameter[]]
call[name[new_env]][constant[HOME]] assign[=] name[self]._home_tmpdir.name
variable[chrome_args] assign[=] list[[<ast.Attribute object at 0x7da1b1ea32e0>, <ast.BinOp object at 0x7da1b1ea2bf0>, <ast.Constant object at 0x7da1b1ea3a00>, <ast.BinOp object at 0x7da1b1ea26b0>, <ast.Constant object at 0x7da1b1ea0850>, <ast.Constant object at 0x7da1b1ea2e30>, <ast.Constant object at 0x7da1b1ea0940>, <ast.Constant object at 0x7da1b1ea0280>, <ast.Constant object at 0x7da1b1ea07f0>, <ast.Constant object at 0x7da1b1ea3160>, <ast.Constant object at 0x7da1b1ea2c20>, <ast.Constant object at 0x7da1b1ea27a0>, <ast.Constant object at 0x7da1b1ea2e00>, <ast.Constant object at 0x7da1b1ea22f0>, <ast.Constant object at 0x7da1b1ea0190>, <ast.Constant object at 0x7da1b1ea1270>, <ast.Constant object at 0x7da1b1ea19c0>, <ast.Constant object at 0x7da1b1ea1630>, <ast.Constant object at 0x7da1b1ea3e50>, <ast.Constant object at 0x7da1b1ea09a0>]]
if name[disk_cache_dir] begin[:]
call[name[chrome_args].append, parameter[binary_operation[constant[--disk-cache-dir=%s] <ast.Mod object at 0x7da2590d6920> name[disk_cache_dir]]]]
if name[disk_cache_size] begin[:]
call[name[chrome_args].append, parameter[binary_operation[constant[--disk-cache-size=%s] <ast.Mod object at 0x7da2590d6920> name[disk_cache_size]]]]
if name[self].ignore_cert_errors begin[:]
call[name[chrome_args].append, parameter[constant[--ignore-certificate-errors]]]
if name[proxy] begin[:]
call[name[chrome_args].append, parameter[binary_operation[constant[--proxy-server=%s] <ast.Mod object at 0x7da2590d6920> name[proxy]]]]
call[name[chrome_args].append, parameter[constant[about:blank]]]
call[name[self].logger.info, parameter[constant[running: %r], call[name[subprocess].list2cmdline, parameter[name[chrome_args]]]]]
name[self].chrome_process assign[=] call[name[subprocess].Popen, parameter[name[chrome_args]]]
name[self]._out_reader_thread assign[=] call[name[threading].Thread, parameter[]]
call[name[self]._out_reader_thread.start, parameter[]]
call[name[self].logger.info, parameter[binary_operation[constant[chrome running, pid %s] <ast.Mod object at 0x7da2590d6920> name[self].chrome_process.pid]]]
return[call[name[self]._websocket_url, parameter[]]] | keyword[def] identifier[start] ( identifier[self] , identifier[proxy] = keyword[None] , identifier[cookie_db] = keyword[None] , identifier[disk_cache_dir] = keyword[None] ,
identifier[disk_cache_size] = keyword[None] ):
literal[string]
identifier[self] . identifier[_home_tmpdir] = identifier[tempfile] . identifier[TemporaryDirectory] ()
identifier[self] . identifier[_chrome_user_data_dir] = identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[_home_tmpdir] . identifier[name] , literal[string] )
keyword[if] identifier[cookie_db] :
identifier[self] . identifier[_init_cookie_db] ( identifier[cookie_db] )
identifier[self] . identifier[_shutdown] . identifier[clear] ()
identifier[new_env] = identifier[os] . identifier[environ] . identifier[copy] ()
identifier[new_env] [ literal[string] ]= identifier[self] . identifier[_home_tmpdir] . identifier[name]
identifier[chrome_args] =[
identifier[self] . identifier[chrome_exe] ,
literal[string] % identifier[self] . identifier[port] ,
literal[string] ,
literal[string] % identifier[self] . identifier[_chrome_user_data_dir] ,
literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ]
keyword[if] identifier[disk_cache_dir] :
identifier[chrome_args] . identifier[append] ( literal[string] % identifier[disk_cache_dir] )
keyword[if] identifier[disk_cache_size] :
identifier[chrome_args] . identifier[append] ( literal[string] % identifier[disk_cache_size] )
keyword[if] identifier[self] . identifier[ignore_cert_errors] :
identifier[chrome_args] . identifier[append] ( literal[string] )
keyword[if] identifier[proxy] :
identifier[chrome_args] . identifier[append] ( literal[string] % identifier[proxy] )
identifier[chrome_args] . identifier[append] ( literal[string] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[subprocess] . identifier[list2cmdline] ( identifier[chrome_args] ))
identifier[self] . identifier[chrome_process] = identifier[subprocess] . identifier[Popen] (
identifier[chrome_args] , identifier[env] = identifier[new_env] , identifier[start_new_session] = keyword[True] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] , identifier[bufsize] = literal[int] )
identifier[self] . identifier[_out_reader_thread] = identifier[threading] . identifier[Thread] (
identifier[target] = identifier[self] . identifier[_read_stderr_stdout] ,
identifier[name] = literal[string] % identifier[self] . identifier[port] , identifier[daemon] = keyword[True] )
identifier[self] . identifier[_out_reader_thread] . identifier[start] ()
identifier[self] . identifier[logger] . identifier[info] ( literal[string] % identifier[self] . identifier[chrome_process] . identifier[pid] )
keyword[return] identifier[self] . identifier[_websocket_url] () | def start(self, proxy=None, cookie_db=None, disk_cache_dir=None, disk_cache_size=None):
"""
Starts chrome/chromium process.
Args:
proxy: http proxy 'host:port' (default None)
cookie_db: raw bytes of chrome/chromium sqlite3 cookies database,
which, if supplied, will be written to
{chrome_user_data_dir}/Default/Cookies before running the
browser (default None)
disk_cache_dir: use directory for disk cache. The default location
is inside `self._home_tmpdir` (default None).
disk_cache_size: Forces the maximum disk space to be used by the disk
cache, in bytes. (default None)
Returns:
websocket url to chrome window with about:blank loaded
"""
# these can raise exceptions
self._home_tmpdir = tempfile.TemporaryDirectory()
self._chrome_user_data_dir = os.path.join(self._home_tmpdir.name, 'chrome-user-data')
if cookie_db:
self._init_cookie_db(cookie_db) # depends on [control=['if'], data=[]]
self._shutdown.clear()
new_env = os.environ.copy()
new_env['HOME'] = self._home_tmpdir.name # mac thing
chrome_args = [self.chrome_exe, '--remote-debugging-port=%s' % self.port, '--use-mock-keychain', '--user-data-dir=%s' % self._chrome_user_data_dir, '--disable-background-networking', '--disable-renderer-backgrounding', '--disable-hang-monitor', '--disable-background-timer-throttling', '--mute-audio', '--disable-web-sockets', '--window-size=1100,900', '--no-default-browser-check', '--disable-first-run-ui', '--no-first-run', '--homepage=about:blank', '--disable-direct-npapi-requests', '--disable-web-security', '--disable-notifications', '--disable-extensions', '--disable-save-password-bubble']
if disk_cache_dir:
chrome_args.append('--disk-cache-dir=%s' % disk_cache_dir) # depends on [control=['if'], data=[]]
if disk_cache_size:
chrome_args.append('--disk-cache-size=%s' % disk_cache_size) # depends on [control=['if'], data=[]]
if self.ignore_cert_errors:
chrome_args.append('--ignore-certificate-errors') # depends on [control=['if'], data=[]]
if proxy:
chrome_args.append('--proxy-server=%s' % proxy) # depends on [control=['if'], data=[]]
chrome_args.append('about:blank')
self.logger.info('running: %r', subprocess.list2cmdline(chrome_args))
# start_new_session - new process group so we can kill the whole group
self.chrome_process = subprocess.Popen(chrome_args, env=new_env, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
self._out_reader_thread = threading.Thread(target=self._read_stderr_stdout, name='ChromeOutReaderThread:%s' % self.port, daemon=True)
self._out_reader_thread.start()
self.logger.info('chrome running, pid %s' % self.chrome_process.pid)
return self._websocket_url() |
def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path:
"""
Ensures a path is parsed.
:param path: A path string or Path object.
:return: The path as a Path object.
"""
if isinstance(path, str):
return pathlib.Path(path)
return path | def function[_normalise_path, parameter[path]]:
constant[
Ensures a path is parsed.
:param path: A path string or Path object.
:return: The path as a Path object.
]
if call[name[isinstance], parameter[name[path], name[str]]] begin[:]
return[call[name[pathlib].Path, parameter[name[path]]]]
return[name[path]] | keyword[def] identifier[_normalise_path] ( identifier[path] : identifier[Union] [ identifier[str] , identifier[pathlib] . identifier[Path] ])-> identifier[pathlib] . identifier[Path] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[path] , identifier[str] ):
keyword[return] identifier[pathlib] . identifier[Path] ( identifier[path] )
keyword[return] identifier[path] | def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path:
"""
Ensures a path is parsed.
:param path: A path string or Path object.
:return: The path as a Path object.
"""
if isinstance(path, str):
return pathlib.Path(path) # depends on [control=['if'], data=[]]
return path |
def get_output_error(cmd):
"""Return the exit status, stdout, stderr of a command"""
if not isinstance(cmd, list):
cmd = [cmd]
logging.debug("Running: %s", ' '.join(map(quote, cmd)))
try:
result = Popen(cmd, stdout=PIPE, stderr=PIPE)
except IOError as e:
return -1, u(''), u('Failed to run %r: %r' % (cmd, e))
so, se = result.communicate()
# unicode:
so = so.decode('utf8', 'replace')
se = se.decode('utf8', 'replace')
return result.returncode, so, se | def function[get_output_error, parameter[cmd]]:
constant[Return the exit status, stdout, stderr of a command]
if <ast.UnaryOp object at 0x7da20c993700> begin[:]
variable[cmd] assign[=] list[[<ast.Name object at 0x7da20c993880>]]
call[name[logging].debug, parameter[constant[Running: %s], call[constant[ ].join, parameter[call[name[map], parameter[name[quote], name[cmd]]]]]]]
<ast.Try object at 0x7da20c992c50>
<ast.Tuple object at 0x7da20c990a90> assign[=] call[name[result].communicate, parameter[]]
variable[so] assign[=] call[name[so].decode, parameter[constant[utf8], constant[replace]]]
variable[se] assign[=] call[name[se].decode, parameter[constant[utf8], constant[replace]]]
return[tuple[[<ast.Attribute object at 0x7da20c9910c0>, <ast.Name object at 0x7da20c992ec0>, <ast.Name object at 0x7da20c993cd0>]]] | keyword[def] identifier[get_output_error] ( identifier[cmd] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[cmd] , identifier[list] ):
identifier[cmd] =[ identifier[cmd] ]
identifier[logging] . identifier[debug] ( literal[string] , literal[string] . identifier[join] ( identifier[map] ( identifier[quote] , identifier[cmd] )))
keyword[try] :
identifier[result] = identifier[Popen] ( identifier[cmd] , identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[PIPE] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[return] - literal[int] , identifier[u] ( literal[string] ), identifier[u] ( literal[string] %( identifier[cmd] , identifier[e] ))
identifier[so] , identifier[se] = identifier[result] . identifier[communicate] ()
identifier[so] = identifier[so] . identifier[decode] ( literal[string] , literal[string] )
identifier[se] = identifier[se] . identifier[decode] ( literal[string] , literal[string] )
keyword[return] identifier[result] . identifier[returncode] , identifier[so] , identifier[se] | def get_output_error(cmd):
"""Return the exit status, stdout, stderr of a command"""
if not isinstance(cmd, list):
cmd = [cmd] # depends on [control=['if'], data=[]]
logging.debug('Running: %s', ' '.join(map(quote, cmd)))
try:
result = Popen(cmd, stdout=PIPE, stderr=PIPE) # depends on [control=['try'], data=[]]
except IOError as e:
return (-1, u(''), u('Failed to run %r: %r' % (cmd, e))) # depends on [control=['except'], data=['e']]
(so, se) = result.communicate()
# unicode:
so = so.decode('utf8', 'replace')
se = se.decode('utf8', 'replace')
return (result.returncode, so, se) |
def addvPPfunc(self,solution):
'''
Adds the marginal marginal value function to an existing solution, so
that the next solver can evaluate vPP and thus use cubic interpolation.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
solution : ConsumerSolution
The same solution passed as input, but with the marginal marginal
value function for this period added as the attribute vPPfunc.
'''
vPPfuncNow = MargMargValueFunc2D(solution.cFunc,self.CRRA)
solution.vPPfunc = vPPfuncNow
return solution | def function[addvPPfunc, parameter[self, solution]]:
constant[
Adds the marginal marginal value function to an existing solution, so
that the next solver can evaluate vPP and thus use cubic interpolation.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
solution : ConsumerSolution
The same solution passed as input, but with the marginal marginal
value function for this period added as the attribute vPPfunc.
]
variable[vPPfuncNow] assign[=] call[name[MargMargValueFunc2D], parameter[name[solution].cFunc, name[self].CRRA]]
name[solution].vPPfunc assign[=] name[vPPfuncNow]
return[name[solution]] | keyword[def] identifier[addvPPfunc] ( identifier[self] , identifier[solution] ):
literal[string]
identifier[vPPfuncNow] = identifier[MargMargValueFunc2D] ( identifier[solution] . identifier[cFunc] , identifier[self] . identifier[CRRA] )
identifier[solution] . identifier[vPPfunc] = identifier[vPPfuncNow]
keyword[return] identifier[solution] | def addvPPfunc(self, solution):
"""
Adds the marginal marginal value function to an existing solution, so
that the next solver can evaluate vPP and thus use cubic interpolation.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
solution : ConsumerSolution
The same solution passed as input, but with the marginal marginal
value function for this period added as the attribute vPPfunc.
"""
vPPfuncNow = MargMargValueFunc2D(solution.cFunc, self.CRRA)
solution.vPPfunc = vPPfuncNow
return solution |
def bsinPoints(pb, pe):
"""Return Bezier control points, when pb and pe stand for a full period
from (0,0) to (2*pi, 0), respectively, in the user's coordinate system.
The returned points can be used to draw up to four Bezier curves for
the complete phase of the sine function graph (0 to 360 degrees).
"""
v = pe - pb
assert v.y == 0, "begin and end points must have same y coordinate"
f = abs(v) * 0.5 / math.pi # represents the unit
cp1 = 5.34295228e-01
cp2 = 1.01474288e+00
y_ampl = (0, f)
y_cp1 = (0, f * cp1)
y_cp2 = (0, f * cp2)
p0 = pb
p4 = pe
p1 = pb + v * 0.25 - y_ampl
p2 = pb + v * 0.5
p3 = pb + v * 0.75 + y_ampl
k1 = pb + v * (1./12.) - y_cp1
k2 = pb + v * (2./12.) - y_cp2
k3 = pb + v * (4./12.) - y_cp2
k4 = pb + v * (5./12.) - y_cp1
k5 = pb + v * (7./12.) + y_cp1
k6 = pb + v * (8./12.) + y_cp2
k7 = pb + v * (10./12.) + y_cp2
k8 = pb + v * (11./12.) + y_cp1
return p0, k1, k2, p1, k3, k4, p2, k5, k6, p3, k7, k8, p4 | def function[bsinPoints, parameter[pb, pe]]:
constant[Return Bezier control points, when pb and pe stand for a full period
from (0,0) to (2*pi, 0), respectively, in the user's coordinate system.
The returned points can be used to draw up to four Bezier curves for
the complete phase of the sine function graph (0 to 360 degrees).
]
variable[v] assign[=] binary_operation[name[pe] - name[pb]]
assert[compare[name[v].y equal[==] constant[0]]]
variable[f] assign[=] binary_operation[binary_operation[call[name[abs], parameter[name[v]]] * constant[0.5]] / name[math].pi]
variable[cp1] assign[=] constant[0.534295228]
variable[cp2] assign[=] constant[1.01474288]
variable[y_ampl] assign[=] tuple[[<ast.Constant object at 0x7da1b167db40>, <ast.Name object at 0x7da1b167dc60>]]
variable[y_cp1] assign[=] tuple[[<ast.Constant object at 0x7da1b167dcc0>, <ast.BinOp object at 0x7da1b167e080>]]
variable[y_cp2] assign[=] tuple[[<ast.Constant object at 0x7da1b167f9d0>, <ast.BinOp object at 0x7da1b167e020>]]
variable[p0] assign[=] name[pb]
variable[p4] assign[=] name[pe]
variable[p1] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * constant[0.25]]] - name[y_ampl]]
variable[p2] assign[=] binary_operation[name[pb] + binary_operation[name[v] * constant[0.5]]]
variable[p3] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * constant[0.75]]] + name[y_ampl]]
variable[k1] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[1.0] / constant[12.0]]]] - name[y_cp1]]
variable[k2] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[2.0] / constant[12.0]]]] - name[y_cp2]]
variable[k3] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[4.0] / constant[12.0]]]] - name[y_cp2]]
variable[k4] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[5.0] / constant[12.0]]]] - name[y_cp1]]
variable[k5] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[7.0] / constant[12.0]]]] + name[y_cp1]]
variable[k6] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[8.0] / constant[12.0]]]] + name[y_cp2]]
variable[k7] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[10.0] / constant[12.0]]]] + name[y_cp2]]
variable[k8] assign[=] binary_operation[binary_operation[name[pb] + binary_operation[name[v] * binary_operation[constant[11.0] / constant[12.0]]]] + name[y_cp1]]
return[tuple[[<ast.Name object at 0x7da1b18c12d0>, <ast.Name object at 0x7da1b18c1360>, <ast.Name object at 0x7da1b18c1390>, <ast.Name object at 0x7da1b18c08e0>, <ast.Name object at 0x7da1b18c0910>, <ast.Name object at 0x7da1b18c0940>, <ast.Name object at 0x7da1b18c0970>, <ast.Name object at 0x7da1b18c09a0>, <ast.Name object at 0x7da1b18c09d0>, <ast.Name object at 0x7da1b18c0a00>, <ast.Name object at 0x7da1b18c0a30>, <ast.Name object at 0x7da1b18c0a60>, <ast.Name object at 0x7da1b18c0a90>]]] | keyword[def] identifier[bsinPoints] ( identifier[pb] , identifier[pe] ):
literal[string]
identifier[v] = identifier[pe] - identifier[pb]
keyword[assert] identifier[v] . identifier[y] == literal[int] , literal[string]
identifier[f] = identifier[abs] ( identifier[v] )* literal[int] / identifier[math] . identifier[pi]
identifier[cp1] = literal[int]
identifier[cp2] = literal[int]
identifier[y_ampl] =( literal[int] , identifier[f] )
identifier[y_cp1] =( literal[int] , identifier[f] * identifier[cp1] )
identifier[y_cp2] =( literal[int] , identifier[f] * identifier[cp2] )
identifier[p0] = identifier[pb]
identifier[p4] = identifier[pe]
identifier[p1] = identifier[pb] + identifier[v] * literal[int] - identifier[y_ampl]
identifier[p2] = identifier[pb] + identifier[v] * literal[int]
identifier[p3] = identifier[pb] + identifier[v] * literal[int] + identifier[y_ampl]
identifier[k1] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )- identifier[y_cp1]
identifier[k2] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )- identifier[y_cp2]
identifier[k3] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )- identifier[y_cp2]
identifier[k4] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )- identifier[y_cp1]
identifier[k5] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )+ identifier[y_cp1]
identifier[k6] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )+ identifier[y_cp2]
identifier[k7] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )+ identifier[y_cp2]
identifier[k8] = identifier[pb] + identifier[v] *( literal[int] / literal[int] )+ identifier[y_cp1]
keyword[return] identifier[p0] , identifier[k1] , identifier[k2] , identifier[p1] , identifier[k3] , identifier[k4] , identifier[p2] , identifier[k5] , identifier[k6] , identifier[p3] , identifier[k7] , identifier[k8] , identifier[p4] | def bsinPoints(pb, pe):
"""Return Bezier control points, when pb and pe stand for a full period
from (0,0) to (2*pi, 0), respectively, in the user's coordinate system.
The returned points can be used to draw up to four Bezier curves for
the complete phase of the sine function graph (0 to 360 degrees).
"""
v = pe - pb
assert v.y == 0, 'begin and end points must have same y coordinate'
f = abs(v) * 0.5 / math.pi # represents the unit
cp1 = 0.534295228
cp2 = 1.01474288
y_ampl = (0, f)
y_cp1 = (0, f * cp1)
y_cp2 = (0, f * cp2)
p0 = pb
p4 = pe
p1 = pb + v * 0.25 - y_ampl
p2 = pb + v * 0.5
p3 = pb + v * 0.75 + y_ampl
k1 = pb + v * (1.0 / 12.0) - y_cp1
k2 = pb + v * (2.0 / 12.0) - y_cp2
k3 = pb + v * (4.0 / 12.0) - y_cp2
k4 = pb + v * (5.0 / 12.0) - y_cp1
k5 = pb + v * (7.0 / 12.0) + y_cp1
k6 = pb + v * (8.0 / 12.0) + y_cp2
k7 = pb + v * (10.0 / 12.0) + y_cp2
k8 = pb + v * (11.0 / 12.0) + y_cp1
return (p0, k1, k2, p1, k3, k4, p2, k5, k6, p3, k7, k8, p4) |
def get_pkglist():
"""
Return list of all installed packages
Note: It returns one project name per pkg no matter how many versions
of a particular package is installed
@returns: list of project name strings for every installed pkg
"""
dists = Distributions()
projects = []
for (dist, _active) in dists.get_distributions("all"):
if dist.project_name not in projects:
projects.append(dist.project_name)
return projects | def function[get_pkglist, parameter[]]:
constant[
Return list of all installed packages
Note: It returns one project name per pkg no matter how many versions
of a particular package is installed
@returns: list of project name strings for every installed pkg
]
variable[dists] assign[=] call[name[Distributions], parameter[]]
variable[projects] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c76c7c0>, <ast.Name object at 0x7da20c76f9a0>]]] in starred[call[name[dists].get_distributions, parameter[constant[all]]]] begin[:]
if compare[name[dist].project_name <ast.NotIn object at 0x7da2590d7190> name[projects]] begin[:]
call[name[projects].append, parameter[name[dist].project_name]]
return[name[projects]] | keyword[def] identifier[get_pkglist] ():
literal[string]
identifier[dists] = identifier[Distributions] ()
identifier[projects] =[]
keyword[for] ( identifier[dist] , identifier[_active] ) keyword[in] identifier[dists] . identifier[get_distributions] ( literal[string] ):
keyword[if] identifier[dist] . identifier[project_name] keyword[not] keyword[in] identifier[projects] :
identifier[projects] . identifier[append] ( identifier[dist] . identifier[project_name] )
keyword[return] identifier[projects] | def get_pkglist():
"""
Return list of all installed packages
Note: It returns one project name per pkg no matter how many versions
of a particular package is installed
@returns: list of project name strings for every installed pkg
"""
dists = Distributions()
projects = []
for (dist, _active) in dists.get_distributions('all'):
if dist.project_name not in projects:
projects.append(dist.project_name) # depends on [control=['if'], data=['projects']] # depends on [control=['for'], data=[]]
return projects |
def gen_mod(src1, src2, dst):
"""Return a MOD instruction.
"""
assert src1.size == src2.size
return ReilBuilder.build(ReilMnemonic.MOD, src1, src2, dst) | def function[gen_mod, parameter[src1, src2, dst]]:
constant[Return a MOD instruction.
]
assert[compare[name[src1].size equal[==] name[src2].size]]
return[call[name[ReilBuilder].build, parameter[name[ReilMnemonic].MOD, name[src1], name[src2], name[dst]]]] | keyword[def] identifier[gen_mod] ( identifier[src1] , identifier[src2] , identifier[dst] ):
literal[string]
keyword[assert] identifier[src1] . identifier[size] == identifier[src2] . identifier[size]
keyword[return] identifier[ReilBuilder] . identifier[build] ( identifier[ReilMnemonic] . identifier[MOD] , identifier[src1] , identifier[src2] , identifier[dst] ) | def gen_mod(src1, src2, dst):
"""Return a MOD instruction.
"""
assert src1.size == src2.size
return ReilBuilder.build(ReilMnemonic.MOD, src1, src2, dst) |
def _le_from_gt(self, other):
"""Return a <= b. Computed by @total_ordering from (not a > b)."""
op_result = self.__gt__(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result | def function[_le_from_gt, parameter[self, other]]:
constant[Return a <= b. Computed by @total_ordering from (not a > b).]
variable[op_result] assign[=] call[name[self].__gt__, parameter[name[other]]]
if compare[name[op_result] is name[NotImplemented]] begin[:]
return[name[NotImplemented]]
return[<ast.UnaryOp object at 0x7da1b2559c60>] | keyword[def] identifier[_le_from_gt] ( identifier[self] , identifier[other] ):
literal[string]
identifier[op_result] = identifier[self] . identifier[__gt__] ( identifier[other] )
keyword[if] identifier[op_result] keyword[is] identifier[NotImplemented] :
keyword[return] identifier[NotImplemented]
keyword[return] keyword[not] identifier[op_result] | def _le_from_gt(self, other):
"""Return a <= b. Computed by @total_ordering from (not a > b)."""
op_result = self.__gt__(other)
if op_result is NotImplemented:
return NotImplemented # depends on [control=['if'], data=['NotImplemented']]
return not op_result |
def get_success_url(self):
"""Get the url depending on what type of configuration I deleted."""
if self.stage_id:
url = reverse('projects_stage_view', args=(self.project_id, self.stage_id))
else:
url = reverse('projects_project_view', args=(self.project_id,))
return url | def function[get_success_url, parameter[self]]:
constant[Get the url depending on what type of configuration I deleted.]
if name[self].stage_id begin[:]
variable[url] assign[=] call[name[reverse], parameter[constant[projects_stage_view]]]
return[name[url]] | keyword[def] identifier[get_success_url] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[stage_id] :
identifier[url] = identifier[reverse] ( literal[string] , identifier[args] =( identifier[self] . identifier[project_id] , identifier[self] . identifier[stage_id] ))
keyword[else] :
identifier[url] = identifier[reverse] ( literal[string] , identifier[args] =( identifier[self] . identifier[project_id] ,))
keyword[return] identifier[url] | def get_success_url(self):
"""Get the url depending on what type of configuration I deleted."""
if self.stage_id:
url = reverse('projects_stage_view', args=(self.project_id, self.stage_id)) # depends on [control=['if'], data=[]]
else:
url = reverse('projects_project_view', args=(self.project_id,))
return url |
def _match_filters(parameter, filters=None):
"""Return True if the given parameter matches all the filters"""
for filter_obj in (filters or []):
key = filter_obj['Key']
option = filter_obj.get('Option', 'Equals')
values = filter_obj.get('Values', [])
what = None
if key == 'Type':
what = parameter.type
elif key == 'KeyId':
what = parameter.keyid
if option == 'Equals'\
and not any(what == value for value in values):
return False
elif option == 'BeginsWith'\
and not any(what.startswith(value) for value in values):
return False
# True if no false match (or no filters at all)
return True | def function[_match_filters, parameter[parameter, filters]]:
constant[Return True if the given parameter matches all the filters]
for taget[name[filter_obj]] in starred[<ast.BoolOp object at 0x7da18fe92980>] begin[:]
variable[key] assign[=] call[name[filter_obj]][constant[Key]]
variable[option] assign[=] call[name[filter_obj].get, parameter[constant[Option], constant[Equals]]]
variable[values] assign[=] call[name[filter_obj].get, parameter[constant[Values], list[[]]]]
variable[what] assign[=] constant[None]
if compare[name[key] equal[==] constant[Type]] begin[:]
variable[what] assign[=] name[parameter].type
if <ast.BoolOp object at 0x7da18fe93c70> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_match_filters] ( identifier[parameter] , identifier[filters] = keyword[None] ):
literal[string]
keyword[for] identifier[filter_obj] keyword[in] ( identifier[filters] keyword[or] []):
identifier[key] = identifier[filter_obj] [ literal[string] ]
identifier[option] = identifier[filter_obj] . identifier[get] ( literal[string] , literal[string] )
identifier[values] = identifier[filter_obj] . identifier[get] ( literal[string] ,[])
identifier[what] = keyword[None]
keyword[if] identifier[key] == literal[string] :
identifier[what] = identifier[parameter] . identifier[type]
keyword[elif] identifier[key] == literal[string] :
identifier[what] = identifier[parameter] . identifier[keyid]
keyword[if] identifier[option] == literal[string] keyword[and] keyword[not] identifier[any] ( identifier[what] == identifier[value] keyword[for] identifier[value] keyword[in] identifier[values] ):
keyword[return] keyword[False]
keyword[elif] identifier[option] == literal[string] keyword[and] keyword[not] identifier[any] ( identifier[what] . identifier[startswith] ( identifier[value] ) keyword[for] identifier[value] keyword[in] identifier[values] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def _match_filters(parameter, filters=None):
"""Return True if the given parameter matches all the filters"""
for filter_obj in filters or []:
key = filter_obj['Key']
option = filter_obj.get('Option', 'Equals')
values = filter_obj.get('Values', [])
what = None
if key == 'Type':
what = parameter.type # depends on [control=['if'], data=[]]
elif key == 'KeyId':
what = parameter.keyid # depends on [control=['if'], data=[]]
if option == 'Equals' and (not any((what == value for value in values))):
return False # depends on [control=['if'], data=[]]
elif option == 'BeginsWith' and (not any((what.startswith(value) for value in values))):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filter_obj']]
# True if no false match (or no filters at all)
return True |
def erase(self, leave_alternate_screen=True, erase_title=True):
"""
Hide all output and put the cursor back at the first line. This is for
instance used for running a system command (while hiding the CLI) and
later resuming the same CLI.)
:param leave_alternate_screen: When True, and when inside an alternate
screen buffer, quit the alternate screen.
:param erase_title: When True, clear the title from the title bar.
"""
output = self.output
output.cursor_backward(self._cursor_pos.x)
output.cursor_up(self._cursor_pos.y)
output.erase_down()
output.reset_attributes()
output.enable_autowrap()
output.flush()
# Erase title.
if self._last_title and erase_title:
output.clear_title()
self.reset(leave_alternate_screen=leave_alternate_screen) | def function[erase, parameter[self, leave_alternate_screen, erase_title]]:
constant[
Hide all output and put the cursor back at the first line. This is for
instance used for running a system command (while hiding the CLI) and
later resuming the same CLI.)
:param leave_alternate_screen: When True, and when inside an alternate
screen buffer, quit the alternate screen.
:param erase_title: When True, clear the title from the title bar.
]
variable[output] assign[=] name[self].output
call[name[output].cursor_backward, parameter[name[self]._cursor_pos.x]]
call[name[output].cursor_up, parameter[name[self]._cursor_pos.y]]
call[name[output].erase_down, parameter[]]
call[name[output].reset_attributes, parameter[]]
call[name[output].enable_autowrap, parameter[]]
call[name[output].flush, parameter[]]
if <ast.BoolOp object at 0x7da1b08450f0> begin[:]
call[name[output].clear_title, parameter[]]
call[name[self].reset, parameter[]] | keyword[def] identifier[erase] ( identifier[self] , identifier[leave_alternate_screen] = keyword[True] , identifier[erase_title] = keyword[True] ):
literal[string]
identifier[output] = identifier[self] . identifier[output]
identifier[output] . identifier[cursor_backward] ( identifier[self] . identifier[_cursor_pos] . identifier[x] )
identifier[output] . identifier[cursor_up] ( identifier[self] . identifier[_cursor_pos] . identifier[y] )
identifier[output] . identifier[erase_down] ()
identifier[output] . identifier[reset_attributes] ()
identifier[output] . identifier[enable_autowrap] ()
identifier[output] . identifier[flush] ()
keyword[if] identifier[self] . identifier[_last_title] keyword[and] identifier[erase_title] :
identifier[output] . identifier[clear_title] ()
identifier[self] . identifier[reset] ( identifier[leave_alternate_screen] = identifier[leave_alternate_screen] ) | def erase(self, leave_alternate_screen=True, erase_title=True):
"""
Hide all output and put the cursor back at the first line. This is for
instance used for running a system command (while hiding the CLI) and
later resuming the same CLI.)
:param leave_alternate_screen: When True, and when inside an alternate
screen buffer, quit the alternate screen.
:param erase_title: When True, clear the title from the title bar.
"""
output = self.output
output.cursor_backward(self._cursor_pos.x)
output.cursor_up(self._cursor_pos.y)
output.erase_down()
output.reset_attributes()
output.enable_autowrap()
output.flush()
# Erase title.
if self._last_title and erase_title:
output.clear_title() # depends on [control=['if'], data=[]]
self.reset(leave_alternate_screen=leave_alternate_screen) |
def logging_levels():
"""
Context manager to conditionally set logging levels.
Supports setting per-request debug logging using the `X-Request-Debug` header.
"""
enabled = strtobool(request.headers.get("x-request-debug", "false"))
level = None
try:
if enabled:
level = getLogger().getEffectiveLevel()
getLogger().setLevel(DEBUG)
yield
finally:
if enabled:
getLogger().setLevel(level) | def function[logging_levels, parameter[]]:
constant[
Context manager to conditionally set logging levels.
Supports setting per-request debug logging using the `X-Request-Debug` header.
]
variable[enabled] assign[=] call[name[strtobool], parameter[call[name[request].headers.get, parameter[constant[x-request-debug], constant[false]]]]]
variable[level] assign[=] constant[None]
<ast.Try object at 0x7da1b0c646d0> | keyword[def] identifier[logging_levels] ():
literal[string]
identifier[enabled] = identifier[strtobool] ( identifier[request] . identifier[headers] . identifier[get] ( literal[string] , literal[string] ))
identifier[level] = keyword[None]
keyword[try] :
keyword[if] identifier[enabled] :
identifier[level] = identifier[getLogger] (). identifier[getEffectiveLevel] ()
identifier[getLogger] (). identifier[setLevel] ( identifier[DEBUG] )
keyword[yield]
keyword[finally] :
keyword[if] identifier[enabled] :
identifier[getLogger] (). identifier[setLevel] ( identifier[level] ) | def logging_levels():
"""
Context manager to conditionally set logging levels.
Supports setting per-request debug logging using the `X-Request-Debug` header.
"""
enabled = strtobool(request.headers.get('x-request-debug', 'false'))
level = None
try:
if enabled:
level = getLogger().getEffectiveLevel()
getLogger().setLevel(DEBUG) # depends on [control=['if'], data=[]]
yield # depends on [control=['try'], data=[]]
finally:
if enabled:
getLogger().setLevel(level) # depends on [control=['if'], data=[]] |
def explicit_path(cls, ndivsm, kpath_bounds):
"""See _path for the meaning of the variables"""
return cls._path(ndivsm, kpath_bounds=kpath_bounds, comment="Explicit K-path") | def function[explicit_path, parameter[cls, ndivsm, kpath_bounds]]:
constant[See _path for the meaning of the variables]
return[call[name[cls]._path, parameter[name[ndivsm]]]] | keyword[def] identifier[explicit_path] ( identifier[cls] , identifier[ndivsm] , identifier[kpath_bounds] ):
literal[string]
keyword[return] identifier[cls] . identifier[_path] ( identifier[ndivsm] , identifier[kpath_bounds] = identifier[kpath_bounds] , identifier[comment] = literal[string] ) | def explicit_path(cls, ndivsm, kpath_bounds):
"""See _path for the meaning of the variables"""
return cls._path(ndivsm, kpath_bounds=kpath_bounds, comment='Explicit K-path') |
def pipe(value, *functions, funcs=None):
"""pipe(value, f, g, h) == h(g(f(value)))"""
if funcs:
functions = funcs
for function in functions:
value = function(value)
return value | def function[pipe, parameter[value]]:
constant[pipe(value, f, g, h) == h(g(f(value)))]
if name[funcs] begin[:]
variable[functions] assign[=] name[funcs]
for taget[name[function]] in starred[name[functions]] begin[:]
variable[value] assign[=] call[name[function], parameter[name[value]]]
return[name[value]] | keyword[def] identifier[pipe] ( identifier[value] ,* identifier[functions] , identifier[funcs] = keyword[None] ):
literal[string]
keyword[if] identifier[funcs] :
identifier[functions] = identifier[funcs]
keyword[for] identifier[function] keyword[in] identifier[functions] :
identifier[value] = identifier[function] ( identifier[value] )
keyword[return] identifier[value] | def pipe(value, *functions, funcs=None):
"""pipe(value, f, g, h) == h(g(f(value)))"""
if funcs:
functions = funcs # depends on [control=['if'], data=[]]
for function in functions:
value = function(value) # depends on [control=['for'], data=['function']]
return value |
def runs_once(meth):
"""
A wrapper around Fabric's runs_once() to support our dryrun feature.
"""
from burlap.common import get_dryrun, runs_once_methods
if get_dryrun():
pass
else:
runs_once_methods.append(meth)
_runs_once(meth)
return meth | def function[runs_once, parameter[meth]]:
constant[
A wrapper around Fabric's runs_once() to support our dryrun feature.
]
from relative_module[burlap.common] import module[get_dryrun], module[runs_once_methods]
if call[name[get_dryrun], parameter[]] begin[:]
pass
return[name[meth]] | keyword[def] identifier[runs_once] ( identifier[meth] ):
literal[string]
keyword[from] identifier[burlap] . identifier[common] keyword[import] identifier[get_dryrun] , identifier[runs_once_methods]
keyword[if] identifier[get_dryrun] ():
keyword[pass]
keyword[else] :
identifier[runs_once_methods] . identifier[append] ( identifier[meth] )
identifier[_runs_once] ( identifier[meth] )
keyword[return] identifier[meth] | def runs_once(meth):
"""
A wrapper around Fabric's runs_once() to support our dryrun feature.
"""
from burlap.common import get_dryrun, runs_once_methods
if get_dryrun():
pass # depends on [control=['if'], data=[]]
else:
runs_once_methods.append(meth)
_runs_once(meth)
return meth |
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList | def function[blocks2numList, parameter[blocks, n]]:
constant[inverse function of numList2blocks.]
variable[toProcess] assign[=] call[name[copy].copy, parameter[name[blocks]]]
variable[returnList] assign[=] list[[]]
for taget[name[numBlock]] in starred[name[toProcess]] begin[:]
variable[inner] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[n]]]] begin[:]
call[name[inner].append, parameter[binary_operation[name[numBlock] <ast.Mod object at 0x7da2590d6920> constant[256]]]]
<ast.AugAssign object at 0x7da1b26ad930>
call[name[inner].reverse, parameter[]]
call[name[returnList].extend, parameter[name[inner]]]
return[name[returnList]] | keyword[def] identifier[blocks2numList] ( identifier[blocks] , identifier[n] ):
literal[string]
identifier[toProcess] = identifier[copy] . identifier[copy] ( identifier[blocks] )
identifier[returnList] =[]
keyword[for] identifier[numBlock] keyword[in] identifier[toProcess] :
identifier[inner] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] ):
identifier[inner] . identifier[append] ( identifier[numBlock] % literal[int] )
identifier[numBlock] >>= literal[int]
identifier[inner] . identifier[reverse] ()
identifier[returnList] . identifier[extend] ( identifier[inner] )
keyword[return] identifier[returnList] | def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8 # depends on [control=['for'], data=[]]
inner.reverse()
returnList.extend(inner) # depends on [control=['for'], data=['numBlock']]
return returnList |
def execute_prepared_cql3_query(self, itemId, values, consistency):
"""
Parameters:
- itemId
- values
- consistency
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_execute_prepared_cql3_query(itemId, values, consistency)
return d | def function[execute_prepared_cql3_query, parameter[self, itemId, values, consistency]]:
constant[
Parameters:
- itemId
- values
- consistency
]
<ast.AugAssign object at 0x7da20e957c70>
variable[d] assign[=] call[name[defer].Deferred, parameter[]]
call[name[self].send_execute_prepared_cql3_query, parameter[name[itemId], name[values], name[consistency]]]
return[name[d]] | keyword[def] identifier[execute_prepared_cql3_query] ( identifier[self] , identifier[itemId] , identifier[values] , identifier[consistency] ):
literal[string]
identifier[self] . identifier[_seqid] += literal[int]
identifier[d] = identifier[self] . identifier[_reqs] [ identifier[self] . identifier[_seqid] ]= identifier[defer] . identifier[Deferred] ()
identifier[self] . identifier[send_execute_prepared_cql3_query] ( identifier[itemId] , identifier[values] , identifier[consistency] )
keyword[return] identifier[d] | def execute_prepared_cql3_query(self, itemId, values, consistency):
"""
Parameters:
- itemId
- values
- consistency
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_execute_prepared_cql3_query(itemId, values, consistency)
return d |
def institute_num_pattern_to_regex(pattern):
"""Given a numeration pattern from the institutes preprint report
numbers KB, convert it to turn it into a regexp string for
recognising such patterns in a reference line.
Change:
\ -> \\
9 -> \d
a -> [A-Za-z]
v -> [Vv] # Tony for arXiv vN
mm -> (0[1-9]|1[0-2])
yy -> \d{2}
yyyy -> [12]\d{3}
/ -> \/
s -> \s*
@param pattern: (string) a user-defined preprint reference numeration
pattern.
@return: (string) the regexp for recognising the pattern.
"""
simple_replacements = [
('9', r'\d'),
('w+', r'\w+'),
('a', r'[A-Za-z]'),
('v', r'[Vv]'),
('mm', r'(0[1-9]|1[0-2])'),
('yyyy', r'[12]\d{3}'),
('yy', r'\d\d'),
('s', r'\s*'),
(r'/', r'\/')]
# now loop through and carry out the simple replacements:
for repl in simple_replacements:
pattern = pattern.replace(repl[0], repl[1])
# now replace a couple of regexp-like paterns:
# quoted string with non-quoted version ("hello" with hello);
# Replace / [abcd ]/ with /( [abcd])?/ :
pattern = re_extract_quoted_text[0].sub(re_extract_quoted_text[1],
pattern)
pattern = re_extract_char_class[0].sub(re_extract_char_class[1],
pattern)
# the pattern has been transformed
return pattern | def function[institute_num_pattern_to_regex, parameter[pattern]]:
constant[Given a numeration pattern from the institutes preprint report
numbers KB, convert it to turn it into a regexp string for
recognising such patterns in a reference line.
Change:
\ -> \
9 -> \d
a -> [A-Za-z]
v -> [Vv] # Tony for arXiv vN
mm -> (0[1-9]|1[0-2])
yy -> \d{2}
yyyy -> [12]\d{3}
/ -> \/
s -> \s*
@param pattern: (string) a user-defined preprint reference numeration
pattern.
@return: (string) the regexp for recognising the pattern.
]
variable[simple_replacements] assign[=] list[[<ast.Tuple object at 0x7da1b13e3400>, <ast.Tuple object at 0x7da1b13e7a00>, <ast.Tuple object at 0x7da1b1392bc0>, <ast.Tuple object at 0x7da1b1391000>, <ast.Tuple object at 0x7da1b1391cc0>, <ast.Tuple object at 0x7da1b13933d0>, <ast.Tuple object at 0x7da1b1391690>, <ast.Tuple object at 0x7da1b1391240>, <ast.Tuple object at 0x7da1b1392e90>]]
for taget[name[repl]] in starred[name[simple_replacements]] begin[:]
variable[pattern] assign[=] call[name[pattern].replace, parameter[call[name[repl]][constant[0]], call[name[repl]][constant[1]]]]
variable[pattern] assign[=] call[call[name[re_extract_quoted_text]][constant[0]].sub, parameter[call[name[re_extract_quoted_text]][constant[1]], name[pattern]]]
variable[pattern] assign[=] call[call[name[re_extract_char_class]][constant[0]].sub, parameter[call[name[re_extract_char_class]][constant[1]], name[pattern]]]
return[name[pattern]] | keyword[def] identifier[institute_num_pattern_to_regex] ( identifier[pattern] ):
literal[string]
identifier[simple_replacements] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] )]
keyword[for] identifier[repl] keyword[in] identifier[simple_replacements] :
identifier[pattern] = identifier[pattern] . identifier[replace] ( identifier[repl] [ literal[int] ], identifier[repl] [ literal[int] ])
identifier[pattern] = identifier[re_extract_quoted_text] [ literal[int] ]. identifier[sub] ( identifier[re_extract_quoted_text] [ literal[int] ],
identifier[pattern] )
identifier[pattern] = identifier[re_extract_char_class] [ literal[int] ]. identifier[sub] ( identifier[re_extract_char_class] [ literal[int] ],
identifier[pattern] )
keyword[return] identifier[pattern] | def institute_num_pattern_to_regex(pattern):
"""Given a numeration pattern from the institutes preprint report
numbers KB, convert it to turn it into a regexp string for
recognising such patterns in a reference line.
Change:
\\ -> \\
9 -> \\d
a -> [A-Za-z]
v -> [Vv] # Tony for arXiv vN
mm -> (0[1-9]|1[0-2])
yy -> \\d{2}
yyyy -> [12]\\d{3}
/ -> \\/
s -> \\s*
@param pattern: (string) a user-defined preprint reference numeration
pattern.
@return: (string) the regexp for recognising the pattern.
"""
simple_replacements = [('9', '\\d'), ('w+', '\\w+'), ('a', '[A-Za-z]'), ('v', '[Vv]'), ('mm', '(0[1-9]|1[0-2])'), ('yyyy', '[12]\\d{3}'), ('yy', '\\d\\d'), ('s', '\\s*'), ('/', '\\/')]
# now loop through and carry out the simple replacements:
for repl in simple_replacements:
pattern = pattern.replace(repl[0], repl[1]) # depends on [control=['for'], data=['repl']]
# now replace a couple of regexp-like paterns:
# quoted string with non-quoted version ("hello" with hello);
# Replace / [abcd ]/ with /( [abcd])?/ :
pattern = re_extract_quoted_text[0].sub(re_extract_quoted_text[1], pattern)
pattern = re_extract_char_class[0].sub(re_extract_char_class[1], pattern)
# the pattern has been transformed
return pattern |
def calculate_file_md5(filepath, blocksize=2 ** 20):
"""Calculate an MD5 hash for a file."""
checksum = hashlib.md5()
with click.open_file(filepath, "rb") as f:
def update_chunk():
"""Add chunk to checksum."""
buf = f.read(blocksize)
if buf:
checksum.update(buf)
return bool(buf)
while update_chunk():
pass
return checksum.hexdigest() | def function[calculate_file_md5, parameter[filepath, blocksize]]:
constant[Calculate an MD5 hash for a file.]
variable[checksum] assign[=] call[name[hashlib].md5, parameter[]]
with call[name[click].open_file, parameter[name[filepath], constant[rb]]] begin[:]
def function[update_chunk, parameter[]]:
constant[Add chunk to checksum.]
variable[buf] assign[=] call[name[f].read, parameter[name[blocksize]]]
if name[buf] begin[:]
call[name[checksum].update, parameter[name[buf]]]
return[call[name[bool], parameter[name[buf]]]]
while call[name[update_chunk], parameter[]] begin[:]
pass
return[call[name[checksum].hexdigest, parameter[]]] | keyword[def] identifier[calculate_file_md5] ( identifier[filepath] , identifier[blocksize] = literal[int] ** literal[int] ):
literal[string]
identifier[checksum] = identifier[hashlib] . identifier[md5] ()
keyword[with] identifier[click] . identifier[open_file] ( identifier[filepath] , literal[string] ) keyword[as] identifier[f] :
keyword[def] identifier[update_chunk] ():
literal[string]
identifier[buf] = identifier[f] . identifier[read] ( identifier[blocksize] )
keyword[if] identifier[buf] :
identifier[checksum] . identifier[update] ( identifier[buf] )
keyword[return] identifier[bool] ( identifier[buf] )
keyword[while] identifier[update_chunk] ():
keyword[pass]
keyword[return] identifier[checksum] . identifier[hexdigest] () | def calculate_file_md5(filepath, blocksize=2 ** 20):
"""Calculate an MD5 hash for a file."""
checksum = hashlib.md5()
with click.open_file(filepath, 'rb') as f:
def update_chunk():
"""Add chunk to checksum."""
buf = f.read(blocksize)
if buf:
checksum.update(buf) # depends on [control=['if'], data=[]]
return bool(buf)
while update_chunk():
pass # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['f']]
return checksum.hexdigest() |
def search(self, keyword, remotepath = None, recursive = True):
''' Usage: search <keyword> [remotepath] [recursive] - \
search for a file using keyword at Baidu Yun
keyword - the keyword to search
remotepath - remote path at Baidu Yun, if not specified, it's app's root directory
resursive - search recursively or not. default is true
'''
rpath = get_pcs_path(remotepath)
pars = {
'method' : 'search',
'path' : rpath,
'wd' : keyword,
're' : '1' if str2bool(recursive) else '0'}
self.pd("Searching: '{}'".format(rpath))
return self.__get(pcsurl + 'file', pars, self.__search_act) | def function[search, parameter[self, keyword, remotepath, recursive]]:
constant[ Usage: search <keyword> [remotepath] [recursive] - search for a file using keyword at Baidu Yun
keyword - the keyword to search
remotepath - remote path at Baidu Yun, if not specified, it's app's root directory
resursive - search recursively or not. default is true
]
variable[rpath] assign[=] call[name[get_pcs_path], parameter[name[remotepath]]]
variable[pars] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d46470>, <ast.Constant object at 0x7da1b1d463b0>, <ast.Constant object at 0x7da1b1d464d0>, <ast.Constant object at 0x7da1b1d463e0>], [<ast.Constant object at 0x7da1b1d46410>, <ast.Name object at 0x7da1b1d46500>, <ast.Name object at 0x7da1b1d477c0>, <ast.IfExp object at 0x7da1b1d45540>]]
call[name[self].pd, parameter[call[constant[Searching: '{}'].format, parameter[name[rpath]]]]]
return[call[name[self].__get, parameter[binary_operation[name[pcsurl] + constant[file]], name[pars], name[self].__search_act]]] | keyword[def] identifier[search] ( identifier[self] , identifier[keyword] , identifier[remotepath] = keyword[None] , identifier[recursive] = keyword[True] ):
literal[string]
identifier[rpath] = identifier[get_pcs_path] ( identifier[remotepath] )
identifier[pars] ={
literal[string] : literal[string] ,
literal[string] : identifier[rpath] ,
literal[string] : identifier[keyword] ,
literal[string] : literal[string] keyword[if] identifier[str2bool] ( identifier[recursive] ) keyword[else] literal[string] }
identifier[self] . identifier[pd] ( literal[string] . identifier[format] ( identifier[rpath] ))
keyword[return] identifier[self] . identifier[__get] ( identifier[pcsurl] + literal[string] , identifier[pars] , identifier[self] . identifier[__search_act] ) | def search(self, keyword, remotepath=None, recursive=True):
""" Usage: search <keyword> [remotepath] [recursive] - search for a file using keyword at Baidu Yun
keyword - the keyword to search
remotepath - remote path at Baidu Yun, if not specified, it's app's root directory
resursive - search recursively or not. default is true
"""
rpath = get_pcs_path(remotepath)
pars = {'method': 'search', 'path': rpath, 'wd': keyword, 're': '1' if str2bool(recursive) else '0'}
self.pd("Searching: '{}'".format(rpath))
return self.__get(pcsurl + 'file', pars, self.__search_act) |
def namingConventionDecorator(self, namingConvention):
"""
:type namingConvention:INamingConvention
"""
def decoratorFunction(cls):
SyntheticClassController(cls).setNamingConvention(namingConvention)
return cls
return decoratorFunction | def function[namingConventionDecorator, parameter[self, namingConvention]]:
constant[
:type namingConvention:INamingConvention
]
def function[decoratorFunction, parameter[cls]]:
call[call[name[SyntheticClassController], parameter[name[cls]]].setNamingConvention, parameter[name[namingConvention]]]
return[name[cls]]
return[name[decoratorFunction]] | keyword[def] identifier[namingConventionDecorator] ( identifier[self] , identifier[namingConvention] ):
literal[string]
keyword[def] identifier[decoratorFunction] ( identifier[cls] ):
identifier[SyntheticClassController] ( identifier[cls] ). identifier[setNamingConvention] ( identifier[namingConvention] )
keyword[return] identifier[cls]
keyword[return] identifier[decoratorFunction] | def namingConventionDecorator(self, namingConvention):
"""
:type namingConvention:INamingConvention
"""
def decoratorFunction(cls):
SyntheticClassController(cls).setNamingConvention(namingConvention)
return cls
return decoratorFunction |
def export_model(self, format, file_name=None):
"""Save the assembled model in a modeling formalism other than PySB.
For more details on exporting PySB models, see
http://pysb.readthedocs.io/en/latest/modules/export/index.html
Parameters
----------
format : str
The format to export into, for instance "kappa", "bngl",
"sbml", "matlab", "mathematica", "potterswheel". See
http://pysb.readthedocs.io/en/latest/modules/export/index.html
for a list of supported formats. In addition to the formats
supported by PySB itself, this method also provides "sbgn"
output.
file_name : Optional[str]
An optional file name to save the exported model into.
Returns
-------
exp_str : str or object
The exported model string or object
"""
# Handle SBGN as special case
if format == 'sbgn':
exp_str = export_sbgn(self.model)
elif format == 'kappa_im':
# NOTE: this export is not a str, rather a graph object
return export_kappa_im(self.model, file_name)
elif format == 'kappa_cm':
# NOTE: this export is not a str, rather a graph object
return export_kappa_cm(self.model, file_name)
else:
try:
exp_str = pysb.export.export(self.model, format)
except KeyError:
logging.error('Unknown export format: %s' % format)
return None
if file_name:
with open(file_name, 'wb') as fh:
fh.write(exp_str.encode('utf-8'))
return exp_str | def function[export_model, parameter[self, format, file_name]]:
constant[Save the assembled model in a modeling formalism other than PySB.
For more details on exporting PySB models, see
http://pysb.readthedocs.io/en/latest/modules/export/index.html
Parameters
----------
format : str
The format to export into, for instance "kappa", "bngl",
"sbml", "matlab", "mathematica", "potterswheel". See
http://pysb.readthedocs.io/en/latest/modules/export/index.html
for a list of supported formats. In addition to the formats
supported by PySB itself, this method also provides "sbgn"
output.
file_name : Optional[str]
An optional file name to save the exported model into.
Returns
-------
exp_str : str or object
The exported model string or object
]
if compare[name[format] equal[==] constant[sbgn]] begin[:]
variable[exp_str] assign[=] call[name[export_sbgn], parameter[name[self].model]]
if name[file_name] begin[:]
with call[name[open], parameter[name[file_name], constant[wb]]] begin[:]
call[name[fh].write, parameter[call[name[exp_str].encode, parameter[constant[utf-8]]]]]
return[name[exp_str]] | keyword[def] identifier[export_model] ( identifier[self] , identifier[format] , identifier[file_name] = keyword[None] ):
literal[string]
keyword[if] identifier[format] == literal[string] :
identifier[exp_str] = identifier[export_sbgn] ( identifier[self] . identifier[model] )
keyword[elif] identifier[format] == literal[string] :
keyword[return] identifier[export_kappa_im] ( identifier[self] . identifier[model] , identifier[file_name] )
keyword[elif] identifier[format] == literal[string] :
keyword[return] identifier[export_kappa_cm] ( identifier[self] . identifier[model] , identifier[file_name] )
keyword[else] :
keyword[try] :
identifier[exp_str] = identifier[pysb] . identifier[export] . identifier[export] ( identifier[self] . identifier[model] , identifier[format] )
keyword[except] identifier[KeyError] :
identifier[logging] . identifier[error] ( literal[string] % identifier[format] )
keyword[return] keyword[None]
keyword[if] identifier[file_name] :
keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[exp_str] . identifier[encode] ( literal[string] ))
keyword[return] identifier[exp_str] | def export_model(self, format, file_name=None):
"""Save the assembled model in a modeling formalism other than PySB.
For more details on exporting PySB models, see
http://pysb.readthedocs.io/en/latest/modules/export/index.html
Parameters
----------
format : str
The format to export into, for instance "kappa", "bngl",
"sbml", "matlab", "mathematica", "potterswheel". See
http://pysb.readthedocs.io/en/latest/modules/export/index.html
for a list of supported formats. In addition to the formats
supported by PySB itself, this method also provides "sbgn"
output.
file_name : Optional[str]
An optional file name to save the exported model into.
Returns
-------
exp_str : str or object
The exported model string or object
"""
# Handle SBGN as special case
if format == 'sbgn':
exp_str = export_sbgn(self.model) # depends on [control=['if'], data=[]]
elif format == 'kappa_im':
# NOTE: this export is not a str, rather a graph object
return export_kappa_im(self.model, file_name) # depends on [control=['if'], data=[]]
elif format == 'kappa_cm':
# NOTE: this export is not a str, rather a graph object
return export_kappa_cm(self.model, file_name) # depends on [control=['if'], data=[]]
else:
try:
exp_str = pysb.export.export(self.model, format) # depends on [control=['try'], data=[]]
except KeyError:
logging.error('Unknown export format: %s' % format)
return None # depends on [control=['except'], data=[]]
if file_name:
with open(file_name, 'wb') as fh:
fh.write(exp_str.encode('utf-8')) # depends on [control=['with'], data=['fh']] # depends on [control=['if'], data=[]]
return exp_str |
def log(msg, **kwargs):
"""Generic log method. Will prepend timestamp.
Keyword arguments:
system - Name of the system/module
indent - Integer denoting the desired level of indentation
streams - List of streams to output to
stream - Stream to output to (singleton version of streams)
"""
if 'debug' in kwargs:
if 'currentdebug' in kwargs:
if kwargs['currentdebug'] < kwargs['debug']:
return False
else:
return False #no currentdebug passed, assuming no debug mode and thus skipping message
s = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "] "
if 'system' in kwargs:
s += "[" + system + "] "
if 'indent' in kwargs:
s += ("\t" * int(kwargs['indent']))
s += u(msg)
if s[-1] != '\n':
s += '\n'
if 'streams' in kwargs:
streams = kwargs['streams']
elif 'stream' in kwargs:
streams = [kwargs['stream']]
else:
streams = [stderr]
for stream in streams:
stream.write(s)
return s | def function[log, parameter[msg]]:
constant[Generic log method. Will prepend timestamp.
Keyword arguments:
system - Name of the system/module
indent - Integer denoting the desired level of indentation
streams - List of streams to output to
stream - Stream to output to (singleton version of streams)
]
if compare[constant[debug] in name[kwargs]] begin[:]
if compare[constant[currentdebug] in name[kwargs]] begin[:]
if compare[call[name[kwargs]][constant[currentdebug]] less[<] call[name[kwargs]][constant[debug]]] begin[:]
return[constant[False]]
variable[s] assign[=] binary_operation[binary_operation[constant[[] + call[call[name[datetime].datetime.now, parameter[]].strftime, parameter[constant[%Y-%m-%d %H:%M:%S]]]] + constant[] ]]
if compare[constant[system] in name[kwargs]] begin[:]
<ast.AugAssign object at 0x7da207f99270>
if compare[constant[indent] in name[kwargs]] begin[:]
<ast.AugAssign object at 0x7da207f98760>
<ast.AugAssign object at 0x7da207f9b0a0>
if compare[call[name[s]][<ast.UnaryOp object at 0x7da207f9a260>] not_equal[!=] constant[
]] begin[:]
<ast.AugAssign object at 0x7da207f9aa70>
if compare[constant[streams] in name[kwargs]] begin[:]
variable[streams] assign[=] call[name[kwargs]][constant[streams]]
for taget[name[stream]] in starred[name[streams]] begin[:]
call[name[stream].write, parameter[name[s]]]
return[name[s]] | keyword[def] identifier[log] ( identifier[msg] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[if] identifier[kwargs] [ literal[string] ]< identifier[kwargs] [ literal[string] ]:
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[False]
identifier[s] = literal[string] + identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] )+ literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[s] += literal[string] + identifier[system] + literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[s] +=( literal[string] * identifier[int] ( identifier[kwargs] [ literal[string] ]))
identifier[s] += identifier[u] ( identifier[msg] )
keyword[if] identifier[s] [- literal[int] ]!= literal[string] :
identifier[s] += literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[streams] = identifier[kwargs] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[kwargs] :
identifier[streams] =[ identifier[kwargs] [ literal[string] ]]
keyword[else] :
identifier[streams] =[ identifier[stderr] ]
keyword[for] identifier[stream] keyword[in] identifier[streams] :
identifier[stream] . identifier[write] ( identifier[s] )
keyword[return] identifier[s] | def log(msg, **kwargs):
"""Generic log method. Will prepend timestamp.
Keyword arguments:
system - Name of the system/module
indent - Integer denoting the desired level of indentation
streams - List of streams to output to
stream - Stream to output to (singleton version of streams)
"""
if 'debug' in kwargs:
if 'currentdebug' in kwargs:
if kwargs['currentdebug'] < kwargs['debug']:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kwargs']]
else:
return False #no currentdebug passed, assuming no debug mode and thus skipping message # depends on [control=['if'], data=['kwargs']]
s = '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] '
if 'system' in kwargs:
s += '[' + system + '] ' # depends on [control=['if'], data=[]]
if 'indent' in kwargs:
s += '\t' * int(kwargs['indent']) # depends on [control=['if'], data=['kwargs']]
s += u(msg)
if s[-1] != '\n':
s += '\n' # depends on [control=['if'], data=[]]
if 'streams' in kwargs:
streams = kwargs['streams'] # depends on [control=['if'], data=['kwargs']]
elif 'stream' in kwargs:
streams = [kwargs['stream']] # depends on [control=['if'], data=['kwargs']]
else:
streams = [stderr]
for stream in streams:
stream.write(s) # depends on [control=['for'], data=['stream']]
return s |
def rect(self, x,y,w,h,style=''):
"Draw a rectangle"
if(style=='F'):
op='f'
elif(style=='FD' or style=='DF'):
op='B'
else:
op='S'
self._out(sprintf('%.2f %.2f %.2f %.2f re %s',x*self.k,(self.h-y)*self.k,w*self.k,-h*self.k,op)) | def function[rect, parameter[self, x, y, w, h, style]]:
constant[Draw a rectangle]
if compare[name[style] equal[==] constant[F]] begin[:]
variable[op] assign[=] constant[f]
call[name[self]._out, parameter[call[name[sprintf], parameter[constant[%.2f %.2f %.2f %.2f re %s], binary_operation[name[x] * name[self].k], binary_operation[binary_operation[name[self].h - name[y]] * name[self].k], binary_operation[name[w] * name[self].k], binary_operation[<ast.UnaryOp object at 0x7da2045653f0> * name[self].k], name[op]]]]] | keyword[def] identifier[rect] ( identifier[self] , identifier[x] , identifier[y] , identifier[w] , identifier[h] , identifier[style] = literal[string] ):
literal[string]
keyword[if] ( identifier[style] == literal[string] ):
identifier[op] = literal[string]
keyword[elif] ( identifier[style] == literal[string] keyword[or] identifier[style] == literal[string] ):
identifier[op] = literal[string]
keyword[else] :
identifier[op] = literal[string]
identifier[self] . identifier[_out] ( identifier[sprintf] ( literal[string] , identifier[x] * identifier[self] . identifier[k] ,( identifier[self] . identifier[h] - identifier[y] )* identifier[self] . identifier[k] , identifier[w] * identifier[self] . identifier[k] ,- identifier[h] * identifier[self] . identifier[k] , identifier[op] )) | def rect(self, x, y, w, h, style=''):
"""Draw a rectangle"""
if style == 'F':
op = 'f' # depends on [control=['if'], data=[]]
elif style == 'FD' or style == 'DF':
op = 'B' # depends on [control=['if'], data=[]]
else:
op = 'S'
self._out(sprintf('%.2f %.2f %.2f %.2f re %s', x * self.k, (self.h - y) * self.k, w * self.k, -h * self.k, op)) |
def p_path(self, p):
"""path : path UNION path
| path SEQ path
| path STAR
| temp_formula TEST
| propositional"""
if len(p)==2:
p[0] = RegExpPropositional(p[1])
elif len(p)==3:
if p[2]==Symbols.PATH_TEST.value:
p[0] = RegExpTest(p[1])
elif p[2] == Symbols.PATH_STAR.value:
p[0] = RegExpStar(p[1])
else:
raise ValueError
elif len(p)==4:
if p[2]==Symbols.PATH_UNION.value:
p[0] = RegExpUnion([p[1], p[3]])
elif p[2] == Symbols.PATH_SEQUENCE.value:
p[0] = RegExpSequence([p[1], p[3]])
else:
raise ValueError
else:
raise ValueError | def function[p_path, parameter[self, p]]:
constant[path : path UNION path
| path SEQ path
| path STAR
| temp_formula TEST
| propositional]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[2]] begin[:]
call[name[p]][constant[0]] assign[=] call[name[RegExpPropositional], parameter[call[name[p]][constant[1]]]] | keyword[def] identifier[p_path] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]= identifier[RegExpPropositional] ( identifier[p] [ literal[int] ])
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
keyword[if] identifier[p] [ literal[int] ]== identifier[Symbols] . identifier[PATH_TEST] . identifier[value] :
identifier[p] [ literal[int] ]= identifier[RegExpTest] ( identifier[p] [ literal[int] ])
keyword[elif] identifier[p] [ literal[int] ]== identifier[Symbols] . identifier[PATH_STAR] . identifier[value] :
identifier[p] [ literal[int] ]= identifier[RegExpStar] ( identifier[p] [ literal[int] ])
keyword[else] :
keyword[raise] identifier[ValueError]
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
keyword[if] identifier[p] [ literal[int] ]== identifier[Symbols] . identifier[PATH_UNION] . identifier[value] :
identifier[p] [ literal[int] ]= identifier[RegExpUnion] ([ identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]])
keyword[elif] identifier[p] [ literal[int] ]== identifier[Symbols] . identifier[PATH_SEQUENCE] . identifier[value] :
identifier[p] [ literal[int] ]= identifier[RegExpSequence] ([ identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]])
keyword[else] :
keyword[raise] identifier[ValueError]
keyword[else] :
keyword[raise] identifier[ValueError] | def p_path(self, p):
"""path : path UNION path
| path SEQ path
| path STAR
| temp_formula TEST
| propositional"""
if len(p) == 2:
p[0] = RegExpPropositional(p[1]) # depends on [control=['if'], data=[]]
elif len(p) == 3:
if p[2] == Symbols.PATH_TEST.value:
p[0] = RegExpTest(p[1]) # depends on [control=['if'], data=[]]
elif p[2] == Symbols.PATH_STAR.value:
p[0] = RegExpStar(p[1]) # depends on [control=['if'], data=[]]
else:
raise ValueError # depends on [control=['if'], data=[]]
elif len(p) == 4:
if p[2] == Symbols.PATH_UNION.value:
p[0] = RegExpUnion([p[1], p[3]]) # depends on [control=['if'], data=[]]
elif p[2] == Symbols.PATH_SEQUENCE.value:
p[0] = RegExpSequence([p[1], p[3]]) # depends on [control=['if'], data=[]]
else:
raise ValueError # depends on [control=['if'], data=[]]
else:
raise ValueError |
def _run_scalpel_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect indels with Scalpel.
Single sample mode.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
if len(align_bams) > 1:
message = ("Scalpel does not currently support batch calling!")
raise ValueError(message)
input_bams = " ".join("%s" % x for x in align_bams)
tmp_path = "%s-scalpel-work" % utils.splitext_plus(out_file)[0]
tx_tmp_path = "%s-scalpel-work" % utils.splitext_plus(tx_out_file)[0]
if os.path.exists(tmp_path):
utils.remove_safe(tmp_path)
opts = " ".join(_scalpel_options_from_config(items, config, out_file, region, tmp_path))
opts += " --dir %s" % tx_tmp_path
min_cov = "3" # minimum coverage
opts += " --mincov %s" % min_cov
perl_exports = utils.get_perl_exports(os.path.dirname(tx_out_file))
cmd = ("{perl_exports} && "
"scalpel-discovery --single {opts} --ref {ref_file} --bam {input_bams} ")
do.run(cmd.format(**locals()), "Genotyping with Scalpel", {})
shutil.move(tx_tmp_path, tmp_path)
# parse produced variant file further
scalpel_tmp_file = bgzip_and_index(os.path.join(tmp_path, "variants.indel.vcf"), config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
bcftools_cmd_chi2 = get_scalpel_bcftools_filter_expression("chi2", config)
sample_name_str = items[0]["name"][1]
fix_ambig = vcfutils.fix_ambiguous_cl()
add_contig = vcfutils.add_contig_to_header_cl(dd.get_ref_file(items[0]), tx_out_file)
cl2 = ("{bcftools_cmd_chi2} {scalpel_tmp_file} | "
r"sed 's/FORMAT\tsample\(_name\)\{{0,1\}}/FORMAT\t{sample_name_str}/g' "
"| {fix_ambig} | vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort "
"| {add_contig} {compress_cmd} > {tx_out_file}")
do.run(cl2.format(**locals()), "Finalising Scalpel variants", {})
return out_file | def function[_run_scalpel_caller, parameter[align_bams, items, ref_file, assoc_files, region, out_file]]:
constant[Detect indels with Scalpel.
Single sample mode.
]
variable[config] assign[=] call[call[name[items]][constant[0]]][constant[config]]
if compare[name[out_file] is constant[None]] begin[:]
variable[out_file] assign[=] binary_operation[constant[%s-variants.vcf.gz] <ast.Mod object at 0x7da2590d6920> call[call[name[os].path.splitext, parameter[call[name[align_bams]][constant[0]]]]][constant[0]]]
if <ast.UnaryOp object at 0x7da20c76c3d0> begin[:]
with call[name[file_transaction], parameter[name[config], name[out_file]]] begin[:]
if compare[call[name[len], parameter[name[align_bams]]] greater[>] constant[1]] begin[:]
variable[message] assign[=] constant[Scalpel does not currently support batch calling!]
<ast.Raise object at 0x7da1b1713790>
variable[input_bams] assign[=] call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b1711990>]]
variable[tmp_path] assign[=] binary_operation[constant[%s-scalpel-work] <ast.Mod object at 0x7da2590d6920> call[call[name[utils].splitext_plus, parameter[name[out_file]]]][constant[0]]]
variable[tx_tmp_path] assign[=] binary_operation[constant[%s-scalpel-work] <ast.Mod object at 0x7da2590d6920> call[call[name[utils].splitext_plus, parameter[name[tx_out_file]]]][constant[0]]]
if call[name[os].path.exists, parameter[name[tmp_path]]] begin[:]
call[name[utils].remove_safe, parameter[name[tmp_path]]]
variable[opts] assign[=] call[constant[ ].join, parameter[call[name[_scalpel_options_from_config], parameter[name[items], name[config], name[out_file], name[region], name[tmp_path]]]]]
<ast.AugAssign object at 0x7da20c76d840>
variable[min_cov] assign[=] constant[3]
<ast.AugAssign object at 0x7da1b2347fa0>
variable[perl_exports] assign[=] call[name[utils].get_perl_exports, parameter[call[name[os].path.dirname, parameter[name[tx_out_file]]]]]
variable[cmd] assign[=] constant[{perl_exports} && scalpel-discovery --single {opts} --ref {ref_file} --bam {input_bams} ]
call[name[do].run, parameter[call[name[cmd].format, parameter[]], constant[Genotyping with Scalpel], dictionary[[], []]]]
call[name[shutil].move, parameter[name[tx_tmp_path], name[tmp_path]]]
variable[scalpel_tmp_file] assign[=] call[name[bgzip_and_index], parameter[call[name[os].path.join, parameter[name[tmp_path], constant[variants.indel.vcf]]], name[config]]]
variable[compress_cmd] assign[=] <ast.IfExp object at 0x7da1b17a6b90>
variable[bcftools_cmd_chi2] assign[=] call[name[get_scalpel_bcftools_filter_expression], parameter[constant[chi2], name[config]]]
variable[sample_name_str] assign[=] call[call[call[name[items]][constant[0]]][constant[name]]][constant[1]]
variable[fix_ambig] assign[=] call[name[vcfutils].fix_ambiguous_cl, parameter[]]
variable[add_contig] assign[=] call[name[vcfutils].add_contig_to_header_cl, parameter[call[name[dd].get_ref_file, parameter[call[name[items]][constant[0]]]], name[tx_out_file]]]
variable[cl2] assign[=] constant[{bcftools_cmd_chi2} {scalpel_tmp_file} | sed 's/FORMAT\tsample\(_name\)\{{0,1\}}/FORMAT\t{sample_name_str}/g' | {fix_ambig} | vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | {add_contig} {compress_cmd} > {tx_out_file}]
call[name[do].run, parameter[call[name[cl2].format, parameter[]], constant[Finalising Scalpel variants], dictionary[[], []]]]
return[name[out_file]] | keyword[def] identifier[_run_scalpel_caller] ( identifier[align_bams] , identifier[items] , identifier[ref_file] , identifier[assoc_files] ,
identifier[region] = keyword[None] , identifier[out_file] = keyword[None] ):
literal[string]
identifier[config] = identifier[items] [ literal[int] ][ literal[string] ]
keyword[if] identifier[out_file] keyword[is] keyword[None] :
identifier[out_file] = literal[string] % identifier[os] . identifier[path] . identifier[splitext] ( identifier[align_bams] [ literal[int] ])[ literal[int] ]
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ):
keyword[with] identifier[file_transaction] ( identifier[config] , identifier[out_file] ) keyword[as] identifier[tx_out_file] :
keyword[if] identifier[len] ( identifier[align_bams] )> literal[int] :
identifier[message] =( literal[string] )
keyword[raise] identifier[ValueError] ( identifier[message] )
identifier[input_bams] = literal[string] . identifier[join] ( literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[align_bams] )
identifier[tmp_path] = literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[out_file] )[ literal[int] ]
identifier[tx_tmp_path] = literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[tx_out_file] )[ literal[int] ]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[tmp_path] ):
identifier[utils] . identifier[remove_safe] ( identifier[tmp_path] )
identifier[opts] = literal[string] . identifier[join] ( identifier[_scalpel_options_from_config] ( identifier[items] , identifier[config] , identifier[out_file] , identifier[region] , identifier[tmp_path] ))
identifier[opts] += literal[string] % identifier[tx_tmp_path]
identifier[min_cov] = literal[string]
identifier[opts] += literal[string] % identifier[min_cov]
identifier[perl_exports] = identifier[utils] . identifier[get_perl_exports] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[tx_out_file] ))
identifier[cmd] =( literal[string]
literal[string] )
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] ,{})
identifier[shutil] . identifier[move] ( identifier[tx_tmp_path] , identifier[tmp_path] )
identifier[scalpel_tmp_file] = identifier[bgzip_and_index] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_path] , literal[string] ), identifier[config] )
identifier[compress_cmd] = literal[string] keyword[if] identifier[out_file] . identifier[endswith] ( literal[string] ) keyword[else] literal[string]
identifier[bcftools_cmd_chi2] = identifier[get_scalpel_bcftools_filter_expression] ( literal[string] , identifier[config] )
identifier[sample_name_str] = identifier[items] [ literal[int] ][ literal[string] ][ literal[int] ]
identifier[fix_ambig] = identifier[vcfutils] . identifier[fix_ambiguous_cl] ()
identifier[add_contig] = identifier[vcfutils] . identifier[add_contig_to_header_cl] ( identifier[dd] . identifier[get_ref_file] ( identifier[items] [ literal[int] ]), identifier[tx_out_file] )
identifier[cl2] =( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[do] . identifier[run] ( identifier[cl2] . identifier[format] (** identifier[locals] ()), literal[string] ,{})
keyword[return] identifier[out_file] | def _run_scalpel_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
"""Detect indels with Scalpel.
Single sample mode.
"""
config = items[0]['config']
if out_file is None:
out_file = '%s-variants.vcf.gz' % os.path.splitext(align_bams[0])[0] # depends on [control=['if'], data=['out_file']]
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
if len(align_bams) > 1:
message = 'Scalpel does not currently support batch calling!'
raise ValueError(message) # depends on [control=['if'], data=[]]
input_bams = ' '.join(('%s' % x for x in align_bams))
tmp_path = '%s-scalpel-work' % utils.splitext_plus(out_file)[0]
tx_tmp_path = '%s-scalpel-work' % utils.splitext_plus(tx_out_file)[0]
if os.path.exists(tmp_path):
utils.remove_safe(tmp_path) # depends on [control=['if'], data=[]]
opts = ' '.join(_scalpel_options_from_config(items, config, out_file, region, tmp_path))
opts += ' --dir %s' % tx_tmp_path
min_cov = '3' # minimum coverage
opts += ' --mincov %s' % min_cov
perl_exports = utils.get_perl_exports(os.path.dirname(tx_out_file))
cmd = '{perl_exports} && scalpel-discovery --single {opts} --ref {ref_file} --bam {input_bams} '
do.run(cmd.format(**locals()), 'Genotyping with Scalpel', {})
shutil.move(tx_tmp_path, tmp_path)
# parse produced variant file further
scalpel_tmp_file = bgzip_and_index(os.path.join(tmp_path, 'variants.indel.vcf'), config)
compress_cmd = '| bgzip -c' if out_file.endswith('gz') else ''
bcftools_cmd_chi2 = get_scalpel_bcftools_filter_expression('chi2', config)
sample_name_str = items[0]['name'][1]
fix_ambig = vcfutils.fix_ambiguous_cl()
add_contig = vcfutils.add_contig_to_header_cl(dd.get_ref_file(items[0]), tx_out_file)
cl2 = "{bcftools_cmd_chi2} {scalpel_tmp_file} | sed 's/FORMAT\\tsample\\(_name\\)\\{{0,1\\}}/FORMAT\\t{sample_name_str}/g' | {fix_ambig} | vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | {add_contig} {compress_cmd} > {tx_out_file}"
do.run(cl2.format(**locals()), 'Finalising Scalpel variants', {}) # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
return out_file |
def set_file_license_comment(self, doc, text):
"""
Raises OrderError if no package or file defined.
Raises SPDXValueError if text is not free form text.
Raises CardinalityError if more than one per file.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_license_comment_set:
self.file_license_comment_set = True
if validations.validate_file_lics_comment(text):
self.file(doc).license_comment = str_from_text(text)
else:
raise SPDXValueError('File::LicenseComment')
else:
raise CardinalityError('File::LicenseComment')
else:
raise OrderError('File::LicenseComment') | def function[set_file_license_comment, parameter[self, doc, text]]:
constant[
Raises OrderError if no package or file defined.
Raises SPDXValueError if text is not free form text.
Raises CardinalityError if more than one per file.
]
if <ast.BoolOp object at 0x7da1b0169300> begin[:]
if <ast.UnaryOp object at 0x7da1b016ada0> begin[:]
name[self].file_license_comment_set assign[=] constant[True]
if call[name[validations].validate_file_lics_comment, parameter[name[text]]] begin[:]
call[name[self].file, parameter[name[doc]]].license_comment assign[=] call[name[str_from_text], parameter[name[text]]] | keyword[def] identifier[set_file_license_comment] ( identifier[self] , identifier[doc] , identifier[text] ):
literal[string]
keyword[if] identifier[self] . identifier[has_package] ( identifier[doc] ) keyword[and] identifier[self] . identifier[has_file] ( identifier[doc] ):
keyword[if] keyword[not] identifier[self] . identifier[file_license_comment_set] :
identifier[self] . identifier[file_license_comment_set] = keyword[True]
keyword[if] identifier[validations] . identifier[validate_file_lics_comment] ( identifier[text] ):
identifier[self] . identifier[file] ( identifier[doc] ). identifier[license_comment] = identifier[str_from_text] ( identifier[text] )
keyword[else] :
keyword[raise] identifier[SPDXValueError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[CardinalityError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[OrderError] ( literal[string] ) | def set_file_license_comment(self, doc, text):
"""
Raises OrderError if no package or file defined.
Raises SPDXValueError if text is not free form text.
Raises CardinalityError if more than one per file.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_license_comment_set:
self.file_license_comment_set = True
if validations.validate_file_lics_comment(text):
self.file(doc).license_comment = str_from_text(text) # depends on [control=['if'], data=[]]
else:
raise SPDXValueError('File::LicenseComment') # depends on [control=['if'], data=[]]
else:
raise CardinalityError('File::LicenseComment') # depends on [control=['if'], data=[]]
else:
raise OrderError('File::LicenseComment') |
def run_project(
        project_directory: str,
        output_directory: str = None,
        logging_path: str = None,
        reader_path: str = None,
        reload_project_libraries: bool = False,
        **kwargs
) -> ExecutionResult:
    """
    Execute an entire Cauldron project as one command inside the
    current Python interpreter and return the result of that run.

    :param project_directory:
        Fully-qualified path to the directory containing the Cauldron
        project to run.
    :param output_directory:
        Fully-qualified path to the directory where all result files
        will be written. The directory is created if it does not exist.
    :param logging_path:
        Fully-qualified path used for logging. When a directory is
        given, a ``cauldron_run.log`` file is created inside it; an
        existing file at the location is replaced.
    :param reader_path:
        Path where a reader file is saved once the run completes. When
        omitted, no reader file is written; when a directory is given,
        the file is named after the project.
    :param reload_project_libraries:
        When True, all project libraries are reloaded before the
        project executes. Defaults to False.
    :param kwargs:
        Keyword arguments exposed on the ``cauldron.shared`` object
        while the project runs.
    :return:
        An execution result describing the run process and the shared
        data from the project's final state.
    """
    from cauldron.cli import batcher

    # Collect the delegated arguments explicitly so the mapping between
    # this function's parameters and batcher's parameter names is clear.
    run_arguments = dict(
        project_directory=project_directory,
        output_directory=output_directory,
        log_path=logging_path,
        reader_path=reader_path,
        reload_project_libraries=reload_project_libraries,
        shared_data=kwargs,
    )
    return batcher.run_project(**run_arguments)
constant[
Runs a project as a single command directly within the current Python
interpreter.
:param project_directory:
The fully-qualified path to the directory where the Cauldron project is
located
:param output_directory:
The fully-qualified path to the directory where the results will be
written. All of the results files will be written within this
directory. If the directory does not exist, it will be created.
:param logging_path:
The fully-qualified path to a file that will be used for logging. If a
directory is specified instead of a file, a file will be created using
the default filename of cauldron_run.log. If a file already exists at
that location it will be removed and a new file created in its place.
:param reader_path:
Specifies a path where a reader file will be saved after the project
has finished running. If no path is specified, no reader file will be
saved. If the path is a directory, a reader file will be saved in that
directory with the project name as the file name.
:param reload_project_libraries:
Whether or not to reload all project libraries prior to execution of
the project. By default this is False, but can be enabled in cases
where refreshing the project libraries before execution is needed.
:param kwargs:
Any variables to be available in the cauldron.shared object during
execution of the project can be specified here as keyword arguments.
:return:
A response object that contains information about the run process
and the shared data from the final state of the project.
]
from relative_module[cauldron.cli] import module[batcher]
return[call[name[batcher].run_project, parameter[]]] | keyword[def] identifier[run_project] (
identifier[project_directory] : identifier[str] ,
identifier[output_directory] : identifier[str] = keyword[None] ,
identifier[logging_path] : identifier[str] = keyword[None] ,
identifier[reader_path] : identifier[str] = keyword[None] ,
identifier[reload_project_libraries] : identifier[bool] = keyword[False] ,
** identifier[kwargs]
)-> identifier[ExecutionResult] :
literal[string]
keyword[from] identifier[cauldron] . identifier[cli] keyword[import] identifier[batcher]
keyword[return] identifier[batcher] . identifier[run_project] (
identifier[project_directory] = identifier[project_directory] ,
identifier[output_directory] = identifier[output_directory] ,
identifier[log_path] = identifier[logging_path] ,
identifier[reader_path] = identifier[reader_path] ,
identifier[reload_project_libraries] = identifier[reload_project_libraries] ,
identifier[shared_data] = identifier[kwargs]
) | def run_project(project_directory: str, output_directory: str=None, logging_path: str=None, reader_path: str=None, reload_project_libraries: bool=False, **kwargs) -> ExecutionResult:
"""
Runs a project as a single command directly within the current Python
interpreter.
:param project_directory:
The fully-qualified path to the directory where the Cauldron project is
located
:param output_directory:
The fully-qualified path to the directory where the results will be
written. All of the results files will be written within this
directory. If the directory does not exist, it will be created.
:param logging_path:
The fully-qualified path to a file that will be used for logging. If a
directory is specified instead of a file, a file will be created using
the default filename of cauldron_run.log. If a file already exists at
that location it will be removed and a new file created in its place.
:param reader_path:
Specifies a path where a reader file will be saved after the project
has finished running. If no path is specified, no reader file will be
saved. If the path is a directory, a reader file will be saved in that
directory with the project name as the file name.
:param reload_project_libraries:
Whether or not to reload all project libraries prior to execution of
the project. By default this is False, but can be enabled in cases
where refreshing the project libraries before execution is needed.
:param kwargs:
Any variables to be available in the cauldron.shared object during
execution of the project can be specified here as keyword arguments.
:return:
A response object that contains information about the run process
and the shared data from the final state of the project.
"""
from cauldron.cli import batcher
return batcher.run_project(project_directory=project_directory, output_directory=output_directory, log_path=logging_path, reader_path=reader_path, reload_project_libraries=reload_project_libraries, shared_data=kwargs) |
def _process_attachments(self, email):
"""
Convert the attachments in the email to the appropriate format for
sending with Connection.send().
"""
for a in email.attachments:
if isinstance(a, MIMEBase):
if not a.is_multipart():
obj = {
'filename': a.get_filename(),
'content_type': a.get_content_type(),
}
if a.get('content-transfer-encoding') == 'base64':
obj['content'] = a.get_payload()
obj['encoded'] = True
else:
obj['content'] = a.get_payload(decode=True)
yield obj
else:
yield {
'filename': a[0],
'content_type': a[2],
'content': a[1],
} | def function[_process_attachments, parameter[self, email]]:
constant[
Convert the attachments in the email to the appropriate format for
sending with Connection.send().
]
for taget[name[a]] in starred[name[email].attachments] begin[:]
if call[name[isinstance], parameter[name[a], name[MIMEBase]]] begin[:]
if <ast.UnaryOp object at 0x7da1b09eb850> begin[:]
variable[obj] assign[=] dictionary[[<ast.Constant object at 0x7da1b09eb940>, <ast.Constant object at 0x7da1b09e93c0>], [<ast.Call object at 0x7da1b09e99c0>, <ast.Call object at 0x7da1b09ea7d0>]]
if compare[call[name[a].get, parameter[constant[content-transfer-encoding]]] equal[==] constant[base64]] begin[:]
call[name[obj]][constant[content]] assign[=] call[name[a].get_payload, parameter[]]
call[name[obj]][constant[encoded]] assign[=] constant[True]
<ast.Yield object at 0x7da1b09bc9d0> | keyword[def] identifier[_process_attachments] ( identifier[self] , identifier[email] ):
literal[string]
keyword[for] identifier[a] keyword[in] identifier[email] . identifier[attachments] :
keyword[if] identifier[isinstance] ( identifier[a] , identifier[MIMEBase] ):
keyword[if] keyword[not] identifier[a] . identifier[is_multipart] ():
identifier[obj] ={
literal[string] : identifier[a] . identifier[get_filename] (),
literal[string] : identifier[a] . identifier[get_content_type] (),
}
keyword[if] identifier[a] . identifier[get] ( literal[string] )== literal[string] :
identifier[obj] [ literal[string] ]= identifier[a] . identifier[get_payload] ()
identifier[obj] [ literal[string] ]= keyword[True]
keyword[else] :
identifier[obj] [ literal[string] ]= identifier[a] . identifier[get_payload] ( identifier[decode] = keyword[True] )
keyword[yield] identifier[obj]
keyword[else] :
keyword[yield] {
literal[string] : identifier[a] [ literal[int] ],
literal[string] : identifier[a] [ literal[int] ],
literal[string] : identifier[a] [ literal[int] ],
} | def _process_attachments(self, email):
"""
Convert the attachments in the email to the appropriate format for
sending with Connection.send().
"""
for a in email.attachments:
if isinstance(a, MIMEBase):
if not a.is_multipart():
obj = {'filename': a.get_filename(), 'content_type': a.get_content_type()}
if a.get('content-transfer-encoding') == 'base64':
obj['content'] = a.get_payload()
obj['encoded'] = True # depends on [control=['if'], data=[]]
else:
obj['content'] = a.get_payload(decode=True)
yield obj # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
yield {'filename': a[0], 'content_type': a[2], 'content': a[1]} # depends on [control=['for'], data=['a']] |
def get_klout_id(tweet):
    """
    Get the Klout ID of the tweet's user, when one is present.

    Warning: Klout is deprecated and is being removed from Tweet payloads May 2018. \n
    See https://developer.twitter.com/en/docs/tweets/enrichments/overview/klout \n

    Args:
        tweet (Tweet): A Tweet object (or a dictionary)

    Returns:
        str: the user's Klout ID (if it exists), else return None

    Example:
        >>> from tweet_parser.getter_methods.tweet_user import get_klout_id
        >>> original_format_dict = {
        ...     "created_at": "Wed May 24 20:17:19 +0000 2017",
        ...     "user":
        ...      {"derived": {"klout":
        ...          {"user_id":"1234567890"}}}
        ...     }
        >>> get_klout_id(original_format_dict)
        '1234567890'

        >>> activity_streams_format_dict = {
        ...     "postedTime": "2017-05-24T20:17:19.000Z",
        ...     "gnip":
        ...      {"klout_profile": {
        ...       "klout_user_id": "1234567890"}
        ...     }}
        >>> get_klout_id(activity_streams_format_dict)
        '1234567890'
    """
    # The enrichment lives at a different path depending on whether the
    # payload is in original or activity-streams format; a missing key
    # anywhere along the path simply means there is no Klout ID.
    try:
        if is_original_format(tweet):
            return tweet['user']['derived']['klout']['user_id']
        return tweet['gnip']['klout_profile']['klout_user_id']
    except KeyError:
        return None
constant[
Warning: Klout is deprecated and is being removed from Tweet payloads May 2018.
See https://developer.twitter.com/en/docs/tweets/enrichments/overview/klout
Get the Klout ID of the user (str) (if it exists)
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
str: the user's Klout ID (if it exists), else return None
Example:
>>> from tweet_parser.getter_methods.tweet_user import get_klout_id
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "user":
... {"derived": {"klout":
... {"user_id":"1234567890"}}}
... }
>>> get_klout_id(original_format_dict)
'1234567890'
>>> activity_streams_format_dict = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "gnip":
... {"klout_profile": {
... "klout_user_id": "1234567890"}
... }}
>>> get_klout_id(activity_streams_format_dict)
'1234567890'
]
<ast.Try object at 0x7da1b0e9c970> | keyword[def] identifier[get_klout_id] ( identifier[tweet] ):
literal[string]
keyword[try] :
keyword[if] identifier[is_original_format] ( identifier[tweet] ):
identifier[klout_id] = identifier[tweet] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
keyword[else] :
identifier[klout_id] = identifier[tweet] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[return] identifier[klout_id]
keyword[except] identifier[KeyError] :
keyword[return] keyword[None] | def get_klout_id(tweet):
"""
Warning: Klout is deprecated and is being removed from Tweet payloads May 2018.
See https://developer.twitter.com/en/docs/tweets/enrichments/overview/klout
Get the Klout ID of the user (str) (if it exists)
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
str: the user's Klout ID (if it exists), else return None
Example:
>>> from tweet_parser.getter_methods.tweet_user import get_klout_id
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "user":
... {"derived": {"klout":
... {"user_id":"1234567890"}}}
... }
>>> get_klout_id(original_format_dict)
'1234567890'
>>> activity_streams_format_dict = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "gnip":
... {"klout_profile": {
... "klout_user_id": "1234567890"}
... }}
>>> get_klout_id(activity_streams_format_dict)
'1234567890'
"""
try:
if is_original_format(tweet):
klout_id = tweet['user']['derived']['klout']['user_id'] # depends on [control=['if'], data=[]]
else:
klout_id = tweet['gnip']['klout_profile']['klout_user_id']
return klout_id # depends on [control=['try'], data=[]]
except KeyError:
return None # depends on [control=['except'], data=[]] |
def dict_take_gen(dict_, keys, *d):
    r"""
    generate multiple values from a dictionary

    Args:
        dict_ (dict):
        keys (list):

    Varargs:
        d: if specified is default for key errors

    CommandLine:
        python -m utool.util_dict --test-dict_take_gen

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> result = list(dict_take_gen(dict_, keys, None))
        >>> result = ut.repr4(result, nl=False)
        >>> print(result)
        ['a', 'b', 'c', None, None]

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> try:
        >>>     print(list(dict_take_gen(dict_, keys)))
        >>>     result = 'did not get key error'
        >>> except KeyError:
        >>>     result = 'correctly got key error'
        >>> print(result)
        correctly got key error
    """
    if isinstance(keys, six.string_types):
        # convenience hack: allow a comma-separated string of keys so
        # copy-pasting key lists is easier
        keys = keys.split(', ')
    if len(d) == 0:
        # without a default, missing keys raise KeyError
        dictget = dict_.__getitem__
    elif len(d) == 1:
        # with a default, missing keys fall back silently
        dictget = dict_.get
    else:
        raise ValueError('len(d) must be 1 or 0')
    for key in keys:
        if HAVE_NUMPY and isinstance(key, np.ndarray):
            # arrays of keys recurse into nested lists of values
            yield list(dict_take_gen(dict_, key, *d))
        else:
            yield dictget(key, *d)
constant[
generate multiple values from a dictionary
Args:
dict_ (dict):
keys (list):
Varargs:
d: if specified is default for key errors
CommandLine:
python -m utool.util_dict --test-dict_take_gen
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> result = list(dict_take_gen(dict_, keys, None))
>>> result = ut.repr4(result, nl=False)
>>> print(result)
['a', 'b', 'c', None, None]
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> try:
>>> print(list(dict_take_gen(dict_, keys)))
>>> result = 'did not get key error'
>>> except KeyError:
>>> result = 'correctly got key error'
>>> print(result)
correctly got key error
]
if call[name[isinstance], parameter[name[keys], name[six].string_types]] begin[:]
variable[keys] assign[=] call[name[keys].split, parameter[constant[, ]]]
if compare[call[name[len], parameter[name[d]]] equal[==] constant[0]] begin[:]
variable[dictget] assign[=] name[dict_].__getitem__
for taget[name[key]] in starred[name[keys]] begin[:]
if <ast.BoolOp object at 0x7da1b24b4370> begin[:]
<ast.Yield object at 0x7da1b24b5060> | keyword[def] identifier[dict_take_gen] ( identifier[dict_] , identifier[keys] ,* identifier[d] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[keys] , identifier[six] . identifier[string_types] ):
identifier[keys] = identifier[keys] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[d] )== literal[int] :
identifier[dictget] = identifier[dict_] . identifier[__getitem__]
keyword[elif] identifier[len] ( identifier[d] )== literal[int] :
identifier[dictget] = identifier[dict_] . identifier[get]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[key] keyword[in] identifier[keys] :
keyword[if] identifier[HAVE_NUMPY] keyword[and] identifier[isinstance] ( identifier[key] , identifier[np] . identifier[ndarray] ):
keyword[yield] identifier[list] ( identifier[dict_take_gen] ( identifier[dict_] , identifier[key] ,* identifier[d] ))
keyword[else] :
keyword[yield] identifier[dictget] ( identifier[key] ,* identifier[d] ) | def dict_take_gen(dict_, keys, *d):
"""
generate multiple values from a dictionary
Args:
dict_ (dict):
keys (list):
Varargs:
d: if specified is default for key errors
CommandLine:
python -m utool.util_dict --test-dict_take_gen
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> result = list(dict_take_gen(dict_, keys, None))
>>> result = ut.repr4(result, nl=False)
>>> print(result)
['a', 'b', 'c', None, None]
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> try:
>>> print(list(dict_take_gen(dict_, keys)))
>>> result = 'did not get key error'
>>> except KeyError:
>>> result = 'correctly got key error'
>>> print(result)
correctly got key error
"""
if isinstance(keys, six.string_types):
# hack for string keys that makes copy-past easier
keys = keys.split(', ') # depends on [control=['if'], data=[]]
if len(d) == 0:
# no default given throws key error
dictget = dict_.__getitem__ # depends on [control=['if'], data=[]]
elif len(d) == 1:
# default given does not throw key erro
dictget = dict_.get # depends on [control=['if'], data=[]]
else:
raise ValueError('len(d) must be 1 or 0')
for key in keys:
if HAVE_NUMPY and isinstance(key, np.ndarray):
# recursive call
yield list(dict_take_gen(dict_, key, *d)) # depends on [control=['if'], data=[]]
else:
yield dictget(key, *d) # depends on [control=['for'], data=['key']] |
def escape_for_cmd_exe(arg):
    '''
    Escape an argument string to be suitable to be passed to
    cmd.exe on Windows

    This method takes an argument that is expected to already be properly
    escaped for the receiving program to be properly parsed. This argument
    will be further escaped to pass the interpolation performed by cmd.exe
    unchanged.

    Any meta-characters will be escaped, removing the ability to e.g. use
    redirects or variables.

    Args:
        arg (str): a single command line argument to escape for cmd.exe

    Returns:
        str: an escaped string suitable to be passed as a program argument to cmd.exe
    '''
    # cmd.exe metacharacters; each must be prefixed with a caret (^) so
    # cmd.exe forwards it literally instead of interpreting it.
    meta_chars = '()%!^"<>&|'
    # str.translate performs the per-character substitution in a single
    # C-level pass, replacing the previous per-call regex compilation
    # and escape-map construction with the same observable behavior.
    return arg.translate({ord(char): '^' + char for char in meta_chars})
constant[
Escape an argument string to be suitable to be passed to
cmd.exe on Windows
This method takes an argument that is expected to already be properly
escaped for the receiving program to be properly parsed. This argument
will be further escaped to pass the interpolation performed by cmd.exe
unchanged.
Any meta-characters will be escaped, removing the ability to e.g. use
redirects or variables.
Args:
arg (str): a single command line argument to escape for cmd.exe
Returns:
str: an escaped string suitable to be passed as a program argument to cmd.exe
]
variable[meta_chars] assign[=] constant[()%!^"<>&|]
variable[meta_re] assign[=] call[name[re].compile, parameter[binary_operation[binary_operation[constant[(] + call[constant[|].join, parameter[<ast.GeneratorExp object at 0x7da1b215ea70>]]] + constant[)]]]]
variable[meta_map] assign[=] <ast.DictComp object at 0x7da1b215c430>
def function[escape_meta_chars, parameter[m]]:
variable[char] assign[=] call[name[m].group, parameter[constant[1]]]
return[call[name[meta_map]][name[char]]]
return[call[name[meta_re].sub, parameter[name[escape_meta_chars], name[arg]]]] | keyword[def] identifier[escape_for_cmd_exe] ( identifier[arg] ):
literal[string]
identifier[meta_chars] = literal[string]
identifier[meta_re] = identifier[re] . identifier[compile] ( literal[string] + literal[string] . identifier[join] ( identifier[re] . identifier[escape] ( identifier[char] ) keyword[for] identifier[char] keyword[in] identifier[list] ( identifier[meta_chars] ))+ literal[string] )
identifier[meta_map] ={ identifier[char] : literal[string] . identifier[format] ( identifier[char] ) keyword[for] identifier[char] keyword[in] identifier[meta_chars] }
keyword[def] identifier[escape_meta_chars] ( identifier[m] ):
identifier[char] = identifier[m] . identifier[group] ( literal[int] )
keyword[return] identifier[meta_map] [ identifier[char] ]
keyword[return] identifier[meta_re] . identifier[sub] ( identifier[escape_meta_chars] , identifier[arg] ) | def escape_for_cmd_exe(arg):
"""
Escape an argument string to be suitable to be passed to
cmd.exe on Windows
This method takes an argument that is expected to already be properly
escaped for the receiving program to be properly parsed. This argument
will be further escaped to pass the interpolation performed by cmd.exe
unchanged.
Any meta-characters will be escaped, removing the ability to e.g. use
redirects or variables.
Args:
arg (str): a single command line argument to escape for cmd.exe
Returns:
str: an escaped string suitable to be passed as a program argument to cmd.exe
"""
meta_chars = '()%!^"<>&|'
meta_re = re.compile('(' + '|'.join((re.escape(char) for char in list(meta_chars))) + ')')
meta_map = {char: '^{0}'.format(char) for char in meta_chars}
def escape_meta_chars(m):
char = m.group(1)
return meta_map[char]
return meta_re.sub(escape_meta_chars, arg) |
def copy(self, keys = None):
    """
    Return a copy of the segmentlistdict object.  The return
    value is a new object with a new offsets attribute, with
    references to the original keys, and shallow copies of the
    segment lists.  Modifications made to the offset dictionary
    or segmentlists in the object returned by this method will
    not affect the original, but without using much memory
    until such modifications are made.  If the optional keys
    argument is not None, then should be an iterable of keys
    and only those segmentlists will be copied (KeyError is
    raised if any of those keys are not in the
    segmentlistdict).

    More details.  There are two "built-in" ways to create a
    copy of a segmentlist object.  The first is to initialize a
    new object from an existing one with

    >>> old = segmentlistdict()
    >>> new = segmentlistdict(old)

    This creates a copy of the dictionary, but not of its
    contents.  That is, this creates new with references to the
    segmentlists in old, therefore changes to the segmentlists
    in either new or old are reflected in both.  The second
    method is

    >>> new = old.copy()

    This creates a copy of the dictionary and of the
    segmentlists, but with references to the segment objects in
    the original segmentlists.  Since segments are immutable,
    this effectively creates a completely independent working
    copy but without the memory cost of a full duplication of
    the data.
    """
    duplicate = self.__class__()
    for key in (self if keys is None else keys):
        # Shallow-copy the segmentlist so list mutations stay local to
        # the copy while the immutable segments themselves are shared.
        duplicate[key] = _shallowcopy(self[key])
        # Write the offset through dict.__setitem__ to bypass any
        # offset-application logic in the offsets mapping's own setter.
        dict.__setitem__(duplicate.offsets, key, self.offsets[key])
    return duplicate
constant[
Return a copy of the segmentlistdict object. The return
value is a new object with a new offsets attribute, with
references to the original keys, and shallow copies of the
segment lists. Modifications made to the offset dictionary
or segmentlists in the object returned by this method will
not affect the original, but without using much memory
until such modifications are made. If the optional keys
argument is not None, then should be an iterable of keys
and only those segmentlists will be copied (KeyError is
raised if any of those keys are not in the
segmentlistdict).
More details. There are two "built-in" ways to create a
copy of a segmentlist object. The first is to initialize a
new object from an existing one with
>>> old = segmentlistdict()
>>> new = segmentlistdict(old)
This creates a copy of the dictionary, but not of its
contents. That is, this creates new with references to the
segmentlists in old, therefore changes to the segmentlists
in either new or old are reflected in both. The second
method is
>>> new = old.copy()
This creates a copy of the dictionary and of the
segmentlists, but with references to the segment objects in
the original segmentlists. Since segments are immutable,
this effectively creates a completely independent working
copy but without the memory cost of a full duplication of
the data.
]
if compare[name[keys] is constant[None]] begin[:]
variable[keys] assign[=] name[self]
variable[new] assign[=] call[name[self].__class__, parameter[]]
for taget[name[key]] in starred[name[keys]] begin[:]
call[name[new]][name[key]] assign[=] call[name[_shallowcopy], parameter[call[name[self]][name[key]]]]
call[name[dict].__setitem__, parameter[name[new].offsets, name[key], call[name[self].offsets][name[key]]]]
return[name[new]] | keyword[def] identifier[copy] ( identifier[self] , identifier[keys] = keyword[None] ):
literal[string]
keyword[if] identifier[keys] keyword[is] keyword[None] :
identifier[keys] = identifier[self]
identifier[new] = identifier[self] . identifier[__class__] ()
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[new] [ identifier[key] ]= identifier[_shallowcopy] ( identifier[self] [ identifier[key] ])
identifier[dict] . identifier[__setitem__] ( identifier[new] . identifier[offsets] , identifier[key] , identifier[self] . identifier[offsets] [ identifier[key] ])
keyword[return] identifier[new] | def copy(self, keys=None):
"""
Return a copy of the segmentlistdict object. The return
value is a new object with a new offsets attribute, with
references to the original keys, and shallow copies of the
segment lists. Modifications made to the offset dictionary
or segmentlists in the object returned by this method will
not affect the original, but without using much memory
until such modifications are made. If the optional keys
argument is not None, then should be an iterable of keys
and only those segmentlists will be copied (KeyError is
raised if any of those keys are not in the
segmentlistdict).
More details. There are two "built-in" ways to create a
copy of a segmentlist object. The first is to initialize a
new object from an existing one with
>>> old = segmentlistdict()
>>> new = segmentlistdict(old)
This creates a copy of the dictionary, but not of its
contents. That is, this creates new with references to the
segmentlists in old, therefore changes to the segmentlists
in either new or old are reflected in both. The second
method is
>>> new = old.copy()
This creates a copy of the dictionary and of the
segmentlists, but with references to the segment objects in
the original segmentlists. Since segments are immutable,
this effectively creates a completely independent working
copy but without the memory cost of a full duplication of
the data.
"""
if keys is None:
keys = self # depends on [control=['if'], data=['keys']]
new = self.__class__()
for key in keys:
new[key] = _shallowcopy(self[key])
dict.__setitem__(new.offsets, key, self.offsets[key]) # depends on [control=['for'], data=['key']]
return new |
def put(
    self,
    id,
    name,
    description,
    private,
    runs_executable_tasks,
    runs_docker_container_tasks,
    runs_singularity_container_tasks,
    active,
    whitelists,
):
    """Updates a task queue on the saltant server.

    Args:
        id (int): The ID of the task queue.
        name (str): The name of the task queue.
        description (str): The description of the task queue.
        private (bool): A Booleon signalling whether the queue can
            only be used by its associated user.
        runs_executable_tasks (bool): A Boolean specifying whether
            the queue runs executable tasks.
        runs_docker_container_tasks (bool): A Boolean specifying
            whether the queue runs container tasks that run in
            Docker containers.
        runs_singularity_container_tasks (bool): A Boolean
            specifying whether the queue runs container tasks that
            run in Singularity containers.
        active (bool): A Booleon signalling whether the queue is
            active.
        whitelists (list): A list of task whitelist IDs.

    Returns:
        :class:`saltant.models.task_queue.TaskQueue`:
            A task queue model instance representing the task queue
            just updated.
    """
    # Build the endpoint URL and the PUT payload for this queue.
    request_url = self._client.base_api_url + self.detail_url.format(id=id)
    payload = dict(
        name=name,
        description=description,
        private=private,
        runs_executable_tasks=runs_executable_tasks,
        runs_docker_container_tasks=runs_docker_container_tasks,
        runs_singularity_container_tasks=runs_singularity_container_tasks,
        active=active,
        whitelists=whitelists,
    )

    response = self._client.session.put(request_url, data=payload)

    # Raise if the server did not acknowledge the update with 200 OK.
    self.validate_request_success(
        response_text=response.text,
        request_url=request_url,
        status_code=response.status_code,
        expected_status_code=HTTP_200_OK,
    )

    # Wrap the response payload in a task queue model instance.
    return self.response_data_to_model_instance(response.json())
constant[Updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
private (bool): A Booleon signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
active (bool): A Booleon signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
]
variable[request_url] assign[=] binary_operation[name[self]._client.base_api_url + call[name[self].detail_url.format, parameter[]]]
variable[data_to_put] assign[=] dictionary[[<ast.Constant object at 0x7da1b11d3160>, <ast.Constant object at 0x7da1b11d3a00>, <ast.Constant object at 0x7da1b11d3910>, <ast.Constant object at 0x7da1b11d3370>, <ast.Constant object at 0x7da1b11d3a90>, <ast.Constant object at 0x7da1b11d2bc0>, <ast.Constant object at 0x7da1b11d3970>, <ast.Constant object at 0x7da1b11d2ce0>], [<ast.Name object at 0x7da1b11d3760>, <ast.Name object at 0x7da1b11d3eb0>, <ast.Name object at 0x7da1b11d3460>, <ast.Name object at 0x7da1b11d3a30>, <ast.Name object at 0x7da1b11d35e0>, <ast.Name object at 0x7da1b11d2b90>, <ast.Name object at 0x7da1b11d3220>, <ast.Name object at 0x7da1b11d3670>]]
variable[response] assign[=] call[name[self]._client.session.put, parameter[name[request_url]]]
call[name[self].validate_request_success, parameter[]]
return[call[name[self].response_data_to_model_instance, parameter[call[name[response].json, parameter[]]]]] | keyword[def] identifier[put] (
identifier[self] ,
identifier[id] ,
identifier[name] ,
identifier[description] ,
identifier[private] ,
identifier[runs_executable_tasks] ,
identifier[runs_docker_container_tasks] ,
identifier[runs_singularity_container_tasks] ,
identifier[active] ,
identifier[whitelists] ,
):
literal[string]
identifier[request_url] = identifier[self] . identifier[_client] . identifier[base_api_url] + identifier[self] . identifier[detail_url] . identifier[format] ( identifier[id] = identifier[id] )
identifier[data_to_put] ={
literal[string] : identifier[name] ,
literal[string] : identifier[description] ,
literal[string] : identifier[private] ,
literal[string] : identifier[runs_executable_tasks] ,
literal[string] : identifier[runs_docker_container_tasks] ,
literal[string] : identifier[runs_singularity_container_tasks] ,
literal[string] : identifier[active] ,
literal[string] : identifier[whitelists] ,
}
identifier[response] = identifier[self] . identifier[_client] . identifier[session] . identifier[put] ( identifier[request_url] , identifier[data] = identifier[data_to_put] )
identifier[self] . identifier[validate_request_success] (
identifier[response_text] = identifier[response] . identifier[text] ,
identifier[request_url] = identifier[request_url] ,
identifier[status_code] = identifier[response] . identifier[status_code] ,
identifier[expected_status_code] = identifier[HTTP_200_OK] ,
)
keyword[return] identifier[self] . identifier[response_data_to_model_instance] ( identifier[response] . identifier[json] ()) | def put(self, id, name, description, private, runs_executable_tasks, runs_docker_container_tasks, runs_singularity_container_tasks, active, whitelists):
"""Updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
private (bool): A Booleon signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
active (bool): A Booleon signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {'name': name, 'description': description, 'private': private, 'runs_executable_tasks': runs_executable_tasks, 'runs_docker_container_tasks': runs_docker_container_tasks, 'runs_singularity_container_tasks': runs_singularity_container_tasks, 'active': active, 'whitelists': whitelists}
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json()) |
def generate_highlight_subparser(subparsers):
    """Add the 'highlight' sub-command parser to `subparsers`.

    The sub-command marks up a witness' text with the matches found
    for it in a set of results (or in explicitly supplied n-grams).
    """
    parser = subparsers.add_parser(
        'highlight',
        description=constants.HIGHLIGHT_DESCRIPTION,
        epilog=constants.HIGHLIGHT_EPILOG,
        formatter_class=ParagraphFormatter,
        help=constants.HIGHLIGHT_HELP)
    # Dispatch to highlight_text when this sub-command is selected.
    parser.set_defaults(func=highlight_text)
    utils.add_common_arguments(parser)
    parser.add_argument(
        '-m', '--minus-ngrams', metavar='NGRAMS',
        help=constants.HIGHLIGHT_MINUS_NGRAMS_HELP)
    # Exactly one source of matches must be supplied: raw n-grams or a
    # results file.
    source_group = parser.add_mutually_exclusive_group(required=True)
    source_group.add_argument(
        '-n', '--ngrams', action='append', metavar='NGRAMS',
        help=constants.HIGHLIGHT_NGRAMS_HELP)
    source_group.add_argument(
        '-r', '--results', metavar='RESULTS',
        help=constants.HIGHLIGHT_RESULTS_HELP)
    parser.add_argument(
        '-l', '--label', action='append', metavar='LABEL',
        help=constants.HIGHLIGHT_LABEL_HELP)
    utils.add_corpus_arguments(parser)
    parser.add_argument(
        'base_name', metavar='BASE_NAME',
        help=constants.HIGHLIGHT_BASE_NAME_HELP)
    parser.add_argument(
        'output', metavar='OUTPUT',
        help=constants.REPORT_OUTPUT_HELP)
constant[Adds a sub-command parser to `subparsers` to highlight a witness'
text with its matches in a result.]
variable[parser] assign[=] call[name[subparsers].add_parser, parameter[constant[highlight]]]
call[name[parser].set_defaults, parameter[]]
call[name[utils].add_common_arguments, parameter[name[parser]]]
call[name[parser].add_argument, parameter[constant[-m], constant[--minus-ngrams]]]
variable[group] assign[=] call[name[parser].add_mutually_exclusive_group, parameter[]]
call[name[group].add_argument, parameter[constant[-n], constant[--ngrams]]]
call[name[group].add_argument, parameter[constant[-r], constant[--results]]]
call[name[parser].add_argument, parameter[constant[-l], constant[--label]]]
call[name[utils].add_corpus_arguments, parameter[name[parser]]]
call[name[parser].add_argument, parameter[constant[base_name]]]
call[name[parser].add_argument, parameter[constant[output]]] | keyword[def] identifier[generate_highlight_subparser] ( identifier[subparsers] ):
literal[string]
identifier[parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[description] = identifier[constants] . identifier[HIGHLIGHT_DESCRIPTION] ,
identifier[epilog] = identifier[constants] . identifier[HIGHLIGHT_EPILOG] , identifier[formatter_class] = identifier[ParagraphFormatter] ,
identifier[help] = identifier[constants] . identifier[HIGHLIGHT_HELP] )
identifier[parser] . identifier[set_defaults] ( identifier[func] = identifier[highlight_text] )
identifier[utils] . identifier[add_common_arguments] ( identifier[parser] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = identifier[constants] . identifier[HIGHLIGHT_MINUS_NGRAMS_HELP] )
identifier[group] = identifier[parser] . identifier[add_mutually_exclusive_group] ( identifier[required] = keyword[True] )
identifier[group] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = identifier[constants] . identifier[HIGHLIGHT_NGRAMS_HELP] )
identifier[group] . identifier[add_argument] ( literal[string] , literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = identifier[constants] . identifier[HIGHLIGHT_RESULTS_HELP] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = identifier[constants] . identifier[HIGHLIGHT_LABEL_HELP] )
identifier[utils] . identifier[add_corpus_arguments] ( identifier[parser] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = identifier[constants] . identifier[HIGHLIGHT_BASE_NAME_HELP] ,
identifier[metavar] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = identifier[constants] . identifier[REPORT_OUTPUT_HELP] ) | def generate_highlight_subparser(subparsers):
"""Adds a sub-command parser to `subparsers` to highlight a witness'
text with its matches in a result."""
parser = subparsers.add_parser('highlight', description=constants.HIGHLIGHT_DESCRIPTION, epilog=constants.HIGHLIGHT_EPILOG, formatter_class=ParagraphFormatter, help=constants.HIGHLIGHT_HELP)
parser.set_defaults(func=highlight_text)
utils.add_common_arguments(parser)
parser.add_argument('-m', '--minus-ngrams', metavar='NGRAMS', help=constants.HIGHLIGHT_MINUS_NGRAMS_HELP)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-n', '--ngrams', action='append', metavar='NGRAMS', help=constants.HIGHLIGHT_NGRAMS_HELP)
group.add_argument('-r', '--results', metavar='RESULTS', help=constants.HIGHLIGHT_RESULTS_HELP)
parser.add_argument('-l', '--label', action='append', metavar='LABEL', help=constants.HIGHLIGHT_LABEL_HELP)
utils.add_corpus_arguments(parser)
parser.add_argument('base_name', help=constants.HIGHLIGHT_BASE_NAME_HELP, metavar='BASE_NAME')
parser.add_argument('output', metavar='OUTPUT', help=constants.REPORT_OUTPUT_HELP) |
def fromHexString(cls, value, internalFormat=False, prepend=None):
    """Create a |ASN.1| object initialized from the hex string.

    Parameters
    ----------
    value: :class:`str`
        Text string like 'DEADBEEF'
    """
    try:
        # Each hex digit contributes exactly four bits of payload.
        bits = SizedInteger(value, 16).setBitLength(len(value) * 4)
    except ValueError as exc:
        raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, exc))
    if prepend is not None:
        # Shift the prefix left past the payload and OR the two halves
        # together; the combined bit length is the sum of both parts.
        combined = (SizedInteger(prepend) << len(bits)) | bits
        bits = SizedInteger(combined).setBitLength(len(prepend) + len(bits))
    return bits if internalFormat else cls(bits)
constant[Create a |ASN.1| object initialized from the hex string.
Parameters
----------
value: :class:`str`
Text string like 'DEADBEEF'
]
<ast.Try object at 0x7da20c6aa740>
if compare[name[prepend] is_not constant[None]] begin[:]
variable[value] assign[=] call[call[name[SizedInteger], parameter[binary_operation[binary_operation[call[name[SizedInteger], parameter[name[prepend]]] <ast.LShift object at 0x7da2590d69e0> call[name[len], parameter[name[value]]]] <ast.BitOr object at 0x7da2590d6aa0> name[value]]]].setBitLength, parameter[binary_operation[call[name[len], parameter[name[prepend]]] + call[name[len], parameter[name[value]]]]]]
if <ast.UnaryOp object at 0x7da20c6ab670> begin[:]
variable[value] assign[=] call[name[cls], parameter[name[value]]]
return[name[value]] | keyword[def] identifier[fromHexString] ( identifier[cls] , identifier[value] , identifier[internalFormat] = keyword[False] , identifier[prepend] = keyword[None] ):
literal[string]
keyword[try] :
identifier[value] = identifier[SizedInteger] ( identifier[value] , literal[int] ). identifier[setBitLength] ( identifier[len] ( identifier[value] )* literal[int] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[error] . identifier[PyAsn1Error] ( literal[string] %( identifier[cls] . identifier[__name__] , identifier[sys] . identifier[exc_info] ()[ literal[int] ]))
keyword[if] identifier[prepend] keyword[is] keyword[not] keyword[None] :
identifier[value] = identifier[SizedInteger] (
( identifier[SizedInteger] ( identifier[prepend] )<< identifier[len] ( identifier[value] ))| identifier[value]
). identifier[setBitLength] ( identifier[len] ( identifier[prepend] )+ identifier[len] ( identifier[value] ))
keyword[if] keyword[not] identifier[internalFormat] :
identifier[value] = identifier[cls] ( identifier[value] )
keyword[return] identifier[value] | def fromHexString(cls, value, internalFormat=False, prepend=None):
"""Create a |ASN.1| object initialized from the hex string.
Parameters
----------
value: :class:`str`
Text string like 'DEADBEEF'
"""
try:
value = SizedInteger(value, 16).setBitLength(len(value) * 4) # depends on [control=['try'], data=[]]
except ValueError:
raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, sys.exc_info()[1])) # depends on [control=['except'], data=[]]
if prepend is not None:
value = SizedInteger(SizedInteger(prepend) << len(value) | value).setBitLength(len(prepend) + len(value)) # depends on [control=['if'], data=['prepend']]
if not internalFormat:
value = cls(value) # depends on [control=['if'], data=[]]
return value |
def query_nodes(self,
                bel: Optional[str] = None,
                type: Optional[str] = None,
                namespace: Optional[str] = None,
                name: Optional[str] = None,
                ) -> List[Node]:
    """Query nodes in the database.

    :param bel: BEL term that describes the biological entity. e.g. ``p(HGNC:APP)``
    :param type: Type of the biological entity. e.g. Protein
    :param namespace: Namespace keyword that is used in BEL. e.g. HGNC
    :param name: Name of the biological entity. e.g. APP
    """
    query = self.session.query(Node)
    if bel:
        query = query.filter(Node.bel.contains(bel))
    if type:
        query = query.filter(Node.type == type)
    if namespace or name:
        # Both of these filters require the entry table to be joined in
        # first.
        query = query.join(NamespaceEntry)
        if namespace:
            query = query.join(Namespace).filter(Namespace.keyword.contains(namespace))
        if name:
            query = query.filter(NamespaceEntry.name.contains(name))
    return query
constant[Query nodes in the database.
:param bel: BEL term that describes the biological entity. e.g. ``p(HGNC:APP)``
:param type: Type of the biological entity. e.g. Protein
:param namespace: Namespace keyword that is used in BEL. e.g. HGNC
:param name: Name of the biological entity. e.g. APP
]
variable[q] assign[=] call[name[self].session.query, parameter[name[Node]]]
if name[bel] begin[:]
variable[q] assign[=] call[name[q].filter, parameter[call[name[Node].bel.contains, parameter[name[bel]]]]]
if name[type] begin[:]
variable[q] assign[=] call[name[q].filter, parameter[compare[name[Node].type equal[==] name[type]]]]
if <ast.BoolOp object at 0x7da20e960550> begin[:]
variable[q] assign[=] call[name[q].join, parameter[name[NamespaceEntry]]]
if name[namespace] begin[:]
variable[q] assign[=] call[call[name[q].join, parameter[name[Namespace]]].filter, parameter[call[name[Namespace].keyword.contains, parameter[name[namespace]]]]]
if name[name] begin[:]
variable[q] assign[=] call[name[q].filter, parameter[call[name[NamespaceEntry].name.contains, parameter[name[name]]]]]
return[name[q]] | keyword[def] identifier[query_nodes] ( identifier[self] ,
identifier[bel] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[type] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[namespace] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[name] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
)-> identifier[List] [ identifier[Node] ]:
literal[string]
identifier[q] = identifier[self] . identifier[session] . identifier[query] ( identifier[Node] )
keyword[if] identifier[bel] :
identifier[q] = identifier[q] . identifier[filter] ( identifier[Node] . identifier[bel] . identifier[contains] ( identifier[bel] ))
keyword[if] identifier[type] :
identifier[q] = identifier[q] . identifier[filter] ( identifier[Node] . identifier[type] == identifier[type] )
keyword[if] identifier[namespace] keyword[or] identifier[name] :
identifier[q] = identifier[q] . identifier[join] ( identifier[NamespaceEntry] )
keyword[if] identifier[namespace] :
identifier[q] = identifier[q] . identifier[join] ( identifier[Namespace] ). identifier[filter] ( identifier[Namespace] . identifier[keyword] . identifier[contains] ( identifier[namespace] ))
keyword[if] identifier[name] :
identifier[q] = identifier[q] . identifier[filter] ( identifier[NamespaceEntry] . identifier[name] . identifier[contains] ( identifier[name] ))
keyword[return] identifier[q] | def query_nodes(self, bel: Optional[str]=None, type: Optional[str]=None, namespace: Optional[str]=None, name: Optional[str]=None) -> List[Node]:
"""Query nodes in the database.
:param bel: BEL term that describes the biological entity. e.g. ``p(HGNC:APP)``
:param type: Type of the biological entity. e.g. Protein
:param namespace: Namespace keyword that is used in BEL. e.g. HGNC
:param name: Name of the biological entity. e.g. APP
"""
q = self.session.query(Node)
if bel:
q = q.filter(Node.bel.contains(bel)) # depends on [control=['if'], data=[]]
if type:
q = q.filter(Node.type == type) # depends on [control=['if'], data=[]]
if namespace or name:
q = q.join(NamespaceEntry)
if namespace:
q = q.join(Namespace).filter(Namespace.keyword.contains(namespace)) # depends on [control=['if'], data=[]]
if name:
q = q.filter(NamespaceEntry.name.contains(name)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return q |
def flatten_dist_egginfo_json(
        source_dists, filename=DEFAULT_JSON, dep_keys=DEP_KEYS,
        working_set=None):
    """
    Flatten a distribution's egginfo json, with the depended keys to be
    flattened.

    The metadata identified by ``filename`` is read from each
    distribution in ``source_dists`` in order (highest-level parent
    first), and the mappings under each key in ``dep_keys`` are merged
    so that later (child) distributions override their parents.  The
    final distribution's metadata, with the merged dependency keys
    layered in, is returned.  Entries whose merged value is None are
    filtered out of the result (they mark removals).

    Flat is better than nested.

    :param source_dists: iterable of distributions, ordered from the
        highest-level parent down to the target distribution.
    :param filename: name of the egg-info JSON file to read from each
        distribution.
    :param dep_keys: keys whose dict values are to be flattened.
    :param working_set: retained for API compatibility; this
        implementation does not consult it.  (A previous revision
        resolved it against the default pkg_resources working set but
        never used the result, so that dead assignment was removed.)
    :return: the flattened metadata dict, or just the merged dependency
        mappings if the final distribution has no egg-info defined.
    """
    obj = {}
    # TODO figure out the best way to explicitly report back to caller
    # how the keys came to be (from which dist). Perhaps create a
    # detailed function based on this, retain this one to return the
    # distilled results.
    depends = {dep: {} for dep in dep_keys}
    # Go from the earliest package down to the latest one, as we will
    # flatten children's d(evD)ependencies on top of parent's.
    for dist in source_dists:
        obj = read_dist_egginfo_json(dist, filename)
        if not obj:
            continue
        logger.debug("merging '%s' for required '%s'", filename, dist)
        for dep in dep_keys:
            depends[dep].update(obj.get(dep, {}))
    if obj is None:
        # final (top level) object does not have egg-info defined
        return depends
    for dep in dep_keys:
        # filtering out all the nulls.
        obj[dep] = {k: v for k, v in depends[dep].items() if v is not None}
    return obj
constant[
Flatten a distribution's egginfo json, with the depended keys to be
flattened.
Originally this was done for this:
Resolve a distribution's (dev)dependencies through the working set
and generate a flattened version package.json, returned as a dict,
from the resolved distributions.
Default working set is the one from pkg_resources.
The generated package.json dict is done by grabbing all package.json
metadata from all parent Python packages, starting from the highest
level and down to the lowest. The current distribution's
dependencies will be layered on top along with its other package
information. This has the effect of child packages overriding
node/npm dependencies which is by the design of this function. If
nested dependencies are desired, just rely on npm only for all
dependency management.
Flat is better than nested.
]
variable[working_set] assign[=] <ast.BoolOp object at 0x7da1b195fb20>
variable[obj] assign[=] dictionary[[], []]
variable[depends] assign[=] <ast.DictComp object at 0x7da1b195c640>
for taget[name[dist]] in starred[name[source_dists]] begin[:]
variable[obj] assign[=] call[name[read_dist_egginfo_json], parameter[name[dist], name[filename]]]
if <ast.UnaryOp object at 0x7da1b195c580> begin[:]
continue
call[name[logger].debug, parameter[constant[merging '%s' for required '%s'], name[filename], name[dist]]]
for taget[name[dep]] in starred[name[dep_keys]] begin[:]
call[call[name[depends]][name[dep]].update, parameter[call[name[obj].get, parameter[name[dep], dictionary[[], []]]]]]
if compare[name[obj] is constant[None]] begin[:]
return[name[depends]]
for taget[name[dep]] in starred[name[dep_keys]] begin[:]
call[name[obj]][name[dep]] assign[=] <ast.DictComp object at 0x7da1b195fa60>
return[name[obj]] | keyword[def] identifier[flatten_dist_egginfo_json] (
identifier[source_dists] , identifier[filename] = identifier[DEFAULT_JSON] , identifier[dep_keys] = identifier[DEP_KEYS] ,
identifier[working_set] = keyword[None] ):
literal[string]
identifier[working_set] = identifier[working_set] keyword[or] identifier[default_working_set]
identifier[obj] ={}
identifier[depends] ={ identifier[dep] :{} keyword[for] identifier[dep] keyword[in] identifier[dep_keys] }
keyword[for] identifier[dist] keyword[in] identifier[source_dists] :
identifier[obj] = identifier[read_dist_egginfo_json] ( identifier[dist] , identifier[filename] )
keyword[if] keyword[not] identifier[obj] :
keyword[continue]
identifier[logger] . identifier[debug] ( literal[string] , identifier[filename] , identifier[dist] )
keyword[for] identifier[dep] keyword[in] identifier[dep_keys] :
identifier[depends] [ identifier[dep] ]. identifier[update] ( identifier[obj] . identifier[get] ( identifier[dep] ,{}))
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[return] identifier[depends]
keyword[for] identifier[dep] keyword[in] identifier[dep_keys] :
identifier[obj] [ identifier[dep] ]={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[depends] [ identifier[dep] ]. identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] }
keyword[return] identifier[obj] | def flatten_dist_egginfo_json(source_dists, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None):
"""
Flatten a distribution's egginfo json, with the depended keys to be
flattened.
Originally this was done for this:
Resolve a distribution's (dev)dependencies through the working set
and generate a flattened version package.json, returned as a dict,
from the resolved distributions.
Default working set is the one from pkg_resources.
The generated package.json dict is done by grabbing all package.json
metadata from all parent Python packages, starting from the highest
level and down to the lowest. The current distribution's
dependencies will be layered on top along with its other package
information. This has the effect of child packages overriding
node/npm dependencies which is by the design of this function. If
nested dependencies are desired, just rely on npm only for all
dependency management.
Flat is better than nested.
"""
working_set = working_set or default_working_set
obj = {}
# TODO figure out the best way to explicitly report back to caller
# how the keys came to be (from which dist). Perhaps create a
# detailed function based on this, retain this one to return the
# distilled results.
depends = {dep: {} for dep in dep_keys}
# Go from the earliest package down to the latest one, as we will
# flatten children's d(evD)ependencies on top of parent's.
for dist in source_dists:
obj = read_dist_egginfo_json(dist, filename)
if not obj:
continue # depends on [control=['if'], data=[]]
logger.debug("merging '%s' for required '%s'", filename, dist)
for dep in dep_keys:
depends[dep].update(obj.get(dep, {})) # depends on [control=['for'], data=['dep']] # depends on [control=['for'], data=['dist']]
if obj is None:
# top level object does not have egg-info defined
return depends # depends on [control=['if'], data=[]]
for dep in dep_keys:
# filtering out all the nulls.
obj[dep] = {k: v for (k, v) in depends[dep].items() if v is not None} # depends on [control=['for'], data=['dep']]
return obj |
def add_section(self, section):
    """Add a section inside this element.

    The section must be an instance of a ``SubSection`` subclass; you
    can use this to represent a tree of sections.

    :param section: the section to register, keyed by its ``name``.
    :return: the section that was added, allowing call chaining.
    :raises TypeError: if ``section`` is not a ``SubSection`` instance.
    """
    # ``isinstance`` is the idiomatic form of the original
    # ``issubclass(section.__class__, SubSection)`` check.  The error
    # message is built without the backslash line-continuation the
    # original used inside the string literal, which leaked a run of
    # indentation spaces into the runtime error text.
    if not isinstance(section, SubSection):
        raise TypeError(
            "Argument should be a subclass of SubSection, not :"
            + str(section.__class__))
    self.sections[section.name] = section
    return section
constant[You can add section inside a Element, the section must be a
subclass of SubSection. You can use this class to represent a tree.
]
if <ast.UnaryOp object at 0x7da18f09c070> begin[:]
<ast.Raise object at 0x7da18f09ceb0>
call[name[self].sections][name[section].name] assign[=] name[section]
return[name[section]] | keyword[def] identifier[add_section] ( identifier[self] , identifier[section] ):
literal[string]
keyword[if] keyword[not] identifier[issubclass] ( identifier[section] . identifier[__class__] , identifier[SubSection] ):
keyword[raise] identifier[TypeError] ( literal[string] + identifier[str] ( identifier[section] . identifier[__class__] ))
identifier[self] . identifier[sections] [ identifier[section] . identifier[name] ]= identifier[section]
keyword[return] identifier[section] | def add_section(self, section):
"""You can add section inside a Element, the section must be a
subclass of SubSection. You can use this class to represent a tree.
"""
if not issubclass(section.__class__, SubSection):
raise TypeError('Argument should be a subclass of SubSection, not :' + str(section.__class__)) # depends on [control=['if'], data=[]]
self.sections[section.name] = section
return section |
def score_and_rank_for_in(self, leaderboard_name, member):
    '''
    Retrieve the score and rank for a member in the named leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param member [String] Member name.
    @return the score and rank for a member in the named leaderboard as a Hash.
    '''
    # Fetch the score and an order-appropriate provisional rank in a
    # single round trip.
    pipeline = self.redis_connection.pipeline()
    pipeline.zscore(leaderboard_name, member)
    if self.order == self.ASC:
        pipeline.zrank(leaderboard_name, member)
    else:
        pipeline.zrevrank(leaderboard_name, member)
    responses = pipeline.execute()

    if responses[0] is not None:
        responses[0] = float(responses[0])
        # Recompute the rank with ZCOUNT (members strictly better than
        # this score, plus one) so that tied scores share a 1-based
        # rank.  A bare ``except:`` here would also swallow
        # KeyboardInterrupt / SystemExit; only genuine errors should
        # null out the rank.
        if self.order == self.ASC:
            try:
                responses[1] = self.redis_connection.zcount(
                    leaderboard_name, '-inf', "(%s" % str(float(responses[0]))) + 1
            except Exception:
                responses[1] = None
        else:
            try:
                responses[1] = self.redis_connection.zcount(
                    leaderboard_name, "(%s" % str(float(responses[0])), '+inf') + 1
            except Exception:
                responses[1] = None

    return {
        self.MEMBER_KEY: member,
        self.SCORE_KEY: responses[0],
        self.RANK_KEY: responses[1]
    }
constant[
Retrieve the score and rank for a member in the named leaderboard.
@param leaderboard_name [String]Name of the leaderboard.
@param member [String] Member name.
@return the score and rank for a member in the named leaderboard as a Hash.
]
variable[pipeline] assign[=] call[name[self].redis_connection.pipeline, parameter[]]
call[name[pipeline].zscore, parameter[name[leaderboard_name], name[member]]]
if compare[name[self].order equal[==] name[self].ASC] begin[:]
call[name[pipeline].zrank, parameter[name[leaderboard_name], name[member]]]
variable[responses] assign[=] call[name[pipeline].execute, parameter[]]
if compare[call[name[responses]][constant[0]] is_not constant[None]] begin[:]
call[name[responses]][constant[0]] assign[=] call[name[float], parameter[call[name[responses]][constant[0]]]]
if compare[name[self].order equal[==] name[self].ASC] begin[:]
<ast.Try object at 0x7da1b04b5c00>
return[dictionary[[<ast.Attribute object at 0x7da18eb57d60>, <ast.Attribute object at 0x7da18eb550c0>, <ast.Attribute object at 0x7da18eb57820>], [<ast.Name object at 0x7da18eb55750>, <ast.Subscript object at 0x7da18eb54c40>, <ast.Subscript object at 0x7da18eb54be0>]]] | keyword[def] identifier[score_and_rank_for_in] ( identifier[self] , identifier[leaderboard_name] , identifier[member] ):
literal[string]
identifier[pipeline] = identifier[self] . identifier[redis_connection] . identifier[pipeline] ()
identifier[pipeline] . identifier[zscore] ( identifier[leaderboard_name] , identifier[member] )
keyword[if] identifier[self] . identifier[order] == identifier[self] . identifier[ASC] :
identifier[pipeline] . identifier[zrank] ( identifier[leaderboard_name] , identifier[member] )
keyword[else] :
identifier[pipeline] . identifier[zrevrank] ( identifier[leaderboard_name] , identifier[member] )
identifier[responses] = identifier[pipeline] . identifier[execute] ()
keyword[if] identifier[responses] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
identifier[responses] [ literal[int] ]= identifier[float] ( identifier[responses] [ literal[int] ])
keyword[if] identifier[self] . identifier[order] == identifier[self] . identifier[ASC] :
keyword[try] :
identifier[responses] [ literal[int] ]= identifier[self] . identifier[redis_connection] . identifier[zcount] (
identifier[leaderboard_name] , literal[string] , literal[string] % identifier[str] ( identifier[float] ( identifier[responses] [ literal[int] ])))+ literal[int]
keyword[except] :
identifier[responses] [ literal[int] ]= keyword[None]
keyword[else] :
keyword[try] :
identifier[responses] [ literal[int] ]= identifier[self] . identifier[redis_connection] . identifier[zcount] (
identifier[leaderboard_name] , literal[string] % identifier[str] ( identifier[float] ( identifier[responses] [ literal[int] ])), literal[string] )+ literal[int]
keyword[except] :
identifier[responses] [ literal[int] ]= keyword[None]
keyword[return] {
identifier[self] . identifier[MEMBER_KEY] : identifier[member] ,
identifier[self] . identifier[SCORE_KEY] : identifier[responses] [ literal[int] ],
identifier[self] . identifier[RANK_KEY] : identifier[responses] [ literal[int] ]
} | def score_and_rank_for_in(self, leaderboard_name, member):
"""
Retrieve the score and rank for a member in the named leaderboard.
@param leaderboard_name [String]Name of the leaderboard.
@param member [String] Member name.
@return the score and rank for a member in the named leaderboard as a Hash.
"""
pipeline = self.redis_connection.pipeline()
pipeline.zscore(leaderboard_name, member)
if self.order == self.ASC:
pipeline.zrank(leaderboard_name, member) # depends on [control=['if'], data=[]]
else:
pipeline.zrevrank(leaderboard_name, member)
responses = pipeline.execute()
if responses[0] is not None:
responses[0] = float(responses[0]) # depends on [control=['if'], data=[]]
if self.order == self.ASC:
try:
responses[1] = self.redis_connection.zcount(leaderboard_name, '-inf', '(%s' % str(float(responses[0]))) + 1 # depends on [control=['try'], data=[]]
except:
responses[1] = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
responses[1] = self.redis_connection.zcount(leaderboard_name, '(%s' % str(float(responses[0])), '+inf') + 1 # depends on [control=['try'], data=[]]
except:
responses[1] = None # depends on [control=['except'], data=[]]
return {self.MEMBER_KEY: member, self.SCORE_KEY: responses[0], self.RANK_KEY: responses[1]} |
def height_max(self, height_max):
    """Set the maximum height of the widget.

    Parameters
    ----------
    height_max: None | float
        the maximum height of the widget. if None, maximum height
        is unbounded

    Raises
    ------
    ValueError
        If ``height_max`` violates ``0 <= height_min <= height_max``.
    """
    if height_max is None:
        # None means unbounded; no layout update is needed (matches
        # the original early-return behaviour).
        self._height_limits[1] = None
        return
    height_max = float(height_max)
    # Validate explicitly instead of with ``assert``, which is stripped
    # when Python runs with optimizations (-O) enabled.
    if not 0 <= self.height_min <= height_max:
        raise ValueError(
            'height_max must satisfy 0 <= height_min <= height_max, '
            'got %r (height_min=%r)' % (height_max, self.height_min))
    self._height_limits[1] = height_max
    self._update_layout()
constant[Set the maximum height of the widget.
Parameters
----------
height_max: None | float
the maximum height of the widget. if None, maximum height
is unbounded
]
if compare[name[height_max] is constant[None]] begin[:]
call[name[self]._height_limits][constant[1]] assign[=] constant[None]
return[None]
variable[height_max] assign[=] call[name[float], parameter[name[height_max]]]
assert[compare[constant[0] less_or_equal[<=] name[self].height_min]]
call[name[self]._height_limits][constant[1]] assign[=] name[height_max]
call[name[self]._update_layout, parameter[]] | keyword[def] identifier[height_max] ( identifier[self] , identifier[height_max] ):
literal[string]
keyword[if] identifier[height_max] keyword[is] keyword[None] :
identifier[self] . identifier[_height_limits] [ literal[int] ]= keyword[None]
keyword[return]
identifier[height_max] = identifier[float] ( identifier[height_max] )
keyword[assert] ( literal[int] <= identifier[self] . identifier[height_min] <= identifier[height_max] )
identifier[self] . identifier[_height_limits] [ literal[int] ]= identifier[height_max]
identifier[self] . identifier[_update_layout] () | def height_max(self, height_max):
"""Set the maximum height of the widget.
Parameters
----------
height_max: None | float
the maximum height of the widget. if None, maximum height
is unbounded
"""
if height_max is None:
self._height_limits[1] = None
return # depends on [control=['if'], data=[]]
height_max = float(height_max)
assert 0 <= self.height_min <= height_max
self._height_limits[1] = height_max
self._update_layout() |
def __processUsers(self):
    """Process users of the queue.

    Blocks until at least one user is queued (or shutdown is
    requested via ``__end``), then drains ``__usersToProccess``,
    handing each user to ``__addUser``.
    """
    # Wait for the first user to arrive, unless shutdown was requested.
    # NOTE(review): this is a busy-wait that spins the CPU; consider a
    # threading.Event or a blocking Queue.get with a timeout instead.
    while self.__usersToProccess.empty() and not self.__end:
        pass
    # Keep draining while running, or while items remain after
    # shutdown was requested.
    while not self.__end or not self.__usersToProccess.empty():
        # The lock serialises concurrent consumers around the
        # non-blocking get below.
        self.__lockGetUser.acquire()
        try:
            # Non-blocking get: raises Empty instead of waiting.
            new_user = self.__usersToProccess.get(False)
        except Empty:
            # Queue drained: release the lock and stop processing.
            self.__lockGetUser.release()
            return
        else:
            self.__lockGetUser.release()
            self.__addUser(new_user)
            self.__logger.info("__processUsers:" +
                               str(self.__usersToProccess.qsize()) +
                               " users to process")
constant[Process users of the queue.]
while <ast.BoolOp object at 0x7da1b0a045b0> begin[:]
pass
while <ast.BoolOp object at 0x7da1b0a04190> begin[:]
call[name[self].__lockGetUser.acquire, parameter[]]
<ast.Try object at 0x7da1b0a04400> | keyword[def] identifier[__processUsers] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[__usersToProccess] . identifier[empty] () keyword[and] keyword[not] identifier[self] . identifier[__end] :
keyword[pass]
keyword[while] keyword[not] identifier[self] . identifier[__end] keyword[or] keyword[not] identifier[self] . identifier[__usersToProccess] . identifier[empty] ():
identifier[self] . identifier[__lockGetUser] . identifier[acquire] ()
keyword[try] :
identifier[new_user] = identifier[self] . identifier[__usersToProccess] . identifier[get] ( keyword[False] )
keyword[except] identifier[Empty] :
identifier[self] . identifier[__lockGetUser] . identifier[release] ()
keyword[return]
keyword[else] :
identifier[self] . identifier[__lockGetUser] . identifier[release] ()
identifier[self] . identifier[__addUser] ( identifier[new_user] )
identifier[self] . identifier[__logger] . identifier[info] ( literal[string] +
identifier[str] ( identifier[self] . identifier[__usersToProccess] . identifier[qsize] ())+
literal[string] ) | def __processUsers(self):
"""Process users of the queue."""
while self.__usersToProccess.empty() and (not self.__end):
pass # depends on [control=['while'], data=[]]
while not self.__end or not self.__usersToProccess.empty():
self.__lockGetUser.acquire()
try:
new_user = self.__usersToProccess.get(False) # depends on [control=['try'], data=[]]
except Empty:
self.__lockGetUser.release()
return # depends on [control=['except'], data=[]]
else:
self.__lockGetUser.release()
self.__addUser(new_user)
self.__logger.info('__processUsers:' + str(self.__usersToProccess.qsize()) + ' users to process') # depends on [control=['while'], data=[]] |
def spawn_antigen_predictors(job, transgened_files, phlat_files, univ_options, mhc_options):
    """
    Spawn one child job per (MHC allele, peptide file) combination to predict
    MHCI:peptide and MHCII:peptide binding on the peptide files produced by
    run_transgene().  The promised results are merged by a follow-on job.
    This module corresponds to node 18 on the tree.

    ARGUMENTS
    1. transgened_files: REFER RETURN VALUE of run_transgene()
    2. phlat_files: REFER RETURN VALUE of merge_phlat_calls()
    3. univ_options: Dict of universal arguments used by almost all tools
         univ_options
            +- 'dockerhub': <dockerhub to use>
    4. mhc_options: Dict of dicts of parameters specific to mhci and mhcii
       respectively
         mhc_options
            |- 'mhci'
            |   |- 'method_file': <JSid for json file containing data linking
            |   |                  alleles, peptide lengths, and prediction methods>
            |   +- 'pred': String describing prediction method to use
            +- 'mhcii'
                |- 'method_file': <JSid for json file containing data linking
                |                  alleles and prediction methods>
                +- 'pred': String describing prediction method to use

    RETURN VALUES
    1. tuple of (mhci_preds, mhcii_preds)
         mhci_preds: Dict of promised return values from running predictions on
                     a given mhc for all peptides of length 9 and 10, keyed by
                     the prediction-file name built below.
         mhcii_preds: Dict of promised return values from running predictions on
                      a given mhc for all peptides of length 15, keyed likewise.
    """
    job.fileStore.logToMaster('Running spawn_anti on %s' % univ_options['patient'])
    work_dir = job.fileStore.getLocalTempDir()
    # mhc_options is an (mhci, mhcii) pair of parameter dicts.
    mhci_options, mhcii_options = mhc_options
    # Peptide fasta file IDs emitted by transgene, keyed by "<length>_mer.faa".
    pept_files = {
        '9_mer.faa': transgened_files['transgened_tumor_9_mer_snpeffed.faa'],
        '10_mer.faa': transgened_files['transgened_tumor_10_mer_snpeffed.faa'],
        '15_mer.faa': transgened_files['transgened_tumor_15_mer_snpeffed.faa']}
    # Allele lists called by PHLAT plus the per-tool restriction files.
    input_files = {
        'mhci_alleles.list': phlat_files['mhci_alleles.list'],
        'mhcii_alleles.list': phlat_files['mhcii_alleles.list'],
        'mhci_restrictions.list': mhci_options['method_file'],
        'mhcii_restrictions.list': mhcii_options['method_file']}
    input_files = get_files_from_filestore(job, input_files, work_dir)
    # pept_files = get_files_from_filestore(job, pept_files, work_dir)
    # Read the called alleles (one per line) for each MHC class.
    mhci_alleles, mhcii_alleles = [], []
    with open(input_files['mhci_alleles.list'], 'r') as mhci_file:
        for line in mhci_file:
            mhci_alleles.append(line.strip())
    with open(input_files['mhcii_alleles.list'], 'r') as mhcii_file:
        for line in mhcii_file:
            mhcii_alleles.append(line.strip())
    # This file contains the list of allele:pept length combinations supported
    # by each prediction type.
    with open(input_files['mhci_restrictions.list'], 'r') as restfile:
        mhci_restrictions = json.load(restfile)
    with open(input_files['mhcii_restrictions.list'], 'r') as restfile:
        mhcii_restrictions = json.load(restfile)
    # Make a regexp to convert non alphanumeric characters in HLA names to _
    strip_allele_re = re.compile('[^A-Z0-9]')
    # For each mhci allele:peptfile combination, spawn a job and store the job
    # handle (a promise) in the dict.  Then do the same for mhcii.
    mhci_preds, mhcii_preds = {}, {}
    for allele in mhci_alleles:
        stripped_allele = re.sub(strip_allele_re, '_', allele)
        for peptfile in ['9_mer.faa', '10_mer.faa']:
            # peplen is the leading "<length>" component of the file name.
            peplen = peptfile.split('_')[0]
            # Ensure that the allele is among the list of accepted alleles
            try:
                if not mhci_restrictions[allele][peplen]:
                    continue
            except KeyError:
                # Allele (or this peptide length for it) is unsupported; skip.
                continue
            # NOTE(review): peptfile[:-4] already ends in "_mer", so the key
            # comes out as e.g. "<allele>_9_mer_mer.pred" -- confirm that
            # downstream consumers expect this doubled suffix.
            predfile = ''.join([stripped_allele, '_', peptfile[:-4], '_mer.pred'])
            mhci_preds[predfile] = job.addChildJobFn(predict_mhci_binding, pept_files[peptfile],
                                                     allele, peplen, univ_options,
                                                     mhci_options, disk='10G').rv()
    for allele in mhcii_alleles:
        stripped_allele = re.sub(strip_allele_re, '_', allele)
        predfile = ''.join([stripped_allele, '_15_mer.pred'])
        # Skip alleles the chosen mhcii prediction method does not support.
        if allele not in mhcii_restrictions[mhcii_options['pred']]:
            continue
        mhcii_preds[predfile] = job.addChildJobFn(predict_mhcii_binding, pept_files['15_mer.faa'],
                                                  allele, univ_options, mhcii_options,
                                                  disk='10G').rv()
    return mhci_preds, mhcii_preds
constant[
Based on the number of alleles obtained from node 14, this module will spawn callers to predict
MHCI:peptide and MHCII:peptide binding on the peptide files from node 17. Once all MHC:peptide
predictions are made, merge them via a follow-on job.
ARGUMENTS
1. transgened_files: REFER RETURN VALUE of run_transgene()
2. phlat_files: REFER RETURN VALUE of merge_phlat_calls()
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. mhc_options: Dict of dicts of parameters specific to mhci and mhcii
respectively
mhc_options
|- 'mhci'
| |- 'method_file': <JSid for json file containing data
| | linking alleles, peptide lengths, and
| | prediction methods>
| +- 'pred': String describing prediction method to use
+- 'mhcii'
|- 'method_file': <JSid for json file containing data
| linking alleles and prediction methods>
+- 'pred': String describing prediction method to use
RETURN VALUES
1. tuple of (mhci_preds, mhcii_preds)
mhci_preds: Dict of return value from running predictions on a given
mhc for all peptides of length 9 and 10.
mhci_preds
|- <MHC molecule 1>_9_mer.faa: <PromisedJobReturnValue>
|- <MHC molecule 1>_10_mer.faa: <PromisedJobReturnValue>
|
..
+- <MHC molecule n>_10_mer.faa: <PromisedJobReturnValue>
mhcii_preds: Dict of return value from running predictions on a given
mhc for all peptides of length 15.
mhci_preds
|- <MHC molecule 1>_15_mer.faa: <PromisedJobReturnValue>
|
..
+- <MHC molecule n>_15_mer.faa: <PromisedJobReturnValue>
This module corresponds to node 18 on the tree
]
call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Running spawn_anti on %s] <ast.Mod object at 0x7da2590d6920> call[name[univ_options]][constant[patient]]]]]
variable[work_dir] assign[=] call[name[job].fileStore.getLocalTempDir, parameter[]]
<ast.Tuple object at 0x7da2054a6f50> assign[=] name[mhc_options]
variable[pept_files] assign[=] dictionary[[<ast.Constant object at 0x7da2054a63b0>, <ast.Constant object at 0x7da2054a62c0>, <ast.Constant object at 0x7da18f58d090>], [<ast.Subscript object at 0x7da18f58e500>, <ast.Subscript object at 0x7da18f58dff0>, <ast.Subscript object at 0x7da18f58cfd0>]]
variable[input_files] assign[=] dictionary[[<ast.Constant object at 0x7da18f58e590>, <ast.Constant object at 0x7da18f58d540>, <ast.Constant object at 0x7da18f58c610>, <ast.Constant object at 0x7da18f58ea70>], [<ast.Subscript object at 0x7da18f58f3d0>, <ast.Subscript object at 0x7da18f58d360>, <ast.Subscript object at 0x7da18f58d630>, <ast.Subscript object at 0x7da18f58c9a0>]]
variable[input_files] assign[=] call[name[get_files_from_filestore], parameter[name[job], name[input_files], name[work_dir]]]
<ast.Tuple object at 0x7da1b24a0490> assign[=] tuple[[<ast.List object at 0x7da1b24a07c0>, <ast.List object at 0x7da1b24a0520>]]
with call[name[open], parameter[call[name[input_files]][constant[mhci_alleles.list]], constant[r]]] begin[:]
for taget[name[line]] in starred[name[mhci_file]] begin[:]
call[name[mhci_alleles].append, parameter[call[name[line].strip, parameter[]]]]
with call[name[open], parameter[call[name[input_files]][constant[mhcii_alleles.list]], constant[r]]] begin[:]
for taget[name[line]] in starred[name[mhcii_file]] begin[:]
call[name[mhcii_alleles].append, parameter[call[name[line].strip, parameter[]]]]
with call[name[open], parameter[call[name[input_files]][constant[mhci_restrictions.list]], constant[r]]] begin[:]
variable[mhci_restrictions] assign[=] call[name[json].load, parameter[name[restfile]]]
with call[name[open], parameter[call[name[input_files]][constant[mhcii_restrictions.list]], constant[r]]] begin[:]
variable[mhcii_restrictions] assign[=] call[name[json].load, parameter[name[restfile]]]
variable[strip_allele_re] assign[=] call[name[re].compile, parameter[constant[[^A-Z0-9]]]]
<ast.Tuple object at 0x7da18f723ca0> assign[=] tuple[[<ast.Dict object at 0x7da18f7204c0>, <ast.Dict object at 0x7da18f722bc0>]]
for taget[name[allele]] in starred[name[mhci_alleles]] begin[:]
variable[stripped_allele] assign[=] call[name[re].sub, parameter[name[strip_allele_re], constant[_], name[allele]]]
for taget[name[peptfile]] in starred[list[[<ast.Constant object at 0x7da18f723940>, <ast.Constant object at 0x7da18f721c60>]]] begin[:]
variable[peplen] assign[=] call[call[name[peptfile].split, parameter[constant[_]]]][constant[0]]
<ast.Try object at 0x7da18f720520>
variable[predfile] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da18f720ca0>, <ast.Constant object at 0x7da18f721180>, <ast.Subscript object at 0x7da18f722f50>, <ast.Constant object at 0x7da18f723760>]]]]
call[name[mhci_preds]][name[predfile]] assign[=] call[call[name[job].addChildJobFn, parameter[name[predict_mhci_binding], call[name[pept_files]][name[peptfile]], name[allele], name[peplen], name[univ_options], name[mhci_options]]].rv, parameter[]]
for taget[name[allele]] in starred[name[mhcii_alleles]] begin[:]
variable[stripped_allele] assign[=] call[name[re].sub, parameter[name[strip_allele_re], constant[_], name[allele]]]
variable[predfile] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da2044c3010>, <ast.Constant object at 0x7da2044c2800>]]]]
if compare[name[allele] <ast.NotIn object at 0x7da2590d7190> call[name[mhcii_restrictions]][call[name[mhcii_options]][constant[pred]]]] begin[:]
continue
call[name[mhcii_preds]][name[predfile]] assign[=] call[call[name[job].addChildJobFn, parameter[name[predict_mhcii_binding], call[name[pept_files]][constant[15_mer.faa]], name[allele], name[univ_options], name[mhcii_options]]].rv, parameter[]]
return[tuple[[<ast.Name object at 0x7da2044c1de0>, <ast.Name object at 0x7da2044c1000>]]] | keyword[def] identifier[spawn_antigen_predictors] ( identifier[job] , identifier[transgened_files] , identifier[phlat_files] , identifier[univ_options] , identifier[mhc_options] ):
literal[string]
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] % identifier[univ_options] [ literal[string] ])
identifier[work_dir] = identifier[job] . identifier[fileStore] . identifier[getLocalTempDir] ()
identifier[mhci_options] , identifier[mhcii_options] = identifier[mhc_options]
identifier[pept_files] ={
literal[string] : identifier[transgened_files] [ literal[string] ],
literal[string] : identifier[transgened_files] [ literal[string] ],
literal[string] : identifier[transgened_files] [ literal[string] ]}
identifier[input_files] ={
literal[string] : identifier[phlat_files] [ literal[string] ],
literal[string] : identifier[phlat_files] [ literal[string] ],
literal[string] : identifier[mhci_options] [ literal[string] ],
literal[string] : identifier[mhcii_options] [ literal[string] ]}
identifier[input_files] = identifier[get_files_from_filestore] ( identifier[job] , identifier[input_files] , identifier[work_dir] )
identifier[mhci_alleles] , identifier[mhcii_alleles] =[],[]
keyword[with] identifier[open] ( identifier[input_files] [ literal[string] ], literal[string] ) keyword[as] identifier[mhci_file] :
keyword[for] identifier[line] keyword[in] identifier[mhci_file] :
identifier[mhci_alleles] . identifier[append] ( identifier[line] . identifier[strip] ())
keyword[with] identifier[open] ( identifier[input_files] [ literal[string] ], literal[string] ) keyword[as] identifier[mhcii_file] :
keyword[for] identifier[line] keyword[in] identifier[mhcii_file] :
identifier[mhcii_alleles] . identifier[append] ( identifier[line] . identifier[strip] ())
keyword[with] identifier[open] ( identifier[input_files] [ literal[string] ], literal[string] ) keyword[as] identifier[restfile] :
identifier[mhci_restrictions] = identifier[json] . identifier[load] ( identifier[restfile] )
keyword[with] identifier[open] ( identifier[input_files] [ literal[string] ], literal[string] ) keyword[as] identifier[restfile] :
identifier[mhcii_restrictions] = identifier[json] . identifier[load] ( identifier[restfile] )
identifier[strip_allele_re] = identifier[re] . identifier[compile] ( literal[string] )
identifier[mhci_preds] , identifier[mhcii_preds] ={},{}
keyword[for] identifier[allele] keyword[in] identifier[mhci_alleles] :
identifier[stripped_allele] = identifier[re] . identifier[sub] ( identifier[strip_allele_re] , literal[string] , identifier[allele] )
keyword[for] identifier[peptfile] keyword[in] [ literal[string] , literal[string] ]:
identifier[peplen] = identifier[peptfile] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[try] :
keyword[if] keyword[not] identifier[mhci_restrictions] [ identifier[allele] ][ identifier[peplen] ]:
keyword[continue]
keyword[except] identifier[KeyError] :
keyword[continue]
identifier[predfile] = literal[string] . identifier[join] ([ identifier[stripped_allele] , literal[string] , identifier[peptfile] [:- literal[int] ], literal[string] ])
identifier[mhci_preds] [ identifier[predfile] ]= identifier[job] . identifier[addChildJobFn] ( identifier[predict_mhci_binding] , identifier[pept_files] [ identifier[peptfile] ],
identifier[allele] , identifier[peplen] , identifier[univ_options] ,
identifier[mhci_options] , identifier[disk] = literal[string] ). identifier[rv] ()
keyword[for] identifier[allele] keyword[in] identifier[mhcii_alleles] :
identifier[stripped_allele] = identifier[re] . identifier[sub] ( identifier[strip_allele_re] , literal[string] , identifier[allele] )
identifier[predfile] = literal[string] . identifier[join] ([ identifier[stripped_allele] , literal[string] ])
keyword[if] identifier[allele] keyword[not] keyword[in] identifier[mhcii_restrictions] [ identifier[mhcii_options] [ literal[string] ]]:
keyword[continue]
identifier[mhcii_preds] [ identifier[predfile] ]= identifier[job] . identifier[addChildJobFn] ( identifier[predict_mhcii_binding] , identifier[pept_files] [ literal[string] ],
identifier[allele] , identifier[univ_options] , identifier[mhcii_options] ,
identifier[disk] = literal[string] ). identifier[rv] ()
keyword[return] identifier[mhci_preds] , identifier[mhcii_preds] | def spawn_antigen_predictors(job, transgened_files, phlat_files, univ_options, mhc_options):
"""
Based on the number of alleles obtained from node 14, this module will spawn callers to predict
MHCI:peptide and MHCII:peptide binding on the peptide files from node 17. Once all MHC:peptide
predictions are made, merge them via a follow-on job.
ARGUMENTS
1. transgened_files: REFER RETURN VALUE of run_transgene()
2. phlat_files: REFER RETURN VALUE of merge_phlat_calls()
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. mhc_options: Dict of dicts of parameters specific to mhci and mhcii
respectively
mhc_options
|- 'mhci'
| |- 'method_file': <JSid for json file containing data
| | linking alleles, peptide lengths, and
| | prediction methods>
| +- 'pred': String describing prediction method to use
+- 'mhcii'
|- 'method_file': <JSid for json file containing data
| linking alleles and prediction methods>
+- 'pred': String describing prediction method to use
RETURN VALUES
1. tuple of (mhci_preds, mhcii_preds)
mhci_preds: Dict of return value from running predictions on a given
mhc for all peptides of length 9 and 10.
mhci_preds
|- <MHC molecule 1>_9_mer.faa: <PromisedJobReturnValue>
|- <MHC molecule 1>_10_mer.faa: <PromisedJobReturnValue>
|
..
+- <MHC molecule n>_10_mer.faa: <PromisedJobReturnValue>
mhcii_preds: Dict of return value from running predictions on a given
mhc for all peptides of length 15.
mhci_preds
|- <MHC molecule 1>_15_mer.faa: <PromisedJobReturnValue>
|
..
+- <MHC molecule n>_15_mer.faa: <PromisedJobReturnValue>
This module corresponds to node 18 on the tree
"""
job.fileStore.logToMaster('Running spawn_anti on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
(mhci_options, mhcii_options) = mhc_options
pept_files = {'9_mer.faa': transgened_files['transgened_tumor_9_mer_snpeffed.faa'], '10_mer.faa': transgened_files['transgened_tumor_10_mer_snpeffed.faa'], '15_mer.faa': transgened_files['transgened_tumor_15_mer_snpeffed.faa']}
input_files = {'mhci_alleles.list': phlat_files['mhci_alleles.list'], 'mhcii_alleles.list': phlat_files['mhcii_alleles.list'], 'mhci_restrictions.list': mhci_options['method_file'], 'mhcii_restrictions.list': mhcii_options['method_file']}
input_files = get_files_from_filestore(job, input_files, work_dir)
# pept_files = get_files_from_filestore(job, pept_files, work_dir)
(mhci_alleles, mhcii_alleles) = ([], [])
with open(input_files['mhci_alleles.list'], 'r') as mhci_file:
for line in mhci_file:
mhci_alleles.append(line.strip()) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['mhci_file']]
with open(input_files['mhcii_alleles.list'], 'r') as mhcii_file:
for line in mhcii_file:
mhcii_alleles.append(line.strip()) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['mhcii_file']]
# This file contains the list of allele:pept length combinations supported
# by each prediction type.
with open(input_files['mhci_restrictions.list'], 'r') as restfile:
mhci_restrictions = json.load(restfile) # depends on [control=['with'], data=['restfile']]
with open(input_files['mhcii_restrictions.list'], 'r') as restfile:
mhcii_restrictions = json.load(restfile) # depends on [control=['with'], data=['restfile']]
# Make a regexp to convert non alphanumeric characters in HLA names to _
strip_allele_re = re.compile('[^A-Z0-9]')
# For each mhci allele:peptfile combination, spawn a job and store the job handle in the dict.
# Then do the same for mhcii
(mhci_preds, mhcii_preds) = ({}, {})
for allele in mhci_alleles:
stripped_allele = re.sub(strip_allele_re, '_', allele)
for peptfile in ['9_mer.faa', '10_mer.faa']:
peplen = peptfile.split('_')[0]
# Ensure that the allele is among the list of accepted alleles
try:
if not mhci_restrictions[allele][peplen]:
continue # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
continue # depends on [control=['except'], data=[]]
predfile = ''.join([stripped_allele, '_', peptfile[:-4], '_mer.pred'])
mhci_preds[predfile] = job.addChildJobFn(predict_mhci_binding, pept_files[peptfile], allele, peplen, univ_options, mhci_options, disk='10G').rv() # depends on [control=['for'], data=['peptfile']] # depends on [control=['for'], data=['allele']]
for allele in mhcii_alleles:
stripped_allele = re.sub(strip_allele_re, '_', allele)
predfile = ''.join([stripped_allele, '_15_mer.pred'])
if allele not in mhcii_restrictions[mhcii_options['pred']]:
continue # depends on [control=['if'], data=[]]
mhcii_preds[predfile] = job.addChildJobFn(predict_mhcii_binding, pept_files['15_mer.faa'], allele, univ_options, mhcii_options, disk='10G').rv() # depends on [control=['for'], data=['allele']]
return (mhci_preds, mhcii_preds) |
def from_tibiadata(cls, content, vocation=None):
    """Build a highscores object from a TibiaData highscores response.

    TibiaData's response does not echo the vocation filter that was applied,
    so :py:attr:`vocation` cannot be recovered from the content; pass it via
    the ``vocation`` argument if it is known and it will be assigned here.

    Parameters
    ----------
    content: :class:`str`
        The JSON content of the response.
    vocation: :class:`VocationFilter`, optional
        Vocation filter to assign to the results; does not affect parsing.

    Returns
    -------
    :class:`Highscores`
        The parsed highscores, or None when the response is for a
        nonexistent world.

    Raises
    ------
    InvalidContent
        If content is not a JSON string of the highscores response.
    """
    response = parse_json(content)
    try:
        payload = response["highscores"]
        # A response for a nonexistent world carries an "error" marker.
        if "error" in payload["data"]:
            return None
        highscores = cls(payload["world"], payload["type"])
        for row in payload["data"]:
            # Experience, loyalty and achievements report "points";
            # every other category reports "level".
            if highscores.category in [Category.ACHIEVEMENTS,
                                       Category.LOYALTY_POINTS,
                                       Category.EXPERIENCE]:
                score_key = "points"
            else:
                score_key = "level"
            if highscores.category == Category.EXPERIENCE:
                parsed = ExpHighscoresEntry(row["name"], row["rank"], row["voc"],
                                            row[score_key], row["level"])
            elif highscores.category == Category.LOYALTY_POINTS:
                parsed = LoyaltyHighscoresEntry(row["name"], row["rank"], row["voc"],
                                                row[score_key], row["title"])
            else:
                parsed = HighscoresEntry(row["name"], row["rank"], row["voc"],
                                         row[score_key])
            highscores.entries.append(parsed)
        highscores.results_count = len(highscores.entries)
    except KeyError:
        raise InvalidContent("content is not a TibiaData highscores response.")
    if isinstance(vocation, VocationFilter):
        highscores.vocation = vocation
    return highscores
constant[Builds a highscores object from a TibiaData highscores response.
Notes
-----
Since TibiaData.com's response doesn't contain any indication of the vocation filter applied,
:py:attr:`vocation` can't be determined from the response, so the attribute must be assigned manually.
If the attribute is known, it can be passed for it to be assigned in this method.
Parameters
----------
content: :class:`str`
The JSON content of the response.
vocation: :class:`VocationFilter`, optional
The vocation filter to assign to the results. Note that this won't affect the parsing.
Returns
-------
:class:`Highscores`
The highscores contained in the page, or None if the content is for the highscores of a nonexistent world.
Raises
------
InvalidContent
If content is not a JSON string of the highscores response.]
variable[json_content] assign[=] call[name[parse_json], parameter[name[content]]]
<ast.Try object at 0x7da20c990f40>
if call[name[isinstance], parameter[name[vocation], name[VocationFilter]]] begin[:]
name[highscores].vocation assign[=] name[vocation]
return[name[highscores]] | keyword[def] identifier[from_tibiadata] ( identifier[cls] , identifier[content] , identifier[vocation] = keyword[None] ):
literal[string]
identifier[json_content] = identifier[parse_json] ( identifier[content] )
keyword[try] :
identifier[highscores_json] = identifier[json_content] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[highscores_json] [ literal[string] ]:
keyword[return] keyword[None]
identifier[world] = identifier[highscores_json] [ literal[string] ]
identifier[category] = identifier[highscores_json] [ literal[string] ]
identifier[highscores] = identifier[cls] ( identifier[world] , identifier[category] )
keyword[for] identifier[entry] keyword[in] identifier[highscores_json] [ literal[string] ]:
identifier[value_key] = literal[string]
keyword[if] identifier[highscores] . identifier[category] keyword[in] [ identifier[Category] . identifier[ACHIEVEMENTS] , identifier[Category] . identifier[LOYALTY_POINTS] , identifier[Category] . identifier[EXPERIENCE] ]:
identifier[value_key] = literal[string]
keyword[if] identifier[highscores] . identifier[category] == identifier[Category] . identifier[EXPERIENCE] :
identifier[highscores] . identifier[entries] . identifier[append] ( identifier[ExpHighscoresEntry] ( identifier[entry] [ literal[string] ], identifier[entry] [ literal[string] ], identifier[entry] [ literal[string] ],
identifier[entry] [ identifier[value_key] ], identifier[entry] [ literal[string] ]))
keyword[elif] identifier[highscores] . identifier[category] == identifier[Category] . identifier[LOYALTY_POINTS] :
identifier[highscores] . identifier[entries] . identifier[append] ( identifier[LoyaltyHighscoresEntry] ( identifier[entry] [ literal[string] ], identifier[entry] [ literal[string] ], identifier[entry] [ literal[string] ],
identifier[entry] [ identifier[value_key] ], identifier[entry] [ literal[string] ]))
keyword[else] :
identifier[highscores] . identifier[entries] . identifier[append] ( identifier[HighscoresEntry] ( identifier[entry] [ literal[string] ], identifier[entry] [ literal[string] ], identifier[entry] [ literal[string] ],
identifier[entry] [ identifier[value_key] ]))
identifier[highscores] . identifier[results_count] = identifier[len] ( identifier[highscores] . identifier[entries] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidContent] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[vocation] , identifier[VocationFilter] ):
identifier[highscores] . identifier[vocation] = identifier[vocation]
keyword[return] identifier[highscores] | def from_tibiadata(cls, content, vocation=None):
"""Builds a highscores object from a TibiaData highscores response.
Notes
-----
Since TibiaData.com's response doesn't contain any indication of the vocation filter applied,
:py:attr:`vocation` can't be determined from the response, so the attribute must be assigned manually.
If the attribute is known, it can be passed for it to be assigned in this method.
Parameters
----------
content: :class:`str`
The JSON content of the response.
vocation: :class:`VocationFilter`, optional
The vocation filter to assign to the results. Note that this won't affect the parsing.
Returns
-------
:class:`Highscores`
The highscores contained in the page, or None if the content is for the highscores of a nonexistent world.
Raises
------
InvalidContent
If content is not a JSON string of the highscores response."""
json_content = parse_json(content)
try:
highscores_json = json_content['highscores']
if 'error' in highscores_json['data']:
return None # depends on [control=['if'], data=[]]
world = highscores_json['world']
category = highscores_json['type']
highscores = cls(world, category)
for entry in highscores_json['data']:
value_key = 'level'
if highscores.category in [Category.ACHIEVEMENTS, Category.LOYALTY_POINTS, Category.EXPERIENCE]:
value_key = 'points' # depends on [control=['if'], data=[]]
if highscores.category == Category.EXPERIENCE:
highscores.entries.append(ExpHighscoresEntry(entry['name'], entry['rank'], entry['voc'], entry[value_key], entry['level'])) # depends on [control=['if'], data=[]]
elif highscores.category == Category.LOYALTY_POINTS:
highscores.entries.append(LoyaltyHighscoresEntry(entry['name'], entry['rank'], entry['voc'], entry[value_key], entry['title'])) # depends on [control=['if'], data=[]]
else:
highscores.entries.append(HighscoresEntry(entry['name'], entry['rank'], entry['voc'], entry[value_key])) # depends on [control=['for'], data=['entry']]
highscores.results_count = len(highscores.entries) # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidContent('content is not a TibiaData highscores response.') # depends on [control=['except'], data=[]]
if isinstance(vocation, VocationFilter):
highscores.vocation = vocation # depends on [control=['if'], data=[]]
return highscores |
def open(self, *args, **kwargs):
    """
    Preprocess the arguments, then hand them to `subprocess.Popen`.
    """
    popen_args, popen_kwargs = self.__process__(*args, **kwargs)
    return Popen(popen_args, **popen_kwargs)
constant[
Delegates to `subprocess.Popen`.
]
<ast.Tuple object at 0x7da20e957340> assign[=] call[name[self].__process__, parameter[<ast.Starred object at 0x7da20e956350>]]
return[call[name[Popen], parameter[name[args]]]] | keyword[def] identifier[open] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[args] , identifier[kwargs] = identifier[self] . identifier[__process__] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[Popen] ( identifier[args] ,** identifier[kwargs] ) | def open(self, *args, **kwargs):
"""
Delegates to `subprocess.Popen`.
"""
(args, kwargs) = self.__process__(*args, **kwargs)
return Popen(args, **kwargs) |
def transliterate(text):
    """ Return an ASCII transliteration of *text*, mapping '@' to 'a'. """
    ascii_text = unidecode(six.text_type(text))
    return ascii_text.replace('@', 'a')
constant[ Utility to properly transliterate text. ]
variable[text] assign[=] call[name[unidecode], parameter[call[name[six].text_type, parameter[name[text]]]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[@], constant[a]]]
return[name[text]] | keyword[def] identifier[transliterate] ( identifier[text] ):
literal[string]
identifier[text] = identifier[unidecode] ( identifier[six] . identifier[text_type] ( identifier[text] ))
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[text] | def transliterate(text):
""" Utility to properly transliterate text. """
text = unidecode(six.text_type(text))
text = text.replace('@', 'a')
return text |
def urljoin(*urls):
    """
    Join URL fragments, avoiding the surprising standard behavior where
    urlparse.urljoin('http://a.com/foo', '/bar') yields 'http://a.com/bar'
    instead of the expected 'http://a.com/foo/bar'.
    """
    normalized = []
    for url in urls:
        trimmed = url.strip('/')
        if trimmed:
            normalized.append(trimmed + '/')
    return reduce(_urljoin, normalized, '').rstrip('/')
constant[
The default urlparse.urljoin behavior look strange
Standard urlparse.urljoin('http://a.com/foo', '/bar')
Expect: http://a.com/foo/bar
Actually: http://a.com/bar
This function fix that.
]
return[call[call[name[reduce], parameter[name[_urljoin], <ast.ListComp object at 0x7da20e961f30>, constant[]]].rstrip, parameter[constant[/]]]] | keyword[def] identifier[urljoin] (* identifier[urls] ):
literal[string]
keyword[return] identifier[reduce] ( identifier[_urljoin] ,[ identifier[u] . identifier[strip] ( literal[string] )+ literal[string] keyword[for] identifier[u] keyword[in] identifier[urls] keyword[if] identifier[u] . identifier[strip] ( literal[string] )], literal[string] ). identifier[rstrip] ( literal[string] ) | def urljoin(*urls):
"""
The default urlparse.urljoin behavior look strange
Standard urlparse.urljoin('http://a.com/foo', '/bar')
Expect: http://a.com/foo/bar
Actually: http://a.com/bar
This function fix that.
"""
return reduce(_urljoin, [u.strip('/') + '/' for u in urls if u.strip('/')], '').rstrip('/') |
def optimize_batch(self, batchsize=10, returns='best', paralell=True):
        """
        Run multiple optimizations using different starting coordinates.
        Each optimization starts from uniformly random coordinates in
        [0, 10) of length ``self.m * 2``.
        Args:
            batchsize (`int`): Number of optimizations to run.
            returns (`str`): If ``'all'``, return results of all optimizations,
                ordered by stress, ascending. If ``'best'`` return the
                projection with the lowest stress.
            paralell (`bool`): If ``True``, run optimizations in parallel
                using a multiprocessing pool. (NOTE(review): the parameter
                name is misspelled in the signature; it is kept as-is for
                backward compatibility with existing callers.)
        Raises:
            ValueError: If ``returns`` is neither ``'best'`` nor ``'all'``.
        Examples:
            .. doctest::
                >>> import pandas as pd
                >>> from pymds import DistanceMatrix
                >>> dist = pd.DataFrame({
                ...     'a': [0.0, 1.0, 2.0],
                ...     'b': [1.0, 0.0, 3 ** 0.5],
                ...     'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
                >>> dm = DistanceMatrix(dist)
                >>> batch = dm.optimize_batch(batchsize=3, returns='all')
                >>> len(batch)
                3
                >>> type(batch[0])
                <class 'pymds.mds.Projection'>
        Returns:
            `list` or :py:class:`pymds.Projection`:
            `list`: Length batchsize, containing instances of
            :py:class:`pymds.Projection`. Sorted by stress, ascending.
            or
            :py:class:`pymds.Projection`: Projection with the lowest
            stress.
        """
if returns not in ('best', 'all'):
raise ValueError('returns must be either "best" or "all"')
starts = [np.random.rand(self.m * 2) * 10 for i in range(batchsize)]
if paralell:
with Pool() as p:
results = p.map(self.optimize, starts)
else:
results = map(self.optimize, starts)
results = sorted(results, key=lambda x: x.stress)
return results if returns == 'all' else results[0] | def function[optimize_batch, parameter[self, batchsize, returns, paralell]]:
constant[
Run multiple optimizations using different starting coordinates.
Args:
batchsize (`int`): Number of optimizations to run.
returns (`str`): If ``'all'``, return results of all optimizations,
ordered by stress, ascending. If ``'best'`` return the
projection with the lowest stress.
parallel (`bool`): If ``True``, run optimizations in parallel.
Examples:
.. doctest::
>>> import pandas as pd
>>> from pymds import DistanceMatrix
>>> dist = pd.DataFrame({
... 'a': [0.0, 1.0, 2.0],
... 'b': [1.0, 0.0, 3 ** 0.5],
... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
>>> dm = DistanceMatrix(dist)
>>> batch = dm.optimize_batch(batchsize=3, returns='all')
>>> len(batch)
3
>>> type(batch[0])
<class 'pymds.mds.Projection'>
Returns:
`list` or :py:class:`pymds.Projection`:
`list`: Length batchsize, containing instances of
:py:class:`pymds.Projection`. Sorted by stress, ascending.
or
:py:class:`pymds.Projection`: Projection with the lowest
stress.
]
if compare[name[returns] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0a2d8a0>, <ast.Constant object at 0x7da1b0a2de70>]]] begin[:]
<ast.Raise object at 0x7da1b0a2cc70>
variable[starts] assign[=] <ast.ListComp object at 0x7da1b0a2ea10>
if name[paralell] begin[:]
with call[name[Pool], parameter[]] begin[:]
variable[results] assign[=] call[name[p].map, parameter[name[self].optimize, name[starts]]]
variable[results] assign[=] call[name[sorted], parameter[name[results]]]
return[<ast.IfExp object at 0x7da1b0a2d450>] | keyword[def] identifier[optimize_batch] ( identifier[self] , identifier[batchsize] = literal[int] , identifier[returns] = literal[string] , identifier[paralell] = keyword[True] ):
literal[string]
keyword[if] identifier[returns] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[starts] =[ identifier[np] . identifier[random] . identifier[rand] ( identifier[self] . identifier[m] * literal[int] )* literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[batchsize] )]
keyword[if] identifier[paralell] :
keyword[with] identifier[Pool] () keyword[as] identifier[p] :
identifier[results] = identifier[p] . identifier[map] ( identifier[self] . identifier[optimize] , identifier[starts] )
keyword[else] :
identifier[results] = identifier[map] ( identifier[self] . identifier[optimize] , identifier[starts] )
identifier[results] = identifier[sorted] ( identifier[results] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[stress] )
keyword[return] identifier[results] keyword[if] identifier[returns] == literal[string] keyword[else] identifier[results] [ literal[int] ] | def optimize_batch(self, batchsize=10, returns='best', paralell=True):
"""
Run multiple optimizations using different starting coordinates.
Args:
batchsize (`int`): Number of optimizations to run.
returns (`str`): If ``'all'``, return results of all optimizations,
ordered by stress, ascending. If ``'best'`` return the
projection with the lowest stress.
parallel (`bool`): If ``True``, run optimizations in parallel.
Examples:
.. doctest::
>>> import pandas as pd
>>> from pymds import DistanceMatrix
>>> dist = pd.DataFrame({
... 'a': [0.0, 1.0, 2.0],
... 'b': [1.0, 0.0, 3 ** 0.5],
... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
>>> dm = DistanceMatrix(dist)
>>> batch = dm.optimize_batch(batchsize=3, returns='all')
>>> len(batch)
3
>>> type(batch[0])
<class 'pymds.mds.Projection'>
Returns:
`list` or :py:class:`pymds.Projection`:
`list`: Length batchsize, containing instances of
:py:class:`pymds.Projection`. Sorted by stress, ascending.
or
:py:class:`pymds.Projection`: Projection with the lowest
stress.
"""
if returns not in ('best', 'all'):
raise ValueError('returns must be either "best" or "all"') # depends on [control=['if'], data=[]]
starts = [np.random.rand(self.m * 2) * 10 for i in range(batchsize)]
if paralell:
with Pool() as p:
results = p.map(self.optimize, starts) # depends on [control=['with'], data=['p']] # depends on [control=['if'], data=[]]
else:
results = map(self.optimize, starts)
results = sorted(results, key=lambda x: x.stress)
return results if returns == 'all' else results[0] |
def _filterArgsAndKwargs(
        self,
        originalConstructorExpectedArgList,
        syntheticMemberList,
        positionalArgumentKeyValueList,
        keywordedArgDict):
        """Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
        synthesize members and that are not expected by the original constructor.
        If original constructor accepts variadic args, all variadic args are forwarded.
        If original constructor accepts keyworded args, all keyworded args are forwarded.
        Any argument named after a synthetic member is dropped from both the
        positional key/value list and the keyword dict, unless the original
        constructor explicitly expects an argument of that name. The inputs
        are copied before filtering, so the caller's collections are not
        mutated.
        :type originalConstructorExpectedArgList: list(str)
        :type syntheticMemberList: list(SyntheticMember)
        :type positionalArgumentKeyValueList: list(tuple)
        :type keywordedArgDict: dict(string:*)
        :return: tuple(tuple, dict) -- the filtered positional values and the
            filtered keyword dict
        """
# List is initialized with all variadic arguments.
positionalArgumentKeyValueList = copy.copy(positionalArgumentKeyValueList)
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
# Dict is initialized with all keyworded arguments.
keywordedArgDict = keywordedArgDict.copy()
for syntheticMember in syntheticMemberList:
argumentName = syntheticMember.memberName()
# Argument is expected by the original constructor.
if argumentName in originalConstructorExpectedArgList:
continue
# We filter args only if original constructor does not expected variadic args.
if argumentName in positionalArgumentDict:
positionalArgumentKeyValueList = list(filter(lambda pair: pair[0] != argumentName,
positionalArgumentKeyValueList))
# We filter args only if original constructor does not expected keyworded args.
if argumentName in keywordedArgDict:
del keywordedArgDict[argumentName]
positionalArgumentTuple = tuple([value for _, value in positionalArgumentKeyValueList])
return positionalArgumentTuple, keywordedArgDict | def function[_filterArgsAndKwargs, parameter[self, originalConstructorExpectedArgList, syntheticMemberList, positionalArgumentKeyValueList, keywordedArgDict]]:
constant[Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
synthesize members and that are not expected by the original constructor.
If original constructor accepts variadic args, all variadic args are forwarded.
If original constructor accepts keyworded args, all keyworded args are forwarded.
:type originalConstructorExpectedArgList: list(str)
:type syntheticMemberList: list(SyntheticMember)
:type positionalArgumentKeyValueList: list(tuple)
:type keywordedArgDict: dict(string:*)
]
variable[positionalArgumentKeyValueList] assign[=] call[name[copy].copy, parameter[name[positionalArgumentKeyValueList]]]
variable[positionalArgumentDict] assign[=] call[name[dict], parameter[name[positionalArgumentKeyValueList]]]
variable[keywordedArgDict] assign[=] call[name[keywordedArgDict].copy, parameter[]]
for taget[name[syntheticMember]] in starred[name[syntheticMemberList]] begin[:]
variable[argumentName] assign[=] call[name[syntheticMember].memberName, parameter[]]
if compare[name[argumentName] in name[originalConstructorExpectedArgList]] begin[:]
continue
if compare[name[argumentName] in name[positionalArgumentDict]] begin[:]
variable[positionalArgumentKeyValueList] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b0ae01f0>, name[positionalArgumentKeyValueList]]]]]
if compare[name[argumentName] in name[keywordedArgDict]] begin[:]
<ast.Delete object at 0x7da204623d30>
variable[positionalArgumentTuple] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da204620cd0>]]
return[tuple[[<ast.Name object at 0x7da204620b50>, <ast.Name object at 0x7da204622ce0>]]] | keyword[def] identifier[_filterArgsAndKwargs] (
identifier[self] ,
identifier[originalConstructorExpectedArgList] ,
identifier[syntheticMemberList] ,
identifier[positionalArgumentKeyValueList] ,
identifier[keywordedArgDict] ):
literal[string]
identifier[positionalArgumentKeyValueList] = identifier[copy] . identifier[copy] ( identifier[positionalArgumentKeyValueList] )
identifier[positionalArgumentDict] = identifier[dict] ( identifier[positionalArgumentKeyValueList] )
identifier[keywordedArgDict] = identifier[keywordedArgDict] . identifier[copy] ()
keyword[for] identifier[syntheticMember] keyword[in] identifier[syntheticMemberList] :
identifier[argumentName] = identifier[syntheticMember] . identifier[memberName] ()
keyword[if] identifier[argumentName] keyword[in] identifier[originalConstructorExpectedArgList] :
keyword[continue]
keyword[if] identifier[argumentName] keyword[in] identifier[positionalArgumentDict] :
identifier[positionalArgumentKeyValueList] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[pair] : identifier[pair] [ literal[int] ]!= identifier[argumentName] ,
identifier[positionalArgumentKeyValueList] ))
keyword[if] identifier[argumentName] keyword[in] identifier[keywordedArgDict] :
keyword[del] identifier[keywordedArgDict] [ identifier[argumentName] ]
identifier[positionalArgumentTuple] = identifier[tuple] ([ identifier[value] keyword[for] identifier[_] , identifier[value] keyword[in] identifier[positionalArgumentKeyValueList] ])
keyword[return] identifier[positionalArgumentTuple] , identifier[keywordedArgDict] | def _filterArgsAndKwargs(self, originalConstructorExpectedArgList, syntheticMemberList, positionalArgumentKeyValueList, keywordedArgDict):
"""Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
synthesize members and that are not expected by the original constructor.
If original constructor accepts variadic args, all variadic args are forwarded.
If original constructor accepts keyworded args, all keyworded args are forwarded.
:type originalConstructorExpectedArgList: list(str)
:type syntheticMemberList: list(SyntheticMember)
:type positionalArgumentKeyValueList: list(tuple)
:type keywordedArgDict: dict(string:*)
"""
# List is initialized with all variadic arguments.
positionalArgumentKeyValueList = copy.copy(positionalArgumentKeyValueList)
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
# Dict is initialized with all keyworded arguments.
keywordedArgDict = keywordedArgDict.copy()
for syntheticMember in syntheticMemberList:
argumentName = syntheticMember.memberName()
# Argument is expected by the original constructor.
if argumentName in originalConstructorExpectedArgList:
continue # depends on [control=['if'], data=[]] # We filter args only if original constructor does not expected variadic args.
if argumentName in positionalArgumentDict:
positionalArgumentKeyValueList = list(filter(lambda pair: pair[0] != argumentName, positionalArgumentKeyValueList)) # depends on [control=['if'], data=['argumentName']] # We filter args only if original constructor does not expected keyworded args.
if argumentName in keywordedArgDict:
del keywordedArgDict[argumentName] # depends on [control=['if'], data=['argumentName', 'keywordedArgDict']] # depends on [control=['for'], data=['syntheticMember']]
positionalArgumentTuple = tuple([value for (_, value) in positionalArgumentKeyValueList])
return (positionalArgumentTuple, keywordedArgDict) |
def all_entries(self):
        """
        Equivalent of all_comp but returns entries, in the same order as the
        coefficients.
        For each composition in ``self._all_comp``, the first entry in
        ``self._all_entries`` whose composition has the same reduced formula
        is returned; a composition with no matching entry is silently
        skipped, so the result may be shorter than ``self._all_comp``.
        """
entries = []
for c in self._all_comp:
for e in self._all_entries:
if e.composition.reduced_formula == c.reduced_formula:
entries.append(e)
break
return entries | def function[all_entries, parameter[self]]:
constant[
Equivalent of all_comp but returns entries, in the same order as the
coefficients.
]
variable[entries] assign[=] list[[]]
for taget[name[c]] in starred[name[self]._all_comp] begin[:]
for taget[name[e]] in starred[name[self]._all_entries] begin[:]
if compare[name[e].composition.reduced_formula equal[==] name[c].reduced_formula] begin[:]
call[name[entries].append, parameter[name[e]]]
break
return[name[entries]] | keyword[def] identifier[all_entries] ( identifier[self] ):
literal[string]
identifier[entries] =[]
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[_all_comp] :
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[_all_entries] :
keyword[if] identifier[e] . identifier[composition] . identifier[reduced_formula] == identifier[c] . identifier[reduced_formula] :
identifier[entries] . identifier[append] ( identifier[e] )
keyword[break]
keyword[return] identifier[entries] | def all_entries(self):
"""
Equivalent of all_comp but returns entries, in the same order as the
coefficients.
"""
entries = []
for c in self._all_comp:
for e in self._all_entries:
if e.composition.reduced_formula == c.reduced_formula:
entries.append(e)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']] # depends on [control=['for'], data=['c']]
return entries |
def POST(self):
        """ The combined values from :attr:`forms` and :attr:`files`. Values are
            either strings (form values) or instances of
            :class:`cgi.FieldStorage` (file uploads). The request body is
            parsed with :class:`cgi.FieldStorage` using a minimal environ
            with the query string blanked out, so only body parameters
            (not URL query parameters) appear in the result.
        """
post = MultiDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
post[item.name] = item if item.filename else item.value
return post | def function[POST, parameter[self]]:
constant[ The combined values from :attr:`forms` and :attr:`files`. Values are
either strings (form values) or instances of
:class:`cgi.FieldStorage` (file uploads).
]
variable[post] assign[=] call[name[MultiDict], parameter[]]
variable[safe_env] assign[=] dictionary[[<ast.Constant object at 0x7da1b18ad660>], [<ast.Constant object at 0x7da1b18aff70>]]
for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da1b18ac910>, <ast.Constant object at 0x7da1b18ac130>, <ast.Constant object at 0x7da1b18aefb0>]]] begin[:]
if compare[name[key] in name[self].environ] begin[:]
call[name[safe_env]][name[key]] assign[=] call[name[self].environ][name[key]]
if name[NCTextIOWrapper] begin[:]
variable[fb] assign[=] call[name[NCTextIOWrapper], parameter[name[self].body]]
variable[data] assign[=] call[name[cgi].FieldStorage, parameter[]]
for taget[name[item]] in starred[<ast.BoolOp object at 0x7da1b18aef80>] begin[:]
call[name[post]][name[item].name] assign[=] <ast.IfExp object at 0x7da1b18ae7d0>
return[name[post]] | keyword[def] identifier[POST] ( identifier[self] ):
literal[string]
identifier[post] = identifier[MultiDict] ()
identifier[safe_env] ={ literal[string] : literal[string] }
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[environ] : identifier[safe_env] [ identifier[key] ]= identifier[self] . identifier[environ] [ identifier[key] ]
keyword[if] identifier[NCTextIOWrapper] :
identifier[fb] = identifier[NCTextIOWrapper] ( identifier[self] . identifier[body] , identifier[encoding] = literal[string] , identifier[newline] = literal[string] )
keyword[else] :
identifier[fb] = identifier[self] . identifier[body]
identifier[data] = identifier[cgi] . identifier[FieldStorage] ( identifier[fp] = identifier[fb] , identifier[environ] = identifier[safe_env] , identifier[keep_blank_values] = keyword[True] )
keyword[for] identifier[item] keyword[in] identifier[data] . identifier[list] keyword[or] []:
identifier[post] [ identifier[item] . identifier[name] ]= identifier[item] keyword[if] identifier[item] . identifier[filename] keyword[else] identifier[item] . identifier[value]
keyword[return] identifier[post] | def POST(self):
""" The combined values from :attr:`forms` and :attr:`files`. Values are
either strings (form values) or instances of
:class:`cgi.FieldStorage` (file uploads).
"""
post = MultiDict()
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ:
safe_env[key] = self.environ[key] # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n') # depends on [control=['if'], data=[]]
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
post[item.name] = item if item.filename else item.value # depends on [control=['for'], data=['item']]
return post |
def requires(self):
        """Additional object or objects required by this object.
        Reads the ``"requires"`` key of the schema, defaulting to an empty
        dict, and validates that the value is a string or an object;
        anything else raises SchemaError.
        """
        # NOTE: spec says this can also be a list of strings -- lists are
        # currently rejected by the isinstance check below.
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value | def function[requires, parameter[self]]:
constant[Additional object or objects required by this object.]
variable[value] assign[=] call[name[self]._schema.get, parameter[constant[requires], dictionary[[], []]]]
if <ast.UnaryOp object at 0x7da204346800> begin[:]
<ast.Raise object at 0x7da204347670>
return[name[value]] | keyword[def] identifier[requires] ( identifier[self] ):
literal[string]
identifier[value] = identifier[self] . identifier[_schema] . identifier[get] ( literal[string] ,{})
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] ,( identifier[basestring] , identifier[dict] )):
keyword[raise] identifier[SchemaError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
keyword[return] identifier[value] | def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get('requires', {})
if not isinstance(value, (basestring, dict)):
raise SchemaError('requires value {0!r} is neither a string nor an object'.format(value)) # depends on [control=['if'], data=[]]
return value |
def initialize_private_canvas(self, private_canvas):
        """Initialize the private canvas used by this instance.
        Currently this only applies viewer preferences: the pan-position
        mark and/or the focus indicator are enabled when the corresponding
        ``t_`` settings are truthy. The ``private_canvas`` argument is not
        used by this implementation.
        """
if self.t_.get('show_pan_position', False):
self.show_pan_mark(True)
if self.t_.get('show_focus_indicator', False):
self.show_focus_indicator(True) | def function[initialize_private_canvas, parameter[self, private_canvas]]:
constant[Initialize the private canvas used by this instance.
]
if call[name[self].t_.get, parameter[constant[show_pan_position], constant[False]]] begin[:]
call[name[self].show_pan_mark, parameter[constant[True]]]
if call[name[self].t_.get, parameter[constant[show_focus_indicator], constant[False]]] begin[:]
call[name[self].show_focus_indicator, parameter[constant[True]]] | keyword[def] identifier[initialize_private_canvas] ( identifier[self] , identifier[private_canvas] ):
literal[string]
keyword[if] identifier[self] . identifier[t_] . identifier[get] ( literal[string] , keyword[False] ):
identifier[self] . identifier[show_pan_mark] ( keyword[True] )
keyword[if] identifier[self] . identifier[t_] . identifier[get] ( literal[string] , keyword[False] ):
identifier[self] . identifier[show_focus_indicator] ( keyword[True] ) | def initialize_private_canvas(self, private_canvas):
"""Initialize the private canvas used by this instance.
"""
if self.t_.get('show_pan_position', False):
self.show_pan_mark(True) # depends on [control=['if'], data=[]]
if self.t_.get('show_focus_indicator', False):
self.show_focus_indicator(True) # depends on [control=['if'], data=[]] |
def _page_scroll_down(self):
        """Scrolls down to get the next set of images"""
        # Scroll to the bottom of the page to request the next set of images.
        self._chromeDriver.execute_script(
            'window.scroll(0, document.body.clientHeight)')
        # Give the newly requested images time to load completely.
        time.sleep(self.WAIT_TIME)
        # Check whether the 'Show More Results' button is visible.
        # If it is, click it so that more results are requested; this
        # helps avoid capturing duplicate image URLs.
try:
self._chromeDriver.find_element_by_id('smb').click()
except ElementNotVisibleException as error:
pass | def function[_page_scroll_down, parameter[self]]:
constant[Scrolls down to get the next set of images]
call[name[self]._chromeDriver.execute_script, parameter[constant[window.scroll(0, document.body.clientHeight)]]]
call[name[time].sleep, parameter[name[self].WAIT_TIME]]
<ast.Try object at 0x7da18fe91cf0> | keyword[def] identifier[_page_scroll_down] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_chromeDriver] . identifier[execute_script] (
literal[string] )
identifier[time] . identifier[sleep] ( identifier[self] . identifier[WAIT_TIME] )
keyword[try] :
identifier[self] . identifier[_chromeDriver] . identifier[find_element_by_id] ( literal[string] ). identifier[click] ()
keyword[except] identifier[ElementNotVisibleException] keyword[as] identifier[error] :
keyword[pass] | def _page_scroll_down(self):
"""Scrolls down to get the next set of images"""
# Scroll down to request the next set of images
self._chromeDriver.execute_script('window.scroll(0, document.body.clientHeight)')
# Wait for the images to load completely
time.sleep(self.WAIT_TIME)
# Check if the button - 'Show More Results' is visible
# If yes, click it and request more messages
# This step helps is avoiding duplicate image URLS from being captured
try:
self._chromeDriver.find_element_by_id('smb').click() # depends on [control=['try'], data=[]]
except ElementNotVisibleException as error:
pass # depends on [control=['except'], data=[]] |
def open(self):
        ''' Open file corresponding to the TUN device.
        Opens /dev/net/tun unbuffered, then configures the interface via
        ioctl as a persistent TAP device without packet info headers
        (IFF_TAP | IFF_NO_PI | IFF_PERSIST), assigns ownership to the
        current user, and sets IFF_RUNNING in the cached interface flags.
        '''
self.fd = open('/dev/net/tun', 'rb+', buffering=0)
tun_flags = IFF_TAP | IFF_NO_PI | IFF_PERSIST
ifr = struct.pack('16sH', self.name, tun_flags)
fcntl.ioctl(self.fd, TUNSETIFF, ifr)
fcntl.ioctl(self.fd, TUNSETOWNER, os.getuid())
self.ifflags = self.ifflags | IFF_RUNNING | def function[open, parameter[self]]:
constant[ Open file corresponding to the TUN device. ]
name[self].fd assign[=] call[name[open], parameter[constant[/dev/net/tun], constant[rb+]]]
variable[tun_flags] assign[=] binary_operation[binary_operation[name[IFF_TAP] <ast.BitOr object at 0x7da2590d6aa0> name[IFF_NO_PI]] <ast.BitOr object at 0x7da2590d6aa0> name[IFF_PERSIST]]
variable[ifr] assign[=] call[name[struct].pack, parameter[constant[16sH], name[self].name, name[tun_flags]]]
call[name[fcntl].ioctl, parameter[name[self].fd, name[TUNSETIFF], name[ifr]]]
call[name[fcntl].ioctl, parameter[name[self].fd, name[TUNSETOWNER], call[name[os].getuid, parameter[]]]]
name[self].ifflags assign[=] binary_operation[name[self].ifflags <ast.BitOr object at 0x7da2590d6aa0> name[IFF_RUNNING]] | keyword[def] identifier[open] ( identifier[self] ):
literal[string]
identifier[self] . identifier[fd] = identifier[open] ( literal[string] , literal[string] , identifier[buffering] = literal[int] )
identifier[tun_flags] = identifier[IFF_TAP] | identifier[IFF_NO_PI] | identifier[IFF_PERSIST]
identifier[ifr] = identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[name] , identifier[tun_flags] )
identifier[fcntl] . identifier[ioctl] ( identifier[self] . identifier[fd] , identifier[TUNSETIFF] , identifier[ifr] )
identifier[fcntl] . identifier[ioctl] ( identifier[self] . identifier[fd] , identifier[TUNSETOWNER] , identifier[os] . identifier[getuid] ())
identifier[self] . identifier[ifflags] = identifier[self] . identifier[ifflags] | identifier[IFF_RUNNING] | def open(self):
""" Open file corresponding to the TUN device. """
self.fd = open('/dev/net/tun', 'rb+', buffering=0)
tun_flags = IFF_TAP | IFF_NO_PI | IFF_PERSIST
ifr = struct.pack('16sH', self.name, tun_flags)
fcntl.ioctl(self.fd, TUNSETIFF, ifr)
fcntl.ioctl(self.fd, TUNSETOWNER, os.getuid())
self.ifflags = self.ifflags | IFF_RUNNING |
def _stmt_check(self, req: set, legal: set, stmt: dict) -> dict:
        """
        This method checks to make sure that the proc has all required statements and removes any
        statements that aren't valid. Missing required statements is an error. Extra statements are not.
        Note: ``stmt`` is pruned in place (invalid keys are popped) and the
        same dict object is returned. A missing required statement raises
        SyntaxError unless a truthy 'score' statement is present, since a
        proc can be called more than one way with different requirements.
        :param req: set of required statement names
        :param legal: set of legal (optional) statement names
        :param stmt: dict mapping statement names to their values
        :return: dictionary of verified statements
        """
        # debug the argument list (printed only when the logger is at DEBUG, level 10)
if self.logger.level == 10:
for k, v in stmt.items():
print("Key: " + k + ", Value: " + str(v) + ", Type: " + str(type(v)))
# required statements
reqSet = req
if len(reqSet):
self.logger.debug("reqSet: {}".format(reqSet))
missing_set = reqSet.difference(set(stmt.keys()))
if missing_set:
if not stmt.get(
'score'): # till we handle either/or required. proc can be called more than one way w/ diff requirements
raise SyntaxError(
"You are missing %d required statements:\n%s" % (len(missing_set), str(missing_set)))
# legal statements
legalSet = legal
if len(legalSet):
self.logger.debug("legalSet: {}".format(legalSet))
if len(reqSet):
totSet = legalSet | reqSet
else:
totSet = legalSet
generalSet = {'ODSGraphics', 'stmtpassthrough', 'targOpts', 'procopts'}
extraSet = set(stmt.keys() - generalSet).difference(totSet) # find keys not in legal or required sets
if extraSet:
self.logger.debug("extraSet: {}".format(extraSet))
for item in extraSet:
stmt.pop(item, None)
warnings.warn(
"The following {} statements are invalid and will be ignored:\n{}".format(len(extraSet), extraSet))
self.logger.debug("stmt: {}".format(stmt))
return stmt | def function[_stmt_check, parameter[self, req, legal, stmt]]:
constant[
This method checks to make sure that the proc has all required statements and removes any statements
aren't valid. Missing required statements is an error. Extra statements are not.
:param req: set
:param legal: set
:param stmt: dict
:return: dictionary of verified statements
]
if compare[name[self].logger.level equal[==] constant[10]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18bc71570>, <ast.Name object at 0x7da18bc70df0>]]] in starred[call[name[stmt].items, parameter[]]] begin[:]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Key: ] + name[k]] + constant[, Value: ]] + call[name[str], parameter[name[v]]]] + constant[, Type: ]] + call[name[str], parameter[call[name[type], parameter[name[v]]]]]]]]
variable[reqSet] assign[=] name[req]
if call[name[len], parameter[name[reqSet]]] begin[:]
call[name[self].logger.debug, parameter[call[constant[reqSet: {}].format, parameter[name[reqSet]]]]]
variable[missing_set] assign[=] call[name[reqSet].difference, parameter[call[name[set], parameter[call[name[stmt].keys, parameter[]]]]]]
if name[missing_set] begin[:]
if <ast.UnaryOp object at 0x7da18bc70430> begin[:]
<ast.Raise object at 0x7da18bc72230>
variable[legalSet] assign[=] name[legal]
if call[name[len], parameter[name[legalSet]]] begin[:]
call[name[self].logger.debug, parameter[call[constant[legalSet: {}].format, parameter[name[legalSet]]]]]
if call[name[len], parameter[name[reqSet]]] begin[:]
variable[totSet] assign[=] binary_operation[name[legalSet] <ast.BitOr object at 0x7da2590d6aa0> name[reqSet]]
variable[generalSet] assign[=] <ast.Set object at 0x7da18bc72e90>
variable[extraSet] assign[=] call[call[name[set], parameter[binary_operation[call[name[stmt].keys, parameter[]] - name[generalSet]]]].difference, parameter[name[totSet]]]
if name[extraSet] begin[:]
call[name[self].logger.debug, parameter[call[constant[extraSet: {}].format, parameter[name[extraSet]]]]]
for taget[name[item]] in starred[name[extraSet]] begin[:]
call[name[stmt].pop, parameter[name[item], constant[None]]]
call[name[warnings].warn, parameter[call[constant[The following {} statements are invalid and will be ignored:
{}].format, parameter[call[name[len], parameter[name[extraSet]]], name[extraSet]]]]]
call[name[self].logger.debug, parameter[call[constant[stmt: {}].format, parameter[name[stmt]]]]]
return[name[stmt]] | keyword[def] identifier[_stmt_check] ( identifier[self] , identifier[req] : identifier[set] , identifier[legal] : identifier[set] , identifier[stmt] : identifier[dict] )-> identifier[dict] :
literal[string]
keyword[if] identifier[self] . identifier[logger] . identifier[level] == literal[int] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[stmt] . identifier[items] ():
identifier[print] ( literal[string] + identifier[k] + literal[string] + identifier[str] ( identifier[v] )+ literal[string] + identifier[str] ( identifier[type] ( identifier[v] )))
identifier[reqSet] = identifier[req]
keyword[if] identifier[len] ( identifier[reqSet] ):
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[reqSet] ))
identifier[missing_set] = identifier[reqSet] . identifier[difference] ( identifier[set] ( identifier[stmt] . identifier[keys] ()))
keyword[if] identifier[missing_set] :
keyword[if] keyword[not] identifier[stmt] . identifier[get] (
literal[string] ):
keyword[raise] identifier[SyntaxError] (
literal[string] %( identifier[len] ( identifier[missing_set] ), identifier[str] ( identifier[missing_set] )))
identifier[legalSet] = identifier[legal]
keyword[if] identifier[len] ( identifier[legalSet] ):
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[legalSet] ))
keyword[if] identifier[len] ( identifier[reqSet] ):
identifier[totSet] = identifier[legalSet] | identifier[reqSet]
keyword[else] :
identifier[totSet] = identifier[legalSet]
identifier[generalSet] ={ literal[string] , literal[string] , literal[string] , literal[string] }
identifier[extraSet] = identifier[set] ( identifier[stmt] . identifier[keys] ()- identifier[generalSet] ). identifier[difference] ( identifier[totSet] )
keyword[if] identifier[extraSet] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[extraSet] ))
keyword[for] identifier[item] keyword[in] identifier[extraSet] :
identifier[stmt] . identifier[pop] ( identifier[item] , keyword[None] )
identifier[warnings] . identifier[warn] (
literal[string] . identifier[format] ( identifier[len] ( identifier[extraSet] ), identifier[extraSet] ))
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[stmt] ))
keyword[return] identifier[stmt] | def _stmt_check(self, req: set, legal: set, stmt: dict) -> dict:
"""
This method checks to make sure that the proc has all required statements and removes any statements
aren't valid. Missing required statements is an error. Extra statements are not.
:param req: set
:param legal: set
:param stmt: dict
:return: dictionary of verified statements
"""
# debug the argument list
if self.logger.level == 10:
for (k, v) in stmt.items():
print('Key: ' + k + ', Value: ' + str(v) + ', Type: ' + str(type(v))) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# required statements
reqSet = req
if len(reqSet):
self.logger.debug('reqSet: {}'.format(reqSet))
missing_set = reqSet.difference(set(stmt.keys()))
if missing_set:
if not stmt.get('score'): # till we handle either/or required. proc can be called more than one way w/ diff requirements
raise SyntaxError('You are missing %d required statements:\n%s' % (len(missing_set), str(missing_set))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# legal statements
legalSet = legal
if len(legalSet):
self.logger.debug('legalSet: {}'.format(legalSet))
if len(reqSet):
totSet = legalSet | reqSet # depends on [control=['if'], data=[]]
else:
totSet = legalSet
generalSet = {'ODSGraphics', 'stmtpassthrough', 'targOpts', 'procopts'}
extraSet = set(stmt.keys() - generalSet).difference(totSet) # find keys not in legal or required sets
if extraSet:
self.logger.debug('extraSet: {}'.format(extraSet))
for item in extraSet:
stmt.pop(item, None) # depends on [control=['for'], data=['item']]
warnings.warn('The following {} statements are invalid and will be ignored:\n{}'.format(len(extraSet), extraSet)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.logger.debug('stmt: {}'.format(stmt))
return stmt |
def currentPage(self):
    """
    Return the rows for the page the user is currently looking at.

    Each row is a mapping of attribute IDs to column values. The paging
    contract is anchored on what the user has *seen*: nextPage/prevPage
    never skip an item whose values were returned by this method, and
    refreshing the current page re-anchors on the first item the user was
    shown. Concretely, if an item on the visible page is deleted, calling
    nextPage immediately afterwards starts the next page at the first item
    the user has not yet seen; whereas calling currentPage first (showing
    the user the back-filled item) shifts what "next" means accordingly.
    """
    # Re-run the query anchored at the sort value of the first visible row,
    # including that row itself, so the refreshed page starts where the
    # user's view currently starts.
    anchor = self._sortAttributeValue(0)
    self._updateResults(anchor, equalToStart=True, refresh=True)
    return self._currentResults
constant[
Return a sequence of mappings of attribute IDs to column values, to
display to the user.
nextPage/prevPage will strive never to skip items whose column values
have not been returned by this method.
This is best explained by a demonstration. Let's say you have a table
viewing an item with attributes 'a' and 'b', like this:
oid | a | b
----+---+--
0 | 1 | 2
1 | 3 | 4
2 | 5 | 6
3 | 7 | 8
4 | 9 | 0
The table has 2 items per page. You call currentPage and receive a
page which contains items oid 0 and oid 1. item oid 1 is deleted.
If the next thing you do is to call nextPage, the result of currentPage
following that will be items beginning with item oid 2. This is
because although there are no longer enough items to populate a full
page from 0-1, the user has never seen item #2 on a page, so the 'next'
page from the user's point of view contains #2.
If instead, at that same point, the next thing you did was to call
currentPage, *then* nextPage and currentPage again, the first
currentPage results would contain items #0 and #2; the following
currentPage results would contain items #3 and #4. In this case, the
user *has* seen #2 already, so the user expects to see the following
item, not the same item again.
]
call[name[self]._updateResults, parameter[call[name[self]._sortAttributeValue, parameter[constant[0]]]]]
return[name[self]._currentResults] | keyword[def] identifier[currentPage] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_updateResults] ( identifier[self] . identifier[_sortAttributeValue] ( literal[int] ), identifier[equalToStart] = keyword[True] , identifier[refresh] = keyword[True] )
keyword[return] identifier[self] . identifier[_currentResults] | def currentPage(self):
"""
Return a sequence of mappings of attribute IDs to column values, to
display to the user.
nextPage/prevPage will strive never to skip items whose column values
have not been returned by this method.
This is best explained by a demonstration. Let's say you have a table
viewing an item with attributes 'a' and 'b', like this:
oid | a | b
----+---+--
0 | 1 | 2
1 | 3 | 4
2 | 5 | 6
3 | 7 | 8
4 | 9 | 0
The table has 2 items per page. You call currentPage and receive a
page which contains items oid 0 and oid 1. item oid 1 is deleted.
If the next thing you do is to call nextPage, the result of currentPage
following that will be items beginning with item oid 2. This is
because although there are no longer enough items to populate a full
page from 0-1, the user has never seen item #2 on a page, so the 'next'
page from the user's point of view contains #2.
If instead, at that same point, the next thing you did was to call
currentPage, *then* nextPage and currentPage again, the first
currentPage results would contain items #0 and #2; the following
currentPage results would contain items #3 and #4. In this case, the
user *has* seen #2 already, so the user expects to see the following
item, not the same item again.
"""
self._updateResults(self._sortAttributeValue(0), equalToStart=True, refresh=True)
return self._currentResults |
def _convert_types(self, filename: str, df: pd.DataFrame) -> None:
"""
Apply type conversions
"""
if df.empty:
return
converters = self._config.nodes.get(filename, {}).get("converters", {})
for col, converter in converters.items():
if col in df.columns:
df[col] = converter(df[col]) | def function[_convert_types, parameter[self, filename, df]]:
constant[
Apply type conversions
]
if name[df].empty begin[:]
return[None]
variable[converters] assign[=] call[call[name[self]._config.nodes.get, parameter[name[filename], dictionary[[], []]]].get, parameter[constant[converters], dictionary[[], []]]]
for taget[tuple[[<ast.Name object at 0x7da1b04d27d0>, <ast.Name object at 0x7da1b04d09d0>]]] in starred[call[name[converters].items, parameter[]]] begin[:]
if compare[name[col] in name[df].columns] begin[:]
call[name[df]][name[col]] assign[=] call[name[converter], parameter[call[name[df]][name[col]]]] | keyword[def] identifier[_convert_types] ( identifier[self] , identifier[filename] : identifier[str] , identifier[df] : identifier[pd] . identifier[DataFrame] )-> keyword[None] :
literal[string]
keyword[if] identifier[df] . identifier[empty] :
keyword[return]
identifier[converters] = identifier[self] . identifier[_config] . identifier[nodes] . identifier[get] ( identifier[filename] ,{}). identifier[get] ( literal[string] ,{})
keyword[for] identifier[col] , identifier[converter] keyword[in] identifier[converters] . identifier[items] ():
keyword[if] identifier[col] keyword[in] identifier[df] . identifier[columns] :
identifier[df] [ identifier[col] ]= identifier[converter] ( identifier[df] [ identifier[col] ]) | def _convert_types(self, filename: str, df: pd.DataFrame) -> None:
"""
Apply type conversions
"""
if df.empty:
return # depends on [control=['if'], data=[]]
converters = self._config.nodes.get(filename, {}).get('converters', {})
for (col, converter) in converters.items():
if col in df.columns:
df[col] = converter(df[col]) # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=[]] |
def form_b(self, n: float) -> tuple:
    """Scale a raw rate *n* to a human-readable (value, unit) pair.

    ``self.units`` of ``'bytes'`` or ``'B'`` selects byte labels
    (Bps/KBps/MBps/GBps) with thresholds multiplied by 8; anything else
    selects bit labels (bps/Kbps/Mbps/Gbps).

    NOTE(review): the branch thresholds are decimal (1000-based) while the
    divisors are binary (1024-based); reproduced as-is for compatibility —
    confirm whether this mismatch is intentional.

    :param n: input rate as a float
    :return: tuple of (scaled float value, unit label string)
    """
    as_bytes = self.units in ('bytes', 'B')
    suffix = 'Bps' if as_bytes else 'bps'
    factor = 8 if as_bytes else 1
    kilo = 1000 * factor
    mega = 1000000 * factor
    giga = 1000000000 * factor
    if n < kilo:
        return float(n), suffix
    if n < mega:
        return float(n / 1024.0), 'K' + suffix
    if n < giga:
        return float(n / (1024.0 * 1024.0)), 'M' + suffix
    return float(n / (1024.0 * 1024.0 * 1024.0)), 'G' + suffix
constant[
formats a bps as bps/kbps/mbps/gbps etc
handles whether its meant to be in bytes
:param n: input float
:rtype tuple:
:return: tuple of float-number of mbps etc, str-units
]
variable[unit] assign[=] constant[bps]
variable[kilo] assign[=] constant[1000]
variable[mega] assign[=] constant[1000000]
variable[giga] assign[=] constant[1000000000]
variable[bps] assign[=] constant[0]
if <ast.BoolOp object at 0x7da1b2347b20> begin[:]
variable[unit] assign[=] constant[Bps]
variable[kilo] assign[=] constant[8000]
variable[mega] assign[=] constant[8000000]
variable[giga] assign[=] constant[8000000000]
if compare[name[n] less[<] name[kilo]] begin[:]
variable[bps] assign[=] call[name[float], parameter[name[n]]]
if <ast.BoolOp object at 0x7da18fe91720> begin[:]
variable[unit] assign[=] binary_operation[constant[K] + name[unit]]
variable[bps] assign[=] call[name[float], parameter[binary_operation[name[n] / constant[1024.0]]]]
if <ast.BoolOp object at 0x7da1b23455a0> begin[:]
variable[unit] assign[=] binary_operation[constant[M] + name[unit]]
variable[bps] assign[=] call[name[float], parameter[binary_operation[name[n] / binary_operation[constant[1024.0] * constant[1024.0]]]]]
if compare[name[n] greater_or_equal[>=] name[giga]] begin[:]
variable[unit] assign[=] binary_operation[constant[G] + name[unit]]
variable[bps] assign[=] call[name[float], parameter[binary_operation[name[n] / binary_operation[binary_operation[constant[1024.0] * constant[1024.0]] * constant[1024.0]]]]]
return[tuple[[<ast.Name object at 0x7da1b2346ef0>, <ast.Name object at 0x7da1b2346ec0>]]] | keyword[def] identifier[form_b] ( identifier[self] , identifier[n] : identifier[float] )-> identifier[tuple] :
literal[string]
identifier[unit] = literal[string]
identifier[kilo] = literal[int]
identifier[mega] = literal[int]
identifier[giga] = literal[int]
identifier[bps] = literal[int]
keyword[if] identifier[self] . identifier[units] == literal[string] keyword[or] identifier[self] . identifier[units] == literal[string] :
identifier[unit] = literal[string]
identifier[kilo] = literal[int]
identifier[mega] = literal[int]
identifier[giga] = literal[int]
keyword[if] identifier[n] < identifier[kilo] :
identifier[bps] = identifier[float] ( identifier[n] )
keyword[if] identifier[n] >= identifier[kilo] keyword[and] identifier[n] < identifier[mega] :
identifier[unit] = literal[string] + identifier[unit]
identifier[bps] = identifier[float] ( identifier[n] / literal[int] )
keyword[if] identifier[n] >= identifier[mega] keyword[and] identifier[n] < identifier[giga] :
identifier[unit] = literal[string] + identifier[unit]
identifier[bps] = identifier[float] ( identifier[n] /( literal[int] * literal[int] ))
keyword[if] identifier[n] >= identifier[giga] :
identifier[unit] = literal[string] + identifier[unit]
identifier[bps] = identifier[float] ( identifier[n] /( literal[int] * literal[int] * literal[int] ))
keyword[return] identifier[bps] , identifier[unit] | def form_b(self, n: float) -> tuple:
"""
formats a bps as bps/kbps/mbps/gbps etc
handles whether its meant to be in bytes
:param n: input float
:rtype tuple:
:return: tuple of float-number of mbps etc, str-units
"""
unit = 'bps'
kilo = 1000
mega = 1000000
giga = 1000000000
bps = 0
if self.units == 'bytes' or self.units == 'B':
unit = 'Bps'
kilo = 8000
mega = 8000000
giga = 8000000000 # depends on [control=['if'], data=[]]
if n < kilo:
bps = float(n) # depends on [control=['if'], data=['n']]
if n >= kilo and n < mega:
unit = 'K' + unit
bps = float(n / 1024.0) # depends on [control=['if'], data=[]]
if n >= mega and n < giga:
unit = 'M' + unit
bps = float(n / (1024.0 * 1024.0)) # depends on [control=['if'], data=[]]
if n >= giga:
unit = 'G' + unit
bps = float(n / (1024.0 * 1024.0 * 1024.0)) # depends on [control=['if'], data=['n']]
return (bps, unit) |
def sys_wait_for_event(
    mask: int, k: Optional[Key], m: Optional[Mouse], flush: bool
) -> int:
    """Block until an event matching *mask* arrives, then return it.

    When *flush* is True the pending event buffer is cleared before
    waiting; otherwise buffered events are returned in arrival order.

    Args:
        mask (int): :any:`Event types` to wait for.
        k (Optional[Key]): tcod.Key instance updated with the event, or None.
        m (Optional[Mouse]): tcod.Mouse instance updated with the event,
            or None.
        flush (bool): Clear the event buffer before waiting.

    .. deprecated:: 9.3
        Use the :any:`tcod.event.wait` function to wait for events.
    """
    # Truthiness (not an explicit None check) mirrors the original contract;
    # a falsy Key/Mouse falls back to a NULL pointer on the C side.
    key_ptr = k.key_p if k else ffi.NULL
    mouse_ptr = m.mouse_p if m else ffi.NULL
    return int(lib.TCOD_sys_wait_for_event(mask, key_ptr, mouse_ptr, flush))
constant[Wait for an event then return.
If flush is True then the buffer will be cleared before waiting. Otherwise
each available event will be returned in the order they're recieved.
Args:
mask (int): :any:`Event types` to wait for.
k (Optional[Key]): A tcod.Key instance which might be updated with
an event. Can be None.
m (Optional[Mouse]): A tcod.Mouse instance which might be updated
with an event. Can be None.
flush (bool): Clear the event buffer before waiting.
.. deprecated:: 9.3
Use the :any:`tcod.event.wait` function to wait for events.
]
return[call[name[int], parameter[call[name[lib].TCOD_sys_wait_for_event, parameter[name[mask], <ast.IfExp object at 0x7da20c795b70>, <ast.IfExp object at 0x7da20c795420>, name[flush]]]]]] | keyword[def] identifier[sys_wait_for_event] (
identifier[mask] : identifier[int] , identifier[k] : identifier[Optional] [ identifier[Key] ], identifier[m] : identifier[Optional] [ identifier[Mouse] ], identifier[flush] : identifier[bool]
)-> identifier[int] :
literal[string]
keyword[return] identifier[int] (
identifier[lib] . identifier[TCOD_sys_wait_for_event] (
identifier[mask] ,
identifier[k] . identifier[key_p] keyword[if] identifier[k] keyword[else] identifier[ffi] . identifier[NULL] ,
identifier[m] . identifier[mouse_p] keyword[if] identifier[m] keyword[else] identifier[ffi] . identifier[NULL] ,
identifier[flush] ,
)
) | def sys_wait_for_event(mask: int, k: Optional[Key], m: Optional[Mouse], flush: bool) -> int:
"""Wait for an event then return.
If flush is True then the buffer will be cleared before waiting. Otherwise
each available event will be returned in the order they're recieved.
Args:
mask (int): :any:`Event types` to wait for.
k (Optional[Key]): A tcod.Key instance which might be updated with
an event. Can be None.
m (Optional[Mouse]): A tcod.Mouse instance which might be updated
with an event. Can be None.
flush (bool): Clear the event buffer before waiting.
.. deprecated:: 9.3
Use the :any:`tcod.event.wait` function to wait for events.
"""
return int(lib.TCOD_sys_wait_for_event(mask, k.key_p if k else ffi.NULL, m.mouse_p if m else ffi.NULL, flush)) |
def render_to_response(self, template_name, __data,
                       content_type="text/html"):
    """Render *template_name* with *__data* and wrap the result.

    :param template_name: name of the template to render
    :param __data: template context data
    :param content_type: MIME type for the response (default text/html)
    :return: a `webob.Response` carrying the rendered body
    """
    body = self.render(template_name, __data)
    return Response(body, content_type=content_type)
constant[Given a template name and template data.
Renders a template and returns `webob.Response` object]
variable[resp] assign[=] call[name[self].render, parameter[name[template_name], name[__data]]]
return[call[name[Response], parameter[name[resp]]]] | keyword[def] identifier[render_to_response] ( identifier[self] , identifier[template_name] , identifier[__data] ,
identifier[content_type] = literal[string] ):
literal[string]
identifier[resp] = identifier[self] . identifier[render] ( identifier[template_name] , identifier[__data] )
keyword[return] identifier[Response] ( identifier[resp] ,
identifier[content_type] = identifier[content_type] ) | def render_to_response(self, template_name, __data, content_type='text/html'):
"""Given a template name and template data.
Renders a template and returns `webob.Response` object"""
resp = self.render(template_name, __data)
return Response(resp, content_type=content_type) |
def _getSendData(self, message=None, thread_id=None, thread_type=ThreadType.USER):
    """Build the form-field payload for a send request to `SendURL`.

    :param message: `Message` to serialize; a blank one is substituted
        when None.
    :param thread_id: identifier of the recipient user/page/group thread.
    :param thread_type: `ThreadType` selecting which recipient field the
        endpoint expects.
    :return: dict of request fields.
    """
    # One offline threading id doubles as the client-side message id, so
    # the server can correlate/deduplicate this send.
    messageAndOTID = generateOfflineThreadingID()
    timestamp = now()
    data = {
        "client": "mercury",
        "author": "fbid:{}".format(self._uid),
        "timestamp": timestamp,
        "source": "source:chat:web",
        "offline_threading_id": messageAndOTID,
        "message_id": messageAndOTID,
        "threading_id": generateMessageID(self._client_id),
        # NOTE(review): key ends with a stray ':' — presumably required
        # verbatim by the endpoint; confirm before changing.
        "ephemeral_ttl_mode:": "0",
    }
    # Set recipient: 1:1 and page threads use other_user_fbid, groups use
    # thread_fbid.
    if thread_type in [ThreadType.USER, ThreadType.PAGE]:
        data["other_user_fbid"] = thread_id
    elif thread_type == ThreadType.GROUP:
        data["thread_fbid"] = thread_id
    if message is None:
        message = Message()
    if message.text or message.sticker or message.emoji_size:
        data["action_type"] = "ma-type:user-generated-message"
    if message.text:
        data["body"] = message.text
    # Mentions are flattened into indexed profile_xmd[...] form fields.
    for i, mention in enumerate(message.mentions):
        data["profile_xmd[{}][id]".format(i)] = mention.thread_id
        data["profile_xmd[{}][offset]".format(i)] = mention.offset
        data["profile_xmd[{}][length]".format(i)] = mention.length
        data["profile_xmd[{}][type]".format(i)] = "p"
    if message.emoji_size:
        # With accompanying text the emoji size rides along as a tag;
        # standalone, the size's value is sent as the sticker id.
        if message.text:
            data["tags[0]"] = "hot_emoji_size:" + message.emoji_size.name.lower()
        else:
            data["sticker_id"] = message.emoji_size.value
    if message.sticker:
        # An explicit sticker overrides any emoji-size sticker id set above.
        data["sticker_id"] = message.sticker.uid
    if message.quick_replies:
        xmd = {"quick_replies": []}
        for quick_reply in message.quick_replies:
            q = dict()
            q["content_type"] = quick_reply._type
            q["payload"] = quick_reply.payload
            q["external_payload"] = quick_reply.external_payload
            q["data"] = quick_reply.data
            if quick_reply.is_response:
                q["ignore_for_webhook"] = False
            if isinstance(quick_reply, QuickReplyText):
                q["title"] = quick_reply.title
            if not isinstance(quick_reply, QuickReplyLocation):
                # Location quick replies have no image; all others carry one.
                q["image_url"] = quick_reply.image_url
            xmd["quick_replies"].append(q)
        # A single response-type quick reply is sent unwrapped rather than
        # as a one-element list.
        if len(message.quick_replies) == 1 and message.quick_replies[0].is_response:
            xmd["quick_replies"] = xmd["quick_replies"][0]
        data["platform_xmd"] = json.dumps(xmd)
    if message.reply_to_id:
        data["replied_to_message_id"] = message.reply_to_id
    return data
constant[Returns the data needed to send a request to `SendURL`]
variable[messageAndOTID] assign[=] call[name[generateOfflineThreadingID], parameter[]]
variable[timestamp] assign[=] call[name[now], parameter[]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b18afee0>, <ast.Constant object at 0x7da1b18af9d0>, <ast.Constant object at 0x7da1b18ae1d0>, <ast.Constant object at 0x7da1b18ae0e0>, <ast.Constant object at 0x7da1b18afb20>, <ast.Constant object at 0x7da1b18af3a0>, <ast.Constant object at 0x7da1b18ae740>, <ast.Constant object at 0x7da1b18aded0>], [<ast.Constant object at 0x7da1b18af460>, <ast.Call object at 0x7da1b18afd30>, <ast.Name object at 0x7da1b19f1930>, <ast.Constant object at 0x7da1b19f28c0>, <ast.Name object at 0x7da1b19f1720>, <ast.Name object at 0x7da1b19f31f0>, <ast.Call object at 0x7da1b184f340>, <ast.Constant object at 0x7da1b184cbe0>]]
if compare[name[thread_type] in list[[<ast.Attribute object at 0x7da1b184e980>, <ast.Attribute object at 0x7da1b18a1e70>]]] begin[:]
call[name[data]][constant[other_user_fbid]] assign[=] name[thread_id]
if compare[name[message] is constant[None]] begin[:]
variable[message] assign[=] call[name[Message], parameter[]]
if <ast.BoolOp object at 0x7da1b18a1a20> begin[:]
call[name[data]][constant[action_type]] assign[=] constant[ma-type:user-generated-message]
if name[message].text begin[:]
call[name[data]][constant[body]] assign[=] name[message].text
for taget[tuple[[<ast.Name object at 0x7da1b19db4f0>, <ast.Name object at 0x7da1b19db490>]]] in starred[call[name[enumerate], parameter[name[message].mentions]]] begin[:]
call[name[data]][call[constant[profile_xmd[{}][id]].format, parameter[name[i]]]] assign[=] name[mention].thread_id
call[name[data]][call[constant[profile_xmd[{}][offset]].format, parameter[name[i]]]] assign[=] name[mention].offset
call[name[data]][call[constant[profile_xmd[{}][length]].format, parameter[name[i]]]] assign[=] name[mention].length
call[name[data]][call[constant[profile_xmd[{}][type]].format, parameter[name[i]]]] assign[=] constant[p]
if name[message].emoji_size begin[:]
if name[message].text begin[:]
call[name[data]][constant[tags[0]]] assign[=] binary_operation[constant[hot_emoji_size:] + call[name[message].emoji_size.name.lower, parameter[]]]
if name[message].sticker begin[:]
call[name[data]][constant[sticker_id]] assign[=] name[message].sticker.uid
if name[message].quick_replies begin[:]
variable[xmd] assign[=] dictionary[[<ast.Constant object at 0x7da1b19dace0>], [<ast.List object at 0x7da1b19dac80>]]
for taget[name[quick_reply]] in starred[name[message].quick_replies] begin[:]
variable[q] assign[=] call[name[dict], parameter[]]
call[name[q]][constant[content_type]] assign[=] name[quick_reply]._type
call[name[q]][constant[payload]] assign[=] name[quick_reply].payload
call[name[q]][constant[external_payload]] assign[=] name[quick_reply].external_payload
call[name[q]][constant[data]] assign[=] name[quick_reply].data
if name[quick_reply].is_response begin[:]
call[name[q]][constant[ignore_for_webhook]] assign[=] constant[False]
if call[name[isinstance], parameter[name[quick_reply], name[QuickReplyText]]] begin[:]
call[name[q]][constant[title]] assign[=] name[quick_reply].title
if <ast.UnaryOp object at 0x7da1b19d98a0> begin[:]
call[name[q]][constant[image_url]] assign[=] name[quick_reply].image_url
call[call[name[xmd]][constant[quick_replies]].append, parameter[name[q]]]
if <ast.BoolOp object at 0x7da1b19d8dc0> begin[:]
call[name[xmd]][constant[quick_replies]] assign[=] call[call[name[xmd]][constant[quick_replies]]][constant[0]]
call[name[data]][constant[platform_xmd]] assign[=] call[name[json].dumps, parameter[name[xmd]]]
if name[message].reply_to_id begin[:]
call[name[data]][constant[replied_to_message_id]] assign[=] name[message].reply_to_id
return[name[data]] | keyword[def] identifier[_getSendData] ( identifier[self] , identifier[message] = keyword[None] , identifier[thread_id] = keyword[None] , identifier[thread_type] = identifier[ThreadType] . identifier[USER] ):
literal[string]
identifier[messageAndOTID] = identifier[generateOfflineThreadingID] ()
identifier[timestamp] = identifier[now] ()
identifier[data] ={
literal[string] : literal[string] ,
literal[string] : literal[string] . identifier[format] ( identifier[self] . identifier[_uid] ),
literal[string] : identifier[timestamp] ,
literal[string] : literal[string] ,
literal[string] : identifier[messageAndOTID] ,
literal[string] : identifier[messageAndOTID] ,
literal[string] : identifier[generateMessageID] ( identifier[self] . identifier[_client_id] ),
literal[string] : literal[string] ,
}
keyword[if] identifier[thread_type] keyword[in] [ identifier[ThreadType] . identifier[USER] , identifier[ThreadType] . identifier[PAGE] ]:
identifier[data] [ literal[string] ]= identifier[thread_id]
keyword[elif] identifier[thread_type] == identifier[ThreadType] . identifier[GROUP] :
identifier[data] [ literal[string] ]= identifier[thread_id]
keyword[if] identifier[message] keyword[is] keyword[None] :
identifier[message] = identifier[Message] ()
keyword[if] identifier[message] . identifier[text] keyword[or] identifier[message] . identifier[sticker] keyword[or] identifier[message] . identifier[emoji_size] :
identifier[data] [ literal[string] ]= literal[string]
keyword[if] identifier[message] . identifier[text] :
identifier[data] [ literal[string] ]= identifier[message] . identifier[text]
keyword[for] identifier[i] , identifier[mention] keyword[in] identifier[enumerate] ( identifier[message] . identifier[mentions] ):
identifier[data] [ literal[string] . identifier[format] ( identifier[i] )]= identifier[mention] . identifier[thread_id]
identifier[data] [ literal[string] . identifier[format] ( identifier[i] )]= identifier[mention] . identifier[offset]
identifier[data] [ literal[string] . identifier[format] ( identifier[i] )]= identifier[mention] . identifier[length]
identifier[data] [ literal[string] . identifier[format] ( identifier[i] )]= literal[string]
keyword[if] identifier[message] . identifier[emoji_size] :
keyword[if] identifier[message] . identifier[text] :
identifier[data] [ literal[string] ]= literal[string] + identifier[message] . identifier[emoji_size] . identifier[name] . identifier[lower] ()
keyword[else] :
identifier[data] [ literal[string] ]= identifier[message] . identifier[emoji_size] . identifier[value]
keyword[if] identifier[message] . identifier[sticker] :
identifier[data] [ literal[string] ]= identifier[message] . identifier[sticker] . identifier[uid]
keyword[if] identifier[message] . identifier[quick_replies] :
identifier[xmd] ={ literal[string] :[]}
keyword[for] identifier[quick_reply] keyword[in] identifier[message] . identifier[quick_replies] :
identifier[q] = identifier[dict] ()
identifier[q] [ literal[string] ]= identifier[quick_reply] . identifier[_type]
identifier[q] [ literal[string] ]= identifier[quick_reply] . identifier[payload]
identifier[q] [ literal[string] ]= identifier[quick_reply] . identifier[external_payload]
identifier[q] [ literal[string] ]= identifier[quick_reply] . identifier[data]
keyword[if] identifier[quick_reply] . identifier[is_response] :
identifier[q] [ literal[string] ]= keyword[False]
keyword[if] identifier[isinstance] ( identifier[quick_reply] , identifier[QuickReplyText] ):
identifier[q] [ literal[string] ]= identifier[quick_reply] . identifier[title]
keyword[if] keyword[not] identifier[isinstance] ( identifier[quick_reply] , identifier[QuickReplyLocation] ):
identifier[q] [ literal[string] ]= identifier[quick_reply] . identifier[image_url]
identifier[xmd] [ literal[string] ]. identifier[append] ( identifier[q] )
keyword[if] identifier[len] ( identifier[message] . identifier[quick_replies] )== literal[int] keyword[and] identifier[message] . identifier[quick_replies] [ literal[int] ]. identifier[is_response] :
identifier[xmd] [ literal[string] ]= identifier[xmd] [ literal[string] ][ literal[int] ]
identifier[data] [ literal[string] ]= identifier[json] . identifier[dumps] ( identifier[xmd] )
keyword[if] identifier[message] . identifier[reply_to_id] :
identifier[data] [ literal[string] ]= identifier[message] . identifier[reply_to_id]
keyword[return] identifier[data] | def _getSendData(self, message=None, thread_id=None, thread_type=ThreadType.USER):
"""Returns the data needed to send a request to `SendURL`"""
messageAndOTID = generateOfflineThreadingID()
timestamp = now()
data = {'client': 'mercury', 'author': 'fbid:{}'.format(self._uid), 'timestamp': timestamp, 'source': 'source:chat:web', 'offline_threading_id': messageAndOTID, 'message_id': messageAndOTID, 'threading_id': generateMessageID(self._client_id), 'ephemeral_ttl_mode:': '0'}
# Set recipient
if thread_type in [ThreadType.USER, ThreadType.PAGE]:
data['other_user_fbid'] = thread_id # depends on [control=['if'], data=[]]
elif thread_type == ThreadType.GROUP:
data['thread_fbid'] = thread_id # depends on [control=['if'], data=[]]
if message is None:
message = Message() # depends on [control=['if'], data=['message']]
if message.text or message.sticker or message.emoji_size:
data['action_type'] = 'ma-type:user-generated-message' # depends on [control=['if'], data=[]]
if message.text:
data['body'] = message.text # depends on [control=['if'], data=[]]
for (i, mention) in enumerate(message.mentions):
data['profile_xmd[{}][id]'.format(i)] = mention.thread_id
data['profile_xmd[{}][offset]'.format(i)] = mention.offset
data['profile_xmd[{}][length]'.format(i)] = mention.length
data['profile_xmd[{}][type]'.format(i)] = 'p' # depends on [control=['for'], data=[]]
if message.emoji_size:
if message.text:
data['tags[0]'] = 'hot_emoji_size:' + message.emoji_size.name.lower() # depends on [control=['if'], data=[]]
else:
data['sticker_id'] = message.emoji_size.value # depends on [control=['if'], data=[]]
if message.sticker:
data['sticker_id'] = message.sticker.uid # depends on [control=['if'], data=[]]
if message.quick_replies:
xmd = {'quick_replies': []}
for quick_reply in message.quick_replies:
q = dict()
q['content_type'] = quick_reply._type
q['payload'] = quick_reply.payload
q['external_payload'] = quick_reply.external_payload
q['data'] = quick_reply.data
if quick_reply.is_response:
q['ignore_for_webhook'] = False # depends on [control=['if'], data=[]]
if isinstance(quick_reply, QuickReplyText):
q['title'] = quick_reply.title # depends on [control=['if'], data=[]]
if not isinstance(quick_reply, QuickReplyLocation):
q['image_url'] = quick_reply.image_url # depends on [control=['if'], data=[]]
xmd['quick_replies'].append(q) # depends on [control=['for'], data=['quick_reply']]
if len(message.quick_replies) == 1 and message.quick_replies[0].is_response:
xmd['quick_replies'] = xmd['quick_replies'][0] # depends on [control=['if'], data=[]]
data['platform_xmd'] = json.dumps(xmd) # depends on [control=['if'], data=[]]
if message.reply_to_id:
data['replied_to_message_id'] = message.reply_to_id # depends on [control=['if'], data=[]]
return data |
def p_Revisions(self, p):
    """Revisions : Revisions Revision
                 | Revision"""
    # PLY rule: the docstring above is the grammar and must stay intact.
    symbol_count = len(p)
    if symbol_count == 2:
        # Single Revision: start a fresh list.
        p[0] = ('Revisions', [p[1]])
    elif symbol_count == 3:
        # Recursive case: append the new Revision to the accumulated list.
        p[0] = ('Revisions', p[1][1] + [p[2]])
constant[Revisions : Revisions Revision
| Revision]
variable[n] assign[=] call[name[len], parameter[name[p]]]
if compare[name[n] equal[==] constant[3]] begin[:]
call[name[p]][constant[0]] assign[=] tuple[[<ast.Constant object at 0x7da1b016f400>, <ast.BinOp object at 0x7da1b016e980>]] | keyword[def] identifier[p_Revisions] ( identifier[self] , identifier[p] ):
literal[string]
identifier[n] = identifier[len] ( identifier[p] )
keyword[if] identifier[n] == literal[int] :
identifier[p] [ literal[int] ]=( literal[string] , identifier[p] [ literal[int] ][ literal[int] ]+[ identifier[p] [ literal[int] ]])
keyword[elif] identifier[n] == literal[int] :
identifier[p] [ literal[int] ]=( literal[string] ,[ identifier[p] [ literal[int] ]]) | def p_Revisions(self, p):
"""Revisions : Revisions Revision
| Revision"""
n = len(p)
if n == 3:
p[0] = ('Revisions', p[1][1] + [p[2]]) # depends on [control=['if'], data=[]]
elif n == 2:
p[0] = ('Revisions', [p[1]]) # depends on [control=['if'], data=[]] |
def NewFromHtml(html, alpha=1.0, wref=_DEFAULT_WREF):
    '''Build a Color from an HTML color definition.
    Parameters:
      :html:
        HTML color definition: '#RRGGBB', '#RGB' (with or without the
        leading '#'), or a named color.
      :alpha:
        Transparency in [0...1]; defaults to fully opaque.
      :wref:
        Whitepoint reference; defaults to 2° D65.
    Returns:
      A grapefruit.Color instance.
    >>> str(Color.NewFromHtml('#ff8000'))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.NewFromHtml('ff8000'))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.NewFromHtml('#f60'))
    '(1, 0.4, 0, 1)'
    >>> str(Color.NewFromHtml('f60'))
    '(1, 0.4, 0, 1)'
    >>> str(Color.NewFromHtml('lemonchiffon'))
    '(1, 0.980392, 0.803922, 1)'
    >>> str(Color.NewFromHtml('#ff8000', 0.5))
    '(1, 0.501961, 0, 0.5)'
    '''
    rgb = Color.HtmlToRgb(html)
    return Color(rgb, 'rgb', alpha, wref)
constant[Create a new instance based on the specifed HTML color definition.
Parameters:
:html:
The HTML definition of the color (#RRGGBB or #RGB or a color name).
:alpha:
The color transparency [0...1], default is opaque.
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromHtml('#ff8000'))
'(1, 0.501961, 0, 1)'
>>> str(Color.NewFromHtml('ff8000'))
'(1, 0.501961, 0, 1)'
>>> str(Color.NewFromHtml('#f60'))
'(1, 0.4, 0, 1)'
>>> str(Color.NewFromHtml('f60'))
'(1, 0.4, 0, 1)'
>>> str(Color.NewFromHtml('lemonchiffon'))
'(1, 0.980392, 0.803922, 1)'
>>> str(Color.NewFromHtml('#ff8000', 0.5))
'(1, 0.501961, 0, 0.5)'
]
return[call[name[Color], parameter[call[name[Color].HtmlToRgb, parameter[name[html]]], constant[rgb], name[alpha], name[wref]]]] | keyword[def] identifier[NewFromHtml] ( identifier[html] , identifier[alpha] = literal[int] , identifier[wref] = identifier[_DEFAULT_WREF] ):
literal[string]
keyword[return] identifier[Color] ( identifier[Color] . identifier[HtmlToRgb] ( identifier[html] ), literal[string] , identifier[alpha] , identifier[wref] ) | def NewFromHtml(html, alpha=1.0, wref=_DEFAULT_WREF):
"""Create a new instance based on the specifed HTML color definition.
Parameters:
:html:
The HTML definition of the color (#RRGGBB or #RGB or a color name).
:alpha:
The color transparency [0...1], default is opaque.
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromHtml('#ff8000'))
'(1, 0.501961, 0, 1)'
>>> str(Color.NewFromHtml('ff8000'))
'(1, 0.501961, 0, 1)'
>>> str(Color.NewFromHtml('#f60'))
'(1, 0.4, 0, 1)'
>>> str(Color.NewFromHtml('f60'))
'(1, 0.4, 0, 1)'
>>> str(Color.NewFromHtml('lemonchiffon'))
'(1, 0.980392, 0.803922, 1)'
>>> str(Color.NewFromHtml('#ff8000', 0.5))
'(1, 0.501961, 0, 0.5)'
"""
return Color(Color.HtmlToRgb(html), 'rgb', alpha, wref) |
def valid_value(self, value):
"""
Check if the provided value is a valid choice.
"""
if isinstance(value, Constant):
value = value.name
text_value = force_text(value)
for option_value, option_label, option_title in self.choices:
if value == option_value or text_value == force_text(option_value):
return True
return False | def function[valid_value, parameter[self, value]]:
constant[
Check if the provided value is a valid choice.
]
if call[name[isinstance], parameter[name[value], name[Constant]]] begin[:]
variable[value] assign[=] name[value].name
variable[text_value] assign[=] call[name[force_text], parameter[name[value]]]
for taget[tuple[[<ast.Name object at 0x7da1b2347ee0>, <ast.Name object at 0x7da1b2347010>, <ast.Name object at 0x7da1b2346e30>]]] in starred[name[self].choices] begin[:]
if <ast.BoolOp object at 0x7da1b2344e50> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[valid_value] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Constant] ):
identifier[value] = identifier[value] . identifier[name]
identifier[text_value] = identifier[force_text] ( identifier[value] )
keyword[for] identifier[option_value] , identifier[option_label] , identifier[option_title] keyword[in] identifier[self] . identifier[choices] :
keyword[if] identifier[value] == identifier[option_value] keyword[or] identifier[text_value] == identifier[force_text] ( identifier[option_value] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def valid_value(self, value):
"""
Check if the provided value is a valid choice.
"""
if isinstance(value, Constant):
value = value.name # depends on [control=['if'], data=[]]
text_value = force_text(value)
for (option_value, option_label, option_title) in self.choices:
if value == option_value or text_value == force_text(option_value):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return False |
def update(self, update_finished_cb):
"""Request an update of the memory content"""
if not self._update_finished_cb:
self._update_finished_cb = update_finished_cb
self.anchor_data = []
self.nr_of_anchors = 0
self.valid = False
logger.debug('Updating content of memory {}'.format(self.id))
# Start reading the header
self.mem_handler.read(self, LocoMemory.MEM_LOCO_INFO,
LocoMemory.MEM_LOCO_INFO_LEN) | def function[update, parameter[self, update_finished_cb]]:
constant[Request an update of the memory content]
if <ast.UnaryOp object at 0x7da1b16b1c00> begin[:]
name[self]._update_finished_cb assign[=] name[update_finished_cb]
name[self].anchor_data assign[=] list[[]]
name[self].nr_of_anchors assign[=] constant[0]
name[self].valid assign[=] constant[False]
call[name[logger].debug, parameter[call[constant[Updating content of memory {}].format, parameter[name[self].id]]]]
call[name[self].mem_handler.read, parameter[name[self], name[LocoMemory].MEM_LOCO_INFO, name[LocoMemory].MEM_LOCO_INFO_LEN]] | keyword[def] identifier[update] ( identifier[self] , identifier[update_finished_cb] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_update_finished_cb] :
identifier[self] . identifier[_update_finished_cb] = identifier[update_finished_cb]
identifier[self] . identifier[anchor_data] =[]
identifier[self] . identifier[nr_of_anchors] = literal[int]
identifier[self] . identifier[valid] = keyword[False]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[id] ))
identifier[self] . identifier[mem_handler] . identifier[read] ( identifier[self] , identifier[LocoMemory] . identifier[MEM_LOCO_INFO] ,
identifier[LocoMemory] . identifier[MEM_LOCO_INFO_LEN] ) | def update(self, update_finished_cb):
"""Request an update of the memory content"""
if not self._update_finished_cb:
self._update_finished_cb = update_finished_cb
self.anchor_data = []
self.nr_of_anchors = 0
self.valid = False
logger.debug('Updating content of memory {}'.format(self.id))
# Start reading the header
self.mem_handler.read(self, LocoMemory.MEM_LOCO_INFO, LocoMemory.MEM_LOCO_INFO_LEN) # depends on [control=['if'], data=[]] |
def new_wins(self, orig_criteria, orig_idx, new_criteria, new_idx):
"""
Returns a bool indicating whether a new adversarial example is better
than the pre-existing one for the same clean example.
:param orig_criteria: dict mapping names of criteria to their value
for each example in the whole dataset
:param orig_idx: The position of the pre-existing example within the
whole dataset.
:param new_criteria: dict, like orig_criteria, but with values only
on the latest batch of adversarial examples
:param new_idx: The position of the new adversarial example within
the batch
"""
raise NotImplementedError(str(type(self))
+ " needs to implement new_wins.") | def function[new_wins, parameter[self, orig_criteria, orig_idx, new_criteria, new_idx]]:
constant[
Returns a bool indicating whether a new adversarial example is better
than the pre-existing one for the same clean example.
:param orig_criteria: dict mapping names of criteria to their value
for each example in the whole dataset
:param orig_idx: The position of the pre-existing example within the
whole dataset.
:param new_criteria: dict, like orig_criteria, but with values only
on the latest batch of adversarial examples
:param new_idx: The position of the new adversarial example within
the batch
]
<ast.Raise object at 0x7da1b1e338b0> | keyword[def] identifier[new_wins] ( identifier[self] , identifier[orig_criteria] , identifier[orig_idx] , identifier[new_criteria] , identifier[new_idx] ):
literal[string]
keyword[raise] identifier[NotImplementedError] ( identifier[str] ( identifier[type] ( identifier[self] ))
+ literal[string] ) | def new_wins(self, orig_criteria, orig_idx, new_criteria, new_idx):
"""
Returns a bool indicating whether a new adversarial example is better
than the pre-existing one for the same clean example.
:param orig_criteria: dict mapping names of criteria to their value
for each example in the whole dataset
:param orig_idx: The position of the pre-existing example within the
whole dataset.
:param new_criteria: dict, like orig_criteria, but with values only
on the latest batch of adversarial examples
:param new_idx: The position of the new adversarial example within
the batch
"""
raise NotImplementedError(str(type(self)) + ' needs to implement new_wins.') |
def from_gamertag(cls, gamertag):
'''
Instantiates an instance of ``GamerProfile`` from
a gamertag
:param gamertag: Gamertag to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
'''
url = 'https://profile.xboxlive.com/users/gt(%s)/profile/settings' % gamertag
try:
return cls._fetch(url)
except GamertagNotFound:
raise GamertagNotFound('No such user: %s' % gamertag) | def function[from_gamertag, parameter[cls, gamertag]]:
constant[
Instantiates an instance of ``GamerProfile`` from
a gamertag
:param gamertag: Gamertag to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
]
variable[url] assign[=] binary_operation[constant[https://profile.xboxlive.com/users/gt(%s)/profile/settings] <ast.Mod object at 0x7da2590d6920> name[gamertag]]
<ast.Try object at 0x7da18bcc84c0> | keyword[def] identifier[from_gamertag] ( identifier[cls] , identifier[gamertag] ):
literal[string]
identifier[url] = literal[string] % identifier[gamertag]
keyword[try] :
keyword[return] identifier[cls] . identifier[_fetch] ( identifier[url] )
keyword[except] identifier[GamertagNotFound] :
keyword[raise] identifier[GamertagNotFound] ( literal[string] % identifier[gamertag] ) | def from_gamertag(cls, gamertag):
"""
Instantiates an instance of ``GamerProfile`` from
a gamertag
:param gamertag: Gamertag to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
"""
url = 'https://profile.xboxlive.com/users/gt(%s)/profile/settings' % gamertag
try:
return cls._fetch(url) # depends on [control=['try'], data=[]]
except GamertagNotFound:
raise GamertagNotFound('No such user: %s' % gamertag) # depends on [control=['except'], data=[]] |
def _create_regexp_filter(regex):
"""Returns a boolean function that filters strings based on a regular exp.
Args:
regex: A string describing the regexp to use.
Returns:
A function taking a string and returns True if any of its substrings
matches regex.
"""
# Warning: Note that python's regex library allows inputs that take
# exponential time. Time-limiting it is difficult. When we move to
# a true multi-tenant tensorboard server, the regexp implementation here
# would need to be replaced by something more secure.
compiled_regex = re.compile(regex)
def filter_fn(value):
if not isinstance(value, six.string_types):
raise error.HParamsError(
'Cannot use a regexp filter for a value of type %s. Value: %s' %
(type(value), value))
return re.search(compiled_regex, value) is not None
return filter_fn | def function[_create_regexp_filter, parameter[regex]]:
constant[Returns a boolean function that filters strings based on a regular exp.
Args:
regex: A string describing the regexp to use.
Returns:
A function taking a string and returns True if any of its substrings
matches regex.
]
variable[compiled_regex] assign[=] call[name[re].compile, parameter[name[regex]]]
def function[filter_fn, parameter[value]]:
if <ast.UnaryOp object at 0x7da1b2184880> begin[:]
<ast.Raise object at 0x7da1b2186560>
return[compare[call[name[re].search, parameter[name[compiled_regex], name[value]]] is_not constant[None]]]
return[name[filter_fn]] | keyword[def] identifier[_create_regexp_filter] ( identifier[regex] ):
literal[string]
identifier[compiled_regex] = identifier[re] . identifier[compile] ( identifier[regex] )
keyword[def] identifier[filter_fn] ( identifier[value] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
keyword[raise] identifier[error] . identifier[HParamsError] (
literal[string] %
( identifier[type] ( identifier[value] ), identifier[value] ))
keyword[return] identifier[re] . identifier[search] ( identifier[compiled_regex] , identifier[value] ) keyword[is] keyword[not] keyword[None]
keyword[return] identifier[filter_fn] | def _create_regexp_filter(regex):
"""Returns a boolean function that filters strings based on a regular exp.
Args:
regex: A string describing the regexp to use.
Returns:
A function taking a string and returns True if any of its substrings
matches regex.
"""
# Warning: Note that python's regex library allows inputs that take
# exponential time. Time-limiting it is difficult. When we move to
# a true multi-tenant tensorboard server, the regexp implementation here
# would need to be replaced by something more secure.
compiled_regex = re.compile(regex)
def filter_fn(value):
if not isinstance(value, six.string_types):
raise error.HParamsError('Cannot use a regexp filter for a value of type %s. Value: %s' % (type(value), value)) # depends on [control=['if'], data=[]]
return re.search(compiled_regex, value) is not None
return filter_fn |
def get_tag(self, key, *, case_sensitive=True):
"""Return a tag by key, if found
Args:
key (str): Name/key of the tag to locate
case_sensitive (bool): Should tag keys be treated case-sensitive (default: true)
Returns:
`Tag`,`None`
"""
key = key if case_sensitive else key.lower()
for tag in self.resource.tags:
if not case_sensitive:
if tag.key.lower() == key:
return tag
elif key == tag.key:
return tag
return None | def function[get_tag, parameter[self, key]]:
constant[Return a tag by key, if found
Args:
key (str): Name/key of the tag to locate
case_sensitive (bool): Should tag keys be treated case-sensitive (default: true)
Returns:
`Tag`,`None`
]
variable[key] assign[=] <ast.IfExp object at 0x7da1b204b160>
for taget[name[tag]] in starred[name[self].resource.tags] begin[:]
if <ast.UnaryOp object at 0x7da1b1e97550> begin[:]
if compare[call[name[tag].key.lower, parameter[]] equal[==] name[key]] begin[:]
return[name[tag]]
return[constant[None]] | keyword[def] identifier[get_tag] ( identifier[self] , identifier[key] ,*, identifier[case_sensitive] = keyword[True] ):
literal[string]
identifier[key] = identifier[key] keyword[if] identifier[case_sensitive] keyword[else] identifier[key] . identifier[lower] ()
keyword[for] identifier[tag] keyword[in] identifier[self] . identifier[resource] . identifier[tags] :
keyword[if] keyword[not] identifier[case_sensitive] :
keyword[if] identifier[tag] . identifier[key] . identifier[lower] ()== identifier[key] :
keyword[return] identifier[tag]
keyword[elif] identifier[key] == identifier[tag] . identifier[key] :
keyword[return] identifier[tag]
keyword[return] keyword[None] | def get_tag(self, key, *, case_sensitive=True):
"""Return a tag by key, if found
Args:
key (str): Name/key of the tag to locate
case_sensitive (bool): Should tag keys be treated case-sensitive (default: true)
Returns:
`Tag`,`None`
"""
key = key if case_sensitive else key.lower()
for tag in self.resource.tags:
if not case_sensitive:
if tag.key.lower() == key:
return tag # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif key == tag.key:
return tag # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag']]
return None |
def is_symbol_hooked(self, symbol_name):
"""
Check if a symbol is already hooked.
:param str symbol_name: Name of the symbol.
:return: True if the symbol can be resolved and is hooked, False otherwise.
:rtype: bool
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning("Could not find symbol %s", symbol_name)
return False
hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
return self.is_hooked(hook_addr) | def function[is_symbol_hooked, parameter[self, symbol_name]]:
constant[
Check if a symbol is already hooked.
:param str symbol_name: Name of the symbol.
:return: True if the symbol can be resolved and is hooked, False otherwise.
:rtype: bool
]
variable[sym] assign[=] call[name[self].loader.find_symbol, parameter[name[symbol_name]]]
if compare[name[sym] is constant[None]] begin[:]
call[name[l].warning, parameter[constant[Could not find symbol %s], name[symbol_name]]]
return[constant[False]]
<ast.Tuple object at 0x7da18ede6440> assign[=] call[name[self].simos.prepare_function_symbol, parameter[name[symbol_name]]]
return[call[name[self].is_hooked, parameter[name[hook_addr]]]] | keyword[def] identifier[is_symbol_hooked] ( identifier[self] , identifier[symbol_name] ):
literal[string]
identifier[sym] = identifier[self] . identifier[loader] . identifier[find_symbol] ( identifier[symbol_name] )
keyword[if] identifier[sym] keyword[is] keyword[None] :
identifier[l] . identifier[warning] ( literal[string] , identifier[symbol_name] )
keyword[return] keyword[False]
identifier[hook_addr] , identifier[_] = identifier[self] . identifier[simos] . identifier[prepare_function_symbol] ( identifier[symbol_name] , identifier[basic_addr] = identifier[sym] . identifier[rebased_addr] )
keyword[return] identifier[self] . identifier[is_hooked] ( identifier[hook_addr] ) | def is_symbol_hooked(self, symbol_name):
"""
Check if a symbol is already hooked.
:param str symbol_name: Name of the symbol.
:return: True if the symbol can be resolved and is hooked, False otherwise.
:rtype: bool
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning('Could not find symbol %s', symbol_name)
return False # depends on [control=['if'], data=[]]
(hook_addr, _) = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
return self.is_hooked(hook_addr) |
def num_feats(self):
""" The number of features per time step in the corpus. """
if not self._num_feats:
filename = self.get_train_fns()[0][0]
feats = np.load(filename)
# pylint: disable=maybe-no-member
if len(feats.shape) == 3:
# Then there are multiple channels of multiple feats
self._num_feats = feats.shape[1] * feats.shape[2]
elif len(feats.shape) == 2:
# Otherwise it is just of shape time x feats
self._num_feats = feats.shape[1]
else:
raise ValueError(
"Feature matrix of shape %s unexpected" % str(feats.shape))
return self._num_feats | def function[num_feats, parameter[self]]:
constant[ The number of features per time step in the corpus. ]
if <ast.UnaryOp object at 0x7da1b11fb070> begin[:]
variable[filename] assign[=] call[call[call[name[self].get_train_fns, parameter[]]][constant[0]]][constant[0]]
variable[feats] assign[=] call[name[np].load, parameter[name[filename]]]
if compare[call[name[len], parameter[name[feats].shape]] equal[==] constant[3]] begin[:]
name[self]._num_feats assign[=] binary_operation[call[name[feats].shape][constant[1]] * call[name[feats].shape][constant[2]]]
return[name[self]._num_feats] | keyword[def] identifier[num_feats] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_num_feats] :
identifier[filename] = identifier[self] . identifier[get_train_fns] ()[ literal[int] ][ literal[int] ]
identifier[feats] = identifier[np] . identifier[load] ( identifier[filename] )
keyword[if] identifier[len] ( identifier[feats] . identifier[shape] )== literal[int] :
identifier[self] . identifier[_num_feats] = identifier[feats] . identifier[shape] [ literal[int] ]* identifier[feats] . identifier[shape] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[feats] . identifier[shape] )== literal[int] :
identifier[self] . identifier[_num_feats] = identifier[feats] . identifier[shape] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] % identifier[str] ( identifier[feats] . identifier[shape] ))
keyword[return] identifier[self] . identifier[_num_feats] | def num_feats(self):
""" The number of features per time step in the corpus. """
if not self._num_feats:
filename = self.get_train_fns()[0][0]
feats = np.load(filename)
# pylint: disable=maybe-no-member
if len(feats.shape) == 3:
# Then there are multiple channels of multiple feats
self._num_feats = feats.shape[1] * feats.shape[2] # depends on [control=['if'], data=[]]
elif len(feats.shape) == 2:
# Otherwise it is just of shape time x feats
self._num_feats = feats.shape[1] # depends on [control=['if'], data=[]]
else:
raise ValueError('Feature matrix of shape %s unexpected' % str(feats.shape)) # depends on [control=['if'], data=[]]
return self._num_feats |
def _get_health_status(self, url, ssl_params, timeout):
"""
Don't send the "can connect" service check if we have troubles getting
the health status
"""
try:
r = self._perform_request(url, "/health", ssl_params, timeout)
# we don't use get() here so we can report a KeyError
return r.json()[self.HEALTH_KEY]
except Exception as e:
self.log.debug("Can't determine health status: {}".format(e)) | def function[_get_health_status, parameter[self, url, ssl_params, timeout]]:
constant[
Don't send the "can connect" service check if we have troubles getting
the health status
]
<ast.Try object at 0x7da20c6c6260> | keyword[def] identifier[_get_health_status] ( identifier[self] , identifier[url] , identifier[ssl_params] , identifier[timeout] ):
literal[string]
keyword[try] :
identifier[r] = identifier[self] . identifier[_perform_request] ( identifier[url] , literal[string] , identifier[ssl_params] , identifier[timeout] )
keyword[return] identifier[r] . identifier[json] ()[ identifier[self] . identifier[HEALTH_KEY] ]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[e] )) | def _get_health_status(self, url, ssl_params, timeout):
"""
Don't send the "can connect" service check if we have troubles getting
the health status
"""
try:
r = self._perform_request(url, '/health', ssl_params, timeout)
# we don't use get() here so we can report a KeyError
return r.json()[self.HEALTH_KEY] # depends on [control=['try'], data=[]]
except Exception as e:
self.log.debug("Can't determine health status: {}".format(e)) # depends on [control=['except'], data=['e']] |
def save_url_as(url, save_as):
"""
Download the file `url` and save it to the local disk as
`save_as`.
"""
remote = requests.get(url, verify=False)
if not remote.status_code == Constants.PULP_GET_OK:
raise JuicerPulpError("A %s error occurred trying to get %s" %
(remote.status_code, url))
with open(save_as, 'wb') as data:
data.write(remote.content) | def function[save_url_as, parameter[url, save_as]]:
constant[
Download the file `url` and save it to the local disk as
`save_as`.
]
variable[remote] assign[=] call[name[requests].get, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da20e9b2c20> begin[:]
<ast.Raise object at 0x7da20e9b2320>
with call[name[open], parameter[name[save_as], constant[wb]]] begin[:]
call[name[data].write, parameter[name[remote].content]] | keyword[def] identifier[save_url_as] ( identifier[url] , identifier[save_as] ):
literal[string]
identifier[remote] = identifier[requests] . identifier[get] ( identifier[url] , identifier[verify] = keyword[False] )
keyword[if] keyword[not] identifier[remote] . identifier[status_code] == identifier[Constants] . identifier[PULP_GET_OK] :
keyword[raise] identifier[JuicerPulpError] ( literal[string] %
( identifier[remote] . identifier[status_code] , identifier[url] ))
keyword[with] identifier[open] ( identifier[save_as] , literal[string] ) keyword[as] identifier[data] :
identifier[data] . identifier[write] ( identifier[remote] . identifier[content] ) | def save_url_as(url, save_as):
"""
Download the file `url` and save it to the local disk as
`save_as`.
"""
remote = requests.get(url, verify=False)
if not remote.status_code == Constants.PULP_GET_OK:
raise JuicerPulpError('A %s error occurred trying to get %s' % (remote.status_code, url)) # depends on [control=['if'], data=[]]
with open(save_as, 'wb') as data:
data.write(remote.content) # depends on [control=['with'], data=['data']] |
def _multi_permission_mask(mode):
"""
Support multiple, comma-separated Unix chmod symbolic modes.
>>> _multi_permission_mask('a=r,u+w')(0) == 0o644
True
"""
def compose(f, g):
return lambda *args, **kwargs: g(f(*args, **kwargs))
return functools.reduce(compose, map(_permission_mask, mode.split(','))) | def function[_multi_permission_mask, parameter[mode]]:
constant[
Support multiple, comma-separated Unix chmod symbolic modes.
>>> _multi_permission_mask('a=r,u+w')(0) == 0o644
True
]
def function[compose, parameter[f, g]]:
return[<ast.Lambda object at 0x7da1b08e4430>]
return[call[name[functools].reduce, parameter[name[compose], call[name[map], parameter[name[_permission_mask], call[name[mode].split, parameter[constant[,]]]]]]]] | keyword[def] identifier[_multi_permission_mask] ( identifier[mode] ):
literal[string]
keyword[def] identifier[compose] ( identifier[f] , identifier[g] ):
keyword[return] keyword[lambda] * identifier[args] ,** identifier[kwargs] : identifier[g] ( identifier[f] (* identifier[args] ,** identifier[kwargs] ))
keyword[return] identifier[functools] . identifier[reduce] ( identifier[compose] , identifier[map] ( identifier[_permission_mask] , identifier[mode] . identifier[split] ( literal[string] ))) | def _multi_permission_mask(mode):
"""
Support multiple, comma-separated Unix chmod symbolic modes.
>>> _multi_permission_mask('a=r,u+w')(0) == 0o644
True
"""
def compose(f, g):
return lambda *args, **kwargs: g(f(*args, **kwargs))
return functools.reduce(compose, map(_permission_mask, mode.split(','))) |
def read_docs(self, payloadType, els_client = None):
"""Fetches the list of documents associated with this entity from
api.elsevier.com. If need be, splits the requests in batches to
retrieve them all. Returns True if successful; else, False.
NOTE: this method requires elevated API permissions.
See http://bit.ly/2leirnq for more info."""
if els_client:
self._client = els_client;
elif not self.client:
raise ValueError('''Entity object not currently bound to els_client instance. Call .read() with els_client argument or set .client attribute.''')
try:
api_response = self.client.exec_request(self.uri + "?view=documents")
if isinstance(api_response[payloadType], list):
data = api_response[payloadType][0]
else:
data = api_response[payloadType]
docCount = int(data["documents"]["@total"])
self._doc_list = [x for x in data["documents"]["abstract-document"]]
for i in range (0, docCount//self.client.num_res):
try:
api_response = self.client.exec_request(self.uri + "?view=documents&start=" + str((i+1) * self.client.num_res+1))
if isinstance(api_response[payloadType], list):
data = api_response[payloadType][0]
else:
data = api_response[payloadType]
self._doc_list = self._doc_list + [x for x in data["documents"]["abstract-document"]]
except (requests.HTTPError, requests.RequestException) as e:
if hasattr(self, 'doc_list'): ## We don't want incomplete doc lists
self._doc_list = None
raise e
logger.info("Documents loaded for " + self.uri)
return True
except (requests.HTTPError, requests.RequestException) as e:
logger.warning(e.args)
return False | def function[read_docs, parameter[self, payloadType, els_client]]:
constant[Fetches the list of documents associated with this entity from
api.elsevier.com. If need be, splits the requests in batches to
retrieve them all. Returns True if successful; else, False.
NOTE: this method requires elevated API permissions.
See http://bit.ly/2leirnq for more info.]
if name[els_client] begin[:]
name[self]._client assign[=] name[els_client]
<ast.Try object at 0x7da20e9600d0> | keyword[def] identifier[read_docs] ( identifier[self] , identifier[payloadType] , identifier[els_client] = keyword[None] ):
literal[string]
keyword[if] identifier[els_client] :
identifier[self] . identifier[_client] = identifier[els_client] ;
keyword[elif] keyword[not] identifier[self] . identifier[client] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[try] :
identifier[api_response] = identifier[self] . identifier[client] . identifier[exec_request] ( identifier[self] . identifier[uri] + literal[string] )
keyword[if] identifier[isinstance] ( identifier[api_response] [ identifier[payloadType] ], identifier[list] ):
identifier[data] = identifier[api_response] [ identifier[payloadType] ][ literal[int] ]
keyword[else] :
identifier[data] = identifier[api_response] [ identifier[payloadType] ]
identifier[docCount] = identifier[int] ( identifier[data] [ literal[string] ][ literal[string] ])
identifier[self] . identifier[_doc_list] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ][ literal[string] ]]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[docCount] // identifier[self] . identifier[client] . identifier[num_res] ):
keyword[try] :
identifier[api_response] = identifier[self] . identifier[client] . identifier[exec_request] ( identifier[self] . identifier[uri] + literal[string] + identifier[str] (( identifier[i] + literal[int] )* identifier[self] . identifier[client] . identifier[num_res] + literal[int] ))
keyword[if] identifier[isinstance] ( identifier[api_response] [ identifier[payloadType] ], identifier[list] ):
identifier[data] = identifier[api_response] [ identifier[payloadType] ][ literal[int] ]
keyword[else] :
identifier[data] = identifier[api_response] [ identifier[payloadType] ]
identifier[self] . identifier[_doc_list] = identifier[self] . identifier[_doc_list] +[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ][ literal[string] ]]
keyword[except] ( identifier[requests] . identifier[HTTPError] , identifier[requests] . identifier[RequestException] ) keyword[as] identifier[e] :
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_doc_list] = keyword[None]
keyword[raise] identifier[e]
identifier[logger] . identifier[info] ( literal[string] + identifier[self] . identifier[uri] )
keyword[return] keyword[True]
keyword[except] ( identifier[requests] . identifier[HTTPError] , identifier[requests] . identifier[RequestException] ) keyword[as] identifier[e] :
identifier[logger] . identifier[warning] ( identifier[e] . identifier[args] )
keyword[return] keyword[False] | def read_docs(self, payloadType, els_client=None):
"""Fetches the list of documents associated with this entity from
api.elsevier.com. If need be, splits the requests in batches to
retrieve them all. Returns True if successful; else, False.
NOTE: this method requires elevated API permissions.
See http://bit.ly/2leirnq for more info."""
if els_client:
self._client = els_client # depends on [control=['if'], data=[]]
elif not self.client:
raise ValueError('Entity object not currently bound to els_client instance. Call .read() with els_client argument or set .client attribute.') # depends on [control=['if'], data=[]]
try:
api_response = self.client.exec_request(self.uri + '?view=documents')
if isinstance(api_response[payloadType], list):
data = api_response[payloadType][0] # depends on [control=['if'], data=[]]
else:
data = api_response[payloadType]
docCount = int(data['documents']['@total'])
self._doc_list = [x for x in data['documents']['abstract-document']]
for i in range(0, docCount // self.client.num_res):
try:
api_response = self.client.exec_request(self.uri + '?view=documents&start=' + str((i + 1) * self.client.num_res + 1))
if isinstance(api_response[payloadType], list):
data = api_response[payloadType][0] # depends on [control=['if'], data=[]]
else:
data = api_response[payloadType]
self._doc_list = self._doc_list + [x for x in data['documents']['abstract-document']] # depends on [control=['try'], data=[]]
except (requests.HTTPError, requests.RequestException) as e:
if hasattr(self, 'doc_list'): ## We don't want incomplete doc lists
self._doc_list = None # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['i']]
logger.info('Documents loaded for ' + self.uri)
return True # depends on [control=['try'], data=[]]
except (requests.HTTPError, requests.RequestException) as e:
logger.warning(e.args)
return False # depends on [control=['except'], data=['e']] |
def watch_firings(self, flag):
"""Whether or not the Rule firings are being watched."""
lib.EnvSetDefruleWatchFirings(self._env, int(flag), self._rule) | def function[watch_firings, parameter[self, flag]]:
constant[Whether or not the Rule firings are being watched.]
call[name[lib].EnvSetDefruleWatchFirings, parameter[name[self]._env, call[name[int], parameter[name[flag]]], name[self]._rule]] | keyword[def] identifier[watch_firings] ( identifier[self] , identifier[flag] ):
literal[string]
identifier[lib] . identifier[EnvSetDefruleWatchFirings] ( identifier[self] . identifier[_env] , identifier[int] ( identifier[flag] ), identifier[self] . identifier[_rule] ) | def watch_firings(self, flag):
"""Whether or not the Rule firings are being watched."""
lib.EnvSetDefruleWatchFirings(self._env, int(flag), self._rule) |
def check_application_state(self, request, callback):
    """Validate the OAuth ``state`` parameter against the session.
    Compares the state value stored in the session (under
    ``self.session_key``) with the one the provider returned in the
    query string, using a constant-time comparison to avoid timing
    attacks. Logs an error and returns False when either side is
    missing.
    Returns:
        bool: True only when both values are present and equal.
    """
    stored = request.session.get(self.session_key, None)
    returned = request.GET.get('state', None)
    if stored is None:
        # Nothing was saved before redirecting to the provider.
        logger.error('No state stored in the session.')
        return False
    if returned is None:
        logger.error('No state parameter returned by the provider.')
        return False
    return constant_time_compare(stored, returned)
constant[Check optional state parameter.]
variable[stored] assign[=] call[name[request].session.get, parameter[name[self].session_key, constant[None]]]
variable[returned] assign[=] call[name[request].GET.get, parameter[constant[state], constant[None]]]
variable[check] assign[=] constant[False]
if compare[name[stored] is_not constant[None]] begin[:]
if compare[name[returned] is_not constant[None]] begin[:]
variable[check] assign[=] call[name[constant_time_compare], parameter[name[stored], name[returned]]]
return[name[check]] | keyword[def] identifier[check_application_state] ( identifier[self] , identifier[request] , identifier[callback] ):
literal[string]
identifier[stored] = identifier[request] . identifier[session] . identifier[get] ( identifier[self] . identifier[session_key] , keyword[None] )
identifier[returned] = identifier[request] . identifier[GET] . identifier[get] ( literal[string] , keyword[None] )
identifier[check] = keyword[False]
keyword[if] identifier[stored] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[returned] keyword[is] keyword[not] keyword[None] :
identifier[check] = identifier[constant_time_compare] ( identifier[stored] , identifier[returned] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] identifier[check] | def check_application_state(self, request, callback):
"""Check optional state parameter."""
stored = request.session.get(self.session_key, None)
returned = request.GET.get('state', None)
check = False
if stored is not None:
if returned is not None:
check = constant_time_compare(stored, returned) # depends on [control=['if'], data=['returned']]
else:
logger.error('No state parameter returned by the provider.') # depends on [control=['if'], data=['stored']]
else:
logger.error('No state stored in the sesssion.')
return check |
def wait(self):
    """Block until every tracked in-progress transfer has finished.
    Failures from individual transfers are ignored (their errors are
    not re-raised here); a KeyboardInterrupt, however, aborts the wait
    and propagates to the caller.
    """
    try:
        current = None
        for current in self.tracked_transfer_coordinators:
            current.result()
    except KeyboardInterrupt:
        logger.debug('Received KeyboardInterrupt in wait()')
        # Record which coordinator we were blocked on, then let the
        # interrupt propagate so the caller can exit promptly.
        if current:
            logger.debug(
                'On KeyboardInterrupt was waiting for %s', current)
        raise
    except Exception:
        # result() may raise for a failed transfer; the coordinator has
        # still completed, so swallow the error and keep waiting on the
        # remaining transfers.
        pass
constant[Wait until there are no more inprogress transfers
This will not stop when failures are encountered and not propogate any
of these errors from failed transfers, but it can be interrupted with
a KeyboardInterrupt.
]
<ast.Try object at 0x7da20c6e7c10> | keyword[def] identifier[wait] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[transfer_coordinator] = keyword[None]
keyword[for] identifier[transfer_coordinator] keyword[in] identifier[self] . identifier[tracked_transfer_coordinators] :
identifier[transfer_coordinator] . identifier[result] ()
keyword[except] identifier[KeyboardInterrupt] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[transfer_coordinator] :
identifier[logger] . identifier[debug] (
literal[string] ,
identifier[transfer_coordinator] )
keyword[raise]
keyword[except] identifier[Exception] :
keyword[pass] | def wait(self):
"""Wait until there are no more inprogress transfers
This will not stop when failures are encountered and not propogate any
of these errors from failed transfers, but it can be interrupted with
a KeyboardInterrupt.
"""
try:
transfer_coordinator = None
for transfer_coordinator in self.tracked_transfer_coordinators:
transfer_coordinator.result() # depends on [control=['for'], data=['transfer_coordinator']] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
logger.debug('Received KeyboardInterrupt in wait()')
# If Keyboard interrupt is raised while waiting for
# the result, then exit out of the wait and raise the
# exception
if transfer_coordinator:
logger.debug('On KeyboardInterrupt was waiting for %s', transfer_coordinator) # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=[]]
except Exception:
# A general exception could have been thrown because
# of result(). We just want to ignore this and continue
# because we at least know that the transfer coordinator
# has completed.
pass # depends on [control=['except'], data=[]] |
def catch_exceptions(func):
    """
    Decorator that converts uncaught exceptions in a request handler
    into a JSON error response with HTTP status 500.
    Handy during development; in production you may prefer to suppress
    the exception text rather than return it to the caller.
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        try:
            result = func(request, *args, **kwargs)
        except Exception as exc:
            logger.exception(exc)
            request.setResponseCode(500)
            request.setHeader('Content-Type', 'application/json')
            return json.dumps({"error": str(exc)})
        return result
    return wrapper
return wrapper | def function[catch_exceptions, parameter[func]]:
constant[
@catch_exceptions decorator handles generic exceptions in the request handler.
All uncaught exceptions will be packaged into a nice JSON response, and returned
to the caller with status code 500.
This is especially useful for development, for production you might want to
disable the messages.
]
def function[wrapper, parameter[request]]:
<ast.Try object at 0x7da1b1df9750>
return[name[res]]
return[name[wrapper]] | keyword[def] identifier[catch_exceptions] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
identifier[res] = identifier[func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[exception] ( identifier[e] )
identifier[request] . identifier[setResponseCode] ( literal[int] )
identifier[request] . identifier[setHeader] ( literal[string] , literal[string] )
keyword[return] identifier[json] . identifier[dumps] ({ literal[string] : identifier[str] ( identifier[e] )})
keyword[return] identifier[res]
keyword[return] identifier[wrapper] | def catch_exceptions(func):
"""
@catch_exceptions decorator handles generic exceptions in the request handler.
All uncaught exceptions will be packaged into a nice JSON response, and returned
to the caller with status code 500.
This is especially useful for development, for production you might want to
disable the messages.
"""
@wraps(func)
def wrapper(request, *args, **kwargs):
try:
res = func(request, *args, **kwargs) # depends on [control=['try'], data=[]]
except Exception as e:
logger.exception(e)
request.setResponseCode(500)
request.setHeader('Content-Type', 'application/json')
return json.dumps({'error': str(e)}) # depends on [control=['except'], data=['e']]
return res
return wrapper |
def set_reload_params(self, mercy=None, exit=None):
    """Configure reload behaviour for the section.
    :param int mercy: Maximum time (seconds) to wait for workers and
        other processes to die during a reload or shutdown.
    :param bool exit: When True, force exit even if a reload was
        requested.
    """
    self._set('reload-mercy', mercy)
    self.set_exit_events(reload=exit)
    section = self._section
    return section
constant[Set reload related params.
:param int mercy: Set the maximum time (in seconds) we wait
for workers and other processes to die during reload/shutdown.
:param bool exit: Force exit even if a reload is requested.
]
call[name[self]._set, parameter[constant[reload-mercy], name[mercy]]]
call[name[self].set_exit_events, parameter[]]
return[name[self]._section] | keyword[def] identifier[set_reload_params] ( identifier[self] , identifier[mercy] = keyword[None] , identifier[exit] = keyword[None] ):
literal[string]
identifier[self] . identifier[_set] ( literal[string] , identifier[mercy] )
identifier[self] . identifier[set_exit_events] ( identifier[reload] = identifier[exit] )
keyword[return] identifier[self] . identifier[_section] | def set_reload_params(self, mercy=None, exit=None):
"""Set reload related params.
:param int mercy: Set the maximum time (in seconds) we wait
for workers and other processes to die during reload/shutdown.
:param bool exit: Force exit even if a reload is requested.
"""
self._set('reload-mercy', mercy)
self.set_exit_events(reload=exit)
return self._section |
def get_timestamp_at(time_in=None, time_at=None):
    '''
    Compute the Unix timestamp of a future event, expressed either as a
    relative offset (``time_in``) or an absolute time (``time_at``).
    ``time_in`` may be an int (minutes) or a string such as ``"1h30"``,
    ``"1:30"`` or ``"45"``. ``time_at`` may be a numeric timestamp
    (returned unchanged) or a clock-time string in one of several
    accepted formats; an unrecognised string raises ValueError.
    Returns None when neither argument is provided.
    '''
    if time_in:
        if isinstance(time_in, int):
            # Bare integer means "that many minutes from now".
            hours, minutes = 0, time_in
        else:
            normalised = time_in.replace('h', ':').replace('m', '')
            try:
                hours, minutes = normalised.split(':')
            except ValueError:
                # No separator at all: treat the whole value as minutes.
                hours, minutes = 0, normalised
            minutes = minutes or 0
        delta = timedelta(hours=int(hours), minutes=int(minutes))
        target = datetime.utcnow() + delta
        return time.mktime(target.timetuple())
    if time_at:
        log.debug('Predicted at specified as %s', time_at)
        if isinstance(time_at, (six.integer_types, float)):
            # Already a timestamp; nothing to parse.
            return time_at
        fmts = ('%H%M', '%Hh%M', '%I%p', '%I:%M%p', '%I:%M %p')
        # Accepted clock-time spellings:
        #
        # - 18:30 (and 18h30)
        # - 1pm (no minutes, fixed hour)
        # - 1:20am (and 1:20am - with or without space)
        for fmt in fmts:
            try:
                log.debug('Trying to match %s', fmt)
                parsed = datetime.strptime(time_at, fmt)
            except ValueError:
                log.debug('Did not match %s, continue searching', fmt)
            else:
                return time.mktime(parsed.timetuple())
        msg = '{pat} does not match any of the accepted formats: {fmts}'.format(
            pat=time_at, fmts=', '.join(fmts))
        log.error(msg)
        raise ValueError(msg)
constant[
Computes the timestamp for a future event that may occur in ``time_in`` time
or at ``time_at``.
]
if name[time_in] begin[:]
if call[name[isinstance], parameter[name[time_in], name[int]]] begin[:]
variable[hours] assign[=] constant[0]
variable[minutes] assign[=] name[time_in]
variable[dt] assign[=] call[name[timedelta], parameter[]]
variable[time_now] assign[=] call[name[datetime].utcnow, parameter[]]
variable[time_at] assign[=] binary_operation[name[time_now] + name[dt]]
return[call[name[time].mktime, parameter[call[name[time_at].timetuple, parameter[]]]]] | keyword[def] identifier[get_timestamp_at] ( identifier[time_in] = keyword[None] , identifier[time_at] = keyword[None] ):
literal[string]
keyword[if] identifier[time_in] :
keyword[if] identifier[isinstance] ( identifier[time_in] , identifier[int] ):
identifier[hours] = literal[int]
identifier[minutes] = identifier[time_in]
keyword[else] :
identifier[time_in] = identifier[time_in] . identifier[replace] ( literal[string] , literal[string] )
identifier[time_in] = identifier[time_in] . identifier[replace] ( literal[string] , literal[string] )
keyword[try] :
identifier[hours] , identifier[minutes] = identifier[time_in] . identifier[split] ( literal[string] )
keyword[except] identifier[ValueError] :
identifier[hours] = literal[int]
identifier[minutes] = identifier[time_in]
keyword[if] keyword[not] identifier[minutes] :
identifier[minutes] = literal[int]
identifier[hours] , identifier[minutes] = identifier[int] ( identifier[hours] ), identifier[int] ( identifier[minutes] )
identifier[dt] = identifier[timedelta] ( identifier[hours] = identifier[hours] , identifier[minutes] = identifier[minutes] )
identifier[time_now] = identifier[datetime] . identifier[utcnow] ()
identifier[time_at] = identifier[time_now] + identifier[dt]
keyword[return] identifier[time] . identifier[mktime] ( identifier[time_at] . identifier[timetuple] ())
keyword[elif] identifier[time_at] :
identifier[log] . identifier[debug] ( literal[string] , identifier[time_at] )
keyword[if] identifier[isinstance] ( identifier[time_at] ,( identifier[six] . identifier[integer_types] , identifier[float] )):
keyword[return] identifier[time_at]
keyword[else] :
identifier[fmts] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
keyword[for] identifier[fmt] keyword[in] identifier[fmts] :
keyword[try] :
identifier[log] . identifier[debug] ( literal[string] , identifier[fmt] )
identifier[dt] = identifier[datetime] . identifier[strptime] ( identifier[time_at] , identifier[fmt] )
keyword[return] identifier[time] . identifier[mktime] ( identifier[dt] . identifier[timetuple] ())
keyword[except] identifier[ValueError] :
identifier[log] . identifier[debug] ( literal[string] , identifier[fmt] )
keyword[continue]
identifier[msg] = literal[string] . identifier[format] ( identifier[pat] = identifier[time_at] ,
identifier[fmts] = literal[string] . identifier[join] ( identifier[fmts] ))
identifier[log] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[ValueError] ( identifier[msg] ) | def get_timestamp_at(time_in=None, time_at=None):
"""
Computes the timestamp for a future event that may occur in ``time_in`` time
or at ``time_at``.
"""
if time_in:
if isinstance(time_in, int):
hours = 0
minutes = time_in # depends on [control=['if'], data=[]]
else:
time_in = time_in.replace('h', ':')
time_in = time_in.replace('m', '')
try:
(hours, minutes) = time_in.split(':') # depends on [control=['try'], data=[]]
except ValueError:
hours = 0
minutes = time_in # depends on [control=['except'], data=[]]
if not minutes:
minutes = 0 # depends on [control=['if'], data=[]]
(hours, minutes) = (int(hours), int(minutes))
dt = timedelta(hours=hours, minutes=minutes)
time_now = datetime.utcnow()
time_at = time_now + dt
return time.mktime(time_at.timetuple()) # depends on [control=['if'], data=[]]
elif time_at:
log.debug('Predicted at specified as %s', time_at)
if isinstance(time_at, (six.integer_types, float)):
# then it's a timestamp
return time_at # depends on [control=['if'], data=[]]
else:
fmts = ('%H%M', '%Hh%M', '%I%p', '%I:%M%p', '%I:%M %p')
# Support different formats for the timestamp
# The current formats accepted are the following:
#
# - 18:30 (and 18h30)
# - 1pm (no minutes, fixed hour)
# - 1:20am (and 1:20am - with or without space)
for fmt in fmts:
try:
log.debug('Trying to match %s', fmt)
dt = datetime.strptime(time_at, fmt)
return time.mktime(dt.timetuple()) # depends on [control=['try'], data=[]]
except ValueError:
log.debug('Did not match %s, continue searching', fmt)
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['fmt']]
msg = '{pat} does not match any of the accepted formats: {fmts}'.format(pat=time_at, fmts=', '.join(fmts))
log.error(msg)
raise ValueError(msg) # depends on [control=['if'], data=[]] |
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
) | def function[add_value, parameter[self, value_type, value_min, value_max]]:
constant[Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
]
if compare[call[name[len], parameter[name[self]._employers]] greater[>] constant[0]] begin[:]
call[name[self]._logger.log, parameter[constant[warn], constant[Adding a value after employers have been created]]]
variable[value] assign[=] tuple[[<ast.Name object at 0x7da18f58e620>, <ast.Tuple object at 0x7da18f58f6d0>]]
call[name[self]._value_ranges.append, parameter[name[value]]]
name[self]._limit assign[=] binary_operation[name[self]._num_employers * call[name[len], parameter[name[self]._value_ranges]]]
call[name[self]._logger.log, parameter[constant[debug], call[constant[Limit set to {}].format, parameter[name[self]._limit]]]] | keyword[def] identifier[add_value] ( identifier[self] , identifier[value_type] , identifier[value_min] , identifier[value_max] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[_employers] )> literal[int] :
identifier[self] . identifier[_logger] . identifier[log] (
literal[string] ,
literal[string]
)
identifier[value] =( identifier[value_type] ,( identifier[value_min] , identifier[value_max] ))
identifier[self] . identifier[_value_ranges] . identifier[append] ( identifier[value] )
identifier[self] . identifier[_limit] = identifier[self] . identifier[_num_employers] * identifier[len] ( identifier[self] . identifier[_value_ranges] )
identifier[self] . identifier[_logger] . identifier[log] (
literal[string] ,
literal[string] . identifier[format] ( identifier[self] . identifier[_limit] )
) | def add_value(self, value_type, value_min, value_max):
"""Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
"""
if len(self._employers) > 0:
self._logger.log('warn', 'Adding a value after employers have been created') # depends on [control=['if'], data=[]]
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit)) |
def _check_inference(self, inference):
"""
Internal method for checking that the selected inference scheme is compatible with the specified model
"""
if inference=='GP2KronSum':
assert self.n_randEffs==2, 'VarianceDecomposition: for fast inference number of random effect terms must be == 2'
assert not sp.isnan(self.Y).any(), 'VarianceDecomposition: fast inference available only for complete phenotype designs' | def function[_check_inference, parameter[self, inference]]:
constant[
Internal method for checking that the selected inference scheme is compatible with the specified model
]
if compare[name[inference] equal[==] constant[GP2KronSum]] begin[:]
assert[compare[name[self].n_randEffs equal[==] constant[2]]]
assert[<ast.UnaryOp object at 0x7da1b09c4070>] | keyword[def] identifier[_check_inference] ( identifier[self] , identifier[inference] ):
literal[string]
keyword[if] identifier[inference] == literal[string] :
keyword[assert] identifier[self] . identifier[n_randEffs] == literal[int] , literal[string]
keyword[assert] keyword[not] identifier[sp] . identifier[isnan] ( identifier[self] . identifier[Y] ). identifier[any] (), literal[string] | def _check_inference(self, inference):
"""
Internal method for checking that the selected inference scheme is compatible with the specified model
"""
if inference == 'GP2KronSum':
assert self.n_randEffs == 2, 'VarianceDecomposition: for fast inference number of random effect terms must be == 2'
assert not sp.isnan(self.Y).any(), 'VarianceDecomposition: fast inference available only for complete phenotype designs' # depends on [control=['if'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.