code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
# NOTE(review): this dataset row stripped the original indentation; code lines
# below are kept byte-identical, only comments were added.
def avail_locations(conn=None, call=None):
'''
Return a dict of all available VM locations on the cloud provider with
relevant data
'''
# Reject invocation as an instance action; this helper is meant for
# -f/--function (or --list-locations) style calls only.
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
# Lazily open a provider connection when the caller did not pass one in.
# get_conn() is injected into the module namespace at runtime by salt-cloud,
# hence the pylint E0602 (undefined-name) suppression.
if not conn:
conn = get_conn() # pylint: disable=E0602
locations = conn.list_locations()
ret = {}
for img in locations:
# Python 2 only: coerce the location name to ASCII bytes via the
# 'salt-cloud-force-ascii' encode error handler; on Python 3 a plain
# str() conversion is used instead.
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
# Copy every public attribute (skipping private names and the 'driver'
# backreference) into the per-location dict, applying the same
# Python 2 ASCII coercion to string values.
for attr in dir(img):
if attr.startswith('_') or attr == 'driver':
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[img_name][attr] = attr_value
return ret | def function[avail_locations, parameter[conn, call]]:
constant[
Return a dict of all available VM locations on the cloud provider with
relevant data
]
if compare[name[call] equal[==] constant[action]] begin[:]
<ast.Raise object at 0x7da1b2345450>
if <ast.UnaryOp object at 0x7da1b1f759f0> begin[:]
variable[conn] assign[=] call[name[get_conn], parameter[]]
variable[locations] assign[=] call[name[conn].list_locations, parameter[]]
variable[ret] assign[=] dictionary[[], []]
for taget[name[img]] in starred[name[locations]] begin[:]
if <ast.BoolOp object at 0x7da1b1f76f80> begin[:]
variable[img_name] assign[=] call[name[img].name.encode, parameter[constant[ascii], constant[salt-cloud-force-ascii]]]
call[name[ret]][name[img_name]] assign[=] dictionary[[], []]
for taget[name[attr]] in starred[call[name[dir], parameter[name[img]]]] begin[:]
if <ast.BoolOp object at 0x7da1b20895a0> begin[:]
continue
variable[attr_value] assign[=] call[name[getattr], parameter[name[img], name[attr]]]
if <ast.BoolOp object at 0x7da1b208a860> begin[:]
variable[attr_value] assign[=] call[name[attr_value].encode, parameter[constant[ascii], constant[salt-cloud-force-ascii]]]
call[call[name[ret]][name[img_name]]][name[attr]] assign[=] name[attr_value]
return[name[ret]] | keyword[def] identifier[avail_locations] ( identifier[conn] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] == literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
literal[string]
)
keyword[if] keyword[not] identifier[conn] :
identifier[conn] = identifier[get_conn] ()
identifier[locations] = identifier[conn] . identifier[list_locations] ()
identifier[ret] ={}
keyword[for] identifier[img] keyword[in] identifier[locations] :
keyword[if] identifier[isinstance] ( identifier[img] . identifier[name] , identifier[six] . identifier[string_types] ) keyword[and] keyword[not] identifier[six] . identifier[PY3] :
identifier[img_name] = identifier[img] . identifier[name] . identifier[encode] ( literal[string] , literal[string] )
keyword[else] :
identifier[img_name] = identifier[str] ( identifier[img] . identifier[name] )
identifier[ret] [ identifier[img_name] ]={}
keyword[for] identifier[attr] keyword[in] identifier[dir] ( identifier[img] ):
keyword[if] identifier[attr] . identifier[startswith] ( literal[string] ) keyword[or] identifier[attr] == literal[string] :
keyword[continue]
identifier[attr_value] = identifier[getattr] ( identifier[img] , identifier[attr] )
keyword[if] identifier[isinstance] ( identifier[attr_value] , identifier[six] . identifier[string_types] ) keyword[and] keyword[not] identifier[six] . identifier[PY3] :
identifier[attr_value] = identifier[attr_value] . identifier[encode] (
literal[string] , literal[string]
)
identifier[ret] [ identifier[img_name] ][ identifier[attr] ]= identifier[attr_value]
keyword[return] identifier[ret] | def avail_locations(conn=None, call=None):
"""
Return a dict of all available VM locations on the cloud provider with
relevant data
"""
if call == 'action':
raise SaltCloudSystemExit('The avail_locations function must be called with -f or --function, or with the --list-locations option') # depends on [control=['if'], data=[]]
if not conn:
conn = get_conn() # pylint: disable=E0602 # depends on [control=['if'], data=[]]
locations = conn.list_locations()
ret = {}
for img in locations:
if isinstance(img.name, six.string_types) and (not six.PY3):
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii') # depends on [control=['if'], data=[]]
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_') or attr == 'driver':
continue # depends on [control=['if'], data=[]]
attr_value = getattr(img, attr)
if isinstance(attr_value, six.string_types) and (not six.PY3):
attr_value = attr_value.encode('ascii', 'salt-cloud-force-ascii') # depends on [control=['if'], data=[]]
ret[img_name][attr] = attr_value # depends on [control=['for'], data=['attr']] # depends on [control=['for'], data=['img']]
return ret |
# NOTE(review): dataset export stripped the original indentation; code lines
# are byte-identical, only comments were added.
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
If set to None, no resizing is performed and the size of the segmentation map array is used.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere else.
Returns
-------
segmap_drawn : (H,W,3) ndarray
Rendered segmentation map (dtype is uint8).
foreground_mask : (H,W) ndarray
Mask indicating the locations of foreground classes (dtype is bool).
This value is only returned if `return_foreground_mask` is True.
"""
# Integer class-id map; the class count is derived from the largest id
# actually present in the array.
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
ia.do_assert(nb_classes <= len(colors),
"Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
nb_classes, len(colors),))
# Only paint class ids that actually occur in the map; absent ids in
# 0..nb_classes-1 are skipped.
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
# Background defaults to class id 0 when not given explicitly.
background_class_id = 0 if background_class_id is None else background_class_id
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
# Nearest-neighbour interpolation keeps class colors crisp (no blending
# between neighbouring classes).
segmap_drawn = ia.imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
# Resize the bool mask via uint8, then threshold back to bool.
foreground_mask = ia.imresize_single_image(
foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn | def function[draw, parameter[self, size, background_threshold, background_class_id, colors, return_foreground_mask]]:
constant[
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
If set to None, no resizing is performed and the size of the segmentation map array is used.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere else.
Returns
-------
segmap_drawn : (H,W,3) ndarray
Rendered segmentation map (dtype is uint8).
foreground_mask : (H,W) ndarray
Mask indicating the locations of foreground classes (dtype is bool).
This value is only returned if `return_foreground_mask` is True.
]
variable[arr] assign[=] call[name[self].get_arr_int, parameter[]]
variable[nb_classes] assign[=] binary_operation[constant[1] + call[name[np].max, parameter[name[arr]]]]
variable[segmap_drawn] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da1b028bc10>, <ast.Subscript object at 0x7da1b0289ae0>, <ast.Constant object at 0x7da1b014d1e0>]]]]
if compare[name[colors] is constant[None]] begin[:]
variable[colors] assign[=] name[SegmentationMapOnImage].DEFAULT_SEGMENT_COLORS
call[name[ia].do_assert, parameter[compare[name[nb_classes] less_or_equal[<=] call[name[len], parameter[name[colors]]]], binary_operation[constant[Can't draw all %d classes as it would exceed the maximum number of %d available colors.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b014c5b0>, <ast.Call object at 0x7da1b014c610>]]]]]
variable[ids_in_map] assign[=] call[name[np].unique, parameter[name[arr]]]
for taget[tuple[[<ast.Name object at 0x7da1b014c640>, <ast.Name object at 0x7da1b014c670>]]] in starred[call[name[zip], parameter[call[name[sm].xrange, parameter[name[nb_classes]]], name[colors]]]] begin[:]
if compare[name[c] in name[ids_in_map]] begin[:]
variable[class_mask] assign[=] compare[name[arr] equal[==] name[c]]
call[name[segmap_drawn]][name[class_mask]] assign[=] name[color]
if name[return_foreground_mask] begin[:]
variable[background_class_id] assign[=] <ast.IfExp object at 0x7da1b014c1c0>
variable[foreground_mask] assign[=] compare[name[arr] not_equal[!=] name[background_class_id]]
if compare[name[size] is_not constant[None]] begin[:]
variable[segmap_drawn] assign[=] call[name[ia].imresize_single_image, parameter[name[segmap_drawn], name[size]]]
if compare[name[foreground_mask] is_not constant[None]] begin[:]
variable[foreground_mask] assign[=] compare[call[name[ia].imresize_single_image, parameter[call[name[foreground_mask].astype, parameter[name[np].uint8]], name[size]]] greater[>] constant[0]]
if compare[name[foreground_mask] is_not constant[None]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b02b7940>, <ast.Name object at 0x7da1b02b70a0>]]]
return[name[segmap_drawn]] | keyword[def] identifier[draw] ( identifier[self] , identifier[size] = keyword[None] , identifier[background_threshold] = literal[int] , identifier[background_class_id] = keyword[None] , identifier[colors] = keyword[None] ,
identifier[return_foreground_mask] = keyword[False] ):
literal[string]
identifier[arr] = identifier[self] . identifier[get_arr_int] ( identifier[background_threshold] = identifier[background_threshold] , identifier[background_class_id] = identifier[background_class_id] )
identifier[nb_classes] = literal[int] + identifier[np] . identifier[max] ( identifier[arr] )
identifier[segmap_drawn] = identifier[np] . identifier[zeros] (( identifier[arr] . identifier[shape] [ literal[int] ], identifier[arr] . identifier[shape] [ literal[int] ], literal[int] ), identifier[dtype] = identifier[np] . identifier[uint8] )
keyword[if] identifier[colors] keyword[is] keyword[None] :
identifier[colors] = identifier[SegmentationMapOnImage] . identifier[DEFAULT_SEGMENT_COLORS]
identifier[ia] . identifier[do_assert] ( identifier[nb_classes] <= identifier[len] ( identifier[colors] ),
literal[string] %(
identifier[nb_classes] , identifier[len] ( identifier[colors] ),))
identifier[ids_in_map] = identifier[np] . identifier[unique] ( identifier[arr] )
keyword[for] identifier[c] , identifier[color] keyword[in] identifier[zip] ( identifier[sm] . identifier[xrange] ( identifier[nb_classes] ), identifier[colors] ):
keyword[if] identifier[c] keyword[in] identifier[ids_in_map] :
identifier[class_mask] =( identifier[arr] == identifier[c] )
identifier[segmap_drawn] [ identifier[class_mask] ]= identifier[color]
keyword[if] identifier[return_foreground_mask] :
identifier[background_class_id] = literal[int] keyword[if] identifier[background_class_id] keyword[is] keyword[None] keyword[else] identifier[background_class_id]
identifier[foreground_mask] =( identifier[arr] != identifier[background_class_id] )
keyword[else] :
identifier[foreground_mask] = keyword[None]
keyword[if] identifier[size] keyword[is] keyword[not] keyword[None] :
identifier[segmap_drawn] = identifier[ia] . identifier[imresize_single_image] ( identifier[segmap_drawn] , identifier[size] , identifier[interpolation] = literal[string] )
keyword[if] identifier[foreground_mask] keyword[is] keyword[not] keyword[None] :
identifier[foreground_mask] = identifier[ia] . identifier[imresize_single_image] (
identifier[foreground_mask] . identifier[astype] ( identifier[np] . identifier[uint8] ), identifier[size] , identifier[interpolation] = literal[string] )> literal[int]
keyword[if] identifier[foreground_mask] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[segmap_drawn] , identifier[foreground_mask]
keyword[return] identifier[segmap_drawn] | def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None, return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
If set to None, no resizing is performed and the size of the segmentation map array is used.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere else.
Returns
-------
segmap_drawn : (H,W,3) ndarray
Rendered segmentation map (dtype is uint8).
foreground_mask : (H,W) ndarray
Mask indicating the locations of foreground classes (dtype is bool).
This value is only returned if `return_foreground_mask` is True.
"""
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS # depends on [control=['if'], data=['colors']]
ia.do_assert(nb_classes <= len(colors), "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (nb_classes, len(colors)))
ids_in_map = np.unique(arr)
for (c, color) in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = arr == c
segmap_drawn[class_mask] = color # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=[]]
if return_foreground_mask:
background_class_id = 0 if background_class_id is None else background_class_id
foreground_mask = arr != background_class_id # depends on [control=['if'], data=[]]
else:
foreground_mask = None
if size is not None:
segmap_drawn = ia.imresize_single_image(segmap_drawn, size, interpolation='nearest')
if foreground_mask is not None:
foreground_mask = ia.imresize_single_image(foreground_mask.astype(np.uint8), size, interpolation='nearest') > 0 # depends on [control=['if'], data=['foreground_mask']] # depends on [control=['if'], data=['size']]
if foreground_mask is not None:
return (segmap_drawn, foreground_mask) # depends on [control=['if'], data=['foreground_mask']]
return segmap_drawn |
# NOTE(review): implicitly returns None when no dict/list payload is found
# under an uncommon key — confirm callers handle that.
def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
# The payload lives under whichever response key is not part of the
# common envelope keys (self.common_keys); probe each candidate.
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
# Single object -> wrap in a one-element list for a uniform return type.
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
# List of objects -> wrap each item in the representation type.
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
for x in element] | def function[extract_data, parameter[self, page]]:
constant[Extract the AppNexus object or list of objects from the response]
variable[response_keys] assign[=] call[name[set], parameter[call[name[page].keys, parameter[]]]]
variable[uncommon_keys] assign[=] binary_operation[name[response_keys] - name[self].common_keys]
for taget[name[possible_data_key]] in starred[name[uncommon_keys]] begin[:]
variable[element] assign[=] call[name[page]][name[possible_data_key]]
if call[name[isinstance], parameter[name[element], name[dict]]] begin[:]
return[list[[<ast.Call object at 0x7da1b0470280>]]]
if call[name[isinstance], parameter[name[element], name[list]]] begin[:]
return[<ast.ListComp object at 0x7da1b0471270>] | keyword[def] identifier[extract_data] ( identifier[self] , identifier[page] ):
literal[string]
identifier[response_keys] = identifier[set] ( identifier[page] . identifier[keys] ())
identifier[uncommon_keys] = identifier[response_keys] - identifier[self] . identifier[common_keys]
keyword[for] identifier[possible_data_key] keyword[in] identifier[uncommon_keys] :
identifier[element] = identifier[page] [ identifier[possible_data_key] ]
keyword[if] identifier[isinstance] ( identifier[element] , identifier[dict] ):
keyword[return] [ identifier[self] . identifier[representation] ( identifier[self] . identifier[client] , identifier[self] . identifier[service_name] ,
identifier[element] )]
keyword[if] identifier[isinstance] ( identifier[element] , identifier[list] ):
keyword[return] [ identifier[self] . identifier[representation] ( identifier[self] . identifier[client] , identifier[self] . identifier[service_name] , identifier[x] )
keyword[for] identifier[x] keyword[in] identifier[element] ] | def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name, element)] # depends on [control=['if'], data=[]]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x) for x in element] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['possible_data_key']] |
def line_range(self, line_number):
"""Return a slice for the given line number"""
# line_number is 1-based; zero, negative, and past-the-end values are
# rejected (hence the offset-by-1 hint in the error message).
if line_number <= 0 or line_number > len(self.lines):
raise IndexError('NOTE: Python file line numbers are offset by 1.')
# Lines not registered in self.logical_lines map to a single-line slice.
if line_number not in self.logical_lines:
return slice(line_number, line_number + 1)
else:
# self.logical_lines[line_number] is a (start, stop, <ignored>) triple —
# presumably the span of a logical line covering several physical lines;
# confirm against the code that builds self.logical_lines.
start, stop, _ = self.logical_lines[line_number]
return slice(start, stop) | def function[line_range, parameter[self, line_number]]:
constant[Return a slice for the given line number]
if <ast.BoolOp object at 0x7da1b1e5c160> begin[:]
<ast.Raise object at 0x7da1b1e5fe50>
if compare[name[line_number] <ast.NotIn object at 0x7da2590d7190> name[self].logical_lines] begin[:]
return[call[name[slice], parameter[name[line_number], binary_operation[name[line_number] + constant[1]]]]] | keyword[def] identifier[line_range] ( identifier[self] , identifier[line_number] ):
literal[string]
keyword[if] identifier[line_number] <= literal[int] keyword[or] identifier[line_number] > identifier[len] ( identifier[self] . identifier[lines] ):
keyword[raise] identifier[IndexError] ( literal[string] )
keyword[if] identifier[line_number] keyword[not] keyword[in] identifier[self] . identifier[logical_lines] :
keyword[return] identifier[slice] ( identifier[line_number] , identifier[line_number] + literal[int] )
keyword[else] :
identifier[start] , identifier[stop] , identifier[_] = identifier[self] . identifier[logical_lines] [ identifier[line_number] ]
keyword[return] identifier[slice] ( identifier[start] , identifier[stop] ) | def line_range(self, line_number):
"""Return a slice for the given line number"""
if line_number <= 0 or line_number > len(self.lines):
raise IndexError('NOTE: Python file line numbers are offset by 1.') # depends on [control=['if'], data=[]]
if line_number not in self.logical_lines:
return slice(line_number, line_number + 1) # depends on [control=['if'], data=['line_number']]
else:
(start, stop, _) = self.logical_lines[line_number]
return slice(start, stop) |
def create_or_update_group_alias(self, name, alias_id=None, mount_accessor=None, canonical_id=None, mount_point=DEFAULT_MOUNT_POINT):
"""Creates or update a group alias.
Supported methods:
POST: /{mount_point}/group-alias. Produces: 200 application/json
:param alias_id: ID of the group alias. If set, updates the corresponding existing group alias.
:type alias_id: str | unicode
:param name: Name of the group alias.
:type name: str | unicode
:param mount_accessor: Mount accessor to which this alias belongs to
:type mount_accessor: str | unicode
:param canonical_id: ID of the group to which this is an alias.
:type canonical_id: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
"""
params = {
'name': name,
'mount_accessor': mount_accessor,
'canonical_id': canonical_id,
}
# 'id' is only sent for updates; omitting it makes the backend create a
# new alias instead of updating an existing one.
if alias_id is not None:
params['id'] = alias_id
api_path = '/v1/{mount_point}/group-alias'.format(mount_point=mount_point)
response = self._adapter.post(
url=api_path,
json=params,
)
return response.json() | def function[create_or_update_group_alias, parameter[self, name, alias_id, mount_accessor, canonical_id, mount_point]]:
constant[Creates or update a group alias.
Supported methods:
POST: /{mount_point}/group-alias. Produces: 200 application/json
:param alias_id: ID of the group alias. If set, updates the corresponding existing group alias.
:type alias_id: str | unicode
:param name: Name of the group alias.
:type name: str | unicode
:param mount_accessor: Mount accessor to which this alias belongs to
:type mount_accessor: str | unicode
:param canonical_id: ID of the group to which this is an alias.
:type canonical_id: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20e961f60>, <ast.Constant object at 0x7da20e9605e0>, <ast.Constant object at 0x7da18bc71bd0>], [<ast.Name object at 0x7da18bc703a0>, <ast.Name object at 0x7da18bc707c0>, <ast.Name object at 0x7da18bc73cd0>]]
if compare[name[alias_id] is_not constant[None]] begin[:]
call[name[params]][constant[id]] assign[=] name[alias_id]
variable[api_path] assign[=] call[constant[/v1/{mount_point}/group-alias].format, parameter[]]
variable[response] assign[=] call[name[self]._adapter.post, parameter[]]
return[call[name[response].json, parameter[]]] | keyword[def] identifier[create_or_update_group_alias] ( identifier[self] , identifier[name] , identifier[alias_id] = keyword[None] , identifier[mount_accessor] = keyword[None] , identifier[canonical_id] = keyword[None] , identifier[mount_point] = identifier[DEFAULT_MOUNT_POINT] ):
literal[string]
identifier[params] ={
literal[string] : identifier[name] ,
literal[string] : identifier[mount_accessor] ,
literal[string] : identifier[canonical_id] ,
}
keyword[if] identifier[alias_id] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[alias_id]
identifier[api_path] = literal[string] . identifier[format] ( identifier[mount_point] = identifier[mount_point] )
identifier[response] = identifier[self] . identifier[_adapter] . identifier[post] (
identifier[url] = identifier[api_path] ,
identifier[json] = identifier[params] ,
)
keyword[return] identifier[response] . identifier[json] () | def create_or_update_group_alias(self, name, alias_id=None, mount_accessor=None, canonical_id=None, mount_point=DEFAULT_MOUNT_POINT):
"""Creates or update a group alias.
Supported methods:
POST: /{mount_point}/group-alias. Produces: 200 application/json
:param alias_id: ID of the group alias. If set, updates the corresponding existing group alias.
:type alias_id: str | unicode
:param name: Name of the group alias.
:type name: str | unicode
:param mount_accessor: Mount accessor to which this alias belongs to
:type mount_accessor: str | unicode
:param canonical_id: ID of the group to which this is an alias.
:type canonical_id: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
"""
params = {'name': name, 'mount_accessor': mount_accessor, 'canonical_id': canonical_id}
if alias_id is not None:
params['id'] = alias_id # depends on [control=['if'], data=['alias_id']]
api_path = '/v1/{mount_point}/group-alias'.format(mount_point=mount_point)
response = self._adapter.post(url=api_path, json=params)
return response.json() |
def check_list(var, num_terms):
""" Check if a variable is a list and is the correct length.
If variable is not a list it will make it a list of the correct length with
all terms identical.
"""
if not isinstance(var, list):
if isinstance(var, tuple):
var = list(var)
else:
# Scalar: replicate the single value num_terms times. The replicated
# entries all alias the same object — matters if the value is mutable.
var = [var]
for _ in range(1, num_terms):
var.append(var[0])
# NOTE(review): hard process exit on mismatch makes this unusable as a
# library helper; raising ValueError would be friendlier to callers.
if len(var) != num_terms:
print(
'"%s" has the wrong number of terms; it needs %s. Exiting ...' %
(var, num_terms))
sys.exit(1)
return var | def function[check_list, parameter[var, num_terms]]:
constant[ Check if a variable is a list and is the correct length.
If variable is not a list it will make it a list of the correct length with
all terms identical.
]
if <ast.UnaryOp object at 0x7da18dc054b0> begin[:]
if call[name[isinstance], parameter[name[var], name[tuple]]] begin[:]
variable[var] assign[=] call[name[list], parameter[name[var]]]
for taget[name[_]] in starred[call[name[range], parameter[constant[1], name[num_terms]]]] begin[:]
call[name[var].append, parameter[call[name[var]][constant[0]]]]
if compare[call[name[len], parameter[name[var]]] not_equal[!=] name[num_terms]] begin[:]
call[name[print], parameter[binary_operation[constant["%s" has the wrong number of terms; it needs %s. Exiting ...] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc04d60>, <ast.Name object at 0x7da18dc055d0>]]]]]
call[name[sys].exit, parameter[constant[1]]]
return[name[var]] | keyword[def] identifier[check_list] ( identifier[var] , identifier[num_terms] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[var] , identifier[list] ):
keyword[if] identifier[isinstance] ( identifier[var] , identifier[tuple] ):
identifier[var] = identifier[list] ( identifier[var] )
keyword[else] :
identifier[var] =[ identifier[var] ]
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[num_terms] ):
identifier[var] . identifier[append] ( identifier[var] [ literal[int] ])
keyword[if] identifier[len] ( identifier[var] )!= identifier[num_terms] :
identifier[print] (
literal[string] %
( identifier[var] , identifier[num_terms] ))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[return] identifier[var] | def check_list(var, num_terms):
""" Check if a variable is a list and is the correct length.
If variable is not a list it will make it a list of the correct length with
all terms identical.
"""
if not isinstance(var, list):
if isinstance(var, tuple):
var = list(var) # depends on [control=['if'], data=[]]
else:
var = [var]
for _ in range(1, num_terms):
var.append(var[0]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if len(var) != num_terms:
print('"%s" has the wrong number of terms; it needs %s. Exiting ...' % (var, num_terms))
sys.exit(1) # depends on [control=['if'], data=['num_terms']]
return var |
def removeDataset(self, dataset):
"""
Removes the specified dataset from this repository. This performs
a cascading removal of all items within this dataset.
"""
# recursive=True cascades the delete to rows that depend on this record.
for datasetRecord in models.Dataset.select().where(
models.Dataset.id == dataset.getId()):
datasetRecord.delete_instance(recursive=True)
datasetRecord.delete_instance(recursive=True) | def function[removeDataset, parameter[self, dataset]]:
constant[
Removes the specified dataset from this repository. This performs
a cascading removal of all items within this dataset.
]
for taget[name[datasetRecord]] in starred[call[call[name[models].Dataset.select, parameter[]].where, parameter[compare[name[models].Dataset.id equal[==] call[name[dataset].getId, parameter[]]]]]] begin[:]
call[name[datasetRecord].delete_instance, parameter[]] | keyword[def] identifier[removeDataset] ( identifier[self] , identifier[dataset] ):
literal[string]
keyword[for] identifier[datasetRecord] keyword[in] identifier[models] . identifier[Dataset] . identifier[select] (). identifier[where] (
identifier[models] . identifier[Dataset] . identifier[id] == identifier[dataset] . identifier[getId] ()):
identifier[datasetRecord] . identifier[delete_instance] ( identifier[recursive] = keyword[True] ) | def removeDataset(self, dataset):
"""
Removes the specified dataset from this repository. This performs
a cascading removal of all items within this dataset.
"""
for datasetRecord in models.Dataset.select().where(models.Dataset.id == dataset.getId()):
datasetRecord.delete_instance(recursive=True) # depends on [control=['for'], data=['datasetRecord']] |
def discard_config(self):
"""Discard changes (rollback 0)."""
# rb_id=0 discards pending candidate-configuration changes.
self.device.cu.rollback(rb_id=0)
# Release the configuration lock unless locking is disabled entirely or
# the lock is held for the whole session.
if not self.lock_disable and not self.session_config_lock:
self._unlock()
self._unlock() | def function[discard_config, parameter[self]]:
constant[Discard changes (rollback 0).]
call[name[self].device.cu.rollback, parameter[]]
if <ast.BoolOp object at 0x7da1b1c10160> begin[:]
call[name[self]._unlock, parameter[]] | keyword[def] identifier[discard_config] ( identifier[self] ):
literal[string]
identifier[self] . identifier[device] . identifier[cu] . identifier[rollback] ( identifier[rb_id] = literal[int] )
keyword[if] keyword[not] identifier[self] . identifier[lock_disable] keyword[and] keyword[not] identifier[self] . identifier[session_config_lock] :
identifier[self] . identifier[_unlock] () | def discard_config(self):
"""Discard changes (rollback 0)."""
self.device.cu.rollback(rb_id=0)
if not self.lock_disable and (not self.session_config_lock):
self._unlock() # depends on [control=['if'], data=[]] |
def all(self, usage_type, usage_period_id, api, query=None, *args, **kwargs):
    """
    Fetch all API usage records of ``usage_type`` for the given usage
    period and api, merging in any caller-supplied query filters.
    """
    params = {
        'filters[usagePeriod]': usage_period_id,
        'filters[metric]': api
    }
    # Caller-supplied query entries take precedence over the defaults above.
    if query is not None:
        params.update(query)
    # Usage insights is gated behind an alpha-feature header.
    alpha_headers = {'x-contentful-enable-alpha-feature': 'usage-insights'}
    return self.client._get(self._url(usage_type), params, headers=alpha_headers)
constant[
Gets all api usages by type for a given period an api.
]
if compare[name[query] is constant[None]] begin[:]
variable[query] assign[=] dictionary[[], []]
variable[mandatory_query] assign[=] dictionary[[<ast.Constant object at 0x7da18f58de70>, <ast.Constant object at 0x7da18f58d720>], [<ast.Name object at 0x7da18f58f1c0>, <ast.Name object at 0x7da18f58fe20>]]
call[name[mandatory_query].update, parameter[name[query]]]
return[call[name[self].client._get, parameter[call[name[self]._url, parameter[name[usage_type]]], name[mandatory_query]]]] | keyword[def] identifier[all] ( identifier[self] , identifier[usage_type] , identifier[usage_period_id] , identifier[api] , identifier[query] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[query] keyword[is] keyword[None] :
identifier[query] ={}
identifier[mandatory_query] ={
literal[string] : identifier[usage_period_id] ,
literal[string] : identifier[api]
}
identifier[mandatory_query] . identifier[update] ( identifier[query] )
keyword[return] identifier[self] . identifier[client] . identifier[_get] (
identifier[self] . identifier[_url] ( identifier[usage_type] ),
identifier[mandatory_query] ,
identifier[headers] ={
literal[string] : literal[string]
}
) | def all(self, usage_type, usage_period_id, api, query=None, *args, **kwargs):
"""
Gets all api usages by type for a given period an api.
"""
if query is None:
query = {} # depends on [control=['if'], data=['query']]
mandatory_query = {'filters[usagePeriod]': usage_period_id, 'filters[metric]': api}
mandatory_query.update(query)
return self.client._get(self._url(usage_type), mandatory_query, headers={'x-contentful-enable-alpha-feature': 'usage-insights'}) |
def convert_header(cls, old_header, new_version):
    """Return a copy of *old_header* converted to *new_version*.
    Parameters
    ----------
    old_header: the source header instance
    new_version: float or str
    Returns
    -------
    A new header instance of the class matching ``new_version``.
    >>> old_header = HeaderFactory.new(1.2)
    >>> HeaderFactory.convert_header(old_header, 1.4)
    <LasHeader(1.4)>
    >>> old_header = HeaderFactory.new('1.4')
    >>> HeaderFactory.convert_header(old_header, '1.2')
    <LasHeader(1.2)>
    """
    target_cls = cls.header_class_for_version(new_version)
    raw = bytearray(old_header)
    # Zero-pad so the buffer is big enough for the (possibly larger)
    # target ctypes struct; a negative pad length appends nothing.
    raw += b"\x00" * (ctypes.sizeof(target_cls) - len(raw))
    converted = target_cls.from_buffer(raw)
    converted.version = str(new_version)
    return converted
constant[ Converts a header to a another version
Parameters
----------
old_header: the old header instance
new_version: float or str
Returns
-------
The converted header
>>> old_header = HeaderFactory.new(1.2)
>>> HeaderFactory.convert_header(old_header, 1.4)
<LasHeader(1.4)>
>>> old_header = HeaderFactory.new('1.4')
>>> HeaderFactory.convert_header(old_header, '1.2')
<LasHeader(1.2)>
]
variable[new_header_class] assign[=] call[name[cls].header_class_for_version, parameter[name[new_version]]]
variable[b] assign[=] call[name[bytearray], parameter[name[old_header]]]
<ast.AugAssign object at 0x7da2041d8250>
variable[new_header] assign[=] call[name[new_header_class].from_buffer, parameter[name[b]]]
name[new_header].version assign[=] call[name[str], parameter[name[new_version]]]
return[name[new_header]] | keyword[def] identifier[convert_header] ( identifier[cls] , identifier[old_header] , identifier[new_version] ):
literal[string]
identifier[new_header_class] = identifier[cls] . identifier[header_class_for_version] ( identifier[new_version] )
identifier[b] = identifier[bytearray] ( identifier[old_header] )
identifier[b] += literal[string] *( identifier[ctypes] . identifier[sizeof] ( identifier[new_header_class] )- identifier[len] ( identifier[b] ))
identifier[new_header] = identifier[new_header_class] . identifier[from_buffer] ( identifier[b] )
identifier[new_header] . identifier[version] = identifier[str] ( identifier[new_version] )
keyword[return] identifier[new_header] | def convert_header(cls, old_header, new_version):
""" Converts a header to a another version
Parameters
----------
old_header: the old header instance
new_version: float or str
Returns
-------
The converted header
>>> old_header = HeaderFactory.new(1.2)
>>> HeaderFactory.convert_header(old_header, 1.4)
<LasHeader(1.4)>
>>> old_header = HeaderFactory.new('1.4')
>>> HeaderFactory.convert_header(old_header, '1.2')
<LasHeader(1.2)>
"""
new_header_class = cls.header_class_for_version(new_version)
b = bytearray(old_header)
b += b'\x00' * (ctypes.sizeof(new_header_class) - len(b))
new_header = new_header_class.from_buffer(b)
new_header.version = str(new_version)
return new_header |
def expand_path(path: Union[str, Path]) -> Path:
    """Return *path* as an absolute Path, with ``~`` expanded and symlinks resolved."""
    candidate = Path(path)
    return candidate.expanduser().resolve()
constant[Convert relative paths to absolute with resolving user directory.]
return[call[call[call[name[Path], parameter[name[path]]].expanduser, parameter[]].resolve, parameter[]]] | keyword[def] identifier[expand_path] ( identifier[path] : identifier[Union] [ identifier[str] , identifier[Path] ])-> identifier[Path] :
literal[string]
keyword[return] identifier[Path] ( identifier[path] ). identifier[expanduser] (). identifier[resolve] () | def expand_path(path: Union[str, Path]) -> Path:
"""Convert relative paths to absolute with resolving user directory."""
return Path(path).expanduser().resolve() |
def roller(timestamps, contract_dates, get_weights, **kwargs):
    """
    Calculate weight allocations to tradeable instruments for generic futures
    at a set of timestamps for a given root generic.
    Paramters
    ---------
    timestamps: iterable
        Sorted iterable of of pandas.Timestamps to calculate weights for
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values, sorted by values.
        Index must be unique and values must be strictly monotonic.
    get_weights: function
        A function which takes in a timestamp, contract_dates, validate_inputs
        and **kwargs. Returns a list of tuples consisting of the generic
        instrument name, the tradeable contract as a string, the weight on this
        contract as a float and the date as a pandas.Timestamp.
    kwargs: keyword arguments
        Arguements to pass to get_weights
    Return
    ------
    A pandas.DataFrame with columns representing generics and a MultiIndex of
    date and contract. Values represent weights on tradeables for each generic.
    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
    ...                       [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
    ...                        pd.Timestamp('2016-10-19'),
    ...                        pd.Timestamp('2016-10-19')])
    >>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
    ...                       transition=trans)
    """
    timestamps = sorted(timestamps)
    contract_dates = contract_dates.sort_values()
    _check_contract_dates(contract_dates)
    weights = []
    # Input validation inside get_weights() is relatively expensive, so it is
    # only requested on the first call; later timestamps skip it.  (This
    # replaces a duplicated out-of-loop first call, which also raised
    # IndexError on an empty `timestamps`; now an empty input simply yields
    # an empty weight list for aggregation.)
    for i, ts in enumerate(timestamps):
        weights.extend(get_weights(ts, contract_dates,
                                   validate_inputs=(i == 0), **kwargs))
    return aggregate_weights(weights)
constant[
Calculate weight allocations to tradeable instruments for generic futures
at a set of timestamps for a given root generic.
Paramters
---------
timestamps: iterable
Sorted iterable of of pandas.Timestamps to calculate weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
get_weights: function
A function which takes in a timestamp, contract_dates, validate_inputs
and **kwargs. Returns a list of tuples consisting of the generic
instrument name, the tradeable contract as a string, the weight on this
contract as a float and the date as a pandas.Timestamp.
kwargs: keyword arguments
Arguements to pass to get_weights
Return
------
A pandas.DataFrame with columns representing generics and a MultiIndex of
date and contract. Values represent weights on tradeables for each generic.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
... pd.Timestamp('2016-10-19'),
... pd.Timestamp('2016-10-19')])
>>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
... transition=trans)
]
variable[timestamps] assign[=] call[name[sorted], parameter[name[timestamps]]]
variable[contract_dates] assign[=] call[name[contract_dates].sort_values, parameter[]]
call[name[_check_contract_dates], parameter[name[contract_dates]]]
variable[weights] assign[=] list[[]]
variable[validate_inputs] assign[=] constant[True]
variable[ts] assign[=] call[name[timestamps]][constant[0]]
call[name[weights].extend, parameter[call[name[get_weights], parameter[name[ts], name[contract_dates]]]]]
variable[validate_inputs] assign[=] constant[False]
for taget[name[ts]] in starred[call[name[timestamps]][<ast.Slice object at 0x7da1b197f490>]] begin[:]
call[name[weights].extend, parameter[call[name[get_weights], parameter[name[ts], name[contract_dates]]]]]
variable[weights] assign[=] call[name[aggregate_weights], parameter[name[weights]]]
return[name[weights]] | keyword[def] identifier[roller] ( identifier[timestamps] , identifier[contract_dates] , identifier[get_weights] ,** identifier[kwargs] ):
literal[string]
identifier[timestamps] = identifier[sorted] ( identifier[timestamps] )
identifier[contract_dates] = identifier[contract_dates] . identifier[sort_values] ()
identifier[_check_contract_dates] ( identifier[contract_dates] )
identifier[weights] =[]
identifier[validate_inputs] = keyword[True]
identifier[ts] = identifier[timestamps] [ literal[int] ]
identifier[weights] . identifier[extend] ( identifier[get_weights] ( identifier[ts] , identifier[contract_dates] ,
identifier[validate_inputs] = identifier[validate_inputs] ,** identifier[kwargs] ))
identifier[validate_inputs] = keyword[False]
keyword[for] identifier[ts] keyword[in] identifier[timestamps] [ literal[int] :]:
identifier[weights] . identifier[extend] ( identifier[get_weights] ( identifier[ts] , identifier[contract_dates] ,
identifier[validate_inputs] = identifier[validate_inputs] ,** identifier[kwargs] ))
identifier[weights] = identifier[aggregate_weights] ( identifier[weights] )
keyword[return] identifier[weights] | def roller(timestamps, contract_dates, get_weights, **kwargs):
"""
Calculate weight allocations to tradeable instruments for generic futures
at a set of timestamps for a given root generic.
Paramters
---------
timestamps: iterable
Sorted iterable of of pandas.Timestamps to calculate weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
get_weights: function
A function which takes in a timestamp, contract_dates, validate_inputs
and **kwargs. Returns a list of tuples consisting of the generic
instrument name, the tradeable contract as a string, the weight on this
contract as a float and the date as a pandas.Timestamp.
kwargs: keyword arguments
Arguements to pass to get_weights
Return
------
A pandas.DataFrame with columns representing generics and a MultiIndex of
date and contract. Values represent weights on tradeables for each generic.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
... pd.Timestamp('2016-10-19'),
... pd.Timestamp('2016-10-19')])
>>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
... transition=trans)
"""
timestamps = sorted(timestamps)
contract_dates = contract_dates.sort_values()
_check_contract_dates(contract_dates)
weights = []
# for loop speedup only validate inputs the first function call to
# get_weights()
validate_inputs = True
ts = timestamps[0]
weights.extend(get_weights(ts, contract_dates, validate_inputs=validate_inputs, **kwargs))
validate_inputs = False
for ts in timestamps[1:]:
weights.extend(get_weights(ts, contract_dates, validate_inputs=validate_inputs, **kwargs)) # depends on [control=['for'], data=['ts']]
weights = aggregate_weights(weights)
return weights |
def _next_argsig(s):
"""
given a string, find the next complete argument signature and
return it and a new string advanced past that point
"""
c = s[0]
if c in "BCDFIJSVZ":
result = (c, s[1:])
elif c == "[":
d, s = _next_argsig(s[1:])
result = (c + d, s[len(d) + 1:])
elif c == "L":
i = s.find(';') + 1
result = (s[:i], s[i + 1:])
elif c == "(":
i = s.find(')') + 1
result = (s[:i], s[i:])
else:
raise Unimplemented("_next_argsig is %r in %r" % (c, s))
return result | def function[_next_argsig, parameter[s]]:
constant[
given a string, find the next complete argument signature and
return it and a new string advanced past that point
]
variable[c] assign[=] call[name[s]][constant[0]]
if compare[name[c] in constant[BCDFIJSVZ]] begin[:]
variable[result] assign[=] tuple[[<ast.Name object at 0x7da1b0ca7ee0>, <ast.Subscript object at 0x7da1b0ca5b40>]]
return[name[result]] | keyword[def] identifier[_next_argsig] ( identifier[s] ):
literal[string]
identifier[c] = identifier[s] [ literal[int] ]
keyword[if] identifier[c] keyword[in] literal[string] :
identifier[result] =( identifier[c] , identifier[s] [ literal[int] :])
keyword[elif] identifier[c] == literal[string] :
identifier[d] , identifier[s] = identifier[_next_argsig] ( identifier[s] [ literal[int] :])
identifier[result] =( identifier[c] + identifier[d] , identifier[s] [ identifier[len] ( identifier[d] )+ literal[int] :])
keyword[elif] identifier[c] == literal[string] :
identifier[i] = identifier[s] . identifier[find] ( literal[string] )+ literal[int]
identifier[result] =( identifier[s] [: identifier[i] ], identifier[s] [ identifier[i] + literal[int] :])
keyword[elif] identifier[c] == literal[string] :
identifier[i] = identifier[s] . identifier[find] ( literal[string] )+ literal[int]
identifier[result] =( identifier[s] [: identifier[i] ], identifier[s] [ identifier[i] :])
keyword[else] :
keyword[raise] identifier[Unimplemented] ( literal[string] %( identifier[c] , identifier[s] ))
keyword[return] identifier[result] | def _next_argsig(s):
"""
given a string, find the next complete argument signature and
return it and a new string advanced past that point
"""
c = s[0]
if c in 'BCDFIJSVZ':
result = (c, s[1:]) # depends on [control=['if'], data=['c']]
elif c == '[':
(d, s) = _next_argsig(s[1:])
result = (c + d, s[len(d) + 1:]) # depends on [control=['if'], data=['c']]
elif c == 'L':
i = s.find(';') + 1
result = (s[:i], s[i + 1:]) # depends on [control=['if'], data=[]]
elif c == '(':
i = s.find(')') + 1
result = (s[:i], s[i:]) # depends on [control=['if'], data=[]]
else:
raise Unimplemented('_next_argsig is %r in %r' % (c, s))
return result |
def update(self, user, identity):
    """
    Update the given identity belonging to the given user.
    :param user: User object or id
    :param identity: Identity object to be updated.
    :return: The updated Identity
    """
    request = UserIdentityRequest(self)
    return request.put(self.endpoint.update, user, identity)
constant[
Update specified identity for the specified user
:param user: User object or id
:param identity: Identity object to be updated.
:return: The updated Identity
]
return[call[call[name[UserIdentityRequest], parameter[name[self]]].put, parameter[name[self].endpoint.update, name[user], name[identity]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[user] , identifier[identity] ):
literal[string]
keyword[return] identifier[UserIdentityRequest] ( identifier[self] ). identifier[put] ( identifier[self] . identifier[endpoint] . identifier[update] , identifier[user] , identifier[identity] ) | def update(self, user, identity):
"""
Update specified identity for the specified user
:param user: User object or id
:param identity: Identity object to be updated.
:return: The updated Identity
"""
return UserIdentityRequest(self).put(self.endpoint.update, user, identity) |
def _local_uri_rewriter(raw_uri):
    """Rewrite a local file URI as required by the rewrite_uris method.
    Local file paths, unlike GCS paths, may have their raw URI simplified by
    os.path.normpath which collapses extraneous indirect characters.
    >>> _local_uri_rewriter('/tmp/a_path/../B_PATH/file.txt')
    ('/tmp/B_PATH/file.txt', 'file/tmp/B_PATH/file.txt')
    >>> _local_uri_rewriter('/myhome/./mydir/')
    ('/myhome/mydir/', 'file/myhome/mydir/')
    Relative paths are preserved when building the docker path so that no
    information about the invoker's filesystem leaks to the remote system;
    required indirections are encoded as synthetic _<rewrite>_ components.
    >>> _local_uri_rewriter('./../upper_dir/')[1]
    'file/_dotdot_/upper_dir/'
    >>> _local_uri_rewriter('~/localdata/*.bam')[1]
    'file/_home_/localdata/*.bam'
    Args:
      raw_uri: (str) the raw file or directory path.
    Returns:
      normalized: a simplified and/or expanded version of the uri.
      docker_path: the uri rewritten in the format required for mounting inside
                   a docker worker.
    """
    # Split off the filename so only the directory part gets rewritten.
    dir_part, base_name = os.path.split(raw_uri)

    # Resolve the local path: strip URI schemes, expand '~', drop './'.
    # Every matching prefix is applied in turn (no early exit).
    local_path = dir_part
    for marker, substitute in (('file:///', '/'),
                               ('~/', os.getenv('HOME')),
                               ('./', ''),
                               ('file:/', '/')):
        if local_path.startswith(marker):
            local_path = os.path.join(substitute, local_path[len(marker):])
    # abspath strips a trailing '/' from non-root directories, so
    # directory_fmt re-appends it before the filename is joined back on.
    normed_uri = os.path.join(directory_fmt(os.path.abspath(local_path)),
                              base_name)

    # Build the in-container path:
    # 1) collapse indirects (/this/./that -> /this/that),
    # 2) encode required indirections as synthetic components,
    # 3) strip any leading '.' or '/' characters,
    # 4) prefix with 'file/'.
    container_dir = os.path.normpath(dir_part)
    for pattern, substitute in ((r'/\.\.', '/_dotdot_'),
                                (r'^\.\.', '_dotdot_'),
                                (r'^~/', '_home_/'),
                                (r'^file:/', '')):
        container_dir = re.sub(pattern, substitute, container_dir)
    container_dir = container_dir.lstrip('./')
    docker_path = directory_fmt('file/' + container_dir) + base_name
    return normed_uri, docker_path
constant[Rewrite local file URIs as required by the rewrite_uris method.
Local file paths, unlike GCS paths, may have their raw URI simplified by
os.path.normpath which collapses extraneous indirect characters.
>>> _local_uri_rewriter('/tmp/a_path/../B_PATH/file.txt')
('/tmp/B_PATH/file.txt', 'file/tmp/B_PATH/file.txt')
>>> _local_uri_rewriter('/myhome/./mydir/')
('/myhome/mydir/', 'file/myhome/mydir/')
The local path rewriter will also work to preserve relative paths even
when creating the docker path. This prevents leaking of information on the
invoker's system to the remote system. Doing this requires a number of path
substitutions denoted with the _<rewrite>_ convention.
>>> _local_uri_rewriter('./../upper_dir/')[1]
'file/_dotdot_/upper_dir/'
>>> _local_uri_rewriter('~/localdata/*.bam')[1]
'file/_home_/localdata/*.bam'
Args:
raw_uri: (str) the raw file or directory path.
Returns:
normalized: a simplified and/or expanded version of the uri.
docker_path: the uri rewritten in the format required for mounting inside
a docker worker.
]
<ast.Tuple object at 0x7da1b0109e10> assign[=] call[name[os].path.split, parameter[name[raw_uri]]]
variable[prefix_replacements] assign[=] list[[<ast.Tuple object at 0x7da1b0014460>, <ast.Tuple object at 0x7da1b0014520>, <ast.Tuple object at 0x7da1b00145e0>, <ast.Tuple object at 0x7da1b0014f70>]]
variable[normed_path] assign[=] name[raw_path]
for taget[tuple[[<ast.Name object at 0x7da1b0014af0>, <ast.Name object at 0x7da1b00149d0>]]] in starred[name[prefix_replacements]] begin[:]
if call[name[normed_path].startswith, parameter[name[prefix]]] begin[:]
variable[normed_path] assign[=] call[name[os].path.join, parameter[name[replacement], call[name[normed_path]][<ast.Slice object at 0x7da1b0014a30>]]]
variable[normed_uri] assign[=] call[name[directory_fmt], parameter[call[name[os].path.abspath, parameter[name[normed_path]]]]]
variable[normed_uri] assign[=] call[name[os].path.join, parameter[name[normed_uri], name[filename]]]
variable[docker_rewrites] assign[=] list[[<ast.Tuple object at 0x7da1b00166b0>, <ast.Tuple object at 0x7da1b0015300>, <ast.Tuple object at 0x7da1b0015150>, <ast.Tuple object at 0x7da1b00150c0>]]
variable[docker_path] assign[=] call[name[os].path.normpath, parameter[name[raw_path]]]
for taget[tuple[[<ast.Name object at 0x7da1b0015480>, <ast.Name object at 0x7da1b0015180>]]] in starred[name[docker_rewrites]] begin[:]
variable[docker_path] assign[=] call[name[re].sub, parameter[name[pattern], name[replacement], name[docker_path]]]
variable[docker_path] assign[=] call[name[docker_path].lstrip, parameter[constant[./]]]
variable[docker_path] assign[=] binary_operation[call[name[directory_fmt], parameter[binary_operation[constant[file/] + name[docker_path]]]] + name[filename]]
return[tuple[[<ast.Name object at 0x7da1b0015390>, <ast.Name object at 0x7da1b0015b70>]]] | keyword[def] identifier[_local_uri_rewriter] ( identifier[raw_uri] ):
literal[string]
identifier[raw_path] , identifier[filename] = identifier[os] . identifier[path] . identifier[split] ( identifier[raw_uri] )
identifier[prefix_replacements] =[( literal[string] , literal[string] ),( literal[string] , identifier[os] . identifier[getenv] ( literal[string] )),( literal[string] ,
literal[string] ),
( literal[string] , literal[string] )]
identifier[normed_path] = identifier[raw_path]
keyword[for] identifier[prefix] , identifier[replacement] keyword[in] identifier[prefix_replacements] :
keyword[if] identifier[normed_path] . identifier[startswith] ( identifier[prefix] ):
identifier[normed_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[replacement] , identifier[normed_path] [ identifier[len] ( identifier[prefix] ):])
identifier[normed_uri] = identifier[directory_fmt] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[normed_path] ))
identifier[normed_uri] = identifier[os] . identifier[path] . identifier[join] ( identifier[normed_uri] , identifier[filename] )
identifier[docker_rewrites] =[( literal[string] , literal[string] ),( literal[string] , literal[string] ),
( literal[string] , literal[string] ),( literal[string] , literal[string] )]
identifier[docker_path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[raw_path] )
keyword[for] identifier[pattern] , identifier[replacement] keyword[in] identifier[docker_rewrites] :
identifier[docker_path] = identifier[re] . identifier[sub] ( identifier[pattern] , identifier[replacement] , identifier[docker_path] )
identifier[docker_path] = identifier[docker_path] . identifier[lstrip] ( literal[string] )
identifier[docker_path] = identifier[directory_fmt] ( literal[string] + identifier[docker_path] )+ identifier[filename]
keyword[return] identifier[normed_uri] , identifier[docker_path] | def _local_uri_rewriter(raw_uri):
"""Rewrite local file URIs as required by the rewrite_uris method.
Local file paths, unlike GCS paths, may have their raw URI simplified by
os.path.normpath which collapses extraneous indirect characters.
>>> _local_uri_rewriter('/tmp/a_path/../B_PATH/file.txt')
('/tmp/B_PATH/file.txt', 'file/tmp/B_PATH/file.txt')
>>> _local_uri_rewriter('/myhome/./mydir/')
('/myhome/mydir/', 'file/myhome/mydir/')
The local path rewriter will also work to preserve relative paths even
when creating the docker path. This prevents leaking of information on the
invoker's system to the remote system. Doing this requires a number of path
substitutions denoted with the _<rewrite>_ convention.
>>> _local_uri_rewriter('./../upper_dir/')[1]
'file/_dotdot_/upper_dir/'
>>> _local_uri_rewriter('~/localdata/*.bam')[1]
'file/_home_/localdata/*.bam'
Args:
raw_uri: (str) the raw file or directory path.
Returns:
normalized: a simplified and/or expanded version of the uri.
docker_path: the uri rewritten in the format required for mounting inside
a docker worker.
"""
# The path is split into components so that the filename is not rewritten.
(raw_path, filename) = os.path.split(raw_uri)
# Generate the local path that can be resolved by filesystem operations,
# this removes special shell characters, condenses indirects and replaces
# any unnecessary prefix.
prefix_replacements = [('file:///', '/'), ('~/', os.getenv('HOME')), ('./', ''), ('file:/', '/')]
normed_path = raw_path
for (prefix, replacement) in prefix_replacements:
if normed_path.startswith(prefix):
normed_path = os.path.join(replacement, normed_path[len(prefix):]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Because abspath strips the trailing '/' from bare directory references
# other than root, this ensures that all directory references end with '/'.
normed_uri = directory_fmt(os.path.abspath(normed_path))
normed_uri = os.path.join(normed_uri, filename)
# Generate the path used inside the docker image;
# 1) Get rid of extra indirects: /this/./that -> /this/that
# 2) Rewrite required indirects as synthetic characters.
# 3) Strip relative or absolute path leading character.
# 4) Add 'file/' prefix.
docker_rewrites = [('/\\.\\.', '/_dotdot_'), ('^\\.\\.', '_dotdot_'), ('^~/', '_home_/'), ('^file:/', '')]
docker_path = os.path.normpath(raw_path)
for (pattern, replacement) in docker_rewrites:
docker_path = re.sub(pattern, replacement, docker_path) # depends on [control=['for'], data=[]]
docker_path = docker_path.lstrip('./') # Strips any of '.' './' '/'.
docker_path = directory_fmt('file/' + docker_path) + filename
return (normed_uri, docker_path) |
def quit(self, reason=''):
    """
    Send a QUIT message, stop Lurklib's main loop and tear down the
    connection.
    Optional arguments:
    * reason='' - Reason for quitting.
    """
    with self.lock:
        self.keep_going = False           # ends the main loop
        self._quit(reason)                # transmit QUIT to the server
        sock = self._socket
        sock.shutdown(self._m_socket.SHUT_RDWR)
        sock.close()
constant[
Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting.
]
with name[self].lock begin[:]
name[self].keep_going assign[=] constant[False]
call[name[self]._quit, parameter[name[reason]]]
call[name[self]._socket.shutdown, parameter[name[self]._m_socket.SHUT_RDWR]]
call[name[self]._socket.close, parameter[]] | keyword[def] identifier[quit] ( identifier[self] , identifier[reason] = literal[string] ):
literal[string]
keyword[with] identifier[self] . identifier[lock] :
identifier[self] . identifier[keep_going] = keyword[False]
identifier[self] . identifier[_quit] ( identifier[reason] )
identifier[self] . identifier[_socket] . identifier[shutdown] ( identifier[self] . identifier[_m_socket] . identifier[SHUT_RDWR] )
identifier[self] . identifier[_socket] . identifier[close] () | def quit(self, reason=''):
"""
Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting.
"""
with self.lock:
self.keep_going = False
self._quit(reason)
self._socket.shutdown(self._m_socket.SHUT_RDWR)
self._socket.close() # depends on [control=['with'], data=[]] |
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page) | def function[_build_page, parameter[self, filepath]]:
constant[ To build from filepath, relative to pages_dir ]
variable[filename] assign[=] call[call[name[filepath].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da2054a40d0>]
if <ast.BoolOp object at 0x7da2054a4d00> begin[:]
variable[meta] assign[=] call[name[self]._get_page_meta, parameter[name[filepath]]]
variable[content] assign[=] call[name[self]._get_page_content, parameter[name[filepath]]]
variable[_default_page] assign[=] dictionary[[<ast.Constant object at 0x7da2054a66b0>, <ast.Constant object at 0x7da2054a74c0>, <ast.Constant object at 0x7da2054a5420>, <ast.Constant object at 0x7da2054a71f0>, <ast.Constant object at 0x7da2054a7760>, <ast.Constant object at 0x7da2054a6b60>, <ast.Constant object at 0x7da2054a61a0>], [<ast.Attribute object at 0x7da2054a4940>, <ast.Subscript object at 0x7da2054a7880>, <ast.Dict object at 0x7da2054a6920>, <ast.Name object at 0x7da2054a64d0>, <ast.Call object at 0x7da2054a4670>, <ast.Call object at 0x7da2054a68f0>, <ast.BoolOp object at 0x7da2054a6b30>]]
variable[_generator] assign[=] call[name[meta].get, parameter[constant[_generator]]]
if name[_generator] begin[:]
variable[data] assign[=] call[name[self]._data.get, parameter[call[name[_generator].get, parameter[constant[data_source]]]]]
variable[special_meta] assign[=] list[[<ast.Constant object at 0x7da20cabcfd0>, <ast.Constant object at 0x7da20cabc550>, <ast.Constant object at 0x7da20cabed70>]]
if compare[call[name[_generator].get, parameter[constant[type]]] equal[==] constant[single]] begin[:]
for taget[name[d]] in starred[name[data]] begin[:]
variable[dmeta] assign[=] call[name[copy].deepcopy, parameter[name[meta]]]
variable[page] assign[=] call[name[copy].deepcopy, parameter[name[_default_page]]]
for taget[name[_]] in starred[name[special_meta]] begin[:]
if compare[name[_] in name[d]] begin[:]
call[name[dmeta]][name[_]] assign[=] call[name[d].get, parameter[name[_]]]
if compare[constant[slug] in name[_generator]] begin[:]
call[name[dmeta]][constant[slug]] assign[=] call[call[name[_generator].get, parameter[constant[slug]]].format, parameter[]]
if compare[constant[slug] <ast.NotIn object at 0x7da2590d7190> name[dmeta]] begin[:]
call[name[print], parameter[constant[WARNING: Skipping page because it's missing `slug`]]]
continue
variable[slug] assign[=] call[name[dmeta].get, parameter[constant[slug]]]
call[name[dmeta]][constant[url]] assign[=] name[slug]
call[name[dmeta]][constant[context]] assign[=] name[d]
call[name[page].update, parameter[dictionary[[<ast.Constant object at 0x7da18dc07640>, <ast.Constant object at 0x7da18dc06470>], [<ast.Name object at 0x7da18dc04e50>, <ast.Dict object at 0x7da18dc053c0>]]]]
call[name[self].create_page, parameter[]]
if compare[call[name[_generator].get, parameter[constant[type]]] equal[==] constant[pagination]] begin[:]
variable[per_page] assign[=] call[name[int], parameter[call[name[_generator].get, parameter[constant[per_page], call[name[self].site_config.get, parameter[constant[pagination.per_page], constant[10]]]]]]]
variable[left_edge] assign[=] call[name[int], parameter[call[name[_generator].get, parameter[constant[left_edge], call[name[self].site_config.get, parameter[constant[pagination.left_edge], constant[2]]]]]]]
variable[left_current] assign[=] call[name[int], parameter[call[name[_generator].get, parameter[constant[left_edge], call[name[self].site_config.get, parameter[constant[pagination.left_current], constant[3]]]]]]]
variable[right_current] assign[=] call[name[int], parameter[call[name[_generator].get, parameter[constant[right_current], call[name[self].site_config.get, parameter[constant[pagination.right_current], constant[4]]]]]]]
variable[right_edge] assign[=] call[name[int], parameter[call[name[_generator].get, parameter[constant[right_edge], call[name[self].site_config.get, parameter[constant[pagination.right_edge], constant[2]]]]]]]
variable[padding] assign[=] call[name[_generator].get, parameter[constant[padding]]]
variable[slug] assign[=] call[name[_generator].get, parameter[constant[slug]]]
variable[limit] assign[=] call[name[_generator].get, parameter[constant[limit]]]
if compare[constant[limit] in name[_generator]] begin[:]
variable[data] assign[=] call[name[data]][<ast.Slice object at 0x7da18f811750>]
variable[data_chunks] assign[=] call[name[utils].chunk_list, parameter[name[data], name[per_page]]]
variable[len_data] assign[=] call[name[len], parameter[name[data]]]
for taget[tuple[[<ast.Name object at 0x7da18f812920>, <ast.Name object at 0x7da18f812980>]]] in starred[call[name[enumerate], parameter[name[data_chunks]]]] begin[:]
variable[dmeta] assign[=] call[name[copy].deepcopy, parameter[name[meta]]]
variable[page] assign[=] call[name[copy].deepcopy, parameter[name[_default_page]]]
variable[page_num] assign[=] binary_operation[name[i] + constant[1]]
variable[_paginator] assign[=] call[name[Paginator], parameter[list[[]]]]
name[_paginator].slug assign[=] name[slug]
name[_paginator].index_slug assign[=] call[name[_generator].get, parameter[constant[index_slug]]]
variable[_slug] assign[=] call[name[slug].format, parameter[]]
call[name[dmeta]][constant[url]] assign[=] name[_slug]
call[name[dmeta]][constant[context]] assign[=] name[d]
call[name[dmeta]][constant[paginator]] assign[=] name[_paginator]
call[name[page].update, parameter[dictionary[[<ast.Constant object at 0x7da18f811840>, <ast.Constant object at 0x7da18f8131c0>], [<ast.Name object at 0x7da18f813e80>, <ast.Dict object at 0x7da18f811570>]]]]
call[name[self].create_page, parameter[]]
if <ast.BoolOp object at 0x7da18f813490> begin[:]
call[name[page]][constant[filepath]] assign[=] call[name[_generator].get, parameter[constant[index_slug]]]
call[name[self].create_page, parameter[]] | keyword[def] identifier[_build_page] ( identifier[self] , identifier[filepath] ):
literal[string]
identifier[filename] = identifier[filepath] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[if] keyword[not] identifier[filename] . identifier[startswith] (( literal[string] , literal[string] )) keyword[and] ( identifier[filename] . identifier[endswith] ( identifier[PAGE_FORMAT] )):
identifier[meta] = identifier[self] . identifier[_get_page_meta] ( identifier[filepath] )
identifier[content] = identifier[self] . identifier[_get_page_content] ( identifier[filepath] )
identifier[_default_page] ={
literal[string] : identifier[self] . identifier[build_dir] ,
literal[string] : identifier[meta] [ literal[string] ],
literal[string] :{ literal[string] : identifier[meta] },
literal[string] : identifier[content] ,
literal[string] : identifier[meta] . identifier[get] ( literal[string] ),
literal[string] : identifier[meta] . identifier[get] ( literal[string] ),
literal[string] : identifier[meta] . identifier[get] ( literal[string] ) keyword[or] identifier[self] . identifier[default_layout]
}
identifier[_generator] = identifier[meta] . identifier[get] ( literal[string] )
keyword[if] identifier[_generator] :
identifier[data] = identifier[self] . identifier[_data] . identifier[get] ( identifier[_generator] . identifier[get] ( literal[string] ))
identifier[special_meta] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[_generator] . identifier[get] ( literal[string] )== literal[string] :
keyword[for] identifier[d] keyword[in] identifier[data] :
identifier[dmeta] = identifier[copy] . identifier[deepcopy] ( identifier[meta] )
identifier[page] = identifier[copy] . identifier[deepcopy] ( identifier[_default_page] )
keyword[for] identifier[_] keyword[in] identifier[special_meta] :
keyword[if] identifier[_] keyword[in] identifier[d] :
identifier[dmeta] [ identifier[_] ]= identifier[d] . identifier[get] ( identifier[_] )
keyword[if] literal[string] keyword[in] identifier[_generator] :
identifier[dmeta] [ literal[string] ]= identifier[_generator] . identifier[get] ( literal[string] ). identifier[format] (** identifier[d] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[dmeta] :
identifier[print] ( literal[string] )
keyword[continue]
identifier[slug] = identifier[dmeta] . identifier[get] ( literal[string] )
identifier[dmeta] [ literal[string] ]= identifier[slug]
identifier[dmeta] [ literal[string] ]= identifier[d]
identifier[page] . identifier[update] ({
literal[string] : identifier[slug] ,
literal[string] :{ literal[string] : identifier[dmeta] }
})
identifier[self] . identifier[create_page] (** identifier[page] )
keyword[if] identifier[_generator] . identifier[get] ( literal[string] )== literal[string] :
identifier[per_page] = identifier[int] ( identifier[_generator] . identifier[get] ( literal[string] , identifier[self] . identifier[site_config] . identifier[get] ( literal[string] , literal[int] )))
identifier[left_edge] = identifier[int] ( identifier[_generator] . identifier[get] ( literal[string] , identifier[self] . identifier[site_config] . identifier[get] ( literal[string] , literal[int] )))
identifier[left_current] = identifier[int] ( identifier[_generator] . identifier[get] ( literal[string] , identifier[self] . identifier[site_config] . identifier[get] ( literal[string] , literal[int] )))
identifier[right_current] = identifier[int] ( identifier[_generator] . identifier[get] ( literal[string] , identifier[self] . identifier[site_config] . identifier[get] ( literal[string] , literal[int] )))
identifier[right_edge] = identifier[int] ( identifier[_generator] . identifier[get] ( literal[string] , identifier[self] . identifier[site_config] . identifier[get] ( literal[string] , literal[int] )))
identifier[padding] = identifier[_generator] . identifier[get] ( literal[string] )
identifier[slug] = identifier[_generator] . identifier[get] ( literal[string] )
identifier[limit] = identifier[_generator] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_generator] :
identifier[data] = identifier[data] [: identifier[int] ( identifier[limit] )]
identifier[data_chunks] = identifier[utils] . identifier[chunk_list] ( identifier[data] , identifier[per_page] )
identifier[len_data] = identifier[len] ( identifier[data] )
keyword[for] identifier[i] , identifier[d] keyword[in] identifier[enumerate] ( identifier[data_chunks] ):
identifier[dmeta] = identifier[copy] . identifier[deepcopy] ( identifier[meta] )
identifier[page] = identifier[copy] . identifier[deepcopy] ( identifier[_default_page] )
identifier[page_num] = identifier[i] + literal[int]
identifier[_paginator] = identifier[Paginator] ([],
identifier[total] = identifier[len_data] ,
identifier[page] = identifier[page_num] ,
identifier[per_page] = identifier[per_page] ,
identifier[padding] = identifier[padding] ,
identifier[left_edge] = identifier[left_edge] ,
identifier[right_edge] = identifier[right_edge] ,
identifier[left_current] = identifier[left_current] ,
identifier[right_current] = identifier[right_current] )
identifier[_paginator] . identifier[slug] = identifier[slug]
identifier[_paginator] . identifier[index_slug] = identifier[_generator] . identifier[get] ( literal[string] )
identifier[_slug] = identifier[slug] . identifier[format] (**{ literal[string] : identifier[page_num] })
identifier[dmeta] [ literal[string] ]= identifier[_slug]
identifier[dmeta] [ literal[string] ]= identifier[d]
identifier[dmeta] [ literal[string] ]= identifier[_paginator]
identifier[page] . identifier[update] ({
literal[string] : identifier[_slug] ,
literal[string] :{ literal[string] : identifier[dmeta] }
})
identifier[self] . identifier[create_page] (** identifier[page] )
keyword[if] identifier[i] == literal[int] keyword[and] identifier[_generator] . identifier[get] ( literal[string] ):
identifier[page] [ literal[string] ]= identifier[_generator] . identifier[get] ( literal[string] )
identifier[self] . identifier[create_page] (** identifier[page] )
keyword[else] :
identifier[self] . identifier[create_page] (** identifier[_default_page] ) | def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split('/')[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(('_', '.')) and filename.endswith(PAGE_FORMAT):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {'build_dir': self.build_dir, 'filepath': meta['filepath'], 'context': {'page': meta}, 'content': content, 'markup': meta.get('markup'), 'template': meta.get('template'), 'layout': meta.get('layout') or self.default_layout}
# GENERATOR
# Allows to generate
_generator = meta.get('_generator')
if _generator:
data = self._data.get(_generator.get('data_source'))
# We want these back in meta in they exists in the data
special_meta = ['title', 'slug', 'description']
# SINGLE
if _generator.get('type') == 'single':
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_) # depends on [control=['if'], data=['_', 'd']] # depends on [control=['for'], data=['_']]
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if 'slug' in _generator:
dmeta['slug'] = _generator.get('slug').format(**d) # depends on [control=['if'], data=['_generator']]
# Slug is required
if 'slug' not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue # depends on [control=['if'], data=[]]
slug = dmeta.get('slug')
dmeta['url'] = slug
dmeta['context'] = d
page.update({'filepath': slug, 'context': {'page': dmeta}})
self.create_page(**page) # depends on [control=['for'], data=['d']] # depends on [control=['if'], data=[]]
if _generator.get('type') == 'pagination':
per_page = int(_generator.get('per_page', self.site_config.get('pagination.per_page', 10)))
left_edge = int(_generator.get('left_edge', self.site_config.get('pagination.left_edge', 2)))
left_current = int(_generator.get('left_edge', self.site_config.get('pagination.left_current', 3)))
right_current = int(_generator.get('right_current', self.site_config.get('pagination.right_current', 4)))
right_edge = int(_generator.get('right_edge', self.site_config.get('pagination.right_edge', 2)))
padding = _generator.get('padding')
slug = _generator.get('slug')
limit = _generator.get('limit')
if 'limit' in _generator:
data = data[:int(limit)] # depends on [control=['if'], data=[]]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for (i, d) in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([], total=len_data, page=page_num, per_page=per_page, padding=padding, left_edge=left_edge, right_edge=right_edge, left_current=left_current, right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get('index_slug')
_slug = slug.format(**{'page_num': page_num})
dmeta['url'] = _slug
dmeta['context'] = d
dmeta['paginator'] = _paginator
page.update({'filepath': _slug, 'context': {'page': dmeta}})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get('index_slug'):
page['filepath'] = _generator.get('index_slug')
self.create_page(**page) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# NORMAL PAGE
self.create_page(**_default_page) # depends on [control=['if'], data=[]] |
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template | def function[read, parameter[self, file_or_path]]:
constant[Read template from cache or file.]
if compare[name[file_or_path] in name[self]._cached_templates] begin[:]
return[call[name[self]._cached_templates][name[file_or_path]]]
if call[name[is_filelike], parameter[name[file_or_path]]] begin[:]
variable[template] assign[=] call[name[file_or_path].read, parameter[]]
variable[dirname] assign[=] constant[None]
variable[template] assign[=] call[name[self]._engine, parameter[name[template]]]
call[name[self]._cached_templates][name[file_or_path]] assign[=] name[template]
return[name[template]] | keyword[def] identifier[read] ( identifier[self] , identifier[file_or_path] ):
literal[string]
keyword[if] identifier[file_or_path] keyword[in] identifier[self] . identifier[_cached_templates] :
keyword[return] identifier[self] . identifier[_cached_templates] [ identifier[file_or_path] ]
keyword[if] identifier[is_filelike] ( identifier[file_or_path] ):
identifier[template] = identifier[file_or_path] . identifier[read] ()
identifier[dirname] = keyword[None]
keyword[else] :
keyword[with] identifier[open] ( identifier[file_or_path] , literal[string] ) keyword[as] identifier[f] :
identifier[template] = identifier[f] . identifier[read] ()
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[file_or_path] )
identifier[template] = identifier[self] . identifier[_engine] ( identifier[template] ,
identifier[dirname] = identifier[dirname] ,
identifier[tolerant] = identifier[self] . identifier[_tolerant] )
identifier[self] . identifier[_cached_templates] [ identifier[file_or_path] ]= identifier[template]
keyword[return] identifier[template] | def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path] # depends on [control=['if'], data=['file_or_path']]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None # depends on [control=['if'], data=[]]
else:
with open(file_or_path, 'r') as f:
template = f.read() # depends on [control=['with'], data=['f']]
dirname = os.path.dirname(file_or_path)
template = self._engine(template, dirname=dirname, tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template |
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value():
return queryset.filter(country=self.value())
else:
return queryset | def function[queryset, parameter[self, request, queryset]]:
constant[
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
]
if call[name[self].value, parameter[]] begin[:]
return[call[name[queryset].filter, parameter[]]] | keyword[def] identifier[queryset] ( identifier[self] , identifier[request] , identifier[queryset] ):
literal[string]
keyword[if] identifier[self] . identifier[value] ():
keyword[return] identifier[queryset] . identifier[filter] ( identifier[country] = identifier[self] . identifier[value] ())
keyword[else] :
keyword[return] identifier[queryset] | def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value():
return queryset.filter(country=self.value()) # depends on [control=['if'], data=[]]
else:
return queryset |
def list_hardware(self, tags=None, cpus=None, memory=None, hostname=None,
domain=None, datacenter=None, nic_speed=None,
public_ip=None, private_ip=None, **kwargs):
"""List all hardware (servers and bare metal computing instances).
:param list tags: filter based on tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory in gigabytes
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string datacenter: filter based on datacenter
:param integer nic_speed: filter based on network speed (in MBPS)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
hardware. This list will contain both dedicated servers and
bare metal computing instances
Example::
# Using a custom object-mask. Will get ONLY what is specified
# These will stem from the SoftLayer_Hardware_Server datatype
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
result = mgr.list_hardware(mask=object_mask)
"""
if 'mask' not in kwargs:
hw_items = [
'id',
'hostname',
'domain',
'hardwareStatusId',
'globalIdentifier',
'fullyQualifiedDomainName',
'processorPhysicalCoreAmount',
'memoryCapacity',
'primaryBackendIpAddress',
'primaryIpAddress',
'datacenter',
]
server_items = [
'activeTransaction[id, transactionStatus[friendlyName,name]]',
]
kwargs['mask'] = ('[mask[%s],'
' mask(SoftLayer_Hardware_Server)[%s]]'
% (','.join(hw_items), ','.join(server_items)))
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['hardware']['tagReferences']['tag']['name'] = {
'operation': 'in',
'options': [{'name': 'data', 'value': tags}],
}
if cpus:
_filter['hardware']['processorPhysicalCoreAmount'] = (
utils.query_filter(cpus))
if memory:
_filter['hardware']['memoryCapacity'] = utils.query_filter(memory)
if hostname:
_filter['hardware']['hostname'] = utils.query_filter(hostname)
if domain:
_filter['hardware']['domain'] = utils.query_filter(domain)
if datacenter:
_filter['hardware']['datacenter']['name'] = (
utils.query_filter(datacenter))
if nic_speed:
_filter['hardware']['networkComponents']['maxSpeed'] = (
utils.query_filter(nic_speed))
if public_ip:
_filter['hardware']['primaryIpAddress'] = (
utils.query_filter(public_ip))
if private_ip:
_filter['hardware']['primaryBackendIpAddress'] = (
utils.query_filter(private_ip))
kwargs['filter'] = _filter.to_dict()
kwargs['iter'] = True
return self.client.call('Account', 'getHardware', **kwargs) | def function[list_hardware, parameter[self, tags, cpus, memory, hostname, domain, datacenter, nic_speed, public_ip, private_ip]]:
constant[List all hardware (servers and bare metal computing instances).
:param list tags: filter based on tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory in gigabytes
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string datacenter: filter based on datacenter
:param integer nic_speed: filter based on network speed (in MBPS)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \*\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
hardware. This list will contain both dedicated servers and
bare metal computing instances
Example::
# Using a custom object-mask. Will get ONLY what is specified
# These will stem from the SoftLayer_Hardware_Server datatype
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
result = mgr.list_hardware(mask=object_mask)
]
if compare[constant[mask] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
variable[hw_items] assign[=] list[[<ast.Constant object at 0x7da18c4cef80>, <ast.Constant object at 0x7da18c4ceb30>, <ast.Constant object at 0x7da18c4cd5d0>, <ast.Constant object at 0x7da18c4ce5f0>, <ast.Constant object at 0x7da18c4cc040>, <ast.Constant object at 0x7da18c4ccf70>, <ast.Constant object at 0x7da18c4cccd0>, <ast.Constant object at 0x7da18c4cf820>, <ast.Constant object at 0x7da18c4ce050>, <ast.Constant object at 0x7da18c4ccc40>, <ast.Constant object at 0x7da18c4cf340>]]
variable[server_items] assign[=] list[[<ast.Constant object at 0x7da18c4ce950>]]
call[name[kwargs]][constant[mask]] assign[=] binary_operation[constant[[mask[%s], mask(SoftLayer_Hardware_Server)[%s]]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18c4ccb80>, <ast.Call object at 0x7da18c4cc8b0>]]]
variable[_filter] assign[=] call[name[utils].NestedDict, parameter[<ast.BoolOp object at 0x7da18c4ce590>]]
if name[tags] begin[:]
call[call[call[call[name[_filter]][constant[hardware]]][constant[tagReferences]]][constant[tag]]][constant[name]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf0a0>, <ast.Constant object at 0x7da18c4cd900>], [<ast.Constant object at 0x7da18c4cc4c0>, <ast.List object at 0x7da18c4cd330>]]
if name[cpus] begin[:]
call[call[name[_filter]][constant[hardware]]][constant[processorPhysicalCoreAmount]] assign[=] call[name[utils].query_filter, parameter[name[cpus]]]
if name[memory] begin[:]
call[call[name[_filter]][constant[hardware]]][constant[memoryCapacity]] assign[=] call[name[utils].query_filter, parameter[name[memory]]]
if name[hostname] begin[:]
call[call[name[_filter]][constant[hardware]]][constant[hostname]] assign[=] call[name[utils].query_filter, parameter[name[hostname]]]
if name[domain] begin[:]
call[call[name[_filter]][constant[hardware]]][constant[domain]] assign[=] call[name[utils].query_filter, parameter[name[domain]]]
if name[datacenter] begin[:]
call[call[call[name[_filter]][constant[hardware]]][constant[datacenter]]][constant[name]] assign[=] call[name[utils].query_filter, parameter[name[datacenter]]]
if name[nic_speed] begin[:]
call[call[call[name[_filter]][constant[hardware]]][constant[networkComponents]]][constant[maxSpeed]] assign[=] call[name[utils].query_filter, parameter[name[nic_speed]]]
if name[public_ip] begin[:]
call[call[name[_filter]][constant[hardware]]][constant[primaryIpAddress]] assign[=] call[name[utils].query_filter, parameter[name[public_ip]]]
if name[private_ip] begin[:]
call[call[name[_filter]][constant[hardware]]][constant[primaryBackendIpAddress]] assign[=] call[name[utils].query_filter, parameter[name[private_ip]]]
call[name[kwargs]][constant[filter]] assign[=] call[name[_filter].to_dict, parameter[]]
call[name[kwargs]][constant[iter]] assign[=] constant[True]
return[call[name[self].client.call, parameter[constant[Account], constant[getHardware]]]] | keyword[def] identifier[list_hardware] ( identifier[self] , identifier[tags] = keyword[None] , identifier[cpus] = keyword[None] , identifier[memory] = keyword[None] , identifier[hostname] = keyword[None] ,
identifier[domain] = keyword[None] , identifier[datacenter] = keyword[None] , identifier[nic_speed] = keyword[None] ,
identifier[public_ip] = keyword[None] , identifier[private_ip] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[hw_items] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[server_items] =[
literal[string] ,
]
identifier[kwargs] [ literal[string] ]=( literal[string]
literal[string]
%( literal[string] . identifier[join] ( identifier[hw_items] ), literal[string] . identifier[join] ( identifier[server_items] )))
identifier[_filter] = identifier[utils] . identifier[NestedDict] ( identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] {})
keyword[if] identifier[tags] :
identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]={
literal[string] : literal[string] ,
literal[string] :[{ literal[string] : literal[string] , literal[string] : identifier[tags] }],
}
keyword[if] identifier[cpus] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[cpus] ))
keyword[if] identifier[memory] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[memory] )
keyword[if] identifier[hostname] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[hostname] )
keyword[if] identifier[domain] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[domain] )
keyword[if] identifier[datacenter] :
identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[datacenter] ))
keyword[if] identifier[nic_speed] :
identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[nic_speed] ))
keyword[if] identifier[public_ip] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[public_ip] ))
keyword[if] identifier[private_ip] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[private_ip] ))
identifier[kwargs] [ literal[string] ]= identifier[_filter] . identifier[to_dict] ()
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[return] identifier[self] . identifier[client] . identifier[call] ( literal[string] , literal[string] ,** identifier[kwargs] ) | def list_hardware(self, tags=None, cpus=None, memory=None, hostname=None, domain=None, datacenter=None, nic_speed=None, public_ip=None, private_ip=None, **kwargs):
"""List all hardware (servers and bare metal computing instances).
:param list tags: filter based on tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory in gigabytes
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string datacenter: filter based on datacenter
:param integer nic_speed: filter based on network speed (in MBPS)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
hardware. This list will contain both dedicated servers and
bare metal computing instances
Example::
# Using a custom object-mask. Will get ONLY what is specified
# These will stem from the SoftLayer_Hardware_Server datatype
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
result = mgr.list_hardware(mask=object_mask)
"""
if 'mask' not in kwargs:
hw_items = ['id', 'hostname', 'domain', 'hardwareStatusId', 'globalIdentifier', 'fullyQualifiedDomainName', 'processorPhysicalCoreAmount', 'memoryCapacity', 'primaryBackendIpAddress', 'primaryIpAddress', 'datacenter']
server_items = ['activeTransaction[id, transactionStatus[friendlyName,name]]']
kwargs['mask'] = '[mask[%s], mask(SoftLayer_Hardware_Server)[%s]]' % (','.join(hw_items), ','.join(server_items)) # depends on [control=['if'], data=['kwargs']]
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['hardware']['tagReferences']['tag']['name'] = {'operation': 'in', 'options': [{'name': 'data', 'value': tags}]} # depends on [control=['if'], data=[]]
if cpus:
_filter['hardware']['processorPhysicalCoreAmount'] = utils.query_filter(cpus) # depends on [control=['if'], data=[]]
if memory:
_filter['hardware']['memoryCapacity'] = utils.query_filter(memory) # depends on [control=['if'], data=[]]
if hostname:
_filter['hardware']['hostname'] = utils.query_filter(hostname) # depends on [control=['if'], data=[]]
if domain:
_filter['hardware']['domain'] = utils.query_filter(domain) # depends on [control=['if'], data=[]]
if datacenter:
_filter['hardware']['datacenter']['name'] = utils.query_filter(datacenter) # depends on [control=['if'], data=[]]
if nic_speed:
_filter['hardware']['networkComponents']['maxSpeed'] = utils.query_filter(nic_speed) # depends on [control=['if'], data=[]]
if public_ip:
_filter['hardware']['primaryIpAddress'] = utils.query_filter(public_ip) # depends on [control=['if'], data=[]]
if private_ip:
_filter['hardware']['primaryBackendIpAddress'] = utils.query_filter(private_ip) # depends on [control=['if'], data=[]]
kwargs['filter'] = _filter.to_dict()
kwargs['iter'] = True
return self.client.call('Account', 'getHardware', **kwargs) |
def gitrepo(cwd):
    """Collect information about the Git repository at *cwd*.

    Returns a dict describing the current HEAD commit, the branch name
    (taken from the TRAVIS_BRANCH or APPVEYOR_REPO_BRANCH environment
    variables when set, falling back to ``git rev-parse``), and the fetch
    URLs of all configured remotes.  An empty dict is returned when *cwd*
    is not a valid repository.

    Example result::

        "git": {
            "head": {
                "id": "5e837ce92220be64821128a70f6093f836dd2c05",
                "author_name": "Wil Gieseler",
                "author_email": "wil@example.com",
                "committer_name": "Wil Gieseler",
                "committer_email": "wil@example.com",
                "message": "depend on simplecov >= 0.7"
            },
            "branch": "master",
            "remotes": [{
                "name": "origin",
                "url": "https://github.com/lemurheavy/coveralls-ruby.git"
            }]
        }

    From https://github.com/coagulant/coveralls-python (with MIT license).
    """
    repo = Repository(cwd)
    if not repo.valid():
        return {}
    # %H / %aN / %ae / %cN / %ce / %s are git pretty-format placeholders
    # describing the HEAD commit.
    head = {
        'id': repo.gitlog('%H'),
        'author_name': repo.gitlog('%aN'),
        'author_email': repo.gitlog('%ae'),
        'committer_name': repo.gitlog('%cN'),
        'committer_email': repo.gitlog('%ce'),
        'message': repo.gitlog('%s')
    }
    # The rev-parse fallback is evaluated eagerly here, exactly as it was
    # inside the nested os.environ.get() default arguments.
    local_branch = repo.git('rev-parse', '--abbrev-ref', 'HEAD')[1].strip()
    branch = os.environ.get(
        'TRAVIS_BRANCH', os.environ.get('APPVEYOR_REPO_BRANCH', local_branch))
    remotes = []
    for line in repo.git('remote', '-v')[1]:
        if '(fetch)' not in line:
            continue
        fields = line.split()
        remotes.append({'name': fields[0], 'url': fields[1]})
    return {'head': head, 'branch': branch, 'remotes': remotes}
constant[Return hash of Git data that can be used to display more information to
users.
Example:
"git": {
"head": {
"id": "5e837ce92220be64821128a70f6093f836dd2c05",
"author_name": "Wil Gieseler",
"author_email": "wil@example.com",
"committer_name": "Wil Gieseler",
"committer_email": "wil@example.com",
"message": "depend on simplecov >= 0.7"
},
"branch": "master",
"remotes": [{
"name": "origin",
"url": "https://github.com/lemurheavy/coveralls-ruby.git"
}]
}
From https://github.com/coagulant/coveralls-python (with MIT license).
]
variable[repo] assign[=] call[name[Repository], parameter[name[cwd]]]
if <ast.UnaryOp object at 0x7da1b1275de0> begin[:]
return[dictionary[[], []]]
return[dictionary[[<ast.Constant object at 0x7da1b1274280>, <ast.Constant object at 0x7da1b1277c40>, <ast.Constant object at 0x7da1b1277580>], [<ast.Dict object at 0x7da1b1276080>, <ast.Call object at 0x7da1b12749d0>, <ast.ListComp object at 0x7da1b1277c70>]]] | keyword[def] identifier[gitrepo] ( identifier[cwd] ):
literal[string]
identifier[repo] = identifier[Repository] ( identifier[cwd] )
keyword[if] keyword[not] identifier[repo] . identifier[valid] ():
keyword[return] {}
keyword[return] {
literal[string] :{
literal[string] : identifier[repo] . identifier[gitlog] ( literal[string] ),
literal[string] : identifier[repo] . identifier[gitlog] ( literal[string] ),
literal[string] : identifier[repo] . identifier[gitlog] ( literal[string] ),
literal[string] : identifier[repo] . identifier[gitlog] ( literal[string] ),
literal[string] : identifier[repo] . identifier[gitlog] ( literal[string] ),
literal[string] : identifier[repo] . identifier[gitlog] ( literal[string] )
},
literal[string] : identifier[os] . identifier[environ] . identifier[get] ( literal[string] ,
identifier[os] . identifier[environ] . identifier[get] ( literal[string] ,
identifier[repo] . identifier[git] ( literal[string] , literal[string] , literal[string] )[ literal[int] ]. identifier[strip] ())),
literal[string] :[{ literal[string] : identifier[line] . identifier[split] ()[ literal[int] ], literal[string] : identifier[line] . identifier[split] ()[ literal[int] ]}
keyword[for] identifier[line] keyword[in] identifier[repo] . identifier[git] ( literal[string] , literal[string] )[ literal[int] ] keyword[if] literal[string] keyword[in] identifier[line] ]
} | def gitrepo(cwd):
"""Return hash of Git data that can be used to display more information to
users.
Example:
"git": {
"head": {
"id": "5e837ce92220be64821128a70f6093f836dd2c05",
"author_name": "Wil Gieseler",
"author_email": "wil@example.com",
"committer_name": "Wil Gieseler",
"committer_email": "wil@example.com",
"message": "depend on simplecov >= 0.7"
},
"branch": "master",
"remotes": [{
"name": "origin",
"url": "https://github.com/lemurheavy/coveralls-ruby.git"
}]
}
From https://github.com/coagulant/coveralls-python (with MIT license).
"""
repo = Repository(cwd)
if not repo.valid():
return {} # depends on [control=['if'], data=[]]
return {'head': {'id': repo.gitlog('%H'), 'author_name': repo.gitlog('%aN'), 'author_email': repo.gitlog('%ae'), 'committer_name': repo.gitlog('%cN'), 'committer_email': repo.gitlog('%ce'), 'message': repo.gitlog('%s')}, 'branch': os.environ.get('TRAVIS_BRANCH', os.environ.get('APPVEYOR_REPO_BRANCH', repo.git('rev-parse', '--abbrev-ref', 'HEAD')[1].strip())), 'remotes': [{'name': line.split()[0], 'url': line.split()[1]} for line in repo.git('remote', '-v')[1] if '(fetch)' in line]} |
def encode_filesystem_name(input_str):
    """Encodes an arbitrary unicode string to a generic filesystem-compatible
    non-unicode filename.
    The result after encoding will only contain the standard ascii lowercase
    letters (a-z), the digits (0-9), or periods, underscores, or dashes
    (".", "_", or "-"). No uppercase letters will be used, for
    comaptibility with case-insensitive filesystems.
    The rules for the encoding are:
    1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
       encoded as-is.
    2) Any underscore is encoded as a double-underscore ("__")
    3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
       by the corresponding lowercase letter (ie, "A" => "_a")
    4) All other characters are encoded using their UTF-8 encoded unicode
       representation, in the following format: "_NHH..., where:
       a) N represents the number of bytes needed for the UTF-8 encoding,
          except with N=0 for one-byte representation (the exception for N=1
          is made both because it means that for "standard" ascii characters
          in the range 0-127, their encoding will be _0xx, where xx is their
          ascii hex code; and because it mirrors the ways UTF-8 encoding
          itself works, where the number of bytes needed for the character can
          be determined by counting the number of leading "1"s in the binary
          representation of the character, except that if it is a 1-byte
          sequence, there are 0 leading 1's).
       b) HH represents the bytes of the corresponding UTF-8 encoding, in
          hexadecimal, with each byte zero-padded to exactly two lower-case
          hex digits so the encoding is unambiguous and decodable.
       As an example, the character "*", whose (hex) UTF-8 representation
       of 2A, would be encoded as "_02a", while the "euro" symbol, which
       has a UTF-8 representation of E2 82 AC, would be encoded as
       "_3e282ac". (Note that, strictly speaking, the "N" part of the
       encoding is redundant information, since it is essentially encoded
       in the UTF-8 representation itself, but it makes the resulting
       string more human-readable, and easier to decode).
    As an example, the string "Foo_Bar (fun).txt" would get encoded as:
    _foo___bar_020_028fun_029.txt

    :param input_str: Text to encode; byte strings are decoded as ASCII first.
    :type input_str: str or unicode
    :return: Filesystem-safe encoded name.
    :rtype: str
    :raises TypeError: if *input_str* is not a string type.
    """
    # ``type(u'')`` is ``unicode`` on Python 2 and ``str`` on Python 3,
    # avoiding a NameError on Python 3 where ``unicode`` does not exist.
    text_type = type(u'')
    if isinstance(input_str, bytes):
        # Matches the original ``unicode(input_str)`` behavior on Python 2,
        # which implicitly decoded byte strings as ASCII.
        input_str = input_str.decode('ascii')
    elif not isinstance(input_str, text_type):
        raise TypeError("input_str must be a basestring")
    as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-'
    uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    result = []
    for char in input_str:
        if char in as_is:
            result.append(char)
        elif char == u'_':
            result.append('__')
        elif char in uppercase:
            result.append('_%s' % char.lower())
        else:
            utf8 = char.encode('utf8')
            nbytes = len(utf8)
            if nbytes == 1:
                nbytes = 0
            # bytearray() yields ints on both Python 2 and 3 (iterating
            # ``bytes`` on Python 3 yields ints, so ord() would fail there).
            # BUG FIX: use %02x instead of %x -- without zero-padding, bytes
            # below 0x10 produced a single hex digit, which contradicts the
            # two-digit "HH" format documented above and makes the encoding
            # ambiguous (undecodable).
            hex_bytes = ''.join('%02x' % byte for byte in bytearray(utf8))
            result.append('_%d%s' % (nbytes, hex_bytes))
    return ''.join(result)
constant[Encodes an arbitrary unicode string to a generic filesystem-compatible
non-unicode filename.
The result after encoding will only contain the standard ascii lowercase
letters (a-z), the digits (0-9), or periods, underscores, or dashes
(".", "_", or "-"). No uppercase letters will be used, for
comaptibility with case-insensitive filesystems.
The rules for the encoding are:
1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
encoded as-is.
2) Any underscore is encoded as a double-underscore ("__")
3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
by the corresponding lowercase letter (ie, "A" => "_a")
4) All other characters are encoded using their UTF-8 encoded unicode
representation, in the following format: "_NHH..., where:
a) N represents the number of bytes needed for the UTF-8 encoding,
except with N=0 for one-byte representation (the exception for N=1
is made both because it means that for "standard" ascii characters
in the range 0-127, their encoding will be _0xx, where xx is their
ascii hex code; and because it mirrors the ways UTF-8 encoding
itself works, where the number of bytes needed for the character can
be determined by counting the number of leading "1"s in the binary
representation of the character, except that if it is a 1-byte
sequence, there are 0 leading 1's).
b) HH represents the bytes of the corresponding UTF-8 encoding, in
hexadecimal (using lower-case letters)
As an example, the character "*", whose (hex) UTF-8 representation
of 2A, would be encoded as "_02a", while the "euro" symbol, which
has a UTF-8 representation of E2 82 AC, would be encoded as
"_3e282ac". (Note that, strictly speaking, the "N" part of the
encoding is redundant information, since it is essentially encoded
in the UTF-8 representation itself, but it makes the resulting
string more human-readable, and easier to decode).
As an example, the string "Foo_Bar (fun).txt" would get encoded as:
_foo___bar_020_028fun_029.txt
]
if call[name[isinstance], parameter[name[input_str], name[str]]] begin[:]
variable[input_str] assign[=] call[name[unicode], parameter[name[input_str]]]
variable[as_is] assign[=] constant[abcdefghijklmnopqrstuvwxyz0123456789.-]
variable[uppercase] assign[=] constant[ABCDEFGHIJKLMNOPQRSTUVWXYZ]
variable[result] assign[=] list[[]]
for taget[name[char]] in starred[name[input_str]] begin[:]
if compare[name[char] in name[as_is]] begin[:]
call[name[result].append, parameter[name[char]]]
return[call[constant[].join, parameter[name[result]]]] | keyword[def] identifier[encode_filesystem_name] ( identifier[input_str] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[input_str] , identifier[str] ):
identifier[input_str] = identifier[unicode] ( identifier[input_str] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[input_str] , identifier[unicode] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[as_is] = literal[string]
identifier[uppercase] = literal[string]
identifier[result] =[]
keyword[for] identifier[char] keyword[in] identifier[input_str] :
keyword[if] identifier[char] keyword[in] identifier[as_is] :
identifier[result] . identifier[append] ( identifier[char] )
keyword[elif] identifier[char] == literal[string] :
identifier[result] . identifier[append] ( literal[string] )
keyword[elif] identifier[char] keyword[in] identifier[uppercase] :
identifier[result] . identifier[append] ( literal[string] % identifier[char] . identifier[lower] ())
keyword[else] :
identifier[utf8] = identifier[char] . identifier[encode] ( literal[string] )
identifier[N] = identifier[len] ( identifier[utf8] )
keyword[if] identifier[N] == literal[int] :
identifier[N] = literal[int]
identifier[HH] = literal[string] . identifier[join] ( literal[string] % identifier[ord] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[utf8] )
identifier[result] . identifier[append] ( literal[string] %( identifier[N] , identifier[HH] ))
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def encode_filesystem_name(input_str):
"""Encodes an arbitrary unicode string to a generic filesystem-compatible
non-unicode filename.
The result after encoding will only contain the standard ascii lowercase
letters (a-z), the digits (0-9), or periods, underscores, or dashes
(".", "_", or "-"). No uppercase letters will be used, for
comaptibility with case-insensitive filesystems.
The rules for the encoding are:
1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
encoded as-is.
2) Any underscore is encoded as a double-underscore ("__")
3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
by the corresponding lowercase letter (ie, "A" => "_a")
4) All other characters are encoded using their UTF-8 encoded unicode
representation, in the following format: "_NHH..., where:
a) N represents the number of bytes needed for the UTF-8 encoding,
except with N=0 for one-byte representation (the exception for N=1
is made both because it means that for "standard" ascii characters
in the range 0-127, their encoding will be _0xx, where xx is their
ascii hex code; and because it mirrors the ways UTF-8 encoding
itself works, where the number of bytes needed for the character can
be determined by counting the number of leading "1"s in the binary
representation of the character, except that if it is a 1-byte
sequence, there are 0 leading 1's).
b) HH represents the bytes of the corresponding UTF-8 encoding, in
hexadecimal (using lower-case letters)
As an example, the character "*", whose (hex) UTF-8 representation
of 2A, would be encoded as "_02a", while the "euro" symbol, which
has a UTF-8 representation of E2 82 AC, would be encoded as
"_3e282ac". (Note that, strictly speaking, the "N" part of the
encoding is redundant information, since it is essentially encoded
in the UTF-8 representation itself, but it makes the resulting
string more human-readable, and easier to decode).
As an example, the string "Foo_Bar (fun).txt" would get encoded as:
_foo___bar_020_028fun_029.txt
"""
if isinstance(input_str, str):
input_str = unicode(input_str) # depends on [control=['if'], data=[]]
elif not isinstance(input_str, unicode):
raise TypeError('input_str must be a basestring') # depends on [control=['if'], data=[]]
as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-'
uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
result = []
for char in input_str:
if char in as_is:
result.append(char) # depends on [control=['if'], data=['char']]
elif char == u'_':
result.append('__') # depends on [control=['if'], data=[]]
elif char in uppercase:
result.append('_%s' % char.lower()) # depends on [control=['if'], data=['char']]
else:
utf8 = char.encode('utf8')
N = len(utf8)
if N == 1:
N = 0 # depends on [control=['if'], data=['N']]
HH = ''.join(('%x' % ord(c) for c in utf8))
result.append('_%d%s' % (N, HH)) # depends on [control=['for'], data=['char']]
return ''.join(result) |
def _process_json(data):
    """
    Build and return a list of GradLeave objects from decoded JSON *data*.
    """
    leaves = []
    for entry in data:
        leave = GradLeave()
        leave.reason = entry.get('leaveReason')
        leave.submit_date = datetime_from_string(entry.get('submitDate'))
        # Only overwrite the default status with a non-empty value.
        status = entry.get('status')
        if status is not None and len(status) > 0:
            leave.status = status.lower()
        for quarter_data in entry.get('quarters'):
            term = GradTerm()
            term.quarter = quarter_data.get('quarter').lower()
            term.year = quarter_data.get('year')
            leave.terms.append(term)
        leaves.append(leave)
    return leaves
constant[
return a list of GradLeave objects.
]
variable[requests] assign[=] list[[]]
for taget[name[item]] in starred[name[data]] begin[:]
variable[leave] assign[=] call[name[GradLeave], parameter[]]
name[leave].reason assign[=] call[name[item].get, parameter[constant[leaveReason]]]
name[leave].submit_date assign[=] call[name[datetime_from_string], parameter[call[name[item].get, parameter[constant[submitDate]]]]]
if <ast.BoolOp object at 0x7da1b2346aa0> begin[:]
name[leave].status assign[=] call[call[name[item].get, parameter[constant[status]]].lower, parameter[]]
for taget[name[quarter]] in starred[call[name[item].get, parameter[constant[quarters]]]] begin[:]
variable[term] assign[=] call[name[GradTerm], parameter[]]
name[term].quarter assign[=] call[call[name[quarter].get, parameter[constant[quarter]]].lower, parameter[]]
name[term].year assign[=] call[name[quarter].get, parameter[constant[year]]]
call[name[leave].terms.append, parameter[name[term]]]
call[name[requests].append, parameter[name[leave]]]
return[name[requests]] | keyword[def] identifier[_process_json] ( identifier[data] ):
literal[string]
identifier[requests] =[]
keyword[for] identifier[item] keyword[in] identifier[data] :
identifier[leave] = identifier[GradLeave] ()
identifier[leave] . identifier[reason] = identifier[item] . identifier[get] ( literal[string] )
identifier[leave] . identifier[submit_date] = identifier[datetime_from_string] ( identifier[item] . identifier[get] ( literal[string] ))
keyword[if] identifier[item] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[item] . identifier[get] ( literal[string] ))> literal[int] :
identifier[leave] . identifier[status] = identifier[item] . identifier[get] ( literal[string] ). identifier[lower] ()
keyword[for] identifier[quarter] keyword[in] identifier[item] . identifier[get] ( literal[string] ):
identifier[term] = identifier[GradTerm] ()
identifier[term] . identifier[quarter] = identifier[quarter] . identifier[get] ( literal[string] ). identifier[lower] ()
identifier[term] . identifier[year] = identifier[quarter] . identifier[get] ( literal[string] )
identifier[leave] . identifier[terms] . identifier[append] ( identifier[term] )
identifier[requests] . identifier[append] ( identifier[leave] )
keyword[return] identifier[requests] | def _process_json(data):
"""
return a list of GradLeave objects.
"""
requests = []
for item in data:
leave = GradLeave()
leave.reason = item.get('leaveReason')
leave.submit_date = datetime_from_string(item.get('submitDate'))
if item.get('status') is not None and len(item.get('status')) > 0:
leave.status = item.get('status').lower() # depends on [control=['if'], data=[]]
for quarter in item.get('quarters'):
term = GradTerm()
term.quarter = quarter.get('quarter').lower()
term.year = quarter.get('year')
leave.terms.append(term) # depends on [control=['for'], data=['quarter']]
requests.append(leave) # depends on [control=['for'], data=['item']]
return requests |
def add_extra_tags(self, image_id, main_tag, extra_tags, add_latest):
    """
    Adds extra tags to an image after de-duplicating tag names.

    :param image_id: Id of the image.
    :type image_id: unicode | str
    :param main_tag: Repo / tag specification that has been used to build the image. If present, the tag will be
      removed from further arguments.
    :type main_tag: unicode | str
    :param extra_tags: Additional tags to add to the image.
    :type extra_tags: list | tuple | set | NoneType
    :param add_latest: Whether to add a ``latest`` tag to the image.
    :type add_latest: bool
    :return: The tags that were successfully applied.
    :rtype: list
    :raise PartialResultsError: if tagging one of the names fails; carries the
      exception info and the tags that were already applied.
    """
    repo, __, i_tag = main_tag.rpartition(':')
    tag_set = set(extra_tags or ())
    if add_latest:
        tag_set.add('latest')
    # The main tag was already applied during the build; never re-apply it.
    tag_set.discard(i_tag)
    added_tags = []
    tag_kwargs = {}
    # NOTE(review): this is a lexicographic string comparison of API version
    # numbers, which mis-orders versions with differing component widths
    # (e.g. '1.9' > '1.25' as strings) -- confirm against the possible
    # values of DEPRECATED_FORCE_TAG_VERSION.
    if str(self.api_version) < DEPRECATED_FORCE_TAG_VERSION:
        tag_kwargs['force'] = True
    if repo and tag_set:
        for t in tag_set:
            try:
                self.tag(image_id, repo, t, **tag_kwargs)
            except Exception:
                # BUG FIX: was a bare ``except:``, which also converted
                # KeyboardInterrupt / SystemExit into PartialResultsError;
                # those now propagate unchanged.
                exc_info = sys.exc_info()
                raise PartialResultsError(exc_info, added_tags)
            else:
                added_tags.append(t)
    return added_tags
constant[
Adds extra tags to an image after de-duplicating tag names.
:param image_id: Id of the image.
:type image_id: unicode | str
:param main_tag: Repo / tag specification that has been used to build the image. If present, the tag will be
removed from further arguments.
:type main_tag: unicode | str
:param extra_tags: Additional tags to add to the image.
:type extra_tags: list | tuple | set | NoneType
:param add_latest: Whether to add a ``latest`` tag to the image.
:type add_latest: bool
]
<ast.Tuple object at 0x7da20e9b3730> assign[=] call[name[main_tag].rpartition, parameter[constant[:]]]
variable[tag_set] assign[=] call[name[set], parameter[<ast.BoolOp object at 0x7da1b27278e0>]]
if name[add_latest] begin[:]
call[name[tag_set].add, parameter[constant[latest]]]
call[name[tag_set].discard, parameter[name[i_tag]]]
variable[added_tags] assign[=] list[[]]
variable[tag_kwargs] assign[=] dictionary[[], []]
if compare[call[name[str], parameter[name[self].api_version]] less[<] name[DEPRECATED_FORCE_TAG_VERSION]] begin[:]
call[name[tag_kwargs]][constant[force]] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b2724f40> begin[:]
for taget[name[t]] in starred[name[tag_set]] begin[:]
<ast.Try object at 0x7da1b27260e0>
return[name[added_tags]] | keyword[def] identifier[add_extra_tags] ( identifier[self] , identifier[image_id] , identifier[main_tag] , identifier[extra_tags] , identifier[add_latest] ):
literal[string]
identifier[repo] , identifier[__] , identifier[i_tag] = identifier[main_tag] . identifier[rpartition] ( literal[string] )
identifier[tag_set] = identifier[set] ( identifier[extra_tags] keyword[or] ())
keyword[if] identifier[add_latest] :
identifier[tag_set] . identifier[add] ( literal[string] )
identifier[tag_set] . identifier[discard] ( identifier[i_tag] )
identifier[added_tags] =[]
identifier[tag_kwargs] ={}
keyword[if] identifier[str] ( identifier[self] . identifier[api_version] )< identifier[DEPRECATED_FORCE_TAG_VERSION] :
identifier[tag_kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[repo] keyword[and] identifier[tag_set] :
keyword[for] identifier[t] keyword[in] identifier[tag_set] :
keyword[try] :
identifier[self] . identifier[tag] ( identifier[image_id] , identifier[repo] , identifier[t] ,** identifier[tag_kwargs] )
keyword[except] :
identifier[exc_info] = identifier[sys] . identifier[exc_info] ()
keyword[raise] identifier[PartialResultsError] ( identifier[exc_info] , identifier[added_tags] )
keyword[else] :
identifier[added_tags] . identifier[append] ( identifier[t] )
keyword[return] identifier[added_tags] | def add_extra_tags(self, image_id, main_tag, extra_tags, add_latest):
"""
Adds extra tags to an image after de-duplicating tag names.
:param image_id: Id of the image.
:type image_id: unicode | str
:param main_tag: Repo / tag specification that has been used to build the image. If present, the tag will be
removed from further arguments.
:type main_tag: unicode | str
:param extra_tags: Additional tags to add to the image.
:type extra_tags: list | tuple | set | NoneType
:param add_latest: Whether to add a ``latest`` tag to the image.
:type add_latest: bool
"""
(repo, __, i_tag) = main_tag.rpartition(':')
tag_set = set(extra_tags or ())
if add_latest:
tag_set.add('latest') # depends on [control=['if'], data=[]]
tag_set.discard(i_tag)
added_tags = []
tag_kwargs = {}
if str(self.api_version) < DEPRECATED_FORCE_TAG_VERSION:
tag_kwargs['force'] = True # depends on [control=['if'], data=[]]
if repo and tag_set:
for t in tag_set:
try:
self.tag(image_id, repo, t, **tag_kwargs) # depends on [control=['try'], data=[]]
except:
exc_info = sys.exc_info()
raise PartialResultsError(exc_info, added_tags) # depends on [control=['except'], data=[]]
else:
added_tags.append(t) # depends on [control=['for'], data=['t']] # depends on [control=['if'], data=[]]
return added_tags |
def _check_send_batch(self, result=None):
    """Send the pending batch once enough messages or bytes accumulated.

    Designed to sit in a callback chain: the first (non-self) argument
    is returned unchanged so downstream callbacks still receive it.
    """
    # A threshold of 0/None disables that particular trigger.
    count_reached = bool(self.batch_every_n) and self._waitingMsgCount >= self.batch_every_n
    bytes_reached = bool(self.batch_every_b) and self._waitingByteCount >= self.batch_every_b
    if count_reached or bytes_reached:
        self._send_batch()
    return result
constant[Check if we have enough messages/bytes to send
Since this can be called from the callback chain, we
pass through our first (non-self) arg
]
if <ast.BoolOp object at 0x7da1b0415900> begin[:]
call[name[self]._send_batch, parameter[]]
return[name[result]] | keyword[def] identifier[_check_send_batch] ( identifier[self] , identifier[result] = keyword[None] ):
literal[string]
keyword[if] (
( identifier[self] . identifier[batch_every_n] keyword[and] identifier[self] . identifier[batch_every_n] <= identifier[self] . identifier[_waitingMsgCount] ) keyword[or]
( identifier[self] . identifier[batch_every_b] keyword[and] identifier[self] . identifier[batch_every_b] <= identifier[self] . identifier[_waitingByteCount] )
):
identifier[self] . identifier[_send_batch] ()
keyword[return] identifier[result] | def _check_send_batch(self, result=None):
"""Check if we have enough messages/bytes to send
Since this can be called from the callback chain, we
pass through our first (non-self) arg
"""
if self.batch_every_n and self.batch_every_n <= self._waitingMsgCount or (self.batch_every_b and self.batch_every_b <= self._waitingByteCount):
self._send_batch() # depends on [control=['if'], data=[]]
return result |
def tokenize_words(string):
    """
    Split the input text into word tokens.

    :param string: Text to tokenize
    :type string: str or unicode
    :return: words
    :rtype: list of strings
    """
    # Normalise to the unicode text type before matching.
    text = six.text_type(string)
    return re.findall(WORD_TOKENIZATION_RULES, text)
constant[
Tokenize input text to words.
:param string: Text to tokenize
:type string: str or unicode
:return: words
:rtype: list of strings
]
variable[string] assign[=] call[name[six].text_type, parameter[name[string]]]
return[call[name[re].findall, parameter[name[WORD_TOKENIZATION_RULES], name[string]]]] | keyword[def] identifier[tokenize_words] ( identifier[string] ):
literal[string]
identifier[string] = identifier[six] . identifier[text_type] ( identifier[string] )
keyword[return] identifier[re] . identifier[findall] ( identifier[WORD_TOKENIZATION_RULES] , identifier[string] ) | def tokenize_words(string):
"""
Tokenize input text to words.
:param string: Text to tokenize
:type string: str or unicode
:return: words
:rtype: list of strings
"""
string = six.text_type(string)
return re.findall(WORD_TOKENIZATION_RULES, string) |
def DbDeleteClassAttribute(self, argin):
    """Delete a class attribute and all of its properties from the database.

    :param argin: Str[0] = Tango class name
                  Str[1] = Attribute name
    :type: tango.DevVarStringArray
    :return:
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbDeleteClassAttribute()")
    # th_exc() reports the error back to the Tango layer on bad input
    # -- presumably by raising; verify against the helper's definition.
    if len(argin) < 2:
        self.warn_stream("DataBase::db_delete_class_attribute(): insufficient number of arguments ")
        th_exc(DB_IncorrectArguments,
               "insufficient number of arguments to delete class attribute",
               "DataBase::DeleteClassAttribute()")
    class_name, attribute_name = argin[:2]
    self.db.delete_class_attribute(class_name, attribute_name)
constant[ delete a class attribute and all its properties from database
:param argin: Str[0] = Tango class name
Str[1] = Attribute name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid ]
call[name[self]._log.debug, parameter[constant[In DbDeleteClassAttribute()]]]
if compare[call[name[len], parameter[name[argin]]] less[<] constant[2]] begin[:]
call[name[self].warn_stream, parameter[constant[DataBase::db_delete_class_attribute(): insufficient number of arguments ]]]
call[name[th_exc], parameter[name[DB_IncorrectArguments], constant[insufficient number of arguments to delete class attribute], constant[DataBase::DeleteClassAttribute()]]]
<ast.Tuple object at 0x7da2041daef0> assign[=] call[name[argin]][<ast.Slice object at 0x7da2041d9240>]
call[name[self].db.delete_class_attribute, parameter[name[klass_name], name[attr_name]]] | keyword[def] identifier[DbDeleteClassAttribute] ( identifier[self] , identifier[argin] ):
literal[string]
identifier[self] . identifier[_log] . identifier[debug] ( literal[string] )
keyword[if] identifier[len] ( identifier[argin] )< literal[int] :
identifier[self] . identifier[warn_stream] ( literal[string] )
identifier[th_exc] ( identifier[DB_IncorrectArguments] ,
literal[string] ,
literal[string] )
identifier[klass_name] , identifier[attr_name] = identifier[argin] [: literal[int] ]
identifier[self] . identifier[db] . identifier[delete_class_attribute] ( identifier[klass_name] , identifier[attr_name] ) | def DbDeleteClassAttribute(self, argin):
""" delete a class attribute and all its properties from database
:param argin: Str[0] = Tango class name
Str[1] = Attribute name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug('In DbDeleteClassAttribute()')
if len(argin) < 2:
self.warn_stream('DataBase::db_delete_class_attribute(): insufficient number of arguments ')
th_exc(DB_IncorrectArguments, 'insufficient number of arguments to delete class attribute', 'DataBase::DeleteClassAttribute()') # depends on [control=['if'], data=[]]
(klass_name, attr_name) = argin[:2]
self.db.delete_class_attribute(klass_name, attr_name) |
def is_contains(self, data):
    """
    Bloom-filter membership test: *data* is considered present only when
    every one of its hashed bit positions is set to 1.
    """
    if not data:
        return False
    digest = self._compress_by_md5(data)
    # The first two hex digits of the digest route the value to one of
    # ``block_num`` keys.
    block_key = self.key + str(int(digest[0:2], 16) % self.block_num)
    present = True
    for hasher in self.hash_function:
        bit_offset = hasher.hash(digest)
        # Deliberately non-short-circuiting: every bit is queried, exactly
        # as in the original bitwise-& accumulation.
        present = present & self.server.getbit(block_key, bit_offset)
    return present
constant[
Judge the data whether is already exist if each bit of hash code is 1 then data exist.
]
if <ast.UnaryOp object at 0x7da18ede4190> begin[:]
return[constant[False]]
variable[data] assign[=] call[name[self]._compress_by_md5, parameter[name[data]]]
variable[result] assign[=] constant[True]
variable[name] assign[=] binary_operation[name[self].key + call[name[str], parameter[binary_operation[call[name[int], parameter[call[name[data]][<ast.Slice object at 0x7da18ede5a50>], constant[16]]] <ast.Mod object at 0x7da2590d6920> name[self].block_num]]]]
for taget[name[h]] in starred[name[self].hash_function] begin[:]
variable[local_hash] assign[=] call[name[h].hash, parameter[name[data]]]
variable[result] assign[=] binary_operation[name[result] <ast.BitAnd object at 0x7da2590d6b60> call[name[self].server.getbit, parameter[name[name], name[local_hash]]]]
return[name[result]] | keyword[def] identifier[is_contains] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] keyword[not] identifier[data] :
keyword[return] keyword[False]
identifier[data] = identifier[self] . identifier[_compress_by_md5] ( identifier[data] )
identifier[result] = keyword[True]
identifier[name] = identifier[self] . identifier[key] + identifier[str] ( identifier[int] ( identifier[data] [ literal[int] : literal[int] ], literal[int] )% identifier[self] . identifier[block_num] )
keyword[for] identifier[h] keyword[in] identifier[self] . identifier[hash_function] :
identifier[local_hash] = identifier[h] . identifier[hash] ( identifier[data] )
identifier[result] = identifier[result] & identifier[self] . identifier[server] . identifier[getbit] ( identifier[name] , identifier[local_hash] )
keyword[return] identifier[result] | def is_contains(self, data):
"""
Judge the data whether is already exist if each bit of hash code is 1 then data exist.
"""
if not data:
return False # depends on [control=['if'], data=[]]
data = self._compress_by_md5(data)
result = True
# cut the first two place,route to different block by block_num
name = self.key + str(int(data[0:2], 16) % self.block_num)
for h in self.hash_function:
local_hash = h.hash(data)
result = result & self.server.getbit(name, local_hash) # depends on [control=['for'], data=['h']]
return result |
def sort_by(self, attr_or_key, direction='asc'):
"""Sort the view by an attribute or key
:param attr_or_key: The attribute or key to sort by
:param direction: Either `asc` or `desc` indicating the direction of
sorting
"""
# work out the direction
if direction in ('+', 'asc', gtk.SORT_ASCENDING):
direction = gtk.SORT_ASCENDING
elif direction in ('-', 'desc', gtk.SORT_DESCENDING):
direction = gtk.SORT_DESCENDING
else:
raise AttributeError('unrecognised direction')
if callable(attr_or_key):
# is a key
sort_func = self._key_sort_func
else:
# it's an attribute
sort_func = self._attr_sort_func
self.model.set_default_sort_func(sort_func, attr_or_key)
self.model.set_sort_column_id(-1, direction) | def function[sort_by, parameter[self, attr_or_key, direction]]:
constant[Sort the view by an attribute or key
:param attr_or_key: The attribute or key to sort by
:param direction: Either `asc` or `desc` indicating the direction of
sorting
]
if compare[name[direction] in tuple[[<ast.Constant object at 0x7da2044c0550>, <ast.Constant object at 0x7da2044c2290>, <ast.Attribute object at 0x7da2044c32b0>]]] begin[:]
variable[direction] assign[=] name[gtk].SORT_ASCENDING
if call[name[callable], parameter[name[attr_or_key]]] begin[:]
variable[sort_func] assign[=] name[self]._key_sort_func
call[name[self].model.set_default_sort_func, parameter[name[sort_func], name[attr_or_key]]]
call[name[self].model.set_sort_column_id, parameter[<ast.UnaryOp object at 0x7da2047ead10>, name[direction]]] | keyword[def] identifier[sort_by] ( identifier[self] , identifier[attr_or_key] , identifier[direction] = literal[string] ):
literal[string]
keyword[if] identifier[direction] keyword[in] ( literal[string] , literal[string] , identifier[gtk] . identifier[SORT_ASCENDING] ):
identifier[direction] = identifier[gtk] . identifier[SORT_ASCENDING]
keyword[elif] identifier[direction] keyword[in] ( literal[string] , literal[string] , identifier[gtk] . identifier[SORT_DESCENDING] ):
identifier[direction] = identifier[gtk] . identifier[SORT_DESCENDING]
keyword[else] :
keyword[raise] identifier[AttributeError] ( literal[string] )
keyword[if] identifier[callable] ( identifier[attr_or_key] ):
identifier[sort_func] = identifier[self] . identifier[_key_sort_func]
keyword[else] :
identifier[sort_func] = identifier[self] . identifier[_attr_sort_func]
identifier[self] . identifier[model] . identifier[set_default_sort_func] ( identifier[sort_func] , identifier[attr_or_key] )
identifier[self] . identifier[model] . identifier[set_sort_column_id] (- literal[int] , identifier[direction] ) | def sort_by(self, attr_or_key, direction='asc'):
"""Sort the view by an attribute or key
:param attr_or_key: The attribute or key to sort by
:param direction: Either `asc` or `desc` indicating the direction of
sorting
"""
# work out the direction
if direction in ('+', 'asc', gtk.SORT_ASCENDING):
direction = gtk.SORT_ASCENDING # depends on [control=['if'], data=['direction']]
elif direction in ('-', 'desc', gtk.SORT_DESCENDING):
direction = gtk.SORT_DESCENDING # depends on [control=['if'], data=['direction']]
else:
raise AttributeError('unrecognised direction')
if callable(attr_or_key):
# is a key
sort_func = self._key_sort_func # depends on [control=['if'], data=[]]
else:
# it's an attribute
sort_func = self._attr_sort_func
self.model.set_default_sort_func(sort_func, attr_or_key)
self.model.set_sort_column_id(-1, direction) |
def visit_call(self, node):
"""visit a Call node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node) | def function[visit_call, parameter[self, node]]:
constant[visit a Call node -> check if this is not a blacklisted builtin
call and check for * or ** use
]
call[name[self]._check_misplaced_format_function, parameter[name[node]]]
if call[name[isinstance], parameter[name[node].func, name[astroid].Name]] begin[:]
variable[name] assign[=] name[node].func.name
if <ast.UnaryOp object at 0x7da1b028d750> begin[:]
if compare[name[name] equal[==] constant[exec]] begin[:]
call[name[self].add_message, parameter[constant[exec-used]]] | keyword[def] identifier[visit_call] ( identifier[self] , identifier[node] ):
literal[string]
identifier[self] . identifier[_check_misplaced_format_function] ( identifier[node] )
keyword[if] identifier[isinstance] ( identifier[node] . identifier[func] , identifier[astroid] . identifier[Name] ):
identifier[name] = identifier[node] . identifier[func] . identifier[name]
keyword[if] keyword[not] ( identifier[name] keyword[in] identifier[node] . identifier[frame] () keyword[or] identifier[name] keyword[in] identifier[node] . identifier[root] ()):
keyword[if] identifier[name] == literal[string] :
identifier[self] . identifier[add_message] ( literal[string] , identifier[node] = identifier[node] )
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_check_reversed] ( identifier[node] )
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[add_message] ( literal[string] , identifier[node] = identifier[node] ) | def visit_call(self, node):
"""visit a Call node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == 'exec':
self.add_message('exec-used', node=node) # depends on [control=['if'], data=[]]
elif name == 'reversed':
self._check_reversed(node) # depends on [control=['if'], data=[]]
elif name == 'eval':
self.add_message('eval-used', node=node) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def get_valid_residue(residue):
"""Check if the given string represents a valid amino acid residue."""
if residue is not None and amino_acids.get(residue) is None:
res = amino_acids_reverse.get(residue.lower())
if res is None:
raise InvalidResidueError(residue)
else:
return res
return residue | def function[get_valid_residue, parameter[residue]]:
constant[Check if the given string represents a valid amino acid residue.]
if <ast.BoolOp object at 0x7da2041d94e0> begin[:]
variable[res] assign[=] call[name[amino_acids_reverse].get, parameter[call[name[residue].lower, parameter[]]]]
if compare[name[res] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c991960>
return[name[residue]] | keyword[def] identifier[get_valid_residue] ( identifier[residue] ):
literal[string]
keyword[if] identifier[residue] keyword[is] keyword[not] keyword[None] keyword[and] identifier[amino_acids] . identifier[get] ( identifier[residue] ) keyword[is] keyword[None] :
identifier[res] = identifier[amino_acids_reverse] . identifier[get] ( identifier[residue] . identifier[lower] ())
keyword[if] identifier[res] keyword[is] keyword[None] :
keyword[raise] identifier[InvalidResidueError] ( identifier[residue] )
keyword[else] :
keyword[return] identifier[res]
keyword[return] identifier[residue] | def get_valid_residue(residue):
"""Check if the given string represents a valid amino acid residue."""
if residue is not None and amino_acids.get(residue) is None:
res = amino_acids_reverse.get(residue.lower())
if res is None:
raise InvalidResidueError(residue) # depends on [control=['if'], data=[]]
else:
return res # depends on [control=['if'], data=[]]
return residue |
def _set_reserved_vlan(self, v, load=False):
"""
Setter method for reserved_vlan, mapped from YANG variable /reserved_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reserved_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reserved_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=reserved_vlan.reserved_vlan, is_container='container', presence=False, yang_name="reserved-vlan", rest_name="reserved-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sets the range of vlans used for internal purposes', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_VLAN_CONFIG', u'cli-suppress-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'nsmReservedVlanConfig'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """reserved_vlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=reserved_vlan.reserved_vlan, is_container='container', presence=False, yang_name="reserved-vlan", rest_name="reserved-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sets the range of vlans used for internal purposes', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_VLAN_CONFIG', u'cli-suppress-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'nsmReservedVlanConfig'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__reserved_vlan = t
if hasattr(self, '_set'):
self._set() | def function[_set_reserved_vlan, parameter[self, v, load]]:
constant[
Setter method for reserved_vlan, mapped from YANG variable /reserved_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reserved_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reserved_vlan() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18fe939d0>
name[self].__reserved_vlan assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_reserved_vlan] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[reserved_vlan] . identifier[reserved_vlan] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__reserved_vlan] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_reserved_vlan(self, v, load=False):
"""
Setter method for reserved_vlan, mapped from YANG variable /reserved_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reserved_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reserved_vlan() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=reserved_vlan.reserved_vlan, is_container='container', presence=False, yang_name='reserved-vlan', rest_name='reserved-vlan', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sets the range of vlans used for internal purposes', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_VLAN_CONFIG', u'cli-suppress-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'nsmReservedVlanConfig'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'reserved_vlan must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=reserved_vlan.reserved_vlan, is_container=\'container\', presence=False, yang_name="reserved-vlan", rest_name="reserved-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Sets the range of vlans used for internal purposes\', u\'sort-priority\': u\'RUNNCFG_LEVEL_INTERFACE_VLAN_CONFIG\', u\'cli-suppress-no\': None, u\'cli-compact-syntax\': None, u\'cli-sequence-commands\': None, u\'callpoint\': u\'nsmReservedVlanConfig\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__reserved_vlan = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def settimeout(self, key, value, timeout):
"""Set a key with a timeout value (in seconds).
:meth:`settimeout` is used to override the shelf's timeout value.
:param timeout: The timeout value in seconds for the given key.
``0`` means that the key will never expire.
:type timeout: integer
"""
self[key] = value
if not hasattr(self, '_index'):
return # don't update index if __init__ hasn't completed
self._index[key] = int(time() + timeout) if timeout else None | def function[settimeout, parameter[self, key, value, timeout]]:
constant[Set a key with a timeout value (in seconds).
:meth:`settimeout` is used to override the shelf's timeout value.
:param timeout: The timeout value in seconds for the given key.
``0`` means that the key will never expire.
:type timeout: integer
]
call[name[self]][name[key]] assign[=] name[value]
if <ast.UnaryOp object at 0x7da18f7209a0> begin[:]
return[None]
call[name[self]._index][name[key]] assign[=] <ast.IfExp object at 0x7da18f723d00> | keyword[def] identifier[settimeout] ( identifier[self] , identifier[key] , identifier[value] , identifier[timeout] ):
literal[string]
identifier[self] [ identifier[key] ]= identifier[value]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return]
identifier[self] . identifier[_index] [ identifier[key] ]= identifier[int] ( identifier[time] ()+ identifier[timeout] ) keyword[if] identifier[timeout] keyword[else] keyword[None] | def settimeout(self, key, value, timeout):
"""Set a key with a timeout value (in seconds).
:meth:`settimeout` is used to override the shelf's timeout value.
:param timeout: The timeout value in seconds for the given key.
``0`` means that the key will never expire.
:type timeout: integer
"""
self[key] = value
if not hasattr(self, '_index'):
return # don't update index if __init__ hasn't completed # depends on [control=['if'], data=[]]
self._index[key] = int(time() + timeout) if timeout else None |
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out | def function[_str_member_list, parameter[self, name]]:
constant[
Generate a member listing, autosummary:: table where possible,
and a table where not.
]
variable[out] assign[=] list[[]]
if call[name[self]][name[name]] begin[:]
<ast.AugAssign object at 0x7da1b0807d00>
variable[prefix] assign[=] call[name[getattr], parameter[name[self], constant[_name], constant[]]]
if name[prefix] begin[:]
variable[prefix] assign[=] binary_operation[constant[~%s.] <ast.Mod object at 0x7da2590d6920> name[prefix]]
variable[autosum] assign[=] list[[]]
variable[others] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0807790>, <ast.Name object at 0x7da1b0807760>, <ast.Name object at 0x7da1b0807730>]]] in starred[call[name[self]][name[name]]] begin[:]
variable[param] assign[=] call[name[param].strip, parameter[]]
if <ast.BoolOp object at 0x7da20e9b3ee0> begin[:]
<ast.AugAssign object at 0x7da20e9b1d80>
if name[autosum] begin[:]
<ast.AugAssign object at 0x7da20e9b1ff0>
<ast.AugAssign object at 0x7da20e9b0a60>
if name[others] begin[:]
variable[maxlen_0] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da1b08f8700>]]
variable[maxlen_1] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da1b08fbac0>]]
variable[hdr] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[=] * name[maxlen_0]] + constant[ ]] + binary_operation[constant[=] * name[maxlen_1]]] + constant[ ]] + binary_operation[constant[=] * constant[10]]]
variable[fmt] assign[=] binary_operation[constant[%%%ds %%%ds ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0807670>, <ast.Name object at 0x7da1b0807640>]]]
variable[n_indent] assign[=] binary_operation[binary_operation[name[maxlen_0] + name[maxlen_1]] + constant[4]]
<ast.AugAssign object at 0x7da1b08074c0>
for taget[tuple[[<ast.Name object at 0x7da1b08073a0>, <ast.Name object at 0x7da1b0807370>, <ast.Name object at 0x7da1b0807340>]]] in starred[name[others]] begin[:]
<ast.AugAssign object at 0x7da1b08072e0>
<ast.AugAssign object at 0x7da1b0807100>
<ast.AugAssign object at 0x7da1b0806fb0>
<ast.AugAssign object at 0x7da1b0806ef0>
return[name[out]] | keyword[def] identifier[_str_member_list] ( identifier[self] , identifier[name] ):
literal[string]
identifier[out] =[]
keyword[if] identifier[self] [ identifier[name] ]:
identifier[out] +=[ literal[string] % identifier[name] , literal[string] ]
identifier[prefix] = identifier[getattr] ( identifier[self] , literal[string] , literal[string] )
keyword[if] identifier[prefix] :
identifier[prefix] = literal[string] % identifier[prefix]
identifier[autosum] =[]
identifier[others] =[]
keyword[for] identifier[param] , identifier[param_type] , identifier[desc] keyword[in] identifier[self] [ identifier[name] ]:
identifier[param] = identifier[param] . identifier[strip] ()
keyword[if] keyword[not] identifier[self] . identifier[_obj] keyword[or] identifier[hasattr] ( identifier[self] . identifier[_obj] , identifier[param] ):
identifier[autosum] +=[ literal[string] %( identifier[prefix] , identifier[param] )]
keyword[else] :
identifier[others] . identifier[append] (( identifier[param] , identifier[param_type] , identifier[desc] ))
keyword[if] identifier[autosum] :
identifier[out] +=[ literal[string] , literal[string] ]
identifier[out] += identifier[autosum]
keyword[if] identifier[others] :
identifier[maxlen_0] = identifier[max] ([ identifier[len] ( identifier[x] [ literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[others] ])
identifier[maxlen_1] = identifier[max] ([ identifier[len] ( identifier[x] [ literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[others] ])
identifier[hdr] = literal[string] * identifier[maxlen_0] + literal[string] + literal[string] * identifier[maxlen_1] + literal[string] + literal[string] * literal[int]
identifier[fmt] = literal[string] %( identifier[maxlen_0] , identifier[maxlen_1] )
identifier[n_indent] = identifier[maxlen_0] + identifier[maxlen_1] + literal[int]
identifier[out] +=[ identifier[hdr] ]
keyword[for] identifier[param] , identifier[param_type] , identifier[desc] keyword[in] identifier[others] :
identifier[out] +=[ identifier[fmt] %( identifier[param] . identifier[strip] (), identifier[param_type] )]
identifier[out] += identifier[self] . identifier[_str_indent] ( identifier[desc] , identifier[n_indent] )
identifier[out] +=[ identifier[hdr] ]
identifier[out] +=[ literal[string] ]
keyword[return] identifier[out] | def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix # depends on [control=['if'], data=[]]
autosum = []
others = []
for (param, param_type, desc) in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [' %s%s' % (prefix, param)] # depends on [control=['if'], data=[]]
else:
others.append((param, param_type, desc)) # depends on [control=['for'], data=[]]
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum # depends on [control=['if'], data=[]]
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = '=' * maxlen_0 + ' ' + '=' * maxlen_1 + ' ' + '=' * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for (param, param_type, desc) in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent) # depends on [control=['for'], data=[]]
out += [hdr] # depends on [control=['if'], data=[]]
out += [''] # depends on [control=['if'], data=[]]
return out |
def outbound_message_filter(f):
"""
Register the decorated function as a service-level outbound message filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
raise TypeError(
"outbound_message_filter must not be a coroutine function"
)
add_handler_spec(
f,
HandlerSpec(
(_apply_outbound_message_filter, ())
),
)
return f | def function[outbound_message_filter, parameter[f]]:
constant[
Register the decorated function as a service-level outbound message filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
]
if call[name[asyncio].iscoroutinefunction, parameter[name[f]]] begin[:]
<ast.Raise object at 0x7da20c6aae90>
call[name[add_handler_spec], parameter[name[f], call[name[HandlerSpec], parameter[tuple[[<ast.Name object at 0x7da20c6a9240>, <ast.Tuple object at 0x7da20c6a8730>]]]]]]
return[name[f]] | keyword[def] identifier[outbound_message_filter] ( identifier[f] ):
literal[string]
keyword[if] identifier[asyncio] . identifier[iscoroutinefunction] ( identifier[f] ):
keyword[raise] identifier[TypeError] (
literal[string]
)
identifier[add_handler_spec] (
identifier[f] ,
identifier[HandlerSpec] (
( identifier[_apply_outbound_message_filter] ,())
),
)
keyword[return] identifier[f] | def outbound_message_filter(f):
"""
Register the decorated function as a service-level outbound message filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
raise TypeError('outbound_message_filter must not be a coroutine function') # depends on [control=['if'], data=[]]
add_handler_spec(f, HandlerSpec((_apply_outbound_message_filter, ())))
return f |
def parse_title(self, docname):
"""Parse a document title as the first line starting in [A-Za-z0-9<]
or fall back to the document basename if no such line exists.
The cmake --help-*-list commands also depend on this convention.
Return the title or False if the document file does not exist.
"""
env = self.document.settings.env
title = self.titles.get(docname)
if title is None:
fname = os.path.join(env.srcdir, docname+'.rst')
try:
f = open(fname, 'r')
except IOError:
title = False
else:
for line in f:
if len(line) > 0 and (line[0].isalnum() or line[0] == '<'):
title = line.rstrip()
break
f.close()
if title is None:
title = os.path.basename(docname)
self.titles[docname] = title
return title | def function[parse_title, parameter[self, docname]]:
constant[Parse a document title as the first line starting in [A-Za-z0-9<]
or fall back to the document basename if no such line exists.
The cmake --help-*-list commands also depend on this convention.
Return the title or False if the document file does not exist.
]
variable[env] assign[=] name[self].document.settings.env
variable[title] assign[=] call[name[self].titles.get, parameter[name[docname]]]
if compare[name[title] is constant[None]] begin[:]
variable[fname] assign[=] call[name[os].path.join, parameter[name[env].srcdir, binary_operation[name[docname] + constant[.rst]]]]
<ast.Try object at 0x7da1b1fa6140>
call[name[self].titles][name[docname]] assign[=] name[title]
return[name[title]] | keyword[def] identifier[parse_title] ( identifier[self] , identifier[docname] ):
literal[string]
identifier[env] = identifier[self] . identifier[document] . identifier[settings] . identifier[env]
identifier[title] = identifier[self] . identifier[titles] . identifier[get] ( identifier[docname] )
keyword[if] identifier[title] keyword[is] keyword[None] :
identifier[fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[env] . identifier[srcdir] , identifier[docname] + literal[string] )
keyword[try] :
identifier[f] = identifier[open] ( identifier[fname] , literal[string] )
keyword[except] identifier[IOError] :
identifier[title] = keyword[False]
keyword[else] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] identifier[len] ( identifier[line] )> literal[int] keyword[and] ( identifier[line] [ literal[int] ]. identifier[isalnum] () keyword[or] identifier[line] [ literal[int] ]== literal[string] ):
identifier[title] = identifier[line] . identifier[rstrip] ()
keyword[break]
identifier[f] . identifier[close] ()
keyword[if] identifier[title] keyword[is] keyword[None] :
identifier[title] = identifier[os] . identifier[path] . identifier[basename] ( identifier[docname] )
identifier[self] . identifier[titles] [ identifier[docname] ]= identifier[title]
keyword[return] identifier[title] | def parse_title(self, docname):
"""Parse a document title as the first line starting in [A-Za-z0-9<]
or fall back to the document basename if no such line exists.
The cmake --help-*-list commands also depend on this convention.
Return the title or False if the document file does not exist.
"""
env = self.document.settings.env
title = self.titles.get(docname)
if title is None:
fname = os.path.join(env.srcdir, docname + '.rst')
try:
f = open(fname, 'r') # depends on [control=['try'], data=[]]
except IOError:
title = False # depends on [control=['except'], data=[]]
else:
for line in f:
if len(line) > 0 and (line[0].isalnum() or line[0] == '<'):
title = line.rstrip()
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
f.close()
if title is None:
title = os.path.basename(docname) # depends on [control=['if'], data=['title']]
self.titles[docname] = title # depends on [control=['if'], data=['title']]
return title |
def get_format(**kwargs):
"""
Returns a `QTextCharFormat <http://doc.qt.nokia.com/qtextcharformat.html>`_ format.
:param \*\*kwargs: Format settings.
:type \*\*kwargs: dict
:return: Format.
:rtype: QTextCharFormat
"""
settings = foundations.data_structures.Structure(**{"format": QTextCharFormat(),
"background_color": None,
"color": None,
"font_weight": None,
"font_point_size": None,
"italic": False})
settings.update(kwargs)
format = QTextCharFormat(settings.format)
settings.background_color and format.setBackground(settings.background_color)
settings.color and format.setForeground(settings.color)
settings.font_weight and format.setFontWeight(settings.font_weight)
settings.font_point_size and format.setFontPointSize(settings.font_point_size)
settings.italic and format.setFontItalic(True)
return format | def function[get_format, parameter[]]:
constant[
Returns a `QTextCharFormat <http://doc.qt.nokia.com/qtextcharformat.html>`_ format.
:param \*\*kwargs: Format settings.
:type \*\*kwargs: dict
:return: Format.
:rtype: QTextCharFormat
]
variable[settings] assign[=] call[name[foundations].data_structures.Structure, parameter[]]
call[name[settings].update, parameter[name[kwargs]]]
variable[format] assign[=] call[name[QTextCharFormat], parameter[name[settings].format]]
<ast.BoolOp object at 0x7da20e956320>
<ast.BoolOp object at 0x7da20e9568f0>
<ast.BoolOp object at 0x7da20e954ee0>
<ast.BoolOp object at 0x7da20e957be0>
<ast.BoolOp object at 0x7da20e9b3fd0>
return[name[format]] | keyword[def] identifier[get_format] (** identifier[kwargs] ):
literal[string]
identifier[settings] = identifier[foundations] . identifier[data_structures] . identifier[Structure] (**{ literal[string] : identifier[QTextCharFormat] (),
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[False] })
identifier[settings] . identifier[update] ( identifier[kwargs] )
identifier[format] = identifier[QTextCharFormat] ( identifier[settings] . identifier[format] )
identifier[settings] . identifier[background_color] keyword[and] identifier[format] . identifier[setBackground] ( identifier[settings] . identifier[background_color] )
identifier[settings] . identifier[color] keyword[and] identifier[format] . identifier[setForeground] ( identifier[settings] . identifier[color] )
identifier[settings] . identifier[font_weight] keyword[and] identifier[format] . identifier[setFontWeight] ( identifier[settings] . identifier[font_weight] )
identifier[settings] . identifier[font_point_size] keyword[and] identifier[format] . identifier[setFontPointSize] ( identifier[settings] . identifier[font_point_size] )
identifier[settings] . identifier[italic] keyword[and] identifier[format] . identifier[setFontItalic] ( keyword[True] )
keyword[return] identifier[format] | def get_format(**kwargs):
"""
Returns a `QTextCharFormat <http://doc.qt.nokia.com/qtextcharformat.html>`_ format.
:param \\*\\*kwargs: Format settings.
:type \\*\\*kwargs: dict
:return: Format.
:rtype: QTextCharFormat
"""
settings = foundations.data_structures.Structure(**{'format': QTextCharFormat(), 'background_color': None, 'color': None, 'font_weight': None, 'font_point_size': None, 'italic': False})
settings.update(kwargs)
format = QTextCharFormat(settings.format)
settings.background_color and format.setBackground(settings.background_color)
settings.color and format.setForeground(settings.color)
settings.font_weight and format.setFontWeight(settings.font_weight)
settings.font_point_size and format.setFontPointSize(settings.font_point_size)
settings.italic and format.setFontItalic(True)
return format |
def build_items(self):
    u"""This method called by Executer.
    /proc/net/tcp -> {host:host, key:key, value:value, clock:clock}
    Reads /proc/net/tcp and /proc/net/tcp6, aggregates the socket stats via
    ``self.count`` and enqueues one NetstatItem per (key, value) pair.
    """
    protocols = ['tcp', 'tcp6']
    for protocol in protocols:
        # BUGFIX: the original leaked the file handle; `with` guarantees it is
        # closed even if self.count() raises.
        with open('/proc/net/{0}'.format(protocol), 'r') as procfile:
            stats = self.count(procfile)
        for key, value in stats.items():
            item = NetstatItem(key=key,
                               value=value,
                               host=self.hostname
                               )
            # block=False: never stall the collector if the queue is full.
            self.queue.put(item, block=False)
constant[This method called by Executer.
/proc/net/tcp -> {host:host, key:key, value:value, clock:clock}
]
variable[protocols] assign[=] list[[<ast.Constant object at 0x7da2046228f0>, <ast.Constant object at 0x7da204623550>]]
for taget[name[protocol]] in starred[name[protocols]] begin[:]
variable[procfile] assign[=] call[name[open], parameter[call[constant[/proc/net/{0}].format, parameter[name[protocol]]], constant[r]]]
variable[stats] assign[=] call[name[self].count, parameter[name[procfile]]]
for taget[tuple[[<ast.Name object at 0x7da20c6a84c0>, <ast.Name object at 0x7da20c6a8e20>]]] in starred[call[name[stats].items, parameter[]]] begin[:]
variable[item] assign[=] call[name[NetstatItem], parameter[]]
call[name[self].queue.put, parameter[name[item]]] | keyword[def] identifier[build_items] ( identifier[self] ):
literal[string]
identifier[protocols] =[ literal[string] , literal[string] ]
keyword[for] identifier[protocol] keyword[in] identifier[protocols] :
identifier[procfile] = identifier[open] ( literal[string] . identifier[format] ( identifier[protocol] ), literal[string] )
identifier[stats] = identifier[self] . identifier[count] ( identifier[procfile] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[stats] . identifier[items] ():
identifier[item] = identifier[NetstatItem] ( identifier[key] = identifier[key] ,
identifier[value] = identifier[value] ,
identifier[host] = identifier[self] . identifier[hostname]
)
identifier[self] . identifier[queue] . identifier[put] ( identifier[item] , identifier[block] = keyword[False] ) | def build_items(self):
u"""This method called by Executer.
/proc/net/tcp -> {host:host, key:key, value:value, clock:clock}
"""
protocols = ['tcp', 'tcp6']
for protocol in protocols:
procfile = open('/proc/net/{0}'.format(protocol), 'r')
stats = self.count(procfile)
for (key, value) in stats.items():
item = NetstatItem(key=key, value=value, host=self.hostname)
self.queue.put(item, block=False) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['protocol']] |
def segmentAcceptable(RCV_NXT, RCV_WND, SEG_SEQ, SEG_LEN):
    """
    Decide whether an incoming segment is acceptable, per the four-case
    receive-window test of RFC 793, page 26.
    """
    # Case 1: zero-length segment, zero window -> sequence must match exactly.
    if SEG_LEN == 0 and RCV_WND == 0:
        return SEG_SEQ == RCV_NXT
    # Case 2: zero-length segment, open window -> sequence must fall in window.
    if SEG_LEN == 0 and RCV_WND > 0:
        return RCV_NXT <= SEG_SEQ < RCV_NXT + RCV_WND
    # Case 3: data with a closed window is never acceptable.
    if SEG_LEN > 0 and RCV_WND == 0:
        return False
    # Case 4: data with an open window -> either the first or the last octet
    # of the segment must fall inside the window.
    if SEG_LEN > 0 and RCV_WND > 0:
        seg_last = SEG_SEQ + SEG_LEN - 1
        return (RCV_NXT <= SEG_SEQ < RCV_NXT + RCV_WND
                or RCV_NXT <= seg_last < RCV_NXT + RCV_WND)
    assert 0, 'Should be impossible to get here.'
    return False
constant[
An acceptable segment: RFC 793 page 26.
]
if <ast.BoolOp object at 0x7da20c6c59f0> begin[:]
return[compare[name[SEG_SEQ] equal[==] name[RCV_NXT]]]
if <ast.BoolOp object at 0x7da20c6c5060> begin[:]
return[<ast.BoolOp object at 0x7da18eb57190>]
if <ast.BoolOp object at 0x7da18eb54c10> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da18eb55780> begin[:]
return[<ast.BoolOp object at 0x7da18eb57d30>]
assert[constant[0]]
return[constant[False]] | keyword[def] identifier[segmentAcceptable] ( identifier[RCV_NXT] , identifier[RCV_WND] , identifier[SEG_SEQ] , identifier[SEG_LEN] ):
literal[string]
keyword[if] identifier[SEG_LEN] == literal[int] keyword[and] identifier[RCV_WND] == literal[int] :
keyword[return] identifier[SEG_SEQ] == identifier[RCV_NXT]
keyword[if] identifier[SEG_LEN] == literal[int] keyword[and] identifier[RCV_WND] > literal[int] :
keyword[return] (( identifier[RCV_NXT] <= identifier[SEG_SEQ] ) keyword[and] ( identifier[SEG_SEQ] < identifier[RCV_NXT] + identifier[RCV_WND] ))
keyword[if] identifier[SEG_LEN] > literal[int] keyword[and] identifier[RCV_WND] == literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[SEG_LEN] > literal[int] keyword[and] identifier[RCV_WND] > literal[int] :
keyword[return] ((( identifier[RCV_NXT] <= identifier[SEG_SEQ] ) keyword[and] ( identifier[SEG_SEQ] < identifier[RCV_NXT] + identifier[RCV_WND] ))
keyword[or] (( identifier[RCV_NXT] <= identifier[SEG_SEQ] + identifier[SEG_LEN] - literal[int] ) keyword[and]
( identifier[SEG_SEQ] + identifier[SEG_LEN] - literal[int] < identifier[RCV_NXT] + identifier[RCV_WND] )))
keyword[assert] literal[int] , literal[string]
keyword[return] keyword[False] | def segmentAcceptable(RCV_NXT, RCV_WND, SEG_SEQ, SEG_LEN):
"""
An acceptable segment: RFC 793 page 26.
"""
if SEG_LEN == 0 and RCV_WND == 0:
return SEG_SEQ == RCV_NXT # depends on [control=['if'], data=[]]
if SEG_LEN == 0 and RCV_WND > 0:
return RCV_NXT <= SEG_SEQ and SEG_SEQ < RCV_NXT + RCV_WND # depends on [control=['if'], data=[]]
if SEG_LEN > 0 and RCV_WND == 0:
return False # depends on [control=['if'], data=[]]
if SEG_LEN > 0 and RCV_WND > 0:
return RCV_NXT <= SEG_SEQ and SEG_SEQ < RCV_NXT + RCV_WND or (RCV_NXT <= SEG_SEQ + SEG_LEN - 1 and SEG_SEQ + SEG_LEN - 1 < RCV_NXT + RCV_WND) # depends on [control=['if'], data=[]]
assert 0, 'Should be impossible to get here.'
return False |
def to_dict(self) -> Dict:
    """ Export the CAG to a dict that can be serialized to JSON. """
    # Serialize graph pieces up front, then assemble the payload dict.
    variables = lmap(lambda n: self.export_node(n), self.nodes(data=True))
    edge_data = lmap(export_edge, self.edges(data=True))
    return {
        "name": self.name,
        "dateCreated": str(self.dateCreated),
        "variables": variables,
        "timeStep": str(self.Δt),
        "edge_data": edge_data,
    }
} | def function[to_dict, parameter[self]]:
constant[ Export the CAG to a dict that can be serialized to JSON. ]
return[dictionary[[<ast.Constant object at 0x7da18f58c070>, <ast.Constant object at 0x7da18f58c6a0>, <ast.Constant object at 0x7da18f58e2f0>, <ast.Constant object at 0x7da18f58f250>, <ast.Constant object at 0x7da18f58ea40>], [<ast.Attribute object at 0x7da18f58f670>, <ast.Call object at 0x7da18f58c850>, <ast.Call object at 0x7da18f58d330>, <ast.Call object at 0x7da18f58d510>, <ast.Call object at 0x7da18f58e1d0>]]] | keyword[def] identifier[to_dict] ( identifier[self] )-> identifier[Dict] :
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[name] ,
literal[string] : identifier[str] ( identifier[self] . identifier[dateCreated] ),
literal[string] : identifier[lmap] (
keyword[lambda] identifier[n] : identifier[self] . identifier[export_node] ( identifier[n] ), identifier[self] . identifier[nodes] ( identifier[data] = keyword[True] )
),
literal[string] : identifier[str] ( identifier[self] . identifier[Δt] ),
literal[string] : identifier[lmap] ( identifier[export_edge] , identifier[self] . identifier[edges] ( identifier[data] = keyword[True] )),
} | def to_dict(self) -> Dict:
""" Export the CAG to a dict that can be serialized to JSON. """
return {'name': self.name, 'dateCreated': str(self.dateCreated), 'variables': lmap(lambda n: self.export_node(n), self.nodes(data=True)), 'timeStep': str(self.Δt), 'edge_data': lmap(export_edge, self.edges(data=True))} |
def fromgroups(args):
    """
    %prog fromgroups groupsfile a.bed b.bed ...
    Flatten the gene familes into pairs, the groupsfile is a file with each line
    containing the members, separated by comma. The commands also require
    several bed files in order to sort the pairs into different piles (e.g.
    pairs of species in comparison.
    """
    from jcvi.formats.bed import Bed
    p = OptionParser(fromgroups.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 2:
        sys.exit(not p.print_help())
    groupsfile = args[0]
    bedfiles = args[1:]
    beds = [Bed(x) for x in bedfiles]
    # BUGFIX: the groups file was opened without ever being closed; `with`
    # releases the handle as soon as the lines are read.
    with open(groupsfile) as fp:
        # Each line is one family: comma-separated member names.
        groups = [row.strip().split(",") for row in fp]
    # Compare every ordered pair of bed files (including each against itself).
    for b1, b2 in product(beds, repeat=2):
        extract_pairs(b1, b2, groups)
constant[
%prog fromgroups groupsfile a.bed b.bed ...
Flatten the gene familes into pairs, the groupsfile is a file with each line
containing the members, separated by comma. The commands also require
several bed files in order to sort the pairs into different piles (e.g.
pairs of species in comparison.
]
from relative_module[jcvi.formats.bed] import module[Bed]
variable[p] assign[=] call[name[OptionParser], parameter[name[fromgroups].__doc__]]
<ast.Tuple object at 0x7da20c76d480> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] less[<] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20c76cf70>]]
variable[groupsfile] assign[=] call[name[args]][constant[0]]
variable[bedfiles] assign[=] call[name[args]][<ast.Slice object at 0x7da20c76df30>]
variable[beds] assign[=] <ast.ListComp object at 0x7da20c76f6a0>
variable[fp] assign[=] call[name[open], parameter[name[groupsfile]]]
variable[groups] assign[=] <ast.ListComp object at 0x7da20c76e200>
for taget[tuple[[<ast.Name object at 0x7da20c76d750>, <ast.Name object at 0x7da20c76d540>]]] in starred[call[name[product], parameter[name[beds]]]] begin[:]
call[name[extract_pairs], parameter[name[b1], name[b2], name[groups]]] | keyword[def] identifier[fromgroups] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[bed] keyword[import] identifier[Bed]
identifier[p] = identifier[OptionParser] ( identifier[fromgroups] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[groupsfile] = identifier[args] [ literal[int] ]
identifier[bedfiles] = identifier[args] [ literal[int] :]
identifier[beds] =[ identifier[Bed] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[bedfiles] ]
identifier[fp] = identifier[open] ( identifier[groupsfile] )
identifier[groups] =[ identifier[row] . identifier[strip] (). identifier[split] ( literal[string] ) keyword[for] identifier[row] keyword[in] identifier[fp] ]
keyword[for] identifier[b1] , identifier[b2] keyword[in] identifier[product] ( identifier[beds] , identifier[repeat] = literal[int] ):
identifier[extract_pairs] ( identifier[b1] , identifier[b2] , identifier[groups] ) | def fromgroups(args):
"""
%prog fromgroups groupsfile a.bed b.bed ...
Flatten the gene familes into pairs, the groupsfile is a file with each line
containing the members, separated by comma. The commands also require
several bed files in order to sort the pairs into different piles (e.g.
pairs of species in comparison.
"""
from jcvi.formats.bed import Bed
p = OptionParser(fromgroups.__doc__)
(opts, args) = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
groupsfile = args[0]
bedfiles = args[1:]
beds = [Bed(x) for x in bedfiles]
fp = open(groupsfile)
groups = [row.strip().split(',') for row in fp]
for (b1, b2) in product(beds, repeat=2):
extract_pairs(b1, b2, groups) # depends on [control=['for'], data=[]] |
def _strvar(a, prec='{:G}'):
r"""Return variable as a string to print, with given precision."""
return ' '.join([prec.format(i) for i in np.atleast_1d(a)]) | def function[_strvar, parameter[a, prec]]:
constant[Return variable as a string to print, with given precision.]
return[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da18ede5210>]]] | keyword[def] identifier[_strvar] ( identifier[a] , identifier[prec] = literal[string] ):
literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[prec] . identifier[format] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[np] . identifier[atleast_1d] ( identifier[a] )]) | def _strvar(a, prec='{:G}'):
"""Return variable as a string to print, with given precision."""
return ' '.join([prec.format(i) for i in np.atleast_1d(a)]) |
def p_iteration_statement_1(self, p):
    """
    iteration_statement \
    : DO statement WHILE LPAREN expr RPAREN SEMI
    | DO statement WHILE LPAREN expr RPAREN AUTOSEMI
    """
    # NOTE: the docstring above IS the grammar rule -- the PLY parser
    # generator reads it; do not edit it as documentation.
    # Build the do-while AST node: p[2] is the loop body statement,
    # p[5] is the condition expression between the parentheses.
    p[0] = self.asttypes.DoWhile(predicate=p[5], statement=p[2])
    # Attach source-position information to the new node.
    p[0].setpos(p)
constant[
iteration_statement : DO statement WHILE LPAREN expr RPAREN SEMI
| DO statement WHILE LPAREN expr RPAREN AUTOSEMI
]
call[name[p]][constant[0]] assign[=] call[name[self].asttypes.DoWhile, parameter[]]
call[call[name[p]][constant[0]].setpos, parameter[name[p]]] | keyword[def] identifier[p_iteration_statement_1] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[self] . identifier[asttypes] . identifier[DoWhile] ( identifier[predicate] = identifier[p] [ literal[int] ], identifier[statement] = identifier[p] [ literal[int] ])
identifier[p] [ literal[int] ]. identifier[setpos] ( identifier[p] ) | def p_iteration_statement_1(self, p):
"""
iteration_statement : DO statement WHILE LPAREN expr RPAREN SEMI
| DO statement WHILE LPAREN expr RPAREN AUTOSEMI
"""
p[0] = self.asttypes.DoWhile(predicate=p[5], statement=p[2])
p[0].setpos(p) |
def decode_response(client_message, to_object=None):
    """ Decode response from client message"""
    # Wire format: an int count followed by that many data entries.
    count = client_message.read_int()
    items = [client_message.read_data() for _ in range(count)]
    # Defer per-item deserialization to the lazy list wrapper.
    return {'response': ImmutableLazyDataList(items, to_object)}
constant[ Decode response from client message]
variable[parameters] assign[=] call[name[dict], parameter[]]
variable[response_size] assign[=] call[name[client_message].read_int, parameter[]]
variable[response] assign[=] list[[]]
for taget[name[_]] in starred[call[name[range], parameter[constant[0], name[response_size]]]] begin[:]
variable[response_item] assign[=] call[name[client_message].read_data, parameter[]]
call[name[response].append, parameter[name[response_item]]]
call[name[parameters]][constant[response]] assign[=] call[name[ImmutableLazyDataList], parameter[name[response], name[to_object]]]
return[name[parameters]] | keyword[def] identifier[decode_response] ( identifier[client_message] , identifier[to_object] = keyword[None] ):
literal[string]
identifier[parameters] = identifier[dict] ( identifier[response] = keyword[None] )
identifier[response_size] = identifier[client_message] . identifier[read_int] ()
identifier[response] =[]
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[response_size] ):
identifier[response_item] = identifier[client_message] . identifier[read_data] ()
identifier[response] . identifier[append] ( identifier[response_item] )
identifier[parameters] [ literal[string] ]= identifier[ImmutableLazyDataList] ( identifier[response] , identifier[to_object] )
keyword[return] identifier[parameters] | def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
response_size = client_message.read_int()
response = []
for _ in range(0, response_size):
response_item = client_message.read_data()
response.append(response_item) # depends on [control=['for'], data=[]]
parameters['response'] = ImmutableLazyDataList(response, to_object)
return parameters |
def modify_site(name, sourcepath=None, apppool=None, preload=None):
    '''
    Modify a basic website in IIS.
    .. versionadded:: 2017.7.0
    Args:
        name (str): The IIS site name.
        sourcepath (str): The physical path of the IIS site.
        apppool (str): The name of the IIS application pool.
        preload (bool): Whether preloading should be enabled
    Returns:
        bool: True if successful, otherwise False.
    .. note::
        If an application pool is specified, and that application pool does not
        already exist, it will be created.
    CLI Example:
    .. code-block:: bash
        salt '*' win_iis.modify_site name='My Test Site' sourcepath='c:\\new_path' apppool='NewTestPool' preload=True
    '''
    # Provider path of the site on the PowerShell IIS: drive.
    site_path = r'IIS:\Sites\{0}'.format(name)
    current_sites = list_sites()
    # Only modify sites that already exist; creation is a separate function.
    if name not in current_sites:
        log.debug("Site '%s' not defined.", name)
        return False
    # Accumulate PowerShell tokens; the whole list is handed to _srvmgr below.
    ps_cmd = list()
    if sourcepath:
        ps_cmd.extend(['Set-ItemProperty',
                       '-Path', r"'{0}'".format(site_path),
                       '-Name', 'PhysicalPath',
                       '-Value', r"'{0}'".format(sourcepath)])
    if apppool:
        # Create the pool on demand so the site never references a missing pool.
        if apppool in list_apppools():
            log.debug('Utilizing pre-existing application pool: %s', apppool)
        else:
            log.debug('Application pool will be created: %s', apppool)
            create_apppool(apppool)
        # If ps_cmd isn't empty, we need to add a semi-colon to run two commands
        if ps_cmd:
            ps_cmd.append(';')
        ps_cmd.extend(['Set-ItemProperty',
                       '-Path', r"'{0}'".format(site_path),
                       '-Name', 'ApplicationPool',
                       '-Value', r"'{0}'".format(apppool)])
    if preload:
        # NOTE(review): here the statement-terminating ';' is fused into the
        # -Value argument instead of being appended as a separate token like
        # above -- presumably intentional for _srvmgr; confirm before changing.
        ps_cmd.extend(['Set-ItemProperty',
                       '-Path', "'{0}'".format(site_path),
                       '-Name', 'applicationDefaults.preloadEnabled',
                       '-Value', "{0};".format(preload)])
    cmd_ret = _srvmgr(ps_cmd)
    # Non-zero retcode from PowerShell means the modification failed.
    if cmd_ret['retcode'] != 0:
        msg = 'Unable to modify site: {0}\nError: {1}' \
              ''.format(name, cmd_ret['stderr'])
        raise CommandExecutionError(msg)
    log.debug('Site modified successfully: %s', name)
    return True
constant[
Modify a basic website in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The IIS site name.
sourcepath (str): The physical path of the IIS site.
apppool (str): The name of the IIS application pool.
preload (bool): Whether preloading should be enabled
Returns:
bool: True if successful, otherwise False.
.. note::
If an application pool is specified, and that application pool does not
already exist, it will be created.
CLI Example:
.. code-block:: bash
salt '*' win_iis.modify_site name='My Test Site' sourcepath='c:\new_path' apppool='NewTestPool' preload=True
]
variable[site_path] assign[=] call[constant[IIS:\Sites\{0}].format, parameter[name[name]]]
variable[current_sites] assign[=] call[name[list_sites], parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[current_sites]] begin[:]
call[name[log].debug, parameter[constant[Site '%s' not defined.], name[name]]]
return[constant[False]]
variable[ps_cmd] assign[=] call[name[list], parameter[]]
if name[sourcepath] begin[:]
call[name[ps_cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b2135300>, <ast.Constant object at 0x7da1b2134dc0>, <ast.Call object at 0x7da1b2135870>, <ast.Constant object at 0x7da1b2136fb0>, <ast.Constant object at 0x7da1b21374c0>, <ast.Constant object at 0x7da1b2134340>, <ast.Call object at 0x7da1b2136f50>]]]]
if name[apppool] begin[:]
if compare[name[apppool] in call[name[list_apppools], parameter[]]] begin[:]
call[name[log].debug, parameter[constant[Utilizing pre-existing application pool: %s], name[apppool]]]
if name[ps_cmd] begin[:]
call[name[ps_cmd].append, parameter[constant[;]]]
call[name[ps_cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b1c05870>, <ast.Constant object at 0x7da1b1c07eb0>, <ast.Call object at 0x7da1b1c07640>, <ast.Constant object at 0x7da1b1c053f0>, <ast.Constant object at 0x7da1b1c06c20>, <ast.Constant object at 0x7da1b1c053c0>, <ast.Call object at 0x7da1b1c056f0>]]]]
if name[preload] begin[:]
call[name[ps_cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b1c05bd0>, <ast.Constant object at 0x7da1b1c07d00>, <ast.Call object at 0x7da1b1c05900>, <ast.Constant object at 0x7da1b1c04640>, <ast.Constant object at 0x7da1b1c06ad0>, <ast.Constant object at 0x7da1b1c05780>, <ast.Call object at 0x7da1b1c04be0>]]]]
variable[cmd_ret] assign[=] call[name[_srvmgr], parameter[name[ps_cmd]]]
if compare[call[name[cmd_ret]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
variable[msg] assign[=] call[constant[Unable to modify site: {0}
Error: {1}].format, parameter[name[name], call[name[cmd_ret]][constant[stderr]]]]
<ast.Raise object at 0x7da1b1c05690>
call[name[log].debug, parameter[constant[Site modified successfully: %s], name[name]]]
return[constant[True]] | keyword[def] identifier[modify_site] ( identifier[name] , identifier[sourcepath] = keyword[None] , identifier[apppool] = keyword[None] , identifier[preload] = keyword[None] ):
literal[string]
identifier[site_path] = literal[string] . identifier[format] ( identifier[name] )
identifier[current_sites] = identifier[list_sites] ()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[current_sites] :
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[False]
identifier[ps_cmd] = identifier[list] ()
keyword[if] identifier[sourcepath] :
identifier[ps_cmd] . identifier[extend] ([ literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[site_path] ),
literal[string] , literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[sourcepath] )])
keyword[if] identifier[apppool] :
keyword[if] identifier[apppool] keyword[in] identifier[list_apppools] ():
identifier[log] . identifier[debug] ( literal[string] , identifier[apppool] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[apppool] )
identifier[create_apppool] ( identifier[apppool] )
keyword[if] identifier[ps_cmd] :
identifier[ps_cmd] . identifier[append] ( literal[string] )
identifier[ps_cmd] . identifier[extend] ([ literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[site_path] ),
literal[string] , literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[apppool] )])
keyword[if] identifier[preload] :
identifier[ps_cmd] . identifier[extend] ([ literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[site_path] ),
literal[string] , literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[preload] )])
identifier[cmd_ret] = identifier[_srvmgr] ( identifier[ps_cmd] )
keyword[if] identifier[cmd_ret] [ literal[string] ]!= literal[int] :
identifier[msg] = literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[cmd_ret] [ literal[string] ])
keyword[raise] identifier[CommandExecutionError] ( identifier[msg] )
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[True] | def modify_site(name, sourcepath=None, apppool=None, preload=None):
"""
Modify a basic website in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The IIS site name.
sourcepath (str): The physical path of the IIS site.
apppool (str): The name of the IIS application pool.
preload (bool): Whether preloading should be enabled
Returns:
bool: True if successful, otherwise False.
.. note::
If an application pool is specified, and that application pool does not
already exist, it will be created.
CLI Example:
.. code-block:: bash
salt '*' win_iis.modify_site name='My Test Site' sourcepath='c:\\new_path' apppool='NewTestPool' preload=True
"""
site_path = 'IIS:\\Sites\\{0}'.format(name)
current_sites = list_sites()
if name not in current_sites:
log.debug("Site '%s' not defined.", name)
return False # depends on [control=['if'], data=['name']]
ps_cmd = list()
if sourcepath:
ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(site_path), '-Name', 'PhysicalPath', '-Value', "'{0}'".format(sourcepath)]) # depends on [control=['if'], data=[]]
if apppool:
if apppool in list_apppools():
log.debug('Utilizing pre-existing application pool: %s', apppool) # depends on [control=['if'], data=['apppool']]
else:
log.debug('Application pool will be created: %s', apppool)
create_apppool(apppool)
# If ps_cmd isn't empty, we need to add a semi-colon to run two commands
if ps_cmd:
ps_cmd.append(';') # depends on [control=['if'], data=[]]
ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(site_path), '-Name', 'ApplicationPool', '-Value', "'{0}'".format(apppool)]) # depends on [control=['if'], data=[]]
if preload:
ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(site_path), '-Name', 'applicationDefaults.preloadEnabled', '-Value', '{0};'.format(preload)]) # depends on [control=['if'], data=[]]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to modify site: {0}\nError: {1}'.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg) # depends on [control=['if'], data=[]]
log.debug('Site modified successfully: %s', name)
return True |
def transform_generator(fn):
    """A decorator that marks transform pipes that should be called to create the real transform.
    Sets ``fn.__dict__['is_transform_generator'] = True`` and returns *fn* unchanged.
    """
    # On Python 2, ``fn.func_dict`` is the same object as ``fn.__dict__``,
    # so the original six.PY2 branch was redundant -- one line covers both.
    fn.__dict__['is_transform_generator'] = True
    return fn
constant[A decorator that marks transform pipes that should be called to create the real transform]
if name[six].PY2 begin[:]
call[name[fn].func_dict][constant[is_transform_generator]] assign[=] constant[True]
return[name[fn]] | keyword[def] identifier[transform_generator] ( identifier[fn] ):
literal[string]
keyword[if] identifier[six] . identifier[PY2] :
identifier[fn] . identifier[func_dict] [ literal[string] ]= keyword[True]
keyword[else] :
identifier[fn] . identifier[__dict__] [ literal[string] ]= keyword[True]
keyword[return] identifier[fn] | def transform_generator(fn):
"""A decorator that marks transform pipes that should be called to create the real transform"""
if six.PY2:
fn.func_dict['is_transform_generator'] = True # depends on [control=['if'], data=[]]
else:
# py3
fn.__dict__['is_transform_generator'] = True
return fn |
def getComponentExceptionSummary(self, tmaster, component_name, instances=[], callback=None):
    """
    Get the summary of exceptions for component_name and list of instances.
    Empty instance list will fetch all exceptions.
    Generator-based coroutine (uses ``yield`` + ``tornado.gen.Return``);
    presumably decorated with ``@tornado.gen.coroutine`` at the definition
    site outside this view.
    Note: the ``instances=[]`` mutable default is benign here -- the list is
    only read (via ``extend`` on the protobuf field), never mutated.
    """
    # Nothing to query without a reachable tmaster stats endpoint.
    if not tmaster or not tmaster.host or not tmaster.stats_port:
        return
    exception_request = tmaster_pb2.ExceptionLogRequest()
    exception_request.component_name = component_name
    if len(instances) > 0:
        exception_request.instances.extend(instances)
    request_str = exception_request.SerializeToString()
    port = str(tmaster.stats_port)
    host = tmaster.host
    url = "http://{0}:{1}/exceptionsummary".format(host, port)
    Log.debug("Creating request object.")
    request = tornado.httpclient.HTTPRequest(url,
                                             body=request_str,
                                             method='POST',
                                             request_timeout=5)
    Log.debug('Making HTTP call to fetch exceptionsummary url: %s', url)
    try:
        client = tornado.httpclient.AsyncHTTPClient()
        result = yield client.fetch(request)
        Log.debug("HTTP call complete.")
    except tornado.httpclient.HTTPError as e:
        raise Exception(str(e))
    # Check the response code - error if it is in 400s or 500s
    responseCode = result.code
    if responseCode >= 400:
        # BUGFIX: responseCode is an int; the original concatenated it
        # directly to a str, raising TypeError before the message was built.
        message = "Error in getting exceptions from Tmaster, code: " + str(responseCode)
        Log.error(message)
        raise tornado.gen.Return({
            "message": message
        })
    # Parse the response from tmaster.
    exception_response = tmaster_pb2.ExceptionLogResponse()
    exception_response.ParseFromString(result.body)
    if exception_response.status.status == common_pb2.NOTOK:
        if exception_response.status.HasField("message"):
            raise tornado.gen.Return({
                "message": exception_response.status.message
            })
    # Send response
    ret = []
    for exception_log in exception_response.exceptions:
        ret.append({'class_name': exception_log.stacktrace,
                    'lasttime': exception_log.lasttime,
                    'firsttime': exception_log.firsttime,
                    'count': str(exception_log.count)})
    raise tornado.gen.Return(ret)
constant[
Get the summary of exceptions for component_name and list of instances.
Empty instance list will fetch all exceptions.
]
if <ast.BoolOp object at 0x7da2054a4160> begin[:]
return[None]
variable[exception_request] assign[=] call[name[tmaster_pb2].ExceptionLogRequest, parameter[]]
name[exception_request].component_name assign[=] name[component_name]
if compare[call[name[len], parameter[name[instances]]] greater[>] constant[0]] begin[:]
call[name[exception_request].instances.extend, parameter[name[instances]]]
variable[request_str] assign[=] call[name[exception_request].SerializeToString, parameter[]]
variable[port] assign[=] call[name[str], parameter[name[tmaster].stats_port]]
variable[host] assign[=] name[tmaster].host
variable[url] assign[=] call[constant[http://{0}:{1}/exceptionsummary].format, parameter[name[host], name[port]]]
call[name[Log].debug, parameter[constant[Creating request object.]]]
variable[request] assign[=] call[name[tornado].httpclient.HTTPRequest, parameter[name[url]]]
call[name[Log].debug, parameter[constant[Making HTTP call to fetch exceptionsummary url: %s], name[url]]]
<ast.Try object at 0x7da2054a4ac0>
variable[responseCode] assign[=] name[result].code
if compare[name[responseCode] greater_or_equal[>=] constant[400]] begin[:]
variable[message] assign[=] binary_operation[constant[Error in getting exceptions from Tmaster, code: ] + name[responseCode]]
call[name[Log].error, parameter[name[message]]]
<ast.Raise object at 0x7da2054a7850>
variable[exception_response] assign[=] call[name[tmaster_pb2].ExceptionLogResponse, parameter[]]
call[name[exception_response].ParseFromString, parameter[name[result].body]]
if compare[name[exception_response].status.status equal[==] name[common_pb2].NOTOK] begin[:]
if call[name[exception_response].status.HasField, parameter[constant[message]]] begin[:]
<ast.Raise object at 0x7da2054a6920>
variable[ret] assign[=] list[[]]
for taget[name[exception_log]] in starred[name[exception_response].exceptions] begin[:]
call[name[ret].append, parameter[dictionary[[<ast.Constant object at 0x7da2054a6c20>, <ast.Constant object at 0x7da2054a4700>, <ast.Constant object at 0x7da2054a5c30>, <ast.Constant object at 0x7da2054a6ec0>], [<ast.Attribute object at 0x7da2054a6ad0>, <ast.Attribute object at 0x7da2054a5930>, <ast.Attribute object at 0x7da2054a5e40>, <ast.Call object at 0x7da2054a45e0>]]]]
<ast.Raise object at 0x7da2054a4f70> | keyword[def] identifier[getComponentExceptionSummary] ( identifier[self] , identifier[tmaster] , identifier[component_name] , identifier[instances] =[], identifier[callback] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[tmaster] keyword[or] keyword[not] identifier[tmaster] . identifier[host] keyword[or] keyword[not] identifier[tmaster] . identifier[stats_port] :
keyword[return]
identifier[exception_request] = identifier[tmaster_pb2] . identifier[ExceptionLogRequest] ()
identifier[exception_request] . identifier[component_name] = identifier[component_name]
keyword[if] identifier[len] ( identifier[instances] )> literal[int] :
identifier[exception_request] . identifier[instances] . identifier[extend] ( identifier[instances] )
identifier[request_str] = identifier[exception_request] . identifier[SerializeToString] ()
identifier[port] = identifier[str] ( identifier[tmaster] . identifier[stats_port] )
identifier[host] = identifier[tmaster] . identifier[host]
identifier[url] = literal[string] . identifier[format] ( identifier[host] , identifier[port] )
identifier[Log] . identifier[debug] ( literal[string] )
identifier[request] = identifier[tornado] . identifier[httpclient] . identifier[HTTPRequest] ( identifier[url] ,
identifier[body] = identifier[request_str] ,
identifier[method] = literal[string] ,
identifier[request_timeout] = literal[int] )
identifier[Log] . identifier[debug] ( literal[string] , identifier[url] )
keyword[try] :
identifier[client] = identifier[tornado] . identifier[httpclient] . identifier[AsyncHTTPClient] ()
identifier[result] = keyword[yield] identifier[client] . identifier[fetch] ( identifier[request] )
identifier[Log] . identifier[debug] ( literal[string] )
keyword[except] identifier[tornado] . identifier[httpclient] . identifier[HTTPError] keyword[as] identifier[e] :
keyword[raise] identifier[Exception] ( identifier[str] ( identifier[e] ))
identifier[responseCode] = identifier[result] . identifier[code]
keyword[if] identifier[responseCode] >= literal[int] :
identifier[message] = literal[string] + identifier[responseCode]
identifier[Log] . identifier[error] ( identifier[message] )
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ({
literal[string] : identifier[message]
})
identifier[exception_response] = identifier[tmaster_pb2] . identifier[ExceptionLogResponse] ()
identifier[exception_response] . identifier[ParseFromString] ( identifier[result] . identifier[body] )
keyword[if] identifier[exception_response] . identifier[status] . identifier[status] == identifier[common_pb2] . identifier[NOTOK] :
keyword[if] identifier[exception_response] . identifier[status] . identifier[HasField] ( literal[string] ):
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ({
literal[string] : identifier[exception_response] . identifier[status] . identifier[message]
})
identifier[ret] =[]
keyword[for] identifier[exception_log] keyword[in] identifier[exception_response] . identifier[exceptions] :
identifier[ret] . identifier[append] ({ literal[string] : identifier[exception_log] . identifier[stacktrace] ,
literal[string] : identifier[exception_log] . identifier[lasttime] ,
literal[string] : identifier[exception_log] . identifier[firsttime] ,
literal[string] : identifier[str] ( identifier[exception_log] . identifier[count] )})
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ( identifier[ret] ) | def getComponentExceptionSummary(self, tmaster, component_name, instances=[], callback=None):
"""
Get the summary of exceptions for component_name and list of instances.
Empty instance list will fetch all exceptions.
"""
if not tmaster or not tmaster.host or (not tmaster.stats_port):
return # depends on [control=['if'], data=[]]
exception_request = tmaster_pb2.ExceptionLogRequest()
exception_request.component_name = component_name
if len(instances) > 0:
exception_request.instances.extend(instances) # depends on [control=['if'], data=[]]
request_str = exception_request.SerializeToString()
port = str(tmaster.stats_port)
host = tmaster.host
url = 'http://{0}:{1}/exceptionsummary'.format(host, port)
Log.debug('Creating request object.')
request = tornado.httpclient.HTTPRequest(url, body=request_str, method='POST', request_timeout=5)
Log.debug('Making HTTP call to fetch exceptionsummary url: %s', url)
try:
client = tornado.httpclient.AsyncHTTPClient()
result = (yield client.fetch(request))
Log.debug('HTTP call complete.') # depends on [control=['try'], data=[]]
except tornado.httpclient.HTTPError as e:
raise Exception(str(e)) # depends on [control=['except'], data=['e']]
# Check the response code - error if it is in 400s or 500s
responseCode = result.code
if responseCode >= 400:
message = 'Error in getting exceptions from Tmaster, code: ' + responseCode
Log.error(message)
raise tornado.gen.Return({'message': message}) # depends on [control=['if'], data=['responseCode']]
# Parse the response from tmaster.
exception_response = tmaster_pb2.ExceptionLogResponse()
exception_response.ParseFromString(result.body)
if exception_response.status.status == common_pb2.NOTOK:
if exception_response.status.HasField('message'):
raise tornado.gen.Return({'message': exception_response.status.message}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Send response
ret = []
for exception_log in exception_response.exceptions:
ret.append({'class_name': exception_log.stacktrace, 'lasttime': exception_log.lasttime, 'firsttime': exception_log.firsttime, 'count': str(exception_log.count)}) # depends on [control=['for'], data=['exception_log']]
raise tornado.gen.Return(ret) |
def create(cls, options, session, build_root=None, exclude_patterns=None, tags=None):
  """Determine the target roots for this pants run.

  Exactly one target selection method may be used per run: literal target
  specs, the `--changed-*` options, or `--owner-of`.

  :param Options options: An `Options` instance to use.
  :param session: The Scheduler session
  :param string build_root: The build root.
  :param exclude_patterns: Address patterns to exclude from the computed roots.
  :param tags: Tags used to filter the computed roots.
  :returns: A `TargetRoots` instance for the selected targets.
  :raises InvalidSpecConstraint: If more than one target selection method is
    provided, or if `--changed-*` is used without a recognized SCM.
  """
  # Determine the literal target roots.
  spec_roots = cls.parse_specs(
    target_specs=options.target_specs,
    build_root=build_root,
    exclude_patterns=exclude_patterns,
    tags=tags)
  # Determine `Changed` arguments directly from options to support pre-`Subsystem`
  # initialization paths.
  changed_options = options.for_scope('changed')
  changed_request = ChangedRequest.from_options(changed_options)
  # Determine the `--owner-of=` arguments provided from the global options
  owned_files = options.for_global_scope().owner_of
  logger.debug('spec_roots are: %s', spec_roots)
  logger.debug('changed_request is: %s', changed_request)
  logger.debug('owned_files are: %s', owned_files)
  # Count how many of the three selection methods were actually used.
  targets_specified = sum(1 for item
                        in (changed_request.is_actionable(), owned_files, spec_roots.dependencies)
                        if item)
  if targets_specified > 1:
    # We've been provided more than one of: a change request, an owner request, or spec roots.
    raise InvalidSpecConstraint(
      'Multiple target selection methods provided. Please use only one of '
      '--changed-*, --owner-of, or target specs'
    )
  if changed_request.is_actionable():
    scm = get_scm()
    if not scm:
      raise InvalidSpecConstraint(
        'The --changed-* options are not available without a recognized SCM (usually git).'
      )
    changed_files = cls.changed_files(
      scm,
      changes_since=changed_request.changes_since,
      diffspec=changed_request.diffspec)
    # We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute
    # alternate target roots.
    request = OwnersRequest(sources=tuple(changed_files),
                            include_dependees=str(changed_request.include_dependees))
    changed_addresses, = session.product_request(BuildFileAddresses, [request])
    logger.debug('changed addresses: %s', changed_addresses)
    dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in changed_addresses)
    return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
  if owned_files:
    # We've been provided no spec roots (e.g. `./pants list`) AND an owner request. Compute
    # alternate target roots.
    # 'none' is already a str; the original redundant str('none') wrapper was removed.
    request = OwnersRequest(sources=tuple(owned_files), include_dependees='none')
    owner_addresses, = session.product_request(BuildFileAddresses, [request])
    logger.debug('owner addresses: %s', owner_addresses)
    dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in owner_addresses)
    return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
  return TargetRoots(spec_roots)
constant[
:param Options options: An `Options` instance to use.
:param session: The Scheduler session
:param string build_root: The build root.
]
variable[spec_roots] assign[=] call[name[cls].parse_specs, parameter[]]
variable[changed_options] assign[=] call[name[options].for_scope, parameter[constant[changed]]]
variable[changed_request] assign[=] call[name[ChangedRequest].from_options, parameter[name[changed_options]]]
variable[owned_files] assign[=] call[name[options].for_global_scope, parameter[]].owner_of
call[name[logger].debug, parameter[constant[spec_roots are: %s], name[spec_roots]]]
call[name[logger].debug, parameter[constant[changed_request is: %s], name[changed_request]]]
call[name[logger].debug, parameter[constant[owned_files are: %s], name[owned_files]]]
variable[targets_specified] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b1eedf30>]]
if compare[name[targets_specified] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1eec670>
if call[name[changed_request].is_actionable, parameter[]] begin[:]
variable[scm] assign[=] call[name[get_scm], parameter[]]
if <ast.UnaryOp object at 0x7da1b1eee2c0> begin[:]
<ast.Raise object at 0x7da1b1eef8b0>
variable[changed_files] assign[=] call[name[cls].changed_files, parameter[name[scm]]]
variable[request] assign[=] call[name[OwnersRequest], parameter[]]
<ast.Tuple object at 0x7da1b1eefaf0> assign[=] call[name[session].product_request, parameter[name[BuildFileAddresses], list[[<ast.Name object at 0x7da1b1eed0c0>]]]]
call[name[logger].debug, parameter[constant[changed addresses: %s], name[changed_addresses]]]
variable[dependencies] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1eed1b0>]]
return[call[name[TargetRoots], parameter[call[name[Specs], parameter[]]]]]
if name[owned_files] begin[:]
variable[request] assign[=] call[name[OwnersRequest], parameter[]]
<ast.Tuple object at 0x7da1b1eeec50> assign[=] call[name[session].product_request, parameter[name[BuildFileAddresses], list[[<ast.Name object at 0x7da1b1eec7f0>]]]]
call[name[logger].debug, parameter[constant[owner addresses: %s], name[owner_addresses]]]
variable[dependencies] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1eed810>]]
return[call[name[TargetRoots], parameter[call[name[Specs], parameter[]]]]]
return[call[name[TargetRoots], parameter[name[spec_roots]]]] | keyword[def] identifier[create] ( identifier[cls] , identifier[options] , identifier[session] , identifier[build_root] = keyword[None] , identifier[exclude_patterns] = keyword[None] , identifier[tags] = keyword[None] ):
literal[string]
identifier[spec_roots] = identifier[cls] . identifier[parse_specs] (
identifier[target_specs] = identifier[options] . identifier[target_specs] ,
identifier[build_root] = identifier[build_root] ,
identifier[exclude_patterns] = identifier[exclude_patterns] ,
identifier[tags] = identifier[tags] )
identifier[changed_options] = identifier[options] . identifier[for_scope] ( literal[string] )
identifier[changed_request] = identifier[ChangedRequest] . identifier[from_options] ( identifier[changed_options] )
identifier[owned_files] = identifier[options] . identifier[for_global_scope] (). identifier[owner_of]
identifier[logger] . identifier[debug] ( literal[string] , identifier[spec_roots] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[changed_request] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[owned_files] )
identifier[targets_specified] = identifier[sum] ( literal[int] keyword[for] identifier[item]
keyword[in] ( identifier[changed_request] . identifier[is_actionable] (), identifier[owned_files] , identifier[spec_roots] . identifier[dependencies] )
keyword[if] identifier[item] )
keyword[if] identifier[targets_specified] > literal[int] :
keyword[raise] identifier[InvalidSpecConstraint] (
literal[string]
literal[string]
)
keyword[if] identifier[changed_request] . identifier[is_actionable] ():
identifier[scm] = identifier[get_scm] ()
keyword[if] keyword[not] identifier[scm] :
keyword[raise] identifier[InvalidSpecConstraint] (
literal[string]
)
identifier[changed_files] = identifier[cls] . identifier[changed_files] (
identifier[scm] ,
identifier[changes_since] = identifier[changed_request] . identifier[changes_since] ,
identifier[diffspec] = identifier[changed_request] . identifier[diffspec] )
identifier[request] = identifier[OwnersRequest] ( identifier[sources] = identifier[tuple] ( identifier[changed_files] ),
identifier[include_dependees] = identifier[str] ( identifier[changed_request] . identifier[include_dependees] ))
identifier[changed_addresses] ,= identifier[session] . identifier[product_request] ( identifier[BuildFileAddresses] ,[ identifier[request] ])
identifier[logger] . identifier[debug] ( literal[string] , identifier[changed_addresses] )
identifier[dependencies] = identifier[tuple] ( identifier[SingleAddress] ( identifier[a] . identifier[spec_path] , identifier[a] . identifier[target_name] ) keyword[for] identifier[a] keyword[in] identifier[changed_addresses] )
keyword[return] identifier[TargetRoots] ( identifier[Specs] ( identifier[dependencies] = identifier[dependencies] , identifier[exclude_patterns] = identifier[exclude_patterns] , identifier[tags] = identifier[tags] ))
keyword[if] identifier[owned_files] :
identifier[request] = identifier[OwnersRequest] ( identifier[sources] = identifier[tuple] ( identifier[owned_files] ), identifier[include_dependees] = identifier[str] ( literal[string] ))
identifier[owner_addresses] ,= identifier[session] . identifier[product_request] ( identifier[BuildFileAddresses] ,[ identifier[request] ])
identifier[logger] . identifier[debug] ( literal[string] , identifier[owner_addresses] )
identifier[dependencies] = identifier[tuple] ( identifier[SingleAddress] ( identifier[a] . identifier[spec_path] , identifier[a] . identifier[target_name] ) keyword[for] identifier[a] keyword[in] identifier[owner_addresses] )
keyword[return] identifier[TargetRoots] ( identifier[Specs] ( identifier[dependencies] = identifier[dependencies] , identifier[exclude_patterns] = identifier[exclude_patterns] , identifier[tags] = identifier[tags] ))
keyword[return] identifier[TargetRoots] ( identifier[spec_roots] ) | def create(cls, options, session, build_root=None, exclude_patterns=None, tags=None):
"""
:param Options options: An `Options` instance to use.
:param session: The Scheduler session
:param string build_root: The build root.
"""
# Determine the literal target roots.
spec_roots = cls.parse_specs(target_specs=options.target_specs, build_root=build_root, exclude_patterns=exclude_patterns, tags=tags)
# Determine `Changed` arguments directly from options to support pre-`Subsystem`
# initialization paths.
changed_options = options.for_scope('changed')
changed_request = ChangedRequest.from_options(changed_options)
# Determine the `--owner-of=` arguments provided from the global options
owned_files = options.for_global_scope().owner_of
logger.debug('spec_roots are: %s', spec_roots)
logger.debug('changed_request is: %s', changed_request)
logger.debug('owned_files are: %s', owned_files)
targets_specified = sum((1 for item in (changed_request.is_actionable(), owned_files, spec_roots.dependencies) if item))
if targets_specified > 1:
# We've been provided more than one of: a change request, an owner request, or spec roots.
raise InvalidSpecConstraint('Multiple target selection methods provided. Please use only one of --changed-*, --owner-of, or target specs') # depends on [control=['if'], data=[]]
if changed_request.is_actionable():
scm = get_scm()
if not scm:
raise InvalidSpecConstraint('The --changed-* options are not available without a recognized SCM (usually git).') # depends on [control=['if'], data=[]]
changed_files = cls.changed_files(scm, changes_since=changed_request.changes_since, diffspec=changed_request.diffspec)
# We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute
# alternate target roots.
request = OwnersRequest(sources=tuple(changed_files), include_dependees=str(changed_request.include_dependees))
(changed_addresses,) = session.product_request(BuildFileAddresses, [request])
logger.debug('changed addresses: %s', changed_addresses)
dependencies = tuple((SingleAddress(a.spec_path, a.target_name) for a in changed_addresses))
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags)) # depends on [control=['if'], data=[]]
if owned_files:
# We've been provided no spec roots (e.g. `./pants list`) AND a owner request. Compute
# alternate target roots.
request = OwnersRequest(sources=tuple(owned_files), include_dependees=str('none'))
(owner_addresses,) = session.product_request(BuildFileAddresses, [request])
logger.debug('owner addresses: %s', owner_addresses)
dependencies = tuple((SingleAddress(a.spec_path, a.target_name) for a in owner_addresses))
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags)) # depends on [control=['if'], data=[]]
return TargetRoots(spec_roots) |
def check_output(args):
    """Pipe-safe (and 2.6 compatible) version of subprocess.check_output

    Runs *args* with stdout captured and returns the captured bytes.
    Raises CalledProcessError (carrying the captured output) when the
    process exits with a non-zero status.
    """
    process = Popen(args, stdout=PIPE)
    stdout_data, _ = process.communicate()
    if process.returncode:
        raise CalledProcessError(process.returncode, args, output=stdout_data)
    return stdout_data
constant[Pipe-safe (and 2.6 compatible) version of subprocess.check_output
]
variable[proc] assign[=] call[name[Popen], parameter[name[args]]]
variable[out] assign[=] call[call[name[proc].communicate, parameter[]]][constant[0]]
if name[proc].returncode begin[:]
<ast.Raise object at 0x7da1b05bea40>
return[name[out]] | keyword[def] identifier[check_output] ( identifier[args] ):
literal[string]
identifier[proc] = identifier[Popen] ( identifier[args] , identifier[stdout] = identifier[PIPE] )
identifier[out] = identifier[proc] . identifier[communicate] ()[ literal[int] ]
keyword[if] identifier[proc] . identifier[returncode] :
keyword[raise] identifier[CalledProcessError] ( identifier[proc] . identifier[returncode] , identifier[args] , identifier[output] = identifier[out] )
keyword[return] identifier[out] | def check_output(args):
"""Pipe-safe (and 2.6 compatible) version of subprocess.check_output
"""
proc = Popen(args, stdout=PIPE)
out = proc.communicate()[0]
if proc.returncode:
raise CalledProcessError(proc.returncode, args, output=out) # depends on [control=['if'], data=[]]
return out |
def _parseStylesheet(self, src):
    """stylesheet
    : [ CHARSET_SYM S* STRING S* ';' ]?
      [S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
      [ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
    ;

    Returns (remaining_src, stylesheet) where stylesheet is produced by
    self.cssBuilder from the parsed elements and @import results.
    """
    # FIXME: BYTES to STR
    # Exact-type check kept deliberately (matches the original behavior).
    if type(src) == six.binary_type:
        src = six.text_type(src)

    # Strip comments up front so the grammar below never sees them.
    src = self.re_comment.sub('', src)

    # [ CHARSET_SYM S* STRING S* ';' ]?
    src = self._parseAtCharset(src)
    # [S|CDO|CDC]*
    src = self._parseSCDOCDC(src)
    # [ import [S|CDO|CDC]* ]*
    src, stylesheetImports = self._parseAtImports(src)
    # [ namespace [S|CDO|CDC]* ]*
    src = self._parseAtNamespace(src)

    # [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*
    elements = []
    while src:  # due to ending with ]*
        if src.startswith('@'):
            # @media, @page, @font-face
            src, produced = self._parseAtKeyword(src)
            if produced is not None and produced != NotImplemented:
                elements.extend(produced)
        else:
            # ruleset
            src, rule = self._parseRuleset(src)
            elements.append(rule)
        # [S|CDO|CDC]*
        src = self._parseSCDOCDC(src)

    return src, self.cssBuilder.stylesheet(elements, stylesheetImports)
constant[stylesheet
: [ CHARSET_SYM S* STRING S* ';' ]?
[S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
[ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
;
]
if compare[call[name[type], parameter[name[src]]] equal[==] name[six].binary_type] begin[:]
variable[src] assign[=] call[name[six].text_type, parameter[name[src]]]
variable[src] assign[=] call[name[self].re_comment.sub, parameter[constant[], name[src]]]
variable[src] assign[=] call[name[self]._parseAtCharset, parameter[name[src]]]
variable[src] assign[=] call[name[self]._parseSCDOCDC, parameter[name[src]]]
<ast.Tuple object at 0x7da18f09ed40> assign[=] call[name[self]._parseAtImports, parameter[name[src]]]
variable[src] assign[=] call[name[self]._parseAtNamespace, parameter[name[src]]]
variable[stylesheetElements] assign[=] list[[]]
while name[src] begin[:]
if call[name[src].startswith, parameter[constant[@]]] begin[:]
<ast.Tuple object at 0x7da18f09d210> assign[=] call[name[self]._parseAtKeyword, parameter[name[src]]]
if <ast.BoolOp object at 0x7da18f09c520> begin[:]
call[name[stylesheetElements].extend, parameter[name[atResults]]]
variable[src] assign[=] call[name[self]._parseSCDOCDC, parameter[name[src]]]
variable[stylesheet] assign[=] call[name[self].cssBuilder.stylesheet, parameter[name[stylesheetElements], name[stylesheetImports]]]
return[tuple[[<ast.Name object at 0x7da1b11f5180>, <ast.Name object at 0x7da1b11f7e20>]]] | keyword[def] identifier[_parseStylesheet] ( identifier[self] , identifier[src] ):
literal[string]
keyword[if] identifier[type] ( identifier[src] )== identifier[six] . identifier[binary_type] :
identifier[src] = identifier[six] . identifier[text_type] ( identifier[src] )
identifier[src] = identifier[self] . identifier[re_comment] . identifier[sub] ( literal[string] , identifier[src] )
identifier[src] = identifier[self] . identifier[_parseAtCharset] ( identifier[src] )
identifier[src] = identifier[self] . identifier[_parseSCDOCDC] ( identifier[src] )
identifier[src] , identifier[stylesheetImports] = identifier[self] . identifier[_parseAtImports] ( identifier[src] )
identifier[src] = identifier[self] . identifier[_parseAtNamespace] ( identifier[src] )
identifier[stylesheetElements] =[]
keyword[while] identifier[src] :
keyword[if] identifier[src] . identifier[startswith] ( literal[string] ):
identifier[src] , identifier[atResults] = identifier[self] . identifier[_parseAtKeyword] ( identifier[src] )
keyword[if] identifier[atResults] keyword[is] keyword[not] keyword[None] keyword[and] identifier[atResults] != identifier[NotImplemented] :
identifier[stylesheetElements] . identifier[extend] ( identifier[atResults] )
keyword[else] :
identifier[src] , identifier[ruleset] = identifier[self] . identifier[_parseRuleset] ( identifier[src] )
identifier[stylesheetElements] . identifier[append] ( identifier[ruleset] )
identifier[src] = identifier[self] . identifier[_parseSCDOCDC] ( identifier[src] )
identifier[stylesheet] = identifier[self] . identifier[cssBuilder] . identifier[stylesheet] ( identifier[stylesheetElements] , identifier[stylesheetImports] )
keyword[return] identifier[src] , identifier[stylesheet] | def _parseStylesheet(self, src):
"""stylesheet
: [ CHARSET_SYM S* STRING S* ';' ]?
[S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
[ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
;
""" # FIXME: BYTES to STR
if type(src) == six.binary_type:
src = six.text_type(src) # depends on [control=['if'], data=[]]
# Get rid of the comments
src = self.re_comment.sub('', src)
# [ CHARSET_SYM S* STRING S* ';' ]?
src = self._parseAtCharset(src)
# [S|CDO|CDC]*
src = self._parseSCDOCDC(src)
# [ import [S|CDO|CDC]* ]*
(src, stylesheetImports) = self._parseAtImports(src)
# [ namespace [S|CDO|CDC]* ]*
src = self._parseAtNamespace(src)
stylesheetElements = []
# [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*
while src: # due to ending with ]*
if src.startswith('@'):
# @media, @page, @font-face
(src, atResults) = self._parseAtKeyword(src)
if atResults is not None and atResults != NotImplemented:
stylesheetElements.extend(atResults) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# ruleset
(src, ruleset) = self._parseRuleset(src)
stylesheetElements.append(ruleset)
# [S|CDO|CDC]*
src = self._parseSCDOCDC(src) # depends on [control=['while'], data=[]]
stylesheet = self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports)
return (src, stylesheet) |
def make_op_return_output(data):
    '''Build an OP_RETURN TxOut carrying up to 77 bytes of data.

    Encoding for 76-77 byte payloads (OP_PUSHDATA1 required):
        <OP_RETURN><OP_PUSHDATA1><data len><data>
    Encoding for payloads under 76 bytes:
        <OP_RETURN><data len><data>
    80 bytes is the default setting for an OP_RETURN output script.
    https://github.com/bitpay/bitcore/issues/1389
    Args:
        data (bytes): data included in output
    Returns:
        (TxOut): TxOut object with OP_RETURN output
    '''
    if len(data) > 77:  # 77 bytes is the limit
        raise ValueError('Data is too long. Expected <= 77 bytes')

    script = bytearray(b'\x6a')          # OP_RETURN
    if len(data) >= 76:                  # only 76 or 77 possible after the guard
        script.append(0x4c)              # OP_PUSHDATA1
    script.append(len(data))             # one byte for the data length
    script.extend(data)                  # the payload itself
    return _make_output(utils.i2le_padded(0, 8), script)
constant[Generates OP_RETURN output for data less than 78 bytes.
If data is 76 or 77 bytes, OP_PUSHDATA1 is included:
<OP_RETURN><OP_PUSHDATA1><data len><data>
If data is less than 76 bytes, OP_PUSHDATA1 is not included:
<OP_RETURN><data len><data>
80 bytes is the default setting for an OP_RETURN output script.
https://github.com/bitpay/bitcore/issues/1389
Args:
data (bytes): data included in output
Returns:
(TxOut): TxOut object with OP_RETURN output
]
if compare[call[name[len], parameter[name[data]]] greater[>] constant[77]] begin[:]
<ast.Raise object at 0x7da1b0650be0>
variable[pk_script] assign[=] call[name[bytearray], parameter[]]
call[name[pk_script].extend, parameter[constant[b'j']]]
if compare[call[name[len], parameter[name[data]]] in list[[<ast.Constant object at 0x7da1b0651480>, <ast.Constant object at 0x7da1b0652380>]]] begin[:]
call[name[pk_script].extend, parameter[constant[b'L']]]
call[name[pk_script].extend, parameter[list[[<ast.Call object at 0x7da1b0652b00>]]]]
call[name[pk_script].extend, parameter[name[data]]]
return[call[name[_make_output], parameter[call[name[utils].i2le_padded, parameter[constant[0], constant[8]]], name[pk_script]]]] | keyword[def] identifier[make_op_return_output] ( identifier[data] ):
literal[string]
keyword[if] identifier[len] ( identifier[data] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[pk_script] = identifier[bytearray] ()
identifier[pk_script] . identifier[extend] ( literal[string] )
keyword[if] identifier[len] ( identifier[data] ) keyword[in] [ literal[int] , literal[int] ]:
identifier[pk_script] . identifier[extend] ( literal[string] )
identifier[pk_script] . identifier[extend] ([ identifier[len] ( identifier[data] )])
identifier[pk_script] . identifier[extend] ( identifier[data] )
keyword[return] identifier[_make_output] ( identifier[utils] . identifier[i2le_padded] ( literal[int] , literal[int] ), identifier[pk_script] ) | def make_op_return_output(data):
"""Generates OP_RETURN output for data less than 78 bytes.
If data is 76 or 77 bytes, OP_PUSHDATA1 is included:
<OP_RETURN><OP_PUSHDATA1><data len><data>
If data is less than 76 bytes, OP_PUSHDATA1 is not included:
<OP_RETURN><data len><data>
80 bytes is the default setting for an OP_RETURN output script.
https://github.com/bitpay/bitcore/issues/1389
Args:
data (bytes): data included in output
Returns:
(TxOut): TxOut object with OP_RETURN output
"""
if len(data) > 77: # 77 bytes is the limit
raise ValueError('Data is too long. Expected <= 77 bytes') # depends on [control=['if'], data=[]]
pk_script = bytearray()
pk_script.extend(b'j') # OP_RETURN
# OP_PUSHDATA1 only used if data is greater than 75 bytes
if len(data) in [76, 77]:
pk_script.extend(b'L') # OP_PUSHDATA1 # depends on [control=['if'], data=[]]
pk_script.extend([len(data)]) # One byte for length of data
pk_script.extend(data) # Data
return _make_output(utils.i2le_padded(0, 8), pk_script) |
def _write_input(self, input_dir="."):
    """
    Write the packmol input file to the input directory.

    Emits one line per entry in ``self.control_params``, then, for each
    molecule in ``self.mols``, writes the molecule's structure file next
    to the input file and appends a matching
    ``structure ... end structure`` stanza with that molecule's
    parameters from ``self.param_list``.

    Args:
        input_dir (string): path to the input directory
    """
    with open(os.path.join(input_dir, self.input_file), 'wt', encoding="utf-8") as inp:
        # Global packmol control parameters (e.g. tolerance, filetype).
        for k, v in self.control_params.items():
            inp.write('{} {}\n'.format(k, self._format_param_val(v)))
        # write the structures of the constituent molecules to file and set
        # the molecule id and the corresponding filename in the packmol
        # input file.
        for idx, mol in enumerate(self.mols):
            # NOTE(review): the path is encoded to ascii bytes here,
            # presumably for the downstream writers -- confirm necessity.
            filename = os.path.join(
                input_dir, '{}.{}'.format(
                    idx, self.control_params["filetype"])).encode("ascii")
            # pdb
            if self.control_params["filetype"] == "pdb":
                self.write_pdb(mol, filename, num=idx+1)
            # all other filetypes
            else:
                # Convert via openbabel for any non-pdb output format.
                a = BabelMolAdaptor(mol)
                pm = pb.Molecule(a.openbabel_mol)
                pm.write(self.control_params["filetype"], filename=filename,
                         overwrite=True)
            inp.write("\n")
            inp.write(
                "structure {}.{}\n".format(
                    os.path.join(input_dir, str(idx)),
                    self.control_params["filetype"]))
            # Per-molecule constraints for this structure stanza.
            for k, v in self.param_list[idx].items():
                inp.write(' {} {}\n'.format(k, self._format_param_val(v)))
            inp.write('end structure\n')
constant[
Write the packmol input file to the input directory.
Args:
input_dir (string): path to the input directory
]
with call[name[open], parameter[call[name[os].path.join, parameter[name[input_dir], name[self].input_file]], constant[wt]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c9916c0>, <ast.Name object at 0x7da20c9917b0>]]] in starred[call[name[self].control_params.items, parameter[]]] begin[:]
call[name[inp].write, parameter[call[constant[{} {}
].format, parameter[name[k], call[name[self]._format_param_val, parameter[name[v]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da20c991690>, <ast.Name object at 0x7da20c990ac0>]]] in starred[call[name[enumerate], parameter[name[self].mols]]] begin[:]
variable[filename] assign[=] call[call[name[os].path.join, parameter[name[input_dir], call[constant[{}.{}].format, parameter[name[idx], call[name[self].control_params][constant[filetype]]]]]].encode, parameter[constant[ascii]]]
if compare[call[name[self].control_params][constant[filetype]] equal[==] constant[pdb]] begin[:]
call[name[self].write_pdb, parameter[name[mol], name[filename]]]
call[name[inp].write, parameter[constant[
]]]
call[name[inp].write, parameter[call[constant[structure {}.{}
].format, parameter[call[name[os].path.join, parameter[name[input_dir], call[name[str], parameter[name[idx]]]]], call[name[self].control_params][constant[filetype]]]]]]
for taget[tuple[[<ast.Name object at 0x7da20c990610>, <ast.Name object at 0x7da20c991e70>]]] in starred[call[call[name[self].param_list][name[idx]].items, parameter[]]] begin[:]
call[name[inp].write, parameter[call[constant[ {} {}
].format, parameter[name[k], call[name[self]._format_param_val, parameter[name[v]]]]]]]
call[name[inp].write, parameter[constant[end structure
]]] | keyword[def] identifier[_write_input] ( identifier[self] , identifier[input_dir] = literal[string] ):
literal[string]
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir] , identifier[self] . identifier[input_file] ), literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[inp] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[control_params] . identifier[items] ():
identifier[inp] . identifier[write] ( literal[string] . identifier[format] ( identifier[k] , identifier[self] . identifier[_format_param_val] ( identifier[v] )))
keyword[for] identifier[idx] , identifier[mol] keyword[in] identifier[enumerate] ( identifier[self] . identifier[mols] ):
identifier[filename] = identifier[os] . identifier[path] . identifier[join] (
identifier[input_dir] , literal[string] . identifier[format] (
identifier[idx] , identifier[self] . identifier[control_params] [ literal[string] ])). identifier[encode] ( literal[string] )
keyword[if] identifier[self] . identifier[control_params] [ literal[string] ]== literal[string] :
identifier[self] . identifier[write_pdb] ( identifier[mol] , identifier[filename] , identifier[num] = identifier[idx] + literal[int] )
keyword[else] :
identifier[a] = identifier[BabelMolAdaptor] ( identifier[mol] )
identifier[pm] = identifier[pb] . identifier[Molecule] ( identifier[a] . identifier[openbabel_mol] )
identifier[pm] . identifier[write] ( identifier[self] . identifier[control_params] [ literal[string] ], identifier[filename] = identifier[filename] ,
identifier[overwrite] = keyword[True] )
identifier[inp] . identifier[write] ( literal[string] )
identifier[inp] . identifier[write] (
literal[string] . identifier[format] (
identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir] , identifier[str] ( identifier[idx] )),
identifier[self] . identifier[control_params] [ literal[string] ]))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[param_list] [ identifier[idx] ]. identifier[items] ():
identifier[inp] . identifier[write] ( literal[string] . identifier[format] ( identifier[k] , identifier[self] . identifier[_format_param_val] ( identifier[v] )))
identifier[inp] . identifier[write] ( literal[string] ) | def _write_input(self, input_dir='.'):
"""
Write the packmol input file to the input directory.
Args:
input_dir (string): path to the input directory
"""
with open(os.path.join(input_dir, self.input_file), 'wt', encoding='utf-8') as inp:
for (k, v) in self.control_params.items():
inp.write('{} {}\n'.format(k, self._format_param_val(v))) # depends on [control=['for'], data=[]]
# write the structures of the constituent molecules to file and set
# the molecule id and the corresponding filename in the packmol
# input file.
for (idx, mol) in enumerate(self.mols):
filename = os.path.join(input_dir, '{}.{}'.format(idx, self.control_params['filetype'])).encode('ascii')
# pdb
if self.control_params['filetype'] == 'pdb':
self.write_pdb(mol, filename, num=idx + 1) # depends on [control=['if'], data=[]]
else:
# all other filetypes
a = BabelMolAdaptor(mol)
pm = pb.Molecule(a.openbabel_mol)
pm.write(self.control_params['filetype'], filename=filename, overwrite=True)
inp.write('\n')
inp.write('structure {}.{}\n'.format(os.path.join(input_dir, str(idx)), self.control_params['filetype']))
for (k, v) in self.param_list[idx].items():
inp.write(' {} {}\n'.format(k, self._format_param_val(v))) # depends on [control=['for'], data=[]]
inp.write('end structure\n') # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['inp']] |
def _init_metadata(self):
    """stub"""
    # Initialize each mixin record's metadata in the same fixed order,
    # then defer to the normal MRO chain.
    for record_type in (MultiChoiceAnswerFormRecord,
                        FilesAnswerFormRecord,
                        FeedbackAnswerFormRecord):
        record_type._init_metadata(self)
    super(MultiChoiceFeedbackAndFilesAnswerFormRecord, self)._init_metadata()
constant[stub]
call[name[MultiChoiceAnswerFormRecord]._init_metadata, parameter[name[self]]]
call[name[FilesAnswerFormRecord]._init_metadata, parameter[name[self]]]
call[name[FeedbackAnswerFormRecord]._init_metadata, parameter[name[self]]]
call[call[name[super], parameter[name[MultiChoiceFeedbackAndFilesAnswerFormRecord], name[self]]]._init_metadata, parameter[]] | keyword[def] identifier[_init_metadata] ( identifier[self] ):
literal[string]
identifier[MultiChoiceAnswerFormRecord] . identifier[_init_metadata] ( identifier[self] )
identifier[FilesAnswerFormRecord] . identifier[_init_metadata] ( identifier[self] )
identifier[FeedbackAnswerFormRecord] . identifier[_init_metadata] ( identifier[self] )
identifier[super] ( identifier[MultiChoiceFeedbackAndFilesAnswerFormRecord] , identifier[self] ). identifier[_init_metadata] () | def _init_metadata(self):
"""stub"""
MultiChoiceAnswerFormRecord._init_metadata(self)
FilesAnswerFormRecord._init_metadata(self)
FeedbackAnswerFormRecord._init_metadata(self)
super(MultiChoiceFeedbackAndFilesAnswerFormRecord, self)._init_metadata() |
def source(source_id=None, **kwargs):
    """Get a source of economic data."""
    # Accept the id either positionally or as an 'id' keyword alias.
    if source_id is None and 'id' in kwargs:
        source_id = kwargs.pop('id')
    if source_id is not None:
        kwargs['source_id'] = source_id
    # A 'releases' flag switches the request to the releases endpoint.
    if 'releases' in kwargs:
        del kwargs['releases']
        path = 'releases'
    else:
        path = None
    return Fred().source(path, **kwargs)
constant[Get a source of economic data.]
if compare[name[source_id] is_not constant[None]] begin[:]
call[name[kwargs]][constant[source_id]] assign[=] name[source_id]
if compare[constant[releases] in name[kwargs]] begin[:]
call[name[kwargs].pop, parameter[constant[releases]]]
variable[path] assign[=] constant[releases]
return[call[call[name[Fred], parameter[]].source, parameter[name[path]]]] | keyword[def] identifier[source] ( identifier[source_id] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[source_id] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[source_id]
keyword[elif] literal[string] keyword[in] identifier[kwargs] :
identifier[source_id] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[kwargs] [ literal[string] ]= identifier[source_id]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[path] = literal[string]
keyword[else] :
identifier[path] = keyword[None]
keyword[return] identifier[Fred] (). identifier[source] ( identifier[path] ,** identifier[kwargs] ) | def source(source_id=None, **kwargs):
"""Get a source of economic data."""
if source_id is not None:
kwargs['source_id'] = source_id # depends on [control=['if'], data=['source_id']]
elif 'id' in kwargs:
source_id = kwargs.pop('id')
kwargs['source_id'] = source_id # depends on [control=['if'], data=['kwargs']]
if 'releases' in kwargs:
kwargs.pop('releases')
path = 'releases' # depends on [control=['if'], data=['kwargs']]
else:
path = None
return Fred().source(path, **kwargs) |
def full_size(self):
'''show image at full size'''
self.dragpos = wx.Point(0, 0)
self.zoom = 1.0
self.need_redraw = True | def function[full_size, parameter[self]]:
constant[show image at full size]
name[self].dragpos assign[=] call[name[wx].Point, parameter[constant[0], constant[0]]]
name[self].zoom assign[=] constant[1.0]
name[self].need_redraw assign[=] constant[True] | keyword[def] identifier[full_size] ( identifier[self] ):
literal[string]
identifier[self] . identifier[dragpos] = identifier[wx] . identifier[Point] ( literal[int] , literal[int] )
identifier[self] . identifier[zoom] = literal[int]
identifier[self] . identifier[need_redraw] = keyword[True] | def full_size(self):
"""show image at full size"""
self.dragpos = wx.Point(0, 0)
self.zoom = 1.0
self.need_redraw = True |
def on_shared_folder_change(self, global_p):
"""Triggered when a permanent (global or machine) shared folder has been
created or removed.
We don't pass shared folder parameters in this notification because
the order in which parallel notifications are delivered is not defined,
therefore it could happen that these parameters were outdated by the
time of processing this notification.
in global_p of type bool
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(global_p, bool):
raise TypeError("global_p can only be an instance of type bool")
self._call("onSharedFolderChange",
in_p=[global_p]) | def function[on_shared_folder_change, parameter[self, global_p]]:
constant[Triggered when a permanent (global or machine) shared folder has been
created or removed.
We don't pass shared folder parameters in this notification because
the order in which parallel notifications are delivered is not defined,
therefore it could happen that these parameters were outdated by the
time of processing this notification.
in global_p of type bool
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
]
if <ast.UnaryOp object at 0x7da20c6c5a20> begin[:]
<ast.Raise object at 0x7da20c6c53f0>
call[name[self]._call, parameter[constant[onSharedFolderChange]]] | keyword[def] identifier[on_shared_folder_change] ( identifier[self] , identifier[global_p] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[global_p] , identifier[bool] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[global_p] ]) | def on_shared_folder_change(self, global_p):
"""Triggered when a permanent (global or machine) shared folder has been
created or removed.
We don't pass shared folder parameters in this notification because
the order in which parallel notifications are delivered is not defined,
therefore it could happen that these parameters were outdated by the
time of processing this notification.
in global_p of type bool
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(global_p, bool):
raise TypeError('global_p can only be an instance of type bool') # depends on [control=['if'], data=[]]
self._call('onSharedFolderChange', in_p=[global_p]) |
def get_country_from_request(request):
"""
Analyzes the request to find which country the user wants
the system to recognize. It checks the following sources
in the given order:
* session,
* cookie,
* HTTP_ACCEPT_LANGUAGE HTTP header, and
* IP address if USE_GEOIP is True.
It returns country code in ISO 3166-1 alpha-2 format.
"""
if hasattr(request, 'session'):
country_code = request.session.get(COUNTRY_SESSION_KEY)
if country_code:
return get_supported_country(country_code)
country_code = request.COOKIES.get(COUNTRY_COOKIE_NAME)
if country_code:
return get_supported_country(country_code)
if USE_GEOIP:
ip = _extract_ip_address(request.META)
country_code = _geo.country_code_by_addr(ip)
if country_code:
return get_supported_country(country_code)
if USE_LOCALE:
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, _ in trans_real.parse_accept_lang_header(accept):
if LANG_COUNTRY_DELIM in accept_lang:
country_code = accept_lang.split(LANG_COUNTRY_DELIM)[-1]
if country_code:
return get_supported_country(country_code)
return DEFAULT_COUNTRY_CODE | def function[get_country_from_request, parameter[request]]:
constant[
Analyzes the request to find which country the user wants
the system to recognize. It checks the following sources
in the given order:
* session,
* cookie,
* HTTP_ACCEPT_LANGUAGE HTTP header, and
* IP address if USE_GEOIP is True.
It returns country code in ISO 3166-1 alpha-2 format.
]
if call[name[hasattr], parameter[name[request], constant[session]]] begin[:]
variable[country_code] assign[=] call[name[request].session.get, parameter[name[COUNTRY_SESSION_KEY]]]
if name[country_code] begin[:]
return[call[name[get_supported_country], parameter[name[country_code]]]]
variable[country_code] assign[=] call[name[request].COOKIES.get, parameter[name[COUNTRY_COOKIE_NAME]]]
if name[country_code] begin[:]
return[call[name[get_supported_country], parameter[name[country_code]]]]
if name[USE_GEOIP] begin[:]
variable[ip] assign[=] call[name[_extract_ip_address], parameter[name[request].META]]
variable[country_code] assign[=] call[name[_geo].country_code_by_addr, parameter[name[ip]]]
if name[country_code] begin[:]
return[call[name[get_supported_country], parameter[name[country_code]]]]
if name[USE_LOCALE] begin[:]
variable[accept] assign[=] call[name[request].META.get, parameter[constant[HTTP_ACCEPT_LANGUAGE], constant[]]]
for taget[tuple[[<ast.Name object at 0x7da204344070>, <ast.Name object at 0x7da204346cb0>]]] in starred[call[name[trans_real].parse_accept_lang_header, parameter[name[accept]]]] begin[:]
if compare[name[LANG_COUNTRY_DELIM] in name[accept_lang]] begin[:]
variable[country_code] assign[=] call[call[name[accept_lang].split, parameter[name[LANG_COUNTRY_DELIM]]]][<ast.UnaryOp object at 0x7da204347d30>]
if name[country_code] begin[:]
return[call[name[get_supported_country], parameter[name[country_code]]]]
return[name[DEFAULT_COUNTRY_CODE]] | keyword[def] identifier[get_country_from_request] ( identifier[request] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[request] , literal[string] ):
identifier[country_code] = identifier[request] . identifier[session] . identifier[get] ( identifier[COUNTRY_SESSION_KEY] )
keyword[if] identifier[country_code] :
keyword[return] identifier[get_supported_country] ( identifier[country_code] )
identifier[country_code] = identifier[request] . identifier[COOKIES] . identifier[get] ( identifier[COUNTRY_COOKIE_NAME] )
keyword[if] identifier[country_code] :
keyword[return] identifier[get_supported_country] ( identifier[country_code] )
keyword[if] identifier[USE_GEOIP] :
identifier[ip] = identifier[_extract_ip_address] ( identifier[request] . identifier[META] )
identifier[country_code] = identifier[_geo] . identifier[country_code_by_addr] ( identifier[ip] )
keyword[if] identifier[country_code] :
keyword[return] identifier[get_supported_country] ( identifier[country_code] )
keyword[if] identifier[USE_LOCALE] :
identifier[accept] = identifier[request] . identifier[META] . identifier[get] ( literal[string] , literal[string] )
keyword[for] identifier[accept_lang] , identifier[_] keyword[in] identifier[trans_real] . identifier[parse_accept_lang_header] ( identifier[accept] ):
keyword[if] identifier[LANG_COUNTRY_DELIM] keyword[in] identifier[accept_lang] :
identifier[country_code] = identifier[accept_lang] . identifier[split] ( identifier[LANG_COUNTRY_DELIM] )[- literal[int] ]
keyword[if] identifier[country_code] :
keyword[return] identifier[get_supported_country] ( identifier[country_code] )
keyword[return] identifier[DEFAULT_COUNTRY_CODE] | def get_country_from_request(request):
"""
Analyzes the request to find which country the user wants
the system to recognize. It checks the following sources
in the given order:
* session,
* cookie,
* HTTP_ACCEPT_LANGUAGE HTTP header, and
* IP address if USE_GEOIP is True.
It returns country code in ISO 3166-1 alpha-2 format.
"""
if hasattr(request, 'session'):
country_code = request.session.get(COUNTRY_SESSION_KEY)
if country_code:
return get_supported_country(country_code) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
country_code = request.COOKIES.get(COUNTRY_COOKIE_NAME)
if country_code:
return get_supported_country(country_code) # depends on [control=['if'], data=[]]
if USE_GEOIP:
ip = _extract_ip_address(request.META)
country_code = _geo.country_code_by_addr(ip)
if country_code:
return get_supported_country(country_code) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if USE_LOCALE:
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for (accept_lang, _) in trans_real.parse_accept_lang_header(accept):
if LANG_COUNTRY_DELIM in accept_lang:
country_code = accept_lang.split(LANG_COUNTRY_DELIM)[-1]
if country_code:
return get_supported_country(country_code) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['LANG_COUNTRY_DELIM', 'accept_lang']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return DEFAULT_COUNTRY_CODE |
def get_by_page(query, page, page_size):
"""
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
"""
pager = Paginator(query, page_size)
try:
models = pager.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages)
return models | def function[get_by_page, parameter[query, page, page_size]]:
constant[
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
]
variable[pager] assign[=] call[name[Paginator], parameter[name[query], name[page_size]]]
<ast.Try object at 0x7da1b26ad6f0>
return[name[models]] | keyword[def] identifier[get_by_page] ( identifier[query] , identifier[page] , identifier[page_size] ):
literal[string]
identifier[pager] = identifier[Paginator] ( identifier[query] , identifier[page_size] )
keyword[try] :
identifier[models] = identifier[pager] . identifier[page] ( identifier[page] )
keyword[except] identifier[PageNotAnInteger] :
identifier[models] = identifier[pager] . identifier[page] ( literal[int] )
keyword[except] identifier[EmptyPage] :
identifier[models] = identifier[pager] . identifier[page] ( identifier[pager] . identifier[num_pages] )
keyword[return] identifier[models] | def get_by_page(query, page, page_size):
"""
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
"""
pager = Paginator(query, page_size)
try:
models = pager.page(page) # depends on [control=['try'], data=[]]
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1) # depends on [control=['except'], data=[]]
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages) # depends on [control=['except'], data=[]]
return models |
def scatterplot(self, x, y, **kw):
"""plot after clearing current plot """
self.panel.scatterplot(x, y, **kw) | def function[scatterplot, parameter[self, x, y]]:
constant[plot after clearing current plot ]
call[name[self].panel.scatterplot, parameter[name[x], name[y]]] | keyword[def] identifier[scatterplot] ( identifier[self] , identifier[x] , identifier[y] ,** identifier[kw] ):
literal[string]
identifier[self] . identifier[panel] . identifier[scatterplot] ( identifier[x] , identifier[y] ,** identifier[kw] ) | def scatterplot(self, x, y, **kw):
"""plot after clearing current plot """
self.panel.scatterplot(x, y, **kw) |
def _enforceDataType(self, data):
""" Converts to str so that this CTI always stores that type.
"""
qColor = QtGui.QColor(data) # TODO: store a RGB string?
if not qColor.isValid():
raise ValueError("Invalid color specification: {!r}".format(data))
return qColor | def function[_enforceDataType, parameter[self, data]]:
constant[ Converts to str so that this CTI always stores that type.
]
variable[qColor] assign[=] call[name[QtGui].QColor, parameter[name[data]]]
if <ast.UnaryOp object at 0x7da1b04f9cf0> begin[:]
<ast.Raise object at 0x7da1b04176a0>
return[name[qColor]] | keyword[def] identifier[_enforceDataType] ( identifier[self] , identifier[data] ):
literal[string]
identifier[qColor] = identifier[QtGui] . identifier[QColor] ( identifier[data] )
keyword[if] keyword[not] identifier[qColor] . identifier[isValid] ():
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[data] ))
keyword[return] identifier[qColor] | def _enforceDataType(self, data):
""" Converts to str so that this CTI always stores that type.
"""
qColor = QtGui.QColor(data) # TODO: store a RGB string?
if not qColor.isValid():
raise ValueError('Invalid color specification: {!r}'.format(data)) # depends on [control=['if'], data=[]]
return qColor |
def clearParameters(self):
"""Removes all parameters from model"""
self.beginRemoveRows(QtCore.QModelIndex(), 0, self.rowCount())
self.model.clear_parameters()
self.endRemoveRows() | def function[clearParameters, parameter[self]]:
constant[Removes all parameters from model]
call[name[self].beginRemoveRows, parameter[call[name[QtCore].QModelIndex, parameter[]], constant[0], call[name[self].rowCount, parameter[]]]]
call[name[self].model.clear_parameters, parameter[]]
call[name[self].endRemoveRows, parameter[]] | keyword[def] identifier[clearParameters] ( identifier[self] ):
literal[string]
identifier[self] . identifier[beginRemoveRows] ( identifier[QtCore] . identifier[QModelIndex] (), literal[int] , identifier[self] . identifier[rowCount] ())
identifier[self] . identifier[model] . identifier[clear_parameters] ()
identifier[self] . identifier[endRemoveRows] () | def clearParameters(self):
"""Removes all parameters from model"""
self.beginRemoveRows(QtCore.QModelIndex(), 0, self.rowCount())
self.model.clear_parameters()
self.endRemoveRows() |
def set_executable(filename):
"""Set the exectuable bit on the given filename"""
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC) | def function[set_executable, parameter[filename]]:
constant[Set the exectuable bit on the given filename]
variable[st] assign[=] call[name[os].stat, parameter[name[filename]]]
call[name[os].chmod, parameter[name[filename], binary_operation[name[st].st_mode <ast.BitOr object at 0x7da2590d6aa0> name[stat].S_IEXEC]]] | keyword[def] identifier[set_executable] ( identifier[filename] ):
literal[string]
identifier[st] = identifier[os] . identifier[stat] ( identifier[filename] )
identifier[os] . identifier[chmod] ( identifier[filename] , identifier[st] . identifier[st_mode] | identifier[stat] . identifier[S_IEXEC] ) | def set_executable(filename):
"""Set the exectuable bit on the given filename"""
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC) |
def parse_abstract(xml_dict):
"""
Parse PubMed XML dictionary to retrieve abstract.
"""
key_path = ['PubmedArticleSet', 'PubmedArticle', 'MedlineCitation',
'Article', 'Abstract', 'AbstractText']
abstract_xml = reduce(dict.get, key_path, xml_dict)
abstract_paragraphs = []
if isinstance(abstract_xml, str):
abstract_paragraphs.append(abstract_xml)
elif isinstance(abstract_xml, dict):
abstract_text = abstract_xml.get('#text')
try:
abstract_label = abstract_xml['@Label']
except KeyError:
abstract_paragraphs.append(abstract_text)
else:
abstract_paragraphs.append(
"{}: {}".format(abstract_label, abstract_text))
elif isinstance(abstract_xml, list):
for abstract_section in abstract_xml:
try:
abstract_text = abstract_section['#text']
except KeyError:
abstract_text = abstract_section
try:
abstract_label = abstract_section['@Label']
except KeyError:
abstract_paragraphs.append(abstract_text)
else:
abstract_paragraphs.append(
"{}: {}".format(abstract_label, abstract_text))
else:
raise RuntimeError("Error parsing abstract.")
return "\n\n".join(abstract_paragraphs) | def function[parse_abstract, parameter[xml_dict]]:
constant[
Parse PubMed XML dictionary to retrieve abstract.
]
variable[key_path] assign[=] list[[<ast.Constant object at 0x7da18f722f50>, <ast.Constant object at 0x7da18f7216f0>, <ast.Constant object at 0x7da18f721780>, <ast.Constant object at 0x7da18f723010>, <ast.Constant object at 0x7da18f720220>, <ast.Constant object at 0x7da18f721960>]]
variable[abstract_xml] assign[=] call[name[reduce], parameter[name[dict].get, name[key_path], name[xml_dict]]]
variable[abstract_paragraphs] assign[=] list[[]]
if call[name[isinstance], parameter[name[abstract_xml], name[str]]] begin[:]
call[name[abstract_paragraphs].append, parameter[name[abstract_xml]]]
return[call[constant[
].join, parameter[name[abstract_paragraphs]]]] | keyword[def] identifier[parse_abstract] ( identifier[xml_dict] ):
literal[string]
identifier[key_path] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]
identifier[abstract_xml] = identifier[reduce] ( identifier[dict] . identifier[get] , identifier[key_path] , identifier[xml_dict] )
identifier[abstract_paragraphs] =[]
keyword[if] identifier[isinstance] ( identifier[abstract_xml] , identifier[str] ):
identifier[abstract_paragraphs] . identifier[append] ( identifier[abstract_xml] )
keyword[elif] identifier[isinstance] ( identifier[abstract_xml] , identifier[dict] ):
identifier[abstract_text] = identifier[abstract_xml] . identifier[get] ( literal[string] )
keyword[try] :
identifier[abstract_label] = identifier[abstract_xml] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[abstract_paragraphs] . identifier[append] ( identifier[abstract_text] )
keyword[else] :
identifier[abstract_paragraphs] . identifier[append] (
literal[string] . identifier[format] ( identifier[abstract_label] , identifier[abstract_text] ))
keyword[elif] identifier[isinstance] ( identifier[abstract_xml] , identifier[list] ):
keyword[for] identifier[abstract_section] keyword[in] identifier[abstract_xml] :
keyword[try] :
identifier[abstract_text] = identifier[abstract_section] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[abstract_text] = identifier[abstract_section]
keyword[try] :
identifier[abstract_label] = identifier[abstract_section] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[abstract_paragraphs] . identifier[append] ( identifier[abstract_text] )
keyword[else] :
identifier[abstract_paragraphs] . identifier[append] (
literal[string] . identifier[format] ( identifier[abstract_label] , identifier[abstract_text] ))
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[abstract_paragraphs] ) | def parse_abstract(xml_dict):
"""
Parse PubMed XML dictionary to retrieve abstract.
"""
key_path = ['PubmedArticleSet', 'PubmedArticle', 'MedlineCitation', 'Article', 'Abstract', 'AbstractText']
abstract_xml = reduce(dict.get, key_path, xml_dict)
abstract_paragraphs = []
if isinstance(abstract_xml, str):
abstract_paragraphs.append(abstract_xml) # depends on [control=['if'], data=[]]
elif isinstance(abstract_xml, dict):
abstract_text = abstract_xml.get('#text')
try:
abstract_label = abstract_xml['@Label'] # depends on [control=['try'], data=[]]
except KeyError:
abstract_paragraphs.append(abstract_text) # depends on [control=['except'], data=[]]
else:
abstract_paragraphs.append('{}: {}'.format(abstract_label, abstract_text)) # depends on [control=['if'], data=[]]
elif isinstance(abstract_xml, list):
for abstract_section in abstract_xml:
try:
abstract_text = abstract_section['#text'] # depends on [control=['try'], data=[]]
except KeyError:
abstract_text = abstract_section # depends on [control=['except'], data=[]]
try:
abstract_label = abstract_section['@Label'] # depends on [control=['try'], data=[]]
except KeyError:
abstract_paragraphs.append(abstract_text) # depends on [control=['except'], data=[]]
else:
abstract_paragraphs.append('{}: {}'.format(abstract_label, abstract_text)) # depends on [control=['for'], data=['abstract_section']] # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Error parsing abstract.')
return '\n\n'.join(abstract_paragraphs) |
def update_group(self, group_id, group, force=False, minimal=True):
"""Update a group.
Applies writable settings in `group` to `group_id`
Note: this method can not be used to rename groups.
:param str group_id: target group ID
:param group: group settings
:type group: :class:`marathon.models.group.MarathonGroup`
:param bool force: apply even if a deployment is in progress
:param bool minimal: ignore nulls and empty collections
:returns: a dict containing the deployment id and version
:rtype: dict
"""
# Changes won't take if version is set - blank it for convenience
group.version = None
params = {'force': force}
data = group.to_json(minimal=minimal)
response = self._do_request(
'PUT', '/v2/groups/{group_id}'.format(group_id=group_id), data=data, params=params)
return response.json() | def function[update_group, parameter[self, group_id, group, force, minimal]]:
constant[Update a group.
Applies writable settings in `group` to `group_id`
Note: this method can not be used to rename groups.
:param str group_id: target group ID
:param group: group settings
:type group: :class:`marathon.models.group.MarathonGroup`
:param bool force: apply even if a deployment is in progress
:param bool minimal: ignore nulls and empty collections
:returns: a dict containing the deployment id and version
:rtype: dict
]
name[group].version assign[=] constant[None]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f58be0>], [<ast.Name object at 0x7da1b0f58760>]]
variable[data] assign[=] call[name[group].to_json, parameter[]]
variable[response] assign[=] call[name[self]._do_request, parameter[constant[PUT], call[constant[/v2/groups/{group_id}].format, parameter[]]]]
return[call[name[response].json, parameter[]]] | keyword[def] identifier[update_group] ( identifier[self] , identifier[group_id] , identifier[group] , identifier[force] = keyword[False] , identifier[minimal] = keyword[True] ):
literal[string]
identifier[group] . identifier[version] = keyword[None]
identifier[params] ={ literal[string] : identifier[force] }
identifier[data] = identifier[group] . identifier[to_json] ( identifier[minimal] = identifier[minimal] )
identifier[response] = identifier[self] . identifier[_do_request] (
literal[string] , literal[string] . identifier[format] ( identifier[group_id] = identifier[group_id] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] )
keyword[return] identifier[response] . identifier[json] () | def update_group(self, group_id, group, force=False, minimal=True):
"""Update a group.
Applies writable settings in `group` to `group_id`
Note: this method can not be used to rename groups.
:param str group_id: target group ID
:param group: group settings
:type group: :class:`marathon.models.group.MarathonGroup`
:param bool force: apply even if a deployment is in progress
:param bool minimal: ignore nulls and empty collections
:returns: a dict containing the deployment id and version
:rtype: dict
"""
# Changes won't take if version is set - blank it for convenience
group.version = None
params = {'force': force}
data = group.to_json(minimal=minimal)
response = self._do_request('PUT', '/v2/groups/{group_id}'.format(group_id=group_id), data=data, params=params)
return response.json() |
def is_dark_terminal_background(cls):
"""
:return: Whether we have a dark Terminal background color, or None if unknown.
We currently just check the env var COLORFGBG,
which some terminals define like "<foreground-color>:<background-color>",
and if <background-color> in {0,1,2,3,4,5,6,8}, then we have some dark background.
There are many other complex heuristics we could do here, which work in some cases but not in others.
See e.g. `here <https://stackoverflow.com/questions/2507337/terminals-background-color>`__.
But instead of adding more heuristics, we think that explicitly setting COLORFGBG would be the best thing,
in case it's not like you want it.
:rtype: bool|None
"""
if os.environ.get("COLORFGBG", None):
parts = os.environ["COLORFGBG"].split(";")
try:
last_number = int(parts[-1])
if 0 <= last_number <= 6 or last_number == 8:
return True
else:
return False
except ValueError: # not an integer?
pass
return None | def function[is_dark_terminal_background, parameter[cls]]:
constant[
:return: Whether we have a dark Terminal background color, or None if unknown.
We currently just check the env var COLORFGBG,
which some terminals define like "<foreground-color>:<background-color>",
and if <background-color> in {0,1,2,3,4,5,6,8}, then we have some dark background.
There are many other complex heuristics we could do here, which work in some cases but not in others.
See e.g. `here <https://stackoverflow.com/questions/2507337/terminals-background-color>`__.
But instead of adding more heuristics, we think that explicitly setting COLORFGBG would be the best thing,
in case it's not like you want it.
:rtype: bool|None
]
if call[name[os].environ.get, parameter[constant[COLORFGBG], constant[None]]] begin[:]
variable[parts] assign[=] call[call[name[os].environ][constant[COLORFGBG]].split, parameter[constant[;]]]
<ast.Try object at 0x7da1b23938e0>
return[constant[None]] | keyword[def] identifier[is_dark_terminal_background] ( identifier[cls] ):
literal[string]
keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] ):
identifier[parts] = identifier[os] . identifier[environ] [ literal[string] ]. identifier[split] ( literal[string] )
keyword[try] :
identifier[last_number] = identifier[int] ( identifier[parts] [- literal[int] ])
keyword[if] literal[int] <= identifier[last_number] <= literal[int] keyword[or] identifier[last_number] == literal[int] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] keyword[None] | def is_dark_terminal_background(cls):
"""
:return: Whether we have a dark Terminal background color, or None if unknown.
We currently just check the env var COLORFGBG,
which some terminals define like "<foreground-color>:<background-color>",
and if <background-color> in {0,1,2,3,4,5,6,8}, then we have some dark background.
There are many other complex heuristics we could do here, which work in some cases but not in others.
See e.g. `here <https://stackoverflow.com/questions/2507337/terminals-background-color>`__.
But instead of adding more heuristics, we think that explicitly setting COLORFGBG would be the best thing,
in case it's not like you want it.
:rtype: bool|None
"""
if os.environ.get('COLORFGBG', None):
parts = os.environ['COLORFGBG'].split(';')
try:
last_number = int(parts[-1])
if 0 <= last_number <= 6 or last_number == 8:
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['try'], data=[]]
except ValueError: # not an integer?
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return None |
def write_batch_json(self, content):
"""Write batch json data to a file."""
timestamp = str(time.time()).replace('.', '')
batch_json_file = os.path.join(
self.tcex.args.tc_temp_path, 'batch-{}.json'.format(timestamp)
)
with open(batch_json_file, 'w') as fh:
json.dump(content, fh, indent=2) | def function[write_batch_json, parameter[self, content]]:
constant[Write batch json data to a file.]
variable[timestamp] assign[=] call[call[name[str], parameter[call[name[time].time, parameter[]]]].replace, parameter[constant[.], constant[]]]
variable[batch_json_file] assign[=] call[name[os].path.join, parameter[name[self].tcex.args.tc_temp_path, call[constant[batch-{}.json].format, parameter[name[timestamp]]]]]
with call[name[open], parameter[name[batch_json_file], constant[w]]] begin[:]
call[name[json].dump, parameter[name[content], name[fh]]] | keyword[def] identifier[write_batch_json] ( identifier[self] , identifier[content] ):
literal[string]
identifier[timestamp] = identifier[str] ( identifier[time] . identifier[time] ()). identifier[replace] ( literal[string] , literal[string] )
identifier[batch_json_file] = identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[tcex] . identifier[args] . identifier[tc_temp_path] , literal[string] . identifier[format] ( identifier[timestamp] )
)
keyword[with] identifier[open] ( identifier[batch_json_file] , literal[string] ) keyword[as] identifier[fh] :
identifier[json] . identifier[dump] ( identifier[content] , identifier[fh] , identifier[indent] = literal[int] ) | def write_batch_json(self, content):
"""Write batch json data to a file."""
timestamp = str(time.time()).replace('.', '')
batch_json_file = os.path.join(self.tcex.args.tc_temp_path, 'batch-{}.json'.format(timestamp))
with open(batch_json_file, 'w') as fh:
json.dump(content, fh, indent=2) # depends on [control=['with'], data=['fh']] |
def get_domains(self):
"""
Returns domains affected by operation.
@rtype: list
"""
if self.domains is None:
self.domains = list(
set(self.source.get_domains() + self.target.get_domains()))
return self.domains | def function[get_domains, parameter[self]]:
constant[
Returns domains affected by operation.
@rtype: list
]
if compare[name[self].domains is constant[None]] begin[:]
name[self].domains assign[=] call[name[list], parameter[call[name[set], parameter[binary_operation[call[name[self].source.get_domains, parameter[]] + call[name[self].target.get_domains, parameter[]]]]]]]
return[name[self].domains] | keyword[def] identifier[get_domains] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[domains] keyword[is] keyword[None] :
identifier[self] . identifier[domains] = identifier[list] (
identifier[set] ( identifier[self] . identifier[source] . identifier[get_domains] ()+ identifier[self] . identifier[target] . identifier[get_domains] ()))
keyword[return] identifier[self] . identifier[domains] | def get_domains(self):
"""
Returns domains affected by operation.
@rtype: list
"""
if self.domains is None:
self.domains = list(set(self.source.get_domains() + self.target.get_domains())) # depends on [control=['if'], data=[]]
return self.domains |
def transform_to_mods_mono(marc_xml, uuid, url):
"""
Convert `marc_xml` to MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings.
"""
marc_xml = _read_content_or_path(marc_xml)
transformed = xslt_transformation(
marc_xml,
_absolute_template_path("MARC21slim2MODS3-4-NDK.xsl")
)
return _apply_postprocessing(
marc_xml=marc_xml,
xml=transformed,
func=mods_postprocessor.postprocess_monograph,
uuid=uuid,
url=url,
) | def function[transform_to_mods_mono, parameter[marc_xml, uuid, url]]:
constant[
Convert `marc_xml` to MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings.
]
variable[marc_xml] assign[=] call[name[_read_content_or_path], parameter[name[marc_xml]]]
variable[transformed] assign[=] call[name[xslt_transformation], parameter[name[marc_xml], call[name[_absolute_template_path], parameter[constant[MARC21slim2MODS3-4-NDK.xsl]]]]]
return[call[name[_apply_postprocessing], parameter[]]] | keyword[def] identifier[transform_to_mods_mono] ( identifier[marc_xml] , identifier[uuid] , identifier[url] ):
literal[string]
identifier[marc_xml] = identifier[_read_content_or_path] ( identifier[marc_xml] )
identifier[transformed] = identifier[xslt_transformation] (
identifier[marc_xml] ,
identifier[_absolute_template_path] ( literal[string] )
)
keyword[return] identifier[_apply_postprocessing] (
identifier[marc_xml] = identifier[marc_xml] ,
identifier[xml] = identifier[transformed] ,
identifier[func] = identifier[mods_postprocessor] . identifier[postprocess_monograph] ,
identifier[uuid] = identifier[uuid] ,
identifier[url] = identifier[url] ,
) | def transform_to_mods_mono(marc_xml, uuid, url):
"""
Convert `marc_xml` to MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings.
"""
marc_xml = _read_content_or_path(marc_xml)
transformed = xslt_transformation(marc_xml, _absolute_template_path('MARC21slim2MODS3-4-NDK.xsl'))
return _apply_postprocessing(marc_xml=marc_xml, xml=transformed, func=mods_postprocessor.postprocess_monograph, uuid=uuid, url=url) |
def _nested_transactional(fn):
# type: (Callable) -> Callable
"""In a transactional method create a nested transaction."""
@wraps(fn)
def wrapped(self, *args, **kwargs):
# type: (SessionFactory) -> Any
try:
rv = fn(self, *args, **kwargs)
except _TransactionalPolicyViolationError as e:
getattr(self, _TX_HOLDER_ATTRIBUTE).rollback()
rv = e.result
return rv
return wrapped | def function[_nested_transactional, parameter[fn]]:
constant[In a transactional method create a nested transaction.]
def function[wrapped, parameter[self]]:
<ast.Try object at 0x7da207f00370>
return[name[rv]]
return[name[wrapped]] | keyword[def] identifier[_nested_transactional] ( identifier[fn] ):
literal[string]
@ identifier[wraps] ( identifier[fn] )
keyword[def] identifier[wrapped] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
identifier[rv] = identifier[fn] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[_TransactionalPolicyViolationError] keyword[as] identifier[e] :
identifier[getattr] ( identifier[self] , identifier[_TX_HOLDER_ATTRIBUTE] ). identifier[rollback] ()
identifier[rv] = identifier[e] . identifier[result]
keyword[return] identifier[rv]
keyword[return] identifier[wrapped] | def _nested_transactional(fn):
# type: (Callable) -> Callable
'In a transactional method create a nested transaction.'
@wraps(fn)
def wrapped(self, *args, **kwargs):
# type: (SessionFactory) -> Any
try:
rv = fn(self, *args, **kwargs) # depends on [control=['try'], data=[]]
except _TransactionalPolicyViolationError as e:
getattr(self, _TX_HOLDER_ATTRIBUTE).rollback()
rv = e.result # depends on [control=['except'], data=['e']]
return rv
return wrapped |
def page_menu(context, token):
"""
Return a list of child pages for the given parent, storing all
pages in a dict in the context when first called using parents as keys
for retrieval on subsequent recursive calls from the menu template.
"""
# First arg could be the menu template file name, or the parent page.
# Also allow for both to be used.
template_name = None
parent_page = None
parts = token.split_contents()[1:]
for part in parts:
part = Variable(part).resolve(context)
if isinstance(part, str):
template_name = part
elif isinstance(part, Page):
parent_page = part
if template_name is None:
try:
template_name = context["menu_template_name"]
except KeyError:
error = "No template found for page_menu in: %s" % parts
raise TemplateSyntaxError(error)
context["menu_template_name"] = template_name
if "menu_pages" not in context:
try:
user = context["request"].user
slug = context["request"].path
except KeyError:
user = None
slug = ""
num_children = lambda id: lambda: len(context["menu_pages"][id])
has_children = lambda id: lambda: num_children(id)() > 0
rel = [m.__name__.lower()
for m in Page.get_content_models()
if not m._meta.proxy]
published = Page.objects.published(for_user=user).select_related(*rel)
# Store the current page being viewed in the context. Used
# for comparisons in page.set_menu_helpers.
if "page" not in context:
try:
context.dicts[0]["_current_page"] = published.exclude(
content_model="link").get(slug=slug)
except Page.DoesNotExist:
context.dicts[0]["_current_page"] = None
elif slug:
context.dicts[0]["_current_page"] = context["page"]
# Some homepage related context flags. on_home is just a helper
# indicated we're on the homepage. has_home indicates an actual
# page object exists for the homepage, which can be used to
# determine whether or not to show a hard-coded homepage link
# in the page menu.
home = home_slug()
context.dicts[0]["on_home"] = slug == home
context.dicts[0]["has_home"] = False
# Maintain a dict of page IDs -> parent IDs for fast
# lookup in setting page.is_current_or_ascendant in
# page.set_menu_helpers.
context.dicts[0]["_parent_page_ids"] = {}
pages = defaultdict(list)
for page in published.order_by("_order"):
page.set_helpers(context)
context["_parent_page_ids"][page.id] = page.parent_id
setattr(page, "num_children", num_children(page.id))
setattr(page, "has_children", has_children(page.id))
pages[page.parent_id].append(page)
if page.slug == home:
context.dicts[0]["has_home"] = True
# Include menu_pages in all contexts, not only in the
# block being rendered.
context.dicts[0]["menu_pages"] = pages
# ``branch_level`` must be stored against each page so that the
# calculation of it is correctly applied. This looks weird but if we do
# the ``branch_level`` as a separate arg to the template tag with the
# addition performed on it, the addition occurs each time the template
# tag is called rather than once per level.
context["branch_level"] = 0
parent_page_id = None
if parent_page is not None:
context["branch_level"] = getattr(parent_page, "branch_level", 0) + 1
parent_page_id = parent_page.id
# Build the ``page_branch`` template variable, which is the list of
# pages for the current parent. Here we also assign the attributes
# to the page object that determines whether it belongs in the
# current menu template being rendered.
context["page_branch"] = context["menu_pages"].get(parent_page_id, [])
context["page_branch_in_menu"] = False
for page in context["page_branch"]:
page.in_menu = page.in_menu_template(template_name)
page.num_children_in_menu = 0
if page.in_menu:
context["page_branch_in_menu"] = True
for child in context["menu_pages"].get(page.id, []):
if child.in_menu_template(template_name):
page.num_children_in_menu += 1
page.has_children_in_menu = page.num_children_in_menu > 0
page.branch_level = context["branch_level"]
page.parent = parent_page
context["parent_page"] = page.parent
# Prior to pages having the ``in_menus`` field, pages had two
# boolean fields ``in_navigation`` and ``in_footer`` for
# controlling menu inclusion. Attributes and variables
# simulating these are maintained here for backwards
# compatibility in templates, but will be removed eventually.
page.in_navigation = page.in_menu
page.in_footer = not (not page.in_menu and "footer" in template_name)
if page.in_navigation:
context["page_branch_in_navigation"] = True
if page.in_footer:
context["page_branch_in_footer"] = True
t = get_template(template_name)
return t.render(Context(context)) | def function[page_menu, parameter[context, token]]:
constant[
Return a list of child pages for the given parent, storing all
pages in a dict in the context when first called using parents as keys
for retrieval on subsequent recursive calls from the menu template.
]
variable[template_name] assign[=] constant[None]
variable[parent_page] assign[=] constant[None]
variable[parts] assign[=] call[call[name[token].split_contents, parameter[]]][<ast.Slice object at 0x7da18f723310>]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[part] assign[=] call[call[name[Variable], parameter[name[part]]].resolve, parameter[name[context]]]
if call[name[isinstance], parameter[name[part], name[str]]] begin[:]
variable[template_name] assign[=] name[part]
if compare[name[template_name] is constant[None]] begin[:]
<ast.Try object at 0x7da20c7c9990>
call[name[context]][constant[menu_template_name]] assign[=] name[template_name]
if compare[constant[menu_pages] <ast.NotIn object at 0x7da2590d7190> name[context]] begin[:]
<ast.Try object at 0x7da20c7cbc40>
variable[num_children] assign[=] <ast.Lambda object at 0x7da20c7c8940>
variable[has_children] assign[=] <ast.Lambda object at 0x7da20c6c5b40>
variable[rel] assign[=] <ast.ListComp object at 0x7da20e960130>
variable[published] assign[=] call[call[name[Page].objects.published, parameter[]].select_related, parameter[<ast.Starred object at 0x7da18bcc9bd0>]]
if compare[constant[page] <ast.NotIn object at 0x7da2590d7190> name[context]] begin[:]
<ast.Try object at 0x7da18bcca650>
variable[home] assign[=] call[name[home_slug], parameter[]]
call[call[name[context].dicts][constant[0]]][constant[on_home]] assign[=] compare[name[slug] equal[==] name[home]]
call[call[name[context].dicts][constant[0]]][constant[has_home]] assign[=] constant[False]
call[call[name[context].dicts][constant[0]]][constant[_parent_page_ids]] assign[=] dictionary[[], []]
variable[pages] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[page]] in starred[call[name[published].order_by, parameter[constant[_order]]]] begin[:]
call[name[page].set_helpers, parameter[name[context]]]
call[call[name[context]][constant[_parent_page_ids]]][name[page].id] assign[=] name[page].parent_id
call[name[setattr], parameter[name[page], constant[num_children], call[name[num_children], parameter[name[page].id]]]]
call[name[setattr], parameter[name[page], constant[has_children], call[name[has_children], parameter[name[page].id]]]]
call[call[name[pages]][name[page].parent_id].append, parameter[name[page]]]
if compare[name[page].slug equal[==] name[home]] begin[:]
call[call[name[context].dicts][constant[0]]][constant[has_home]] assign[=] constant[True]
call[call[name[context].dicts][constant[0]]][constant[menu_pages]] assign[=] name[pages]
call[name[context]][constant[branch_level]] assign[=] constant[0]
variable[parent_page_id] assign[=] constant[None]
if compare[name[parent_page] is_not constant[None]] begin[:]
call[name[context]][constant[branch_level]] assign[=] binary_operation[call[name[getattr], parameter[name[parent_page], constant[branch_level], constant[0]]] + constant[1]]
variable[parent_page_id] assign[=] name[parent_page].id
call[name[context]][constant[page_branch]] assign[=] call[call[name[context]][constant[menu_pages]].get, parameter[name[parent_page_id], list[[]]]]
call[name[context]][constant[page_branch_in_menu]] assign[=] constant[False]
for taget[name[page]] in starred[call[name[context]][constant[page_branch]]] begin[:]
name[page].in_menu assign[=] call[name[page].in_menu_template, parameter[name[template_name]]]
name[page].num_children_in_menu assign[=] constant[0]
if name[page].in_menu begin[:]
call[name[context]][constant[page_branch_in_menu]] assign[=] constant[True]
for taget[name[child]] in starred[call[call[name[context]][constant[menu_pages]].get, parameter[name[page].id, list[[]]]]] begin[:]
if call[name[child].in_menu_template, parameter[name[template_name]]] begin[:]
<ast.AugAssign object at 0x7da1b2347940>
name[page].has_children_in_menu assign[=] compare[name[page].num_children_in_menu greater[>] constant[0]]
name[page].branch_level assign[=] call[name[context]][constant[branch_level]]
name[page].parent assign[=] name[parent_page]
call[name[context]][constant[parent_page]] assign[=] name[page].parent
name[page].in_navigation assign[=] name[page].in_menu
name[page].in_footer assign[=] <ast.UnaryOp object at 0x7da1b23476d0>
if name[page].in_navigation begin[:]
call[name[context]][constant[page_branch_in_navigation]] assign[=] constant[True]
if name[page].in_footer begin[:]
call[name[context]][constant[page_branch_in_footer]] assign[=] constant[True]
variable[t] assign[=] call[name[get_template], parameter[name[template_name]]]
return[call[name[t].render, parameter[call[name[Context], parameter[name[context]]]]]] | keyword[def] identifier[page_menu] ( identifier[context] , identifier[token] ):
literal[string]
identifier[template_name] = keyword[None]
identifier[parent_page] = keyword[None]
identifier[parts] = identifier[token] . identifier[split_contents] ()[ literal[int] :]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[part] = identifier[Variable] ( identifier[part] ). identifier[resolve] ( identifier[context] )
keyword[if] identifier[isinstance] ( identifier[part] , identifier[str] ):
identifier[template_name] = identifier[part]
keyword[elif] identifier[isinstance] ( identifier[part] , identifier[Page] ):
identifier[parent_page] = identifier[part]
keyword[if] identifier[template_name] keyword[is] keyword[None] :
keyword[try] :
identifier[template_name] = identifier[context] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[error] = literal[string] % identifier[parts]
keyword[raise] identifier[TemplateSyntaxError] ( identifier[error] )
identifier[context] [ literal[string] ]= identifier[template_name]
keyword[if] literal[string] keyword[not] keyword[in] identifier[context] :
keyword[try] :
identifier[user] = identifier[context] [ literal[string] ]. identifier[user]
identifier[slug] = identifier[context] [ literal[string] ]. identifier[path]
keyword[except] identifier[KeyError] :
identifier[user] = keyword[None]
identifier[slug] = literal[string]
identifier[num_children] = keyword[lambda] identifier[id] : keyword[lambda] : identifier[len] ( identifier[context] [ literal[string] ][ identifier[id] ])
identifier[has_children] = keyword[lambda] identifier[id] : keyword[lambda] : identifier[num_children] ( identifier[id] )()> literal[int]
identifier[rel] =[ identifier[m] . identifier[__name__] . identifier[lower] ()
keyword[for] identifier[m] keyword[in] identifier[Page] . identifier[get_content_models] ()
keyword[if] keyword[not] identifier[m] . identifier[_meta] . identifier[proxy] ]
identifier[published] = identifier[Page] . identifier[objects] . identifier[published] ( identifier[for_user] = identifier[user] ). identifier[select_related] (* identifier[rel] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[context] :
keyword[try] :
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]= identifier[published] . identifier[exclude] (
identifier[content_model] = literal[string] ). identifier[get] ( identifier[slug] = identifier[slug] )
keyword[except] identifier[Page] . identifier[DoesNotExist] :
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]= keyword[None]
keyword[elif] identifier[slug] :
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]= identifier[context] [ literal[string] ]
identifier[home] = identifier[home_slug] ()
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]= identifier[slug] == identifier[home]
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]= keyword[False]
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]={}
identifier[pages] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[page] keyword[in] identifier[published] . identifier[order_by] ( literal[string] ):
identifier[page] . identifier[set_helpers] ( identifier[context] )
identifier[context] [ literal[string] ][ identifier[page] . identifier[id] ]= identifier[page] . identifier[parent_id]
identifier[setattr] ( identifier[page] , literal[string] , identifier[num_children] ( identifier[page] . identifier[id] ))
identifier[setattr] ( identifier[page] , literal[string] , identifier[has_children] ( identifier[page] . identifier[id] ))
identifier[pages] [ identifier[page] . identifier[parent_id] ]. identifier[append] ( identifier[page] )
keyword[if] identifier[page] . identifier[slug] == identifier[home] :
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]= keyword[True]
identifier[context] . identifier[dicts] [ literal[int] ][ literal[string] ]= identifier[pages]
identifier[context] [ literal[string] ]= literal[int]
identifier[parent_page_id] = keyword[None]
keyword[if] identifier[parent_page] keyword[is] keyword[not] keyword[None] :
identifier[context] [ literal[string] ]= identifier[getattr] ( identifier[parent_page] , literal[string] , literal[int] )+ literal[int]
identifier[parent_page_id] = identifier[parent_page] . identifier[id]
identifier[context] [ literal[string] ]= identifier[context] [ literal[string] ]. identifier[get] ( identifier[parent_page_id] ,[])
identifier[context] [ literal[string] ]= keyword[False]
keyword[for] identifier[page] keyword[in] identifier[context] [ literal[string] ]:
identifier[page] . identifier[in_menu] = identifier[page] . identifier[in_menu_template] ( identifier[template_name] )
identifier[page] . identifier[num_children_in_menu] = literal[int]
keyword[if] identifier[page] . identifier[in_menu] :
identifier[context] [ literal[string] ]= keyword[True]
keyword[for] identifier[child] keyword[in] identifier[context] [ literal[string] ]. identifier[get] ( identifier[page] . identifier[id] ,[]):
keyword[if] identifier[child] . identifier[in_menu_template] ( identifier[template_name] ):
identifier[page] . identifier[num_children_in_menu] += literal[int]
identifier[page] . identifier[has_children_in_menu] = identifier[page] . identifier[num_children_in_menu] > literal[int]
identifier[page] . identifier[branch_level] = identifier[context] [ literal[string] ]
identifier[page] . identifier[parent] = identifier[parent_page]
identifier[context] [ literal[string] ]= identifier[page] . identifier[parent]
identifier[page] . identifier[in_navigation] = identifier[page] . identifier[in_menu]
identifier[page] . identifier[in_footer] = keyword[not] ( keyword[not] identifier[page] . identifier[in_menu] keyword[and] literal[string] keyword[in] identifier[template_name] )
keyword[if] identifier[page] . identifier[in_navigation] :
identifier[context] [ literal[string] ]= keyword[True]
keyword[if] identifier[page] . identifier[in_footer] :
identifier[context] [ literal[string] ]= keyword[True]
identifier[t] = identifier[get_template] ( identifier[template_name] )
keyword[return] identifier[t] . identifier[render] ( identifier[Context] ( identifier[context] )) | def page_menu(context, token):
"""
Return a list of child pages for the given parent, storing all
pages in a dict in the context when first called using parents as keys
for retrieval on subsequent recursive calls from the menu template.
"""
# First arg could be the menu template file name, or the parent page.
# Also allow for both to be used.
template_name = None
parent_page = None
parts = token.split_contents()[1:]
for part in parts:
part = Variable(part).resolve(context)
if isinstance(part, str):
template_name = part # depends on [control=['if'], data=[]]
elif isinstance(part, Page):
parent_page = part # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']]
if template_name is None:
try:
template_name = context['menu_template_name'] # depends on [control=['try'], data=[]]
except KeyError:
error = 'No template found for page_menu in: %s' % parts
raise TemplateSyntaxError(error) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['template_name']]
context['menu_template_name'] = template_name
if 'menu_pages' not in context:
try:
user = context['request'].user
slug = context['request'].path # depends on [control=['try'], data=[]]
except KeyError:
user = None
slug = '' # depends on [control=['except'], data=[]]
num_children = lambda id: lambda : len(context['menu_pages'][id])
has_children = lambda id: lambda : num_children(id)() > 0
rel = [m.__name__.lower() for m in Page.get_content_models() if not m._meta.proxy]
published = Page.objects.published(for_user=user).select_related(*rel)
# Store the current page being viewed in the context. Used
# for comparisons in page.set_menu_helpers.
if 'page' not in context:
try:
context.dicts[0]['_current_page'] = published.exclude(content_model='link').get(slug=slug) # depends on [control=['try'], data=[]]
except Page.DoesNotExist:
context.dicts[0]['_current_page'] = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['context']]
elif slug:
context.dicts[0]['_current_page'] = context['page'] # depends on [control=['if'], data=[]]
# Some homepage related context flags. on_home is just a helper
# indicated we're on the homepage. has_home indicates an actual
# page object exists for the homepage, which can be used to
# determine whether or not to show a hard-coded homepage link
# in the page menu.
home = home_slug()
context.dicts[0]['on_home'] = slug == home
context.dicts[0]['has_home'] = False
# Maintain a dict of page IDs -> parent IDs for fast
# lookup in setting page.is_current_or_ascendant in
# page.set_menu_helpers.
context.dicts[0]['_parent_page_ids'] = {}
pages = defaultdict(list)
for page in published.order_by('_order'):
page.set_helpers(context)
context['_parent_page_ids'][page.id] = page.parent_id
setattr(page, 'num_children', num_children(page.id))
setattr(page, 'has_children', has_children(page.id))
pages[page.parent_id].append(page)
if page.slug == home:
context.dicts[0]['has_home'] = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['page']]
# Include menu_pages in all contexts, not only in the
# block being rendered.
context.dicts[0]['menu_pages'] = pages # depends on [control=['if'], data=['context']]
# ``branch_level`` must be stored against each page so that the
# calculation of it is correctly applied. This looks weird but if we do
# the ``branch_level`` as a separate arg to the template tag with the
# addition performed on it, the addition occurs each time the template
# tag is called rather than once per level.
context['branch_level'] = 0
parent_page_id = None
if parent_page is not None:
context['branch_level'] = getattr(parent_page, 'branch_level', 0) + 1
parent_page_id = parent_page.id # depends on [control=['if'], data=['parent_page']]
# Build the ``page_branch`` template variable, which is the list of
# pages for the current parent. Here we also assign the attributes
# to the page object that determines whether it belongs in the
# current menu template being rendered.
context['page_branch'] = context['menu_pages'].get(parent_page_id, [])
context['page_branch_in_menu'] = False
for page in context['page_branch']:
page.in_menu = page.in_menu_template(template_name)
page.num_children_in_menu = 0
if page.in_menu:
context['page_branch_in_menu'] = True # depends on [control=['if'], data=[]]
for child in context['menu_pages'].get(page.id, []):
if child.in_menu_template(template_name):
page.num_children_in_menu += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
page.has_children_in_menu = page.num_children_in_menu > 0
page.branch_level = context['branch_level']
page.parent = parent_page
context['parent_page'] = page.parent
# Prior to pages having the ``in_menus`` field, pages had two
# boolean fields ``in_navigation`` and ``in_footer`` for
# controlling menu inclusion. Attributes and variables
# simulating these are maintained here for backwards
# compatibility in templates, but will be removed eventually.
page.in_navigation = page.in_menu
page.in_footer = not (not page.in_menu and 'footer' in template_name)
if page.in_navigation:
context['page_branch_in_navigation'] = True # depends on [control=['if'], data=[]]
if page.in_footer:
context['page_branch_in_footer'] = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['page']]
t = get_template(template_name)
return t.render(Context(context)) |
def kmean_clustering(network, n_clusters=10, load_cluster=False,
line_length_factor=1.25,
remove_stubs=False, use_reduced_coordinates=False,
bus_weight_tocsv=None, bus_weight_fromcsv=None,
n_init=10, max_iter=300, tol=1e-4,
n_jobs=1):
""" Main function of the k-mean clustering approach. Maps an original
network to a new one with adjustable number of nodes and new coordinates.
Parameters
----------
network : :class:`pypsa.Network
Container for all network components.
n_clusters : int
Desired number of clusters.
load_cluster : boolean
Loads cluster coordinates from a former calculation.
line_length_factor : float
Factor to multiply the crow-flies distance between new buses in order
to get new line lengths.
remove_stubs: boolean
Removes stubs and stubby trees (i.e. sequentially reducing dead-ends).
use_reduced_coordinates: boolean
If True, do not average cluster coordinates, but take from busmap.
bus_weight_tocsv : str
Creates a bus weighting based on conventional generation and load
and save it to a csv file.
bus_weight_fromcsv : str
Loads a bus weighting from a csv file to apply it to the clustering
algorithm.
Returns
-------
network : pypsa.Network object
Container for all network components.
"""
def weighting_for_scenario(x, save=None):
"""
"""
b_i = x.index
g = normed(gen.reindex(b_i, fill_value=0))
l = normed(load.reindex(b_i, fill_value=0))
w = g + l
weight = ((w * (100000. / w.max())).astype(int)
).reindex(network.buses.index, fill_value=1)
if save:
weight.to_csv(save)
return weight
def normed(x):
return (x / x.sum()).fillna(0.)
print('start k-mean clustering')
# prepare k-mean
# k-means clustering (first try)
network.generators.control = "PV"
network.storage_units.control[network.storage_units.carrier == \
'extendable_storage'] = "PV"
network.buses['v_nom'] = 380.
# problem our lines have no v_nom. this is implicitly defined by the
# connected buses:
network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom)
# adjust the x of the lines which are not 380.
lines_v_nom_b = network.lines.v_nom != 380
network.lines.loc[lines_v_nom_b, 'x'] *= \
(380. / network.lines.loc[lines_v_nom_b, 'v_nom'])**2
network.lines.loc[lines_v_nom_b, 'v_nom'] = 380.
trafo_index = network.transformers.index
transformer_voltages = \
pd.concat([network.transformers.bus0.map(network.buses.v_nom),
network.transformers.bus1.map(network.buses.v_nom)], axis=1)
network.import_components_from_dataframe(
network.transformers.loc[:, [
'bus0', 'bus1', 'x', 's_nom', 'capital_cost', 'sub_network', 's_nom_total']]
.assign(x=network.transformers.x * (380. /
transformer_voltages.max(axis=1))**2, length = 1)
.set_index('T' + trafo_index),
'Line')
network.transformers.drop(trafo_index, inplace=True)
for attr in network.transformers_t:
network.transformers_t[attr] = network.transformers_t[attr]\
.reindex(columns=[])
# remove stubs
if remove_stubs:
network.determine_network_topology()
busmap = busmap_by_stubs(network)
network.generators['weight'] = network.generators['p_nom']
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
# reset coordinates to the new reduced guys, rather than taking an
# average (copied from pypsa.networkclustering)
if use_reduced_coordinates:
# TODO : FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS,
# i.e. network is changed in place!!
network.buses.loc[busmap.index, ['x', 'y']
] = network.buses.loc[busmap, ['x', 'y']].values
clustering = get_clustering_from_busmap(
network,
busmap,
aggregate_generators_weighted=True,
aggregate_one_ports=aggregate_one_ports,
line_length_factor=line_length_factor)
network = clustering.network
# define weighting based on conventional 'old' generator spatial
# distribution
non_conv_types = {
'biomass',
'wind_onshore',
'wind_offshore',
'solar',
'geothermal',
'load shedding',
'extendable_storage'}
# Attention: network.generators.carrier.unique()
gen = (network.generators.loc[(network.generators.carrier
.isin(non_conv_types) == False)]
.groupby('bus').p_nom.sum()
.reindex(network.buses.index, fill_value=0.) +
network.storage_units
.loc[(network.storage_units.carrier
.isin(non_conv_types) == False)]
.groupby('bus').p_nom.sum()
.reindex(network.buses.index, fill_value=0.))
load = network.loads_t.p_set.mean().groupby(network.loads.bus).sum()
# k-mean clustering
# busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(np.repeat(1,
# len(network.buses)), index=network.buses.index) , n_clusters= 10)
# State whether to create a bus weighting and save it, create or not save
# it, or use a bus weighting from a csv file
if bus_weight_tocsv is not None:
weight = weighting_for_scenario(x=network.buses, save=bus_weight_tocsv)
elif bus_weight_fromcsv is not None:
weight = pd.Series.from_csv(bus_weight_fromcsv)
weight.index = weight.index.astype(str)
else:
weight = weighting_for_scenario(x=network.buses, save=False)
busmap = busmap_by_kmeans(
network,
bus_weightings=pd.Series(weight),
n_clusters=n_clusters,
load_cluster=load_cluster,
n_init=n_init,
max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
# ToDo change function in order to use bus_strategies or similar
network.generators['weight'] = network.generators['p_nom']
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
clustering = get_clustering_from_busmap(
network,
busmap,
aggregate_generators_weighted=True,
aggregate_one_ports=aggregate_one_ports)
return clustering | def function[kmean_clustering, parameter[network, n_clusters, load_cluster, line_length_factor, remove_stubs, use_reduced_coordinates, bus_weight_tocsv, bus_weight_fromcsv, n_init, max_iter, tol, n_jobs]]:
constant[ Main function of the k-mean clustering approach. Maps an original
network to a new one with adjustable number of nodes and new coordinates.
Parameters
----------
network : :class:`pypsa.Network
Container for all network components.
n_clusters : int
Desired number of clusters.
load_cluster : boolean
Loads cluster coordinates from a former calculation.
line_length_factor : float
Factor to multiply the crow-flies distance between new buses in order
to get new line lengths.
remove_stubs: boolean
Removes stubs and stubby trees (i.e. sequentially reducing dead-ends).
use_reduced_coordinates: boolean
If True, do not average cluster coordinates, but take from busmap.
bus_weight_tocsv : str
Creates a bus weighting based on conventional generation and load
and save it to a csv file.
bus_weight_fromcsv : str
Loads a bus weighting from a csv file to apply it to the clustering
algorithm.
Returns
-------
network : pypsa.Network object
Container for all network components.
]
def function[weighting_for_scenario, parameter[x, save]]:
constant[
]
variable[b_i] assign[=] name[x].index
variable[g] assign[=] call[name[normed], parameter[call[name[gen].reindex, parameter[name[b_i]]]]]
variable[l] assign[=] call[name[normed], parameter[call[name[load].reindex, parameter[name[b_i]]]]]
variable[w] assign[=] binary_operation[name[g] + name[l]]
variable[weight] assign[=] call[call[binary_operation[name[w] * binary_operation[constant[100000.0] / call[name[w].max, parameter[]]]].astype, parameter[name[int]]].reindex, parameter[name[network].buses.index]]
if name[save] begin[:]
call[name[weight].to_csv, parameter[name[save]]]
return[name[weight]]
def function[normed, parameter[x]]:
return[call[binary_operation[name[x] / call[name[x].sum, parameter[]]].fillna, parameter[constant[0.0]]]]
call[name[print], parameter[constant[start k-mean clustering]]]
name[network].generators.control assign[=] constant[PV]
call[name[network].storage_units.control][compare[name[network].storage_units.carrier equal[==] constant[extendable_storage]]] assign[=] constant[PV]
call[name[network].buses][constant[v_nom]] assign[=] constant[380.0]
call[name[network].lines][constant[v_nom]] assign[=] call[name[network].lines.bus0.map, parameter[name[network].buses.v_nom]]
variable[lines_v_nom_b] assign[=] compare[name[network].lines.v_nom not_equal[!=] constant[380]]
<ast.AugAssign object at 0x7da1b1b9c9a0>
call[name[network].lines.loc][tuple[[<ast.Name object at 0x7da1b1b9cac0>, <ast.Constant object at 0x7da1b1b9dbd0>]]] assign[=] constant[380.0]
variable[trafo_index] assign[=] name[network].transformers.index
variable[transformer_voltages] assign[=] call[name[pd].concat, parameter[list[[<ast.Call object at 0x7da1b1c7f340>, <ast.Call object at 0x7da1b1c7d9c0>]]]]
call[name[network].import_components_from_dataframe, parameter[call[call[call[name[network].transformers.loc][tuple[[<ast.Slice object at 0x7da1b1c7c910>, <ast.List object at 0x7da1b1c7f2e0>]]].assign, parameter[]].set_index, parameter[binary_operation[constant[T] + name[trafo_index]]]], constant[Line]]]
call[name[network].transformers.drop, parameter[name[trafo_index]]]
for taget[name[attr]] in starred[name[network].transformers_t] begin[:]
call[name[network].transformers_t][name[attr]] assign[=] call[call[name[network].transformers_t][name[attr]].reindex, parameter[]]
if name[remove_stubs] begin[:]
call[name[network].determine_network_topology, parameter[]]
variable[busmap] assign[=] call[name[busmap_by_stubs], parameter[name[network]]]
call[name[network].generators][constant[weight]] assign[=] call[name[network].generators][constant[p_nom]]
variable[aggregate_one_ports] assign[=] call[name[components].one_port_components.copy, parameter[]]
call[name[aggregate_one_ports].discard, parameter[constant[Generator]]]
if name[use_reduced_coordinates] begin[:]
call[name[network].buses.loc][tuple[[<ast.Attribute object at 0x7da1b1c7fd60>, <ast.List object at 0x7da1b1c7fc70>]]] assign[=] call[name[network].buses.loc][tuple[[<ast.Name object at 0x7da1b1c7ef50>, <ast.List object at 0x7da1b1c7e4a0>]]].values
variable[clustering] assign[=] call[name[get_clustering_from_busmap], parameter[name[network], name[busmap]]]
variable[network] assign[=] name[clustering].network
variable[non_conv_types] assign[=] <ast.Set object at 0x7da1b1a8e320>
variable[gen] assign[=] binary_operation[call[call[call[call[name[network].generators.loc][compare[call[name[network].generators.carrier.isin, parameter[name[non_conv_types]]] equal[==] constant[False]]].groupby, parameter[constant[bus]]].p_nom.sum, parameter[]].reindex, parameter[name[network].buses.index]] + call[call[call[call[name[network].storage_units.loc][compare[call[name[network].storage_units.carrier.isin, parameter[name[non_conv_types]]] equal[==] constant[False]]].groupby, parameter[constant[bus]]].p_nom.sum, parameter[]].reindex, parameter[name[network].buses.index]]]
variable[load] assign[=] call[call[call[name[network].loads_t.p_set.mean, parameter[]].groupby, parameter[name[network].loads.bus]].sum, parameter[]]
if compare[name[bus_weight_tocsv] is_not constant[None]] begin[:]
variable[weight] assign[=] call[name[weighting_for_scenario], parameter[]]
variable[busmap] assign[=] call[name[busmap_by_kmeans], parameter[name[network]]]
call[name[network].generators][constant[weight]] assign[=] call[name[network].generators][constant[p_nom]]
variable[aggregate_one_ports] assign[=] call[name[components].one_port_components.copy, parameter[]]
call[name[aggregate_one_ports].discard, parameter[constant[Generator]]]
variable[clustering] assign[=] call[name[get_clustering_from_busmap], parameter[name[network], name[busmap]]]
return[name[clustering]] | keyword[def] identifier[kmean_clustering] ( identifier[network] , identifier[n_clusters] = literal[int] , identifier[load_cluster] = keyword[False] ,
identifier[line_length_factor] = literal[int] ,
identifier[remove_stubs] = keyword[False] , identifier[use_reduced_coordinates] = keyword[False] ,
identifier[bus_weight_tocsv] = keyword[None] , identifier[bus_weight_fromcsv] = keyword[None] ,
identifier[n_init] = literal[int] , identifier[max_iter] = literal[int] , identifier[tol] = literal[int] ,
identifier[n_jobs] = literal[int] ):
literal[string]
keyword[def] identifier[weighting_for_scenario] ( identifier[x] , identifier[save] = keyword[None] ):
literal[string]
identifier[b_i] = identifier[x] . identifier[index]
identifier[g] = identifier[normed] ( identifier[gen] . identifier[reindex] ( identifier[b_i] , identifier[fill_value] = literal[int] ))
identifier[l] = identifier[normed] ( identifier[load] . identifier[reindex] ( identifier[b_i] , identifier[fill_value] = literal[int] ))
identifier[w] = identifier[g] + identifier[l]
identifier[weight] =(( identifier[w] *( literal[int] / identifier[w] . identifier[max] ())). identifier[astype] ( identifier[int] )
). identifier[reindex] ( identifier[network] . identifier[buses] . identifier[index] , identifier[fill_value] = literal[int] )
keyword[if] identifier[save] :
identifier[weight] . identifier[to_csv] ( identifier[save] )
keyword[return] identifier[weight]
keyword[def] identifier[normed] ( identifier[x] ):
keyword[return] ( identifier[x] / identifier[x] . identifier[sum] ()). identifier[fillna] ( literal[int] )
identifier[print] ( literal[string] )
identifier[network] . identifier[generators] . identifier[control] = literal[string]
identifier[network] . identifier[storage_units] . identifier[control] [ identifier[network] . identifier[storage_units] . identifier[carrier] == literal[string] ]= literal[string]
identifier[network] . identifier[buses] [ literal[string] ]= literal[int]
identifier[network] . identifier[lines] [ literal[string] ]= identifier[network] . identifier[lines] . identifier[bus0] . identifier[map] ( identifier[network] . identifier[buses] . identifier[v_nom] )
identifier[lines_v_nom_b] = identifier[network] . identifier[lines] . identifier[v_nom] != literal[int]
identifier[network] . identifier[lines] . identifier[loc] [ identifier[lines_v_nom_b] , literal[string] ]*=( literal[int] / identifier[network] . identifier[lines] . identifier[loc] [ identifier[lines_v_nom_b] , literal[string] ])** literal[int]
identifier[network] . identifier[lines] . identifier[loc] [ identifier[lines_v_nom_b] , literal[string] ]= literal[int]
identifier[trafo_index] = identifier[network] . identifier[transformers] . identifier[index]
identifier[transformer_voltages] = identifier[pd] . identifier[concat] ([ identifier[network] . identifier[transformers] . identifier[bus0] . identifier[map] ( identifier[network] . identifier[buses] . identifier[v_nom] ),
identifier[network] . identifier[transformers] . identifier[bus1] . identifier[map] ( identifier[network] . identifier[buses] . identifier[v_nom] )], identifier[axis] = literal[int] )
identifier[network] . identifier[import_components_from_dataframe] (
identifier[network] . identifier[transformers] . identifier[loc] [:,[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]]
. identifier[assign] ( identifier[x] = identifier[network] . identifier[transformers] . identifier[x] *( literal[int] /
identifier[transformer_voltages] . identifier[max] ( identifier[axis] = literal[int] ))** literal[int] , identifier[length] = literal[int] )
. identifier[set_index] ( literal[string] + identifier[trafo_index] ),
literal[string] )
identifier[network] . identifier[transformers] . identifier[drop] ( identifier[trafo_index] , identifier[inplace] = keyword[True] )
keyword[for] identifier[attr] keyword[in] identifier[network] . identifier[transformers_t] :
identifier[network] . identifier[transformers_t] [ identifier[attr] ]= identifier[network] . identifier[transformers_t] [ identifier[attr] ]. identifier[reindex] ( identifier[columns] =[])
keyword[if] identifier[remove_stubs] :
identifier[network] . identifier[determine_network_topology] ()
identifier[busmap] = identifier[busmap_by_stubs] ( identifier[network] )
identifier[network] . identifier[generators] [ literal[string] ]= identifier[network] . identifier[generators] [ literal[string] ]
identifier[aggregate_one_ports] = identifier[components] . identifier[one_port_components] . identifier[copy] ()
identifier[aggregate_one_ports] . identifier[discard] ( literal[string] )
keyword[if] identifier[use_reduced_coordinates] :
identifier[network] . identifier[buses] . identifier[loc] [ identifier[busmap] . identifier[index] ,[ literal[string] , literal[string] ]
]= identifier[network] . identifier[buses] . identifier[loc] [ identifier[busmap] ,[ literal[string] , literal[string] ]]. identifier[values]
identifier[clustering] = identifier[get_clustering_from_busmap] (
identifier[network] ,
identifier[busmap] ,
identifier[aggregate_generators_weighted] = keyword[True] ,
identifier[aggregate_one_ports] = identifier[aggregate_one_ports] ,
identifier[line_length_factor] = identifier[line_length_factor] )
identifier[network] = identifier[clustering] . identifier[network]
identifier[non_conv_types] ={
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] }
identifier[gen] =( identifier[network] . identifier[generators] . identifier[loc] [( identifier[network] . identifier[generators] . identifier[carrier]
. identifier[isin] ( identifier[non_conv_types] )== keyword[False] )]
. identifier[groupby] ( literal[string] ). identifier[p_nom] . identifier[sum] ()
. identifier[reindex] ( identifier[network] . identifier[buses] . identifier[index] , identifier[fill_value] = literal[int] )+
identifier[network] . identifier[storage_units]
. identifier[loc] [( identifier[network] . identifier[storage_units] . identifier[carrier]
. identifier[isin] ( identifier[non_conv_types] )== keyword[False] )]
. identifier[groupby] ( literal[string] ). identifier[p_nom] . identifier[sum] ()
. identifier[reindex] ( identifier[network] . identifier[buses] . identifier[index] , identifier[fill_value] = literal[int] ))
identifier[load] = identifier[network] . identifier[loads_t] . identifier[p_set] . identifier[mean] (). identifier[groupby] ( identifier[network] . identifier[loads] . identifier[bus] ). identifier[sum] ()
keyword[if] identifier[bus_weight_tocsv] keyword[is] keyword[not] keyword[None] :
identifier[weight] = identifier[weighting_for_scenario] ( identifier[x] = identifier[network] . identifier[buses] , identifier[save] = identifier[bus_weight_tocsv] )
keyword[elif] identifier[bus_weight_fromcsv] keyword[is] keyword[not] keyword[None] :
identifier[weight] = identifier[pd] . identifier[Series] . identifier[from_csv] ( identifier[bus_weight_fromcsv] )
identifier[weight] . identifier[index] = identifier[weight] . identifier[index] . identifier[astype] ( identifier[str] )
keyword[else] :
identifier[weight] = identifier[weighting_for_scenario] ( identifier[x] = identifier[network] . identifier[buses] , identifier[save] = keyword[False] )
identifier[busmap] = identifier[busmap_by_kmeans] (
identifier[network] ,
identifier[bus_weightings] = identifier[pd] . identifier[Series] ( identifier[weight] ),
identifier[n_clusters] = identifier[n_clusters] ,
identifier[load_cluster] = identifier[load_cluster] ,
identifier[n_init] = identifier[n_init] ,
identifier[max_iter] = identifier[max_iter] ,
identifier[tol] = identifier[tol] ,
identifier[n_jobs] = identifier[n_jobs] )
identifier[network] . identifier[generators] [ literal[string] ]= identifier[network] . identifier[generators] [ literal[string] ]
identifier[aggregate_one_ports] = identifier[components] . identifier[one_port_components] . identifier[copy] ()
identifier[aggregate_one_ports] . identifier[discard] ( literal[string] )
identifier[clustering] = identifier[get_clustering_from_busmap] (
identifier[network] ,
identifier[busmap] ,
identifier[aggregate_generators_weighted] = keyword[True] ,
identifier[aggregate_one_ports] = identifier[aggregate_one_ports] )
keyword[return] identifier[clustering] | def kmean_clustering(network, n_clusters=10, load_cluster=False, line_length_factor=1.25, remove_stubs=False, use_reduced_coordinates=False, bus_weight_tocsv=None, bus_weight_fromcsv=None, n_init=10, max_iter=300, tol=0.0001, n_jobs=1):
""" Main function of the k-mean clustering approach. Maps an original
network to a new one with adjustable number of nodes and new coordinates.
Parameters
----------
network : :class:`pypsa.Network
Container for all network components.
n_clusters : int
Desired number of clusters.
load_cluster : boolean
Loads cluster coordinates from a former calculation.
line_length_factor : float
Factor to multiply the crow-flies distance between new buses in order
to get new line lengths.
remove_stubs: boolean
Removes stubs and stubby trees (i.e. sequentially reducing dead-ends).
use_reduced_coordinates: boolean
If True, do not average cluster coordinates, but take from busmap.
bus_weight_tocsv : str
Creates a bus weighting based on conventional generation and load
and save it to a csv file.
bus_weight_fromcsv : str
Loads a bus weighting from a csv file to apply it to the clustering
algorithm.
Returns
-------
network : pypsa.Network object
Container for all network components.
"""
def weighting_for_scenario(x, save=None):
"""
"""
b_i = x.index
g = normed(gen.reindex(b_i, fill_value=0))
l = normed(load.reindex(b_i, fill_value=0))
w = g + l
weight = (w * (100000.0 / w.max())).astype(int).reindex(network.buses.index, fill_value=1)
if save:
weight.to_csv(save) # depends on [control=['if'], data=[]]
return weight
def normed(x):
return (x / x.sum()).fillna(0.0)
print('start k-mean clustering')
# prepare k-mean
# k-means clustering (first try)
network.generators.control = 'PV'
network.storage_units.control[network.storage_units.carrier == 'extendable_storage'] = 'PV'
network.buses['v_nom'] = 380.0
# problem our lines have no v_nom. this is implicitly defined by the
# connected buses:
network.lines['v_nom'] = network.lines.bus0.map(network.buses.v_nom)
# adjust the x of the lines which are not 380.
lines_v_nom_b = network.lines.v_nom != 380
network.lines.loc[lines_v_nom_b, 'x'] *= (380.0 / network.lines.loc[lines_v_nom_b, 'v_nom']) ** 2
network.lines.loc[lines_v_nom_b, 'v_nom'] = 380.0
trafo_index = network.transformers.index
transformer_voltages = pd.concat([network.transformers.bus0.map(network.buses.v_nom), network.transformers.bus1.map(network.buses.v_nom)], axis=1)
network.import_components_from_dataframe(network.transformers.loc[:, ['bus0', 'bus1', 'x', 's_nom', 'capital_cost', 'sub_network', 's_nom_total']].assign(x=network.transformers.x * (380.0 / transformer_voltages.max(axis=1)) ** 2, length=1).set_index('T' + trafo_index), 'Line')
network.transformers.drop(trafo_index, inplace=True)
for attr in network.transformers_t:
network.transformers_t[attr] = network.transformers_t[attr].reindex(columns=[]) # depends on [control=['for'], data=['attr']]
# remove stubs
if remove_stubs:
network.determine_network_topology()
busmap = busmap_by_stubs(network)
network.generators['weight'] = network.generators['p_nom']
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
# reset coordinates to the new reduced guys, rather than taking an
# average (copied from pypsa.networkclustering)
if use_reduced_coordinates:
# TODO : FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS,
# i.e. network is changed in place!!
network.buses.loc[busmap.index, ['x', 'y']] = network.buses.loc[busmap, ['x', 'y']].values # depends on [control=['if'], data=[]]
clustering = get_clustering_from_busmap(network, busmap, aggregate_generators_weighted=True, aggregate_one_ports=aggregate_one_ports, line_length_factor=line_length_factor)
network = clustering.network # depends on [control=['if'], data=[]]
# define weighting based on conventional 'old' generator spatial
# distribution
non_conv_types = {'biomass', 'wind_onshore', 'wind_offshore', 'solar', 'geothermal', 'load shedding', 'extendable_storage'}
# Attention: network.generators.carrier.unique()
gen = network.generators.loc[network.generators.carrier.isin(non_conv_types) == False].groupby('bus').p_nom.sum().reindex(network.buses.index, fill_value=0.0) + network.storage_units.loc[network.storage_units.carrier.isin(non_conv_types) == False].groupby('bus').p_nom.sum().reindex(network.buses.index, fill_value=0.0)
load = network.loads_t.p_set.mean().groupby(network.loads.bus).sum()
# k-mean clustering
# busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(np.repeat(1,
# len(network.buses)), index=network.buses.index) , n_clusters= 10)
# State whether to create a bus weighting and save it, create or not save
# it, or use a bus weighting from a csv file
if bus_weight_tocsv is not None:
weight = weighting_for_scenario(x=network.buses, save=bus_weight_tocsv) # depends on [control=['if'], data=['bus_weight_tocsv']]
elif bus_weight_fromcsv is not None:
weight = pd.Series.from_csv(bus_weight_fromcsv)
weight.index = weight.index.astype(str) # depends on [control=['if'], data=['bus_weight_fromcsv']]
else:
weight = weighting_for_scenario(x=network.buses, save=False)
busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(weight), n_clusters=n_clusters, load_cluster=load_cluster, n_init=n_init, max_iter=max_iter, tol=tol, n_jobs=n_jobs)
# ToDo change function in order to use bus_strategies or similar
network.generators['weight'] = network.generators['p_nom']
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
clustering = get_clustering_from_busmap(network, busmap, aggregate_generators_weighted=True, aggregate_one_ports=aggregate_one_ports)
return clustering |
def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)"
% (self.table, self.col_metric,
self.col_time, self.col_value),
(data[0], data[2], data[1]))
cursor.close()
self.conn.commit()
except BaseException as e:
# Log Error
self.log.error("MySQLHandler: Failed sending data. %s.", e)
# Attempt to restablish connection
self._connect() | def function[_send, parameter[self, data]]:
constant[
Insert the data
]
variable[data] assign[=] call[call[name[data].strip, parameter[]].split, parameter[constant[ ]]]
<ast.Try object at 0x7da207f9add0> | keyword[def] identifier[_send] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[data] . identifier[strip] (). identifier[split] ( literal[string] )
keyword[try] :
identifier[cursor] = identifier[self] . identifier[conn] . identifier[cursor] ()
identifier[cursor] . identifier[execute] ( literal[string]
%( identifier[self] . identifier[table] , identifier[self] . identifier[col_metric] ,
identifier[self] . identifier[col_time] , identifier[self] . identifier[col_value] ),
( identifier[data] [ literal[int] ], identifier[data] [ literal[int] ], identifier[data] [ literal[int] ]))
identifier[cursor] . identifier[close] ()
identifier[self] . identifier[conn] . identifier[commit] ()
keyword[except] identifier[BaseException] keyword[as] identifier[e] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] , identifier[e] )
identifier[self] . identifier[_connect] () | def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
try:
cursor = self.conn.cursor()
cursor.execute('INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)' % (self.table, self.col_metric, self.col_time, self.col_value), (data[0], data[2], data[1]))
cursor.close()
self.conn.commit() # depends on [control=['try'], data=[]]
except BaseException as e:
# Log Error
self.log.error('MySQLHandler: Failed sending data. %s.', e)
# Attempt to restablish connection
self._connect() # depends on [control=['except'], data=['e']] |
def _fromJSON(cls, jsonobject):
    """Generates a new instance of :class:`maspy.core.Smi` from a decoded
    JSON object (as generated by :func:`maspy.core.Smi._reprJSON()`).

    :param jsonobject: decoded JSON object
    :returns: a new instance of :class:`Smi`
    """
    newInstance = cls(None, None)
    # Map each positional slot of the serialized representation back onto
    # the matching instance attribute.
    newInstance.id = jsonobject[0]
    newInstance.specfile = jsonobject[1]
    newInstance.attributes = jsonobject[2]
    # JSON has no tuple type, so the parameter lists were stored as lists
    # of lists and are converted back to lists of tuples here.
    newInstance.params = [tuple(entry) for entry in jsonobject[3]]
    newInstance.scanListParams = [tuple(entry) for entry in jsonobject[4]]
    newInstance.scanList = jsonobject[5]
    newInstance.precursorList = jsonobject[6]
    newInstance.productList = jsonobject[7]
    return newInstance
constant[Generates a new instance of :class:`maspy.core.Smi` from a decoded
JSON object (as generated by :func:`maspy.core.Smi._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`Smi`
]
variable[newInstance] assign[=] call[name[cls], parameter[constant[None], constant[None]]]
variable[attribDict] assign[=] dictionary[[], []]
call[name[attribDict]][constant[id]] assign[=] call[name[jsonobject]][constant[0]]
call[name[attribDict]][constant[specfile]] assign[=] call[name[jsonobject]][constant[1]]
call[name[attribDict]][constant[attributes]] assign[=] call[name[jsonobject]][constant[2]]
call[name[attribDict]][constant[params]] assign[=] <ast.ListComp object at 0x7da207f9a920>
call[name[attribDict]][constant[scanListParams]] assign[=] <ast.ListComp object at 0x7da20e9b2950>
call[name[attribDict]][constant[scanList]] assign[=] call[name[jsonobject]][constant[5]]
call[name[attribDict]][constant[precursorList]] assign[=] call[name[jsonobject]][constant[6]]
call[name[attribDict]][constant[productList]] assign[=] call[name[jsonobject]][constant[7]]
for taget[tuple[[<ast.Name object at 0x7da20e9b3cd0>, <ast.Name object at 0x7da20e9b2770>]]] in starred[call[name[viewitems], parameter[name[attribDict]]]] begin[:]
call[name[setattr], parameter[name[newInstance], name[key], name[value]]]
return[name[newInstance]] | keyword[def] identifier[_fromJSON] ( identifier[cls] , identifier[jsonobject] ):
literal[string]
identifier[newInstance] = identifier[cls] ( keyword[None] , keyword[None] )
identifier[attribDict] ={}
identifier[attribDict] [ literal[string] ]= identifier[jsonobject] [ literal[int] ]
identifier[attribDict] [ literal[string] ]= identifier[jsonobject] [ literal[int] ]
identifier[attribDict] [ literal[string] ]= identifier[jsonobject] [ literal[int] ]
identifier[attribDict] [ literal[string] ]=[ identifier[tuple] ( identifier[param] ) keyword[for] identifier[param] keyword[in] identifier[jsonobject] [ literal[int] ]]
identifier[attribDict] [ literal[string] ]=[ identifier[tuple] ( identifier[param] ) keyword[for] identifier[param] keyword[in] identifier[jsonobject] [ literal[int] ]]
identifier[attribDict] [ literal[string] ]= identifier[jsonobject] [ literal[int] ]
identifier[attribDict] [ literal[string] ]= identifier[jsonobject] [ literal[int] ]
identifier[attribDict] [ literal[string] ]= identifier[jsonobject] [ literal[int] ]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[viewitems] ( identifier[attribDict] ):
identifier[setattr] ( identifier[newInstance] , identifier[key] , identifier[value] )
keyword[return] identifier[newInstance] | def _fromJSON(cls, jsonobject):
"""Generates a new instance of :class:`maspy.core.Smi` from a decoded
JSON object (as generated by :func:`maspy.core.Smi._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`Smi`
"""
newInstance = cls(None, None)
attribDict = {}
attribDict['id'] = jsonobject[0]
attribDict['specfile'] = jsonobject[1]
attribDict['attributes'] = jsonobject[2]
attribDict['params'] = [tuple(param) for param in jsonobject[3]]
attribDict['scanListParams'] = [tuple(param) for param in jsonobject[4]]
attribDict['scanList'] = jsonobject[5]
attribDict['precursorList'] = jsonobject[6]
attribDict['productList'] = jsonobject[7]
for (key, value) in viewitems(attribDict):
setattr(newInstance, key, value) # depends on [control=['for'], data=[]]
return newInstance |
def create_callback(self):
    """
    create a callback, suitable to be passed to SenateCounter
    """
    def replay_choice(question_posed):
        # Answer the posed question by replaying the next recorded
        # (question, answer) pair from the automation data.
        logger.debug("%s: asked to choose between: %s" % (self._name, self._qstr(question_posed)))
        if self._upto == len(self._data):
            logger.error("%s: out of automation data, requested to pick between %s" % (self._name, self._qstr(question_posed)))
            raise AutomationException("out of automation data")
        question_archived, answer = self._data[self._upto]
        if question_archived != question_posed:
            # A mismatch is logged but not fatal: the recorded answer is
            # still applied to the question that was actually posed.
            logger.error("%s: automation data mismatch, expected question `%s', got question `%s'" % (self._name, self._qstr(question_archived), self._qstr(question_posed)))
        self._upto += 1
        return question_posed.index(answer)
    return replay_choice
constant[
create a callback, suitable to be passed to SenateCounter
]
def function[__callback, parameter[question_posed]]:
call[name[logger].debug, parameter[binary_operation[constant[%s: asked to choose between: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f00d030>, <ast.Call object at 0x7da18f00c880>]]]]]
if compare[name[self]._upto equal[==] call[name[len], parameter[name[self]._data]]] begin[:]
call[name[logger].error, parameter[binary_operation[constant[%s: out of automation data, requested to pick between %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f00c640>, <ast.Call object at 0x7da18f00e3e0>]]]]]
<ast.Raise object at 0x7da18f00de40>
<ast.Tuple object at 0x7da18f00e440> assign[=] call[name[self]._data][name[self]._upto]
if compare[name[question_archived] not_equal[!=] name[question_posed]] begin[:]
call[name[logger].error, parameter[binary_operation[constant[%s: automation data mismatch, expected question `%s', got question `%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f00cc10>, <ast.Call object at 0x7da18f00d3f0>, <ast.Call object at 0x7da18f00cf40>]]]]]
variable[resp] assign[=] call[name[question_posed].index, parameter[name[answer]]]
<ast.AugAssign object at 0x7da1b1d673a0>
return[name[resp]]
return[name[__callback]] | keyword[def] identifier[create_callback] ( identifier[self] ):
literal[string]
keyword[def] identifier[__callback] ( identifier[question_posed] ):
identifier[logger] . identifier[debug] ( literal[string] %( identifier[self] . identifier[_name] , identifier[self] . identifier[_qstr] ( identifier[question_posed] )))
keyword[if] identifier[self] . identifier[_upto] == identifier[len] ( identifier[self] . identifier[_data] ):
identifier[logger] . identifier[error] ( literal[string] %( identifier[self] . identifier[_name] , identifier[self] . identifier[_qstr] ( identifier[question_posed] )))
keyword[raise] identifier[AutomationException] ( literal[string] )
identifier[question_archived] , identifier[answer] = identifier[self] . identifier[_data] [ identifier[self] . identifier[_upto] ]
keyword[if] identifier[question_archived] != identifier[question_posed] :
identifier[logger] . identifier[error] ( literal[string] %( identifier[self] . identifier[_name] , identifier[self] . identifier[_qstr] ( identifier[question_archived] ), identifier[self] . identifier[_qstr] ( identifier[question_posed] )))
identifier[resp] = identifier[question_posed] . identifier[index] ( identifier[answer] )
identifier[self] . identifier[_upto] += literal[int]
keyword[return] identifier[resp]
keyword[return] identifier[__callback] | def create_callback(self):
"""
create a callback, suitable to be passed to SenateCounter
"""
def __callback(question_posed):
logger.debug('%s: asked to choose between: %s' % (self._name, self._qstr(question_posed)))
if self._upto == len(self._data):
logger.error('%s: out of automation data, requested to pick between %s' % (self._name, self._qstr(question_posed)))
raise AutomationException('out of automation data') # depends on [control=['if'], data=[]]
(question_archived, answer) = self._data[self._upto]
if question_archived != question_posed:
logger.error("%s: automation data mismatch, expected question `%s', got question `%s'" % (self._name, self._qstr(question_archived), self._qstr(question_posed))) # depends on [control=['if'], data=['question_archived', 'question_posed']]
resp = question_posed.index(answer)
self._upto += 1
return resp
return __callback |
def read_array(fo, writer_schema, reader_schema=None):
    """Decode an Avro array from *fo* and return it as a list.

    Arrays are encoded as a sequence of blocks: each block begins with a
    long item count, followed by that many items encoded with the array's
    item schema, and a block with count zero terminates the array.  A
    negative count means the item count is its absolute value and it is
    immediately followed by a long giving the block's size in bytes.
    """
    # Bind the per-item decoder once, outside the block loop.
    if reader_schema is None:
        def decode_item():
            return read_data(fo, writer_schema['items'])
    else:
        def decode_item():
            return read_data(fo, writer_schema['items'], reader_schema['items'])
    items = []
    while True:
        block_count = read_long(fo)
        if block_count == 0:
            break
        if block_count < 0:
            block_count = -block_count
            # Block size in bytes follows a negative count; value unused.
            read_long(fo)
        for _ in xrange(block_count):
            items.append(decode_item())
    return items
constant[Arrays are encoded as a series of blocks.
Each block consists of a long count value, followed by that many array
items. A block with count zero indicates the end of the array. Each item
is encoded per the array's item schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written.
]
if name[reader_schema] begin[:]
def function[item_reader, parameter[fo, w_schema, r_schema]]:
return[call[name[read_data], parameter[name[fo], call[name[w_schema]][constant[items]], call[name[r_schema]][constant[items]]]]]
variable[read_items] assign[=] list[[]]
variable[block_count] assign[=] call[name[read_long], parameter[name[fo]]]
while compare[name[block_count] not_equal[!=] constant[0]] begin[:]
if compare[name[block_count] less[<] constant[0]] begin[:]
variable[block_count] assign[=] <ast.UnaryOp object at 0x7da1b17cc850>
call[name[read_long], parameter[name[fo]]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[block_count]]]] begin[:]
call[name[read_items].append, parameter[call[name[item_reader], parameter[name[fo], name[writer_schema], name[reader_schema]]]]]
variable[block_count] assign[=] call[name[read_long], parameter[name[fo]]]
return[name[read_items]] | keyword[def] identifier[read_array] ( identifier[fo] , identifier[writer_schema] , identifier[reader_schema] = keyword[None] ):
literal[string]
keyword[if] identifier[reader_schema] :
keyword[def] identifier[item_reader] ( identifier[fo] , identifier[w_schema] , identifier[r_schema] ):
keyword[return] identifier[read_data] ( identifier[fo] , identifier[w_schema] [ literal[string] ], identifier[r_schema] [ literal[string] ])
keyword[else] :
keyword[def] identifier[item_reader] ( identifier[fo] , identifier[w_schema] , identifier[_] ):
keyword[return] identifier[read_data] ( identifier[fo] , identifier[w_schema] [ literal[string] ])
identifier[read_items] =[]
identifier[block_count] = identifier[read_long] ( identifier[fo] )
keyword[while] identifier[block_count] != literal[int] :
keyword[if] identifier[block_count] < literal[int] :
identifier[block_count] =- identifier[block_count]
identifier[read_long] ( identifier[fo] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[block_count] ):
identifier[read_items] . identifier[append] ( identifier[item_reader] ( identifier[fo] , identifier[writer_schema] , identifier[reader_schema] ))
identifier[block_count] = identifier[read_long] ( identifier[fo] )
keyword[return] identifier[read_items] | def read_array(fo, writer_schema, reader_schema=None):
"""Arrays are encoded as a series of blocks.
Each block consists of a long count value, followed by that many array
items. A block with count zero indicates the end of the array. Each item
is encoded per the array's item schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written.
"""
if reader_schema:
def item_reader(fo, w_schema, r_schema):
return read_data(fo, w_schema['items'], r_schema['items']) # depends on [control=['if'], data=[]]
else:
def item_reader(fo, w_schema, _):
return read_data(fo, w_schema['items'])
read_items = []
block_count = read_long(fo)
while block_count != 0:
if block_count < 0:
block_count = -block_count
# Read block size, unused
read_long(fo) # depends on [control=['if'], data=['block_count']]
for i in xrange(block_count):
read_items.append(item_reader(fo, writer_schema, reader_schema)) # depends on [control=['for'], data=[]]
block_count = read_long(fo) # depends on [control=['while'], data=['block_count']]
return read_items |
def generateNamespaceNodeDocuments(self):
        '''
        Generates the reStructuredText document for every namespace, including nested
        namespaces that were removed from ``self.namespaces`` (but added as children
        to one of the namespaces in ``self.namespaces``).
        The documents generated do not use the Breathe namespace directive, but instead
        link to the relevant documents associated with this namespace.
        '''
        # go through all of the top level namespaces
        for n in self.namespaces:
            # find any nested namespaces
            # (presumably findNestedNamespaces accumulates every descendant
            # namespace of this child into the list -- confirm in its impl)
            nested_namespaces = []
            for child in n.children:
                child.findNestedNamespaces(nested_namespaces)
            # generate the children first
            # NOTE(review): reversed(sorted(...)) walks the nested namespaces
            # in descending sort order -- presumably so deeper namespaces are
            # generated before the ones that link to them; confirm against
            # generateSingleNamespace.
            for nested in reversed(sorted(nested_namespaces)):
                self.generateSingleNamespace(nested)
            # generate this top level namespace
            self.generateSingleNamespace(n) | def function[generateNamespaceNodeDocuments, parameter[self]]:
constant[
Generates the reStructuredText document for every namespace, including nested
namespaces that were removed from ``self.namespaces`` (but added as children
to one of the namespaces in ``self.namespaces``).
The documents generated do not use the Breathe namespace directive, but instead
link to the relevant documents associated with this namespace.
]
for taget[name[n]] in starred[name[self].namespaces] begin[:]
variable[nested_namespaces] assign[=] list[[]]
for taget[name[child]] in starred[name[n].children] begin[:]
call[name[child].findNestedNamespaces, parameter[name[nested_namespaces]]]
for taget[name[nested]] in starred[call[name[reversed], parameter[call[name[sorted], parameter[name[nested_namespaces]]]]]] begin[:]
call[name[self].generateSingleNamespace, parameter[name[nested]]]
call[name[self].generateSingleNamespace, parameter[name[n]]] | keyword[def] identifier[generateNamespaceNodeDocuments] ( identifier[self] ):
literal[string]
keyword[for] identifier[n] keyword[in] identifier[self] . identifier[namespaces] :
identifier[nested_namespaces] =[]
keyword[for] identifier[child] keyword[in] identifier[n] . identifier[children] :
identifier[child] . identifier[findNestedNamespaces] ( identifier[nested_namespaces] )
keyword[for] identifier[nested] keyword[in] identifier[reversed] ( identifier[sorted] ( identifier[nested_namespaces] )):
identifier[self] . identifier[generateSingleNamespace] ( identifier[nested] )
identifier[self] . identifier[generateSingleNamespace] ( identifier[n] ) | def generateNamespaceNodeDocuments(self):
"""
Generates the reStructuredText document for every namespace, including nested
namespaces that were removed from ``self.namespaces`` (but added as children
to one of the namespaces in ``self.namespaces``).
The documents generated do not use the Breathe namespace directive, but instead
link to the relevant documents associated with this namespace.
"""
# go through all of the top level namespaces
for n in self.namespaces:
# find any nested namespaces
nested_namespaces = []
for child in n.children:
child.findNestedNamespaces(nested_namespaces) # depends on [control=['for'], data=['child']]
# generate the children first
for nested in reversed(sorted(nested_namespaces)):
self.generateSingleNamespace(nested) # depends on [control=['for'], data=['nested']]
# generate this top level namespace
self.generateSingleNamespace(n) # depends on [control=['for'], data=['n']] |
def assert_regex(text, regex, msg_fmt="{msg}"):
    """Fail if text does not match the regular expression.
    regex can be either a regular expression string or a compiled regular
    expression object.
    >>> assert_regex("Hello World!", r"llo.*rld!$")
    >>> assert_regex("Hello World!", r"\\d")
    Traceback (most recent call last):
    ...
    AssertionError: 'Hello World!' does not match '\\\\d'
    The following msg_fmt arguments are supported:
    * msg - the default error message
    * text - text that is matched
    * pattern - regular expression pattern as string
    """
    # re.compile() accepts either a pattern string or an already-compiled
    # pattern object (the latter is returned unchanged), so both documented
    # input forms are handled by this single call.
    compiled = re.compile(regex)
    # search() rather than match(): the pattern may match anywhere in text.
    if not compiled.search(text):
        msg = "{!r} does not match {!r}".format(text, compiled.pattern)
        fail(msg_fmt.format(msg=msg, text=text, pattern=compiled.pattern)) | def function[assert_regex, parameter[text, regex, msg_fmt]]:
constant[Fail if text does not match the regular expression.
regex can be either a regular expression string or a compiled regular
expression object.
>>> assert_regex("Hello World!", r"llo.*rld!$")
>>> assert_regex("Hello World!", r"\d")
Traceback (most recent call last):
...
AssertionError: 'Hello World!' does not match '\\d'
The following msg_fmt arguments are supported:
* msg - the default error message
* text - text that is matched
* pattern - regular expression pattern as string
]
variable[compiled] assign[=] call[name[re].compile, parameter[name[regex]]]
if <ast.UnaryOp object at 0x7da204345b10> begin[:]
variable[msg] assign[=] call[constant[{!r} does not match {!r}].format, parameter[name[text], name[compiled].pattern]]
call[name[fail], parameter[call[name[msg_fmt].format, parameter[]]]] | keyword[def] identifier[assert_regex] ( identifier[text] , identifier[regex] , identifier[msg_fmt] = literal[string] ):
literal[string]
identifier[compiled] = identifier[re] . identifier[compile] ( identifier[regex] )
keyword[if] keyword[not] identifier[compiled] . identifier[search] ( identifier[text] ):
identifier[msg] = literal[string] . identifier[format] ( identifier[text] , identifier[compiled] . identifier[pattern] )
identifier[fail] ( identifier[msg_fmt] . identifier[format] ( identifier[msg] = identifier[msg] , identifier[text] = identifier[text] , identifier[pattern] = identifier[compiled] . identifier[pattern] )) | def assert_regex(text, regex, msg_fmt='{msg}'):
"""Fail if text does not match the regular expression.
regex can be either a regular expression string or a compiled regular
expression object.
>>> assert_regex("Hello World!", r"llo.*rld!$")
>>> assert_regex("Hello World!", r"\\d")
Traceback (most recent call last):
...
AssertionError: 'Hello World!' does not match '\\\\d'
The following msg_fmt arguments are supported:
* msg - the default error message
* text - text that is matched
* pattern - regular expression pattern as string
"""
compiled = re.compile(regex)
if not compiled.search(text):
msg = '{!r} does not match {!r}'.format(text, compiled.pattern)
fail(msg_fmt.format(msg=msg, text=text, pattern=compiled.pattern)) # depends on [control=['if'], data=[]] |
def dbgr(self, string):
        '''Invoke a debugger command from inside a python shell called inside
        the debugger.
        '''
        # Print a blank line to visually separate the command's output from
        # the surrounding interactive shell session.
        print('')
        # Queue the raw command text and hand control to the command
        # processor, which presumably consumes the queued command.
        self.proc.cmd_queue.append(string)
        self.proc.process_command()
        return | def function[dbgr, parameter[self, string]]:
constant[Invoke a debugger command from inside a python shell called inside
the debugger.
]
call[name[print], parameter[constant[]]]
call[name[self].proc.cmd_queue.append, parameter[name[string]]]
call[name[self].proc.process_command, parameter[]]
return[None] | keyword[def] identifier[dbgr] ( identifier[self] , identifier[string] ):
literal[string]
identifier[print] ( literal[string] )
identifier[self] . identifier[proc] . identifier[cmd_queue] . identifier[append] ( identifier[string] )
identifier[self] . identifier[proc] . identifier[process_command] ()
keyword[return] | def dbgr(self, string):
"""Invoke a debugger command from inside a python shell called inside
the debugger.
"""
print('')
self.proc.cmd_queue.append(string)
self.proc.process_command()
return |
def redirect_to_handler(error, location):
    """
    Cause a request with an error to internally redirect to a URI path.
    This is generally for internal use, but can be called from within a Pecan
    controller to trigger a validation failure from *within* the controller
    itself, e.g.::
    @expose()
    @validate(some_schema, '/some/handler')
    def some_controller(self, **kw):
    if some_bad_condition():
    error_exception = ...
    redirect_to_handler(error_exception, '/some/handler')
    """
    # 'location' may be given as a zero-argument callable that lazily
    # computes the redirect target.
    if callable(location):
        location = location()
    # Force the internally redirected request to be dispatched as a GET.
    # NOTE(review): the 'error' argument is accepted but never used in this
    # body -- presumably it is consumed elsewhere by the validation
    # machinery; confirm with callers.
    request.environ['REQUEST_METHOD'] = 'GET'
    redirect(location, internal=True) | def function[redirect_to_handler, parameter[error, location]]:
constant[
Cause a requset with an error to internally redirect to a URI path.
This is generally for internal use, but can be called from within a Pecan
controller to trigger a validation failure from *within* the controller
itself, e.g.::
@expose()
@validate(some_schema, '/some/handler')
def some_controller(self, **kw):
if some_bad_condition():
error_exception = ...
redirect_to_handler(error_exception, '/some/handler')
]
if call[name[callable], parameter[name[location]]] begin[:]
variable[location] assign[=] call[name[location], parameter[]]
call[name[request].environ][constant[REQUEST_METHOD]] assign[=] constant[GET]
call[name[redirect], parameter[name[location]]] | keyword[def] identifier[redirect_to_handler] ( identifier[error] , identifier[location] ):
literal[string]
keyword[if] identifier[callable] ( identifier[location] ):
identifier[location] = identifier[location] ()
identifier[request] . identifier[environ] [ literal[string] ]= literal[string]
identifier[redirect] ( identifier[location] , identifier[internal] = keyword[True] ) | def redirect_to_handler(error, location):
"""
Cause a requset with an error to internally redirect to a URI path.
This is generally for internal use, but can be called from within a Pecan
controller to trigger a validation failure from *within* the controller
itself, e.g.::
@expose()
@validate(some_schema, '/some/handler')
def some_controller(self, **kw):
if some_bad_condition():
error_exception = ...
redirect_to_handler(error_exception, '/some/handler')
"""
if callable(location):
location = location() # depends on [control=['if'], data=[]]
request.environ['REQUEST_METHOD'] = 'GET'
redirect(location, internal=True) |
def spop(self, name):
        """Emulate spop."""
        redis_set = self._get_set(name, 'SPOP')
        # Missing or empty set: real Redis SPOP returns a nil reply.
        if not redis_set:
            return None
        # Pick a pseudo-random member, mirroring SPOP's random-removal
        # semantics (sets are unordered).
        member = choice(list(redis_set))
        redis_set.remove(member)
        # Redis removes a set key entirely once its last member is popped;
        # emulate that by deleting the key when the set becomes empty.
        if len(redis_set) == 0:
            self.delete(name)
        return member | def function[spop, parameter[self, name]]:
constant[Emulate spop.]
variable[redis_set] assign[=] call[name[self]._get_set, parameter[name[name], constant[SPOP]]]
if <ast.UnaryOp object at 0x7da18ede7700> begin[:]
return[constant[None]]
variable[member] assign[=] call[name[choice], parameter[call[name[list], parameter[name[redis_set]]]]]
call[name[redis_set].remove, parameter[name[member]]]
if compare[call[name[len], parameter[name[redis_set]]] equal[==] constant[0]] begin[:]
call[name[self].delete, parameter[name[name]]]
return[name[member]] | keyword[def] identifier[spop] ( identifier[self] , identifier[name] ):
literal[string]
identifier[redis_set] = identifier[self] . identifier[_get_set] ( identifier[name] , literal[string] )
keyword[if] keyword[not] identifier[redis_set] :
keyword[return] keyword[None]
identifier[member] = identifier[choice] ( identifier[list] ( identifier[redis_set] ))
identifier[redis_set] . identifier[remove] ( identifier[member] )
keyword[if] identifier[len] ( identifier[redis_set] )== literal[int] :
identifier[self] . identifier[delete] ( identifier[name] )
keyword[return] identifier[member] | def spop(self, name):
"""Emulate spop."""
redis_set = self._get_set(name, 'SPOP')
if not redis_set:
return None # depends on [control=['if'], data=[]]
member = choice(list(redis_set))
redis_set.remove(member)
if len(redis_set) == 0:
self.delete(name) # depends on [control=['if'], data=[]]
return member |
def _AddPropertiesForField(field, cls):
  """Adds a public property for a protocol message field.
  Clients can use this property to get and (in the case
  of non-repeated scalar fields) directly set the value
  of a protocol message field.
  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # Catch it if we add other types that we should
  # handle specially here.
  assert _FieldDescriptor.MAX_CPPTYPE == 10
  # Expose the field number as a class-level constant,
  # e.g. field name "foo_bar" -> FOO_BAR_FIELD_NUMBER.
  constant_name = field.name.upper() + "_FIELD_NUMBER"
  setattr(cls, constant_name, field.number)
  # Dispatch on the field kind: repeated fields, singular composite
  # (message) fields, and singular scalar fields each need a different
  # property implementation.
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    _AddPropertiesForRepeatedField(field, cls)
  elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    _AddPropertiesForNonRepeatedCompositeField(field, cls)
  else:
    _AddPropertiesForNonRepeatedScalarField(field, cls) | def function[_AddPropertiesForField, parameter[field, cls]]:
constant[Adds a public property for a protocol message field.
Clients can use this property to get and (in the case
of non-repeated scalar fields) directly set the value
of a protocol message field.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
]
assert[compare[name[_FieldDescriptor].MAX_CPPTYPE equal[==] constant[10]]]
variable[constant_name] assign[=] binary_operation[call[name[field].name.upper, parameter[]] + constant[_FIELD_NUMBER]]
call[name[setattr], parameter[name[cls], name[constant_name], name[field].number]]
if compare[name[field].label equal[==] name[_FieldDescriptor].LABEL_REPEATED] begin[:]
call[name[_AddPropertiesForRepeatedField], parameter[name[field], name[cls]]] | keyword[def] identifier[_AddPropertiesForField] ( identifier[field] , identifier[cls] ):
literal[string]
keyword[assert] identifier[_FieldDescriptor] . identifier[MAX_CPPTYPE] == literal[int]
identifier[constant_name] = identifier[field] . identifier[name] . identifier[upper] ()+ literal[string]
identifier[setattr] ( identifier[cls] , identifier[constant_name] , identifier[field] . identifier[number] )
keyword[if] identifier[field] . identifier[label] == identifier[_FieldDescriptor] . identifier[LABEL_REPEATED] :
identifier[_AddPropertiesForRepeatedField] ( identifier[field] , identifier[cls] )
keyword[elif] identifier[field] . identifier[cpp_type] == identifier[_FieldDescriptor] . identifier[CPPTYPE_MESSAGE] :
identifier[_AddPropertiesForNonRepeatedCompositeField] ( identifier[field] , identifier[cls] )
keyword[else] :
identifier[_AddPropertiesForNonRepeatedScalarField] ( identifier[field] , identifier[cls] ) | def _AddPropertiesForField(field, cls):
"""Adds a public property for a protocol message field.
Clients can use this property to get and (in the case
of non-repeated scalar fields) directly set the value
of a protocol message field.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
# Catch it if we add other types that we should
# handle specially here.
assert _FieldDescriptor.MAX_CPPTYPE == 10
constant_name = field.name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, field.number)
if field.label == _FieldDescriptor.LABEL_REPEATED:
_AddPropertiesForRepeatedField(field, cls) # depends on [control=['if'], data=[]]
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
_AddPropertiesForNonRepeatedCompositeField(field, cls) # depends on [control=['if'], data=[]]
else:
_AddPropertiesForNonRepeatedScalarField(field, cls) |
def quadKeyToTileXYZ(self, quadKey):
        '''
        Computes tile x, y and z values based on quadKey.
        '''
        tileX = 0
        tileY = 0
        # One quadkey digit per zoom level, so the key length is the zoom.
        tileZ = len(quadKey)
        # Walk the digits from the most significant bit (i == tileZ) down to
        # bit 1; each digit contributes one bit to tileX and/or tileY.
        for i in range(tileZ, 0, -1):
            mask = 1 << (i - 1)
            value = quadKey[tileZ - i]
            if value == '0':
                continue
            elif value == '1':
                # '1' sets only the x bit (right/east half at this level).
                tileX |= mask
            elif value == '2':
                # '2' sets only the y bit (bottom/south half at this level).
                tileY |= mask
            elif value == '3':
                # '3' sets both bits (bottom-right quadrant).
                tileX |= mask
                tileY |= mask
            else:
                # Quadkey digits are restricted to '0'..'3'.
                raise Exception('Invalid QuadKey')
        return (tileX, tileY, tileZ) | def function[quadKeyToTileXYZ, parameter[self, quadKey]]:
constant[
Computes tile x, y and z values based on quadKey.
]
variable[tileX] assign[=] constant[0]
variable[tileY] assign[=] constant[0]
variable[tileZ] assign[=] call[name[len], parameter[name[quadKey]]]
for taget[name[i]] in starred[call[name[range], parameter[name[tileZ], constant[0], <ast.UnaryOp object at 0x7da1b0f3b280>]]] begin[:]
variable[mask] assign[=] binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> binary_operation[name[i] - constant[1]]]
variable[value] assign[=] call[name[quadKey]][binary_operation[name[tileZ] - name[i]]]
if compare[name[value] equal[==] constant[0]] begin[:]
continue
return[tuple[[<ast.Name object at 0x7da1b0f39360>, <ast.Name object at 0x7da1b0f3a830>, <ast.Name object at 0x7da1b0f3b910>]]] | keyword[def] identifier[quadKeyToTileXYZ] ( identifier[self] , identifier[quadKey] ):
literal[string]
identifier[tileX] = literal[int]
identifier[tileY] = literal[int]
identifier[tileZ] = identifier[len] ( identifier[quadKey] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[tileZ] , literal[int] ,- literal[int] ):
identifier[mask] = literal[int] <<( identifier[i] - literal[int] )
identifier[value] = identifier[quadKey] [ identifier[tileZ] - identifier[i] ]
keyword[if] identifier[value] == literal[string] :
keyword[continue]
keyword[elif] identifier[value] == literal[string] :
identifier[tileX] |= identifier[mask]
keyword[elif] identifier[value] == literal[string] :
identifier[tileY] |= identifier[mask]
keyword[elif] identifier[value] == literal[string] :
identifier[tileX] |= identifier[mask]
identifier[tileY] |= identifier[mask]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] ( identifier[tileX] , identifier[tileY] , identifier[tileZ] ) | def quadKeyToTileXYZ(self, quadKey):
"""
Computes tile x, y and z values based on quadKey.
"""
tileX = 0
tileY = 0
tileZ = len(quadKey)
for i in range(tileZ, 0, -1):
mask = 1 << i - 1
value = quadKey[tileZ - i]
if value == '0':
continue # depends on [control=['if'], data=[]]
elif value == '1':
tileX |= mask # depends on [control=['if'], data=[]]
elif value == '2':
tileY |= mask # depends on [control=['if'], data=[]]
elif value == '3':
tileX |= mask
tileY |= mask # depends on [control=['if'], data=[]]
else:
raise Exception('Invalid QuadKey') # depends on [control=['for'], data=['i']]
return (tileX, tileY, tileZ) |
def similarity_graph(self, k=5, radius=None, include_self_edges=False,
                         output_type='SGraph', verbose=True):
        """
        Construct the similarity graph on the reference dataset, which is
        already stored in the model. This is conceptually very similar to
        running `query` with the reference set, but this method is optimized
        for the purpose, syntactically simpler, and automatically removes
        self-edges.
        Parameters
        ----------
        k : int, optional
            Maximum number of neighbors to return for each point in the
            dataset. Setting this to ``None`` deactivates the constraint, so
            that all neighbors are returned within ``radius`` of a given point.
        radius : float, optional
            For a given point, only neighbors within this distance are
            returned. The default is ``None``, in which case the ``k`` nearest
            neighbors are returned for each query point, regardless of
            distance.
        include_self_edges : bool, optional
            For most distance functions, each point in the model's reference
            dataset is its own nearest neighbor. If this parameter is set to
            False, this result is ignored, and the nearest neighbors are
            returned *excluding* the point itself.
        output_type : {'SGraph', 'SFrame'}, optional
            By default, the results are returned in the form of an SGraph,
            where each point in the reference dataset is a vertex and an edge A
            -> B indicates that vertex B is a nearest neighbor of vertex A. If
            'output_type' is set to 'SFrame', the output is in the same form as
            the results of the 'query' method: an SFrame with columns
            indicating the query label (in this case the query data is the same
            as the reference data), reference label, distance between the two
            points, and the rank of the neighbor.
        verbose : bool, optional
            If True, print progress updates and model details.
        Returns
        -------
        out : SFrame or SGraph
            The type of the output object depends on the 'output_type'
            parameter. See the parameter description for more detail.
        Notes
        -----
        - If both ``k`` and ``radius`` are set to ``None``, each data point is
          matched to the entire dataset. If the reference dataset has
          :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an
          SGraph with :math:`n^2` edges).
        - For models created with the 'lsh' method, the output similarity graph
          may have fewer vertices than there are data points in the original
          reference set. Because LSH is an approximate method, a query point
          may have fewer than 'k' neighbors. If LSH returns no neighbors at all
          for a query and self-edges are excluded, the query point is omitted
          from the results.
        Examples
        --------
        First construct an SFrame and create a nearest neighbors model:
        >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
        ...                         'x2': [0.69, 0.58, 0.36]})
        ...
        >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')
        Unlike the ``query`` method, there is no need for a second dataset with
        ``similarity_graph``.
        >>> g = model.similarity_graph(k=1) # an SGraph
        >>> g.edges
        +----------+----------+----------------+------+
        | __src_id | __dst_id |    distance    | rank |
        +----------+----------+----------------+------+
        |    0     |    1     | 0.376430604494 |  1   |
        |    2     |    1     | 0.55542776308  |  1   |
        |    1     |    0     | 0.376430604494 |  1   |
        +----------+----------+----------------+------+
        """
        ## Validate inputs.
        if k is not None:
            if not isinstance(k, int):
                raise ValueError("Input 'k' must be an integer.")
            if k <= 0:
                raise ValueError("Input 'k' must be larger than 0.")
        if radius is not None:
            if not isinstance(radius, (int, float)):
                raise ValueError("Input 'radius' must be an integer or float.")
            if radius < 0:
                raise ValueError("Input 'radius' must be non-negative.")
        ## Set k and radius to special values to indicate 'None'
        # (presumably -1 is the sentinel the backend reads as "no limit";
        # confirm against the extension implementation).
        if k is None:
            k = -1
        if radius is None:
            radius = -1.0
        opts = {'model': self.__proxy__,
                'model_name': self.__name__,
                'k': k,
                'radius': radius,
                'include_self_edges': include_self_edges}
        # Suppress or show backend progress output according to 'verbose'.
        with QuietProgress(verbose):
            result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts)
        # Per the docstring, 'neighbors' holds rows of (query label,
        # reference label, distance, rank).
        knn = result['neighbors']
        if output_type == "SFrame":
            return knn
        else:
            # Convert the neighbor table into a directed graph: each row
            # becomes an edge query_label -> reference_label.
            sg = _SGraph(edges=knn, src_field='query_label',
                         dst_field='reference_label')
            return sg | def function[similarity_graph, parameter[self, k, radius, include_self_edges, output_type, verbose]]:
constant[
Construct the similarity graph on the reference dataset, which is
already stored in the model. This is conceptually very similar to
running `query` with the reference set, but this method is optimized
for the purpose, syntactically simpler, and automatically removes
self-edges.
Parameters
----------
k : int, optional
Maximum number of neighbors to return for each point in the
dataset. Setting this to ``None`` deactivates the constraint, so
that all neighbors are returned within ``radius`` of a given point.
radius : float, optional
For a given point, only neighbors within this distance are
returned. The default is ``None``, in which case the ``k`` nearest
neighbors are returned for each query point, regardless of
distance.
include_self_edges : bool, optional
For most distance functions, each point in the model's reference
dataset is its own nearest neighbor. If this parameter is set to
False, this result is ignored, and the nearest neighbors are
returned *excluding* the point itself.
output_type : {'SGraph', 'SFrame'}, optional
By default, the results are returned in the form of an SGraph,
where each point in the reference dataset is a vertex and an edge A
-> B indicates that vertex B is a nearest neighbor of vertex A. If
'output_type' is set to 'SFrame', the output is in the same form as
the results of the 'query' method: an SFrame with columns
indicating the query label (in this case the query data is the same
as the reference data), reference label, distance between the two
points, and the rank of the neighbor.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame or SGraph
The type of the output object depends on the 'output_type'
parameter. See the parameter description for more detail.
Notes
-----
- If both ``k`` and ``radius`` are set to ``None``, each data point is
matched to the entire dataset. If the reference dataset has
:math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an
SGraph with :math:`n^2` edges).
- For models created with the 'lsh' method, the output similarity graph
may have fewer vertices than there are data points in the original
reference set. Because LSH is an approximate method, a query point
may have fewer than 'k' neighbors. If LSH returns no neighbors at all
for a query and self-edges are excluded, the query point is omitted
from the results.
Examples
--------
First construct an SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
... 'x2': [0.69, 0.58, 0.36]})
...
>>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')
Unlike the ``query`` method, there is no need for a second dataset with
``similarity_graph``.
>>> g = model.similarity_graph(k=1) # an SGraph
>>> g.edges
+----------+----------+----------------+------+
| __src_id | __dst_id | distance | rank |
+----------+----------+----------------+------+
| 0 | 1 | 0.376430604494 | 1 |
| 2 | 1 | 0.55542776308 | 1 |
| 1 | 0 | 0.376430604494 | 1 |
+----------+----------+----------------+------+
]
if compare[name[k] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b1ef2230> begin[:]
<ast.Raise object at 0x7da1b1ef3220>
if compare[name[k] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1ef0e20>
if compare[name[radius] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b1ef2ce0> begin[:]
<ast.Raise object at 0x7da1b1f740d0>
if compare[name[radius] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1f76aa0>
if compare[name[k] is constant[None]] begin[:]
variable[k] assign[=] <ast.UnaryOp object at 0x7da1b1f77730>
if compare[name[radius] is constant[None]] begin[:]
variable[radius] assign[=] <ast.UnaryOp object at 0x7da1b1f74f40>
variable[opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f779a0>, <ast.Constant object at 0x7da1b1f76ad0>, <ast.Constant object at 0x7da1b1f77280>, <ast.Constant object at 0x7da1b1f77fa0>, <ast.Constant object at 0x7da1b1f775e0>], [<ast.Attribute object at 0x7da1b1f77ca0>, <ast.Attribute object at 0x7da1b1f75900>, <ast.Name object at 0x7da1b1f77760>, <ast.Name object at 0x7da1b1f774c0>, <ast.Name object at 0x7da1b1f764a0>]]
with call[name[QuietProgress], parameter[name[verbose]]] begin[:]
variable[result] assign[=] call[name[_turicreate].extensions._nearest_neighbors.similarity_graph, parameter[name[opts]]]
variable[knn] assign[=] call[name[result]][constant[neighbors]]
if compare[name[output_type] equal[==] constant[SFrame]] begin[:]
return[name[knn]] | keyword[def] identifier[similarity_graph] ( identifier[self] , identifier[k] = literal[int] , identifier[radius] = keyword[None] , identifier[include_self_edges] = keyword[False] ,
identifier[output_type] = literal[string] , identifier[verbose] = keyword[True] ):
literal[string]
keyword[if] identifier[k] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[k] , identifier[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[k] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[radius] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[radius] ,( identifier[int] , identifier[float] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[radius] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[k] keyword[is] keyword[None] :
identifier[k] =- literal[int]
keyword[if] identifier[radius] keyword[is] keyword[None] :
identifier[radius] =- literal[int]
identifier[opts] ={ literal[string] : identifier[self] . identifier[__proxy__] ,
literal[string] : identifier[self] . identifier[__name__] ,
literal[string] : identifier[k] ,
literal[string] : identifier[radius] ,
literal[string] : identifier[include_self_edges] }
keyword[with] identifier[QuietProgress] ( identifier[verbose] ):
identifier[result] = identifier[_turicreate] . identifier[extensions] . identifier[_nearest_neighbors] . identifier[similarity_graph] ( identifier[opts] )
identifier[knn] = identifier[result] [ literal[string] ]
keyword[if] identifier[output_type] == literal[string] :
keyword[return] identifier[knn]
keyword[else] :
identifier[sg] = identifier[_SGraph] ( identifier[edges] = identifier[knn] , identifier[src_field] = literal[string] ,
identifier[dst_field] = literal[string] )
keyword[return] identifier[sg] | def similarity_graph(self, k=5, radius=None, include_self_edges=False, output_type='SGraph', verbose=True):
"""
Construct the similarity graph on the reference dataset, which is
already stored in the model. This is conceptually very similar to
running `query` with the reference set, but this method is optimized
for the purpose, syntactically simpler, and automatically removes
self-edges.
Parameters
----------
k : int, optional
Maximum number of neighbors to return for each point in the
dataset. Setting this to ``None`` deactivates the constraint, so
that all neighbors are returned within ``radius`` of a given point.
radius : float, optional
For a given point, only neighbors within this distance are
returned. The default is ``None``, in which case the ``k`` nearest
neighbors are returned for each query point, regardless of
distance.
include_self_edges : bool, optional
For most distance functions, each point in the model's reference
dataset is its own nearest neighbor. If this parameter is set to
False, this result is ignored, and the nearest neighbors are
returned *excluding* the point itself.
output_type : {'SGraph', 'SFrame'}, optional
By default, the results are returned in the form of an SGraph,
where each point in the reference dataset is a vertex and an edge A
-> B indicates that vertex B is a nearest neighbor of vertex A. If
'output_type' is set to 'SFrame', the output is in the same form as
the results of the 'query' method: an SFrame with columns
indicating the query label (in this case the query data is the same
as the reference data), reference label, distance between the two
points, and the rank of the neighbor.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame or SGraph
The type of the output object depends on the 'output_type'
parameter. See the parameter description for more detail.
Notes
-----
- If both ``k`` and ``radius`` are set to ``None``, each data point is
matched to the entire dataset. If the reference dataset has
:math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an
SGraph with :math:`n^2` edges).
- For models created with the 'lsh' method, the output similarity graph
may have fewer vertices than there are data points in the original
reference set. Because LSH is an approximate method, a query point
may have fewer than 'k' neighbors. If LSH returns no neighbors at all
for a query and self-edges are excluded, the query point is omitted
from the results.
Examples
--------
First construct an SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
... 'x2': [0.69, 0.58, 0.36]})
...
>>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')
Unlike the ``query`` method, there is no need for a second dataset with
``similarity_graph``.
>>> g = model.similarity_graph(k=1) # an SGraph
>>> g.edges
+----------+----------+----------------+------+
| __src_id | __dst_id | distance | rank |
+----------+----------+----------------+------+
| 0 | 1 | 0.376430604494 | 1 |
| 2 | 1 | 0.55542776308 | 1 |
| 1 | 0 | 0.376430604494 | 1 |
+----------+----------+----------------+------+
"""
## Validate inputs.
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.") # depends on [control=['if'], data=[]]
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['k']]
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.") # depends on [control=['if'], data=[]]
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['radius']]
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1 # depends on [control=['if'], data=['k']]
if radius is None:
radius = -1.0 # depends on [control=['if'], data=['radius']]
opts = {'model': self.__proxy__, 'model_name': self.__name__, 'k': k, 'radius': radius, 'include_self_edges': include_self_edges}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts) # depends on [control=['with'], data=[]]
knn = result['neighbors']
if output_type == 'SFrame':
return knn # depends on [control=['if'], data=[]]
else:
sg = _SGraph(edges=knn, src_field='query_label', dst_field='reference_label')
return sg |
def deploy_knowledge(filename, module_name, auth=None):
"""Deploy a file to the Artifactory BEL knowledge cache.
:param str filename: The physical file path
:param str module_name: The name of the module to deploy to
:param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the constructor of
:class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
:return: The resource path, if it was deployed successfully, else none.
:rtype: Optional[str]
"""
return _deploy_helper(
filename,
module_name,
get_knowledge_module_url,
get_knowledge_today,
hash_check=False,
auth=auth
) | def function[deploy_knowledge, parameter[filename, module_name, auth]]:
constant[Deploy a file to the Artifactory BEL knowledge cache.
:param str filename: The physical file path
:param str module_name: The name of the module to deploy to
:param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the constructor of
:class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
:return: The resource path, if it was deployed successfully, else none.
:rtype: Optional[str]
]
return[call[name[_deploy_helper], parameter[name[filename], name[module_name], name[get_knowledge_module_url], name[get_knowledge_today]]]] | keyword[def] identifier[deploy_knowledge] ( identifier[filename] , identifier[module_name] , identifier[auth] = keyword[None] ):
literal[string]
keyword[return] identifier[_deploy_helper] (
identifier[filename] ,
identifier[module_name] ,
identifier[get_knowledge_module_url] ,
identifier[get_knowledge_today] ,
identifier[hash_check] = keyword[False] ,
identifier[auth] = identifier[auth]
) | def deploy_knowledge(filename, module_name, auth=None):
"""Deploy a file to the Artifactory BEL knowledge cache.
:param str filename: The physical file path
:param str module_name: The name of the module to deploy to
:param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the constructor of
:class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
:return: The resource path, if it was deployed successfully, else none.
:rtype: Optional[str]
"""
return _deploy_helper(filename, module_name, get_knowledge_module_url, get_knowledge_today, hash_check=False, auth=auth) |
def loadd(self, ava, base64encode=False):
"""
Sets attributes, children, extension elements and extension
attributes of this element instance depending on what is in
the given dictionary. If there are already values on properties
those will be overwritten. If the keys in the dictionary does
not correspond to known attributes/children/.. they are ignored.
:param ava: The dictionary
:param base64encode: Whether the values on attributes or texts on
children shoule be base64encoded.
:return: The instance
"""
for prop, _typ, _req in self.c_attributes.values():
if prop in ava:
value = ava[prop]
if isinstance(value, (bool, int)):
setattr(self, prop, str(value))
else:
setattr(self, prop, value)
if "text" in ava:
self.set_text(ava["text"], base64encode)
for prop, klassdef in self.c_children.values():
# print("## %s, %s" % (prop, klassdef))
if prop in ava:
# print("### %s" % ava[prop])
# means there can be a list of values
if isinstance(klassdef, list):
make_vals(ava[prop], klassdef[0], self, prop,
base64encode=base64encode)
else:
cis = make_vals(ava[prop], klassdef, self, prop, True,
base64encode)
setattr(self, prop, cis)
if "extension_elements" in ava:
for item in ava["extension_elements"]:
self.extension_elements.append(ExtensionElement(
item["tag"]).loadd(item))
if "extension_attributes" in ava:
for key, val in ava["extension_attributes"].items():
self.extension_attributes[key] = val
return self | def function[loadd, parameter[self, ava, base64encode]]:
constant[
Sets attributes, children, extension elements and extension
attributes of this element instance depending on what is in
the given dictionary. If there are already values on properties
those will be overwritten. If the keys in the dictionary does
not correspond to known attributes/children/.. they are ignored.
:param ava: The dictionary
:param base64encode: Whether the values on attributes or texts on
children shoule be base64encoded.
:return: The instance
]
for taget[tuple[[<ast.Name object at 0x7da1b2068640>, <ast.Name object at 0x7da1b2069a80>, <ast.Name object at 0x7da1b206b550>]]] in starred[call[name[self].c_attributes.values, parameter[]]] begin[:]
if compare[name[prop] in name[ava]] begin[:]
variable[value] assign[=] call[name[ava]][name[prop]]
if call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da1b2068340>, <ast.Name object at 0x7da1b2069e40>]]]] begin[:]
call[name[setattr], parameter[name[self], name[prop], call[name[str], parameter[name[value]]]]]
if compare[constant[text] in name[ava]] begin[:]
call[name[self].set_text, parameter[call[name[ava]][constant[text]], name[base64encode]]]
for taget[tuple[[<ast.Name object at 0x7da1b206bc10>, <ast.Name object at 0x7da1b206a0e0>]]] in starred[call[name[self].c_children.values, parameter[]]] begin[:]
if compare[name[prop] in name[ava]] begin[:]
if call[name[isinstance], parameter[name[klassdef], name[list]]] begin[:]
call[name[make_vals], parameter[call[name[ava]][name[prop]], call[name[klassdef]][constant[0]], name[self], name[prop]]]
if compare[constant[extension_elements] in name[ava]] begin[:]
for taget[name[item]] in starred[call[name[ava]][constant[extension_elements]]] begin[:]
call[name[self].extension_elements.append, parameter[call[call[name[ExtensionElement], parameter[call[name[item]][constant[tag]]]].loadd, parameter[name[item]]]]]
if compare[constant[extension_attributes] in name[ava]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2068520>, <ast.Name object at 0x7da1b206bc40>]]] in starred[call[call[name[ava]][constant[extension_attributes]].items, parameter[]]] begin[:]
call[name[self].extension_attributes][name[key]] assign[=] name[val]
return[name[self]] | keyword[def] identifier[loadd] ( identifier[self] , identifier[ava] , identifier[base64encode] = keyword[False] ):
literal[string]
keyword[for] identifier[prop] , identifier[_typ] , identifier[_req] keyword[in] identifier[self] . identifier[c_attributes] . identifier[values] ():
keyword[if] identifier[prop] keyword[in] identifier[ava] :
identifier[value] = identifier[ava] [ identifier[prop] ]
keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[bool] , identifier[int] )):
identifier[setattr] ( identifier[self] , identifier[prop] , identifier[str] ( identifier[value] ))
keyword[else] :
identifier[setattr] ( identifier[self] , identifier[prop] , identifier[value] )
keyword[if] literal[string] keyword[in] identifier[ava] :
identifier[self] . identifier[set_text] ( identifier[ava] [ literal[string] ], identifier[base64encode] )
keyword[for] identifier[prop] , identifier[klassdef] keyword[in] identifier[self] . identifier[c_children] . identifier[values] ():
keyword[if] identifier[prop] keyword[in] identifier[ava] :
keyword[if] identifier[isinstance] ( identifier[klassdef] , identifier[list] ):
identifier[make_vals] ( identifier[ava] [ identifier[prop] ], identifier[klassdef] [ literal[int] ], identifier[self] , identifier[prop] ,
identifier[base64encode] = identifier[base64encode] )
keyword[else] :
identifier[cis] = identifier[make_vals] ( identifier[ava] [ identifier[prop] ], identifier[klassdef] , identifier[self] , identifier[prop] , keyword[True] ,
identifier[base64encode] )
identifier[setattr] ( identifier[self] , identifier[prop] , identifier[cis] )
keyword[if] literal[string] keyword[in] identifier[ava] :
keyword[for] identifier[item] keyword[in] identifier[ava] [ literal[string] ]:
identifier[self] . identifier[extension_elements] . identifier[append] ( identifier[ExtensionElement] (
identifier[item] [ literal[string] ]). identifier[loadd] ( identifier[item] ))
keyword[if] literal[string] keyword[in] identifier[ava] :
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[ava] [ literal[string] ]. identifier[items] ():
identifier[self] . identifier[extension_attributes] [ identifier[key] ]= identifier[val]
keyword[return] identifier[self] | def loadd(self, ava, base64encode=False):
"""
Sets attributes, children, extension elements and extension
attributes of this element instance depending on what is in
the given dictionary. If there are already values on properties
those will be overwritten. If the keys in the dictionary does
not correspond to known attributes/children/.. they are ignored.
:param ava: The dictionary
:param base64encode: Whether the values on attributes or texts on
children shoule be base64encoded.
:return: The instance
"""
for (prop, _typ, _req) in self.c_attributes.values():
if prop in ava:
value = ava[prop]
if isinstance(value, (bool, int)):
setattr(self, prop, str(value)) # depends on [control=['if'], data=[]]
else:
setattr(self, prop, value) # depends on [control=['if'], data=['prop', 'ava']] # depends on [control=['for'], data=[]]
if 'text' in ava:
self.set_text(ava['text'], base64encode) # depends on [control=['if'], data=['ava']]
for (prop, klassdef) in self.c_children.values():
# print("## %s, %s" % (prop, klassdef))
if prop in ava:
# print("### %s" % ava[prop])
# means there can be a list of values
if isinstance(klassdef, list):
make_vals(ava[prop], klassdef[0], self, prop, base64encode=base64encode) # depends on [control=['if'], data=[]]
else:
cis = make_vals(ava[prop], klassdef, self, prop, True, base64encode)
setattr(self, prop, cis) # depends on [control=['if'], data=['prop', 'ava']] # depends on [control=['for'], data=[]]
if 'extension_elements' in ava:
for item in ava['extension_elements']:
self.extension_elements.append(ExtensionElement(item['tag']).loadd(item)) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=['ava']]
if 'extension_attributes' in ava:
for (key, val) in ava['extension_attributes'].items():
self.extension_attributes[key] = val # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['ava']]
return self |
def parse_value(self):
"""Parse a single literal value.
Returns:
The parsed value.
"""
parsers = [
self._maybe_parse_container, self._maybe_parse_basic_type,
self._maybe_parse_configurable_reference, self._maybe_parse_macro
]
for parser in parsers:
success, value = parser()
if success:
return value
self._raise_syntax_error('Unable to parse value.') | def function[parse_value, parameter[self]]:
constant[Parse a single literal value.
Returns:
The parsed value.
]
variable[parsers] assign[=] list[[<ast.Attribute object at 0x7da1b0317a60>, <ast.Attribute object at 0x7da1b0314eb0>, <ast.Attribute object at 0x7da1b0316230>, <ast.Attribute object at 0x7da1b0315660>]]
for taget[name[parser]] in starred[name[parsers]] begin[:]
<ast.Tuple object at 0x7da1b03169b0> assign[=] call[name[parser], parameter[]]
if name[success] begin[:]
return[name[value]]
call[name[self]._raise_syntax_error, parameter[constant[Unable to parse value.]]] | keyword[def] identifier[parse_value] ( identifier[self] ):
literal[string]
identifier[parsers] =[
identifier[self] . identifier[_maybe_parse_container] , identifier[self] . identifier[_maybe_parse_basic_type] ,
identifier[self] . identifier[_maybe_parse_configurable_reference] , identifier[self] . identifier[_maybe_parse_macro]
]
keyword[for] identifier[parser] keyword[in] identifier[parsers] :
identifier[success] , identifier[value] = identifier[parser] ()
keyword[if] identifier[success] :
keyword[return] identifier[value]
identifier[self] . identifier[_raise_syntax_error] ( literal[string] ) | def parse_value(self):
"""Parse a single literal value.
Returns:
The parsed value.
"""
parsers = [self._maybe_parse_container, self._maybe_parse_basic_type, self._maybe_parse_configurable_reference, self._maybe_parse_macro]
for parser in parsers:
(success, value) = parser()
if success:
return value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['parser']]
self._raise_syntax_error('Unable to parse value.') |
def authenticate(self, req_data, identifier: Optional[str]=None,
signature: Optional[str]=None, threshold: Optional[int] = None,
verifier: Verifier=DidVerifier):
"""
Prepares the data to be serialised for signing and then verifies the
signature
:param req_data:
:param identifier:
:param signature:
:param verifier:
:return:
"""
to_serialize = {k: v for k, v in req_data.items()
if k not in self.excluded_from_signing}
if req_data.get(f.SIG.nm) is None and \
req_data.get(f.SIGS.nm) is None and \
signature is None:
raise MissingSignature
if req_data.get(f.IDENTIFIER.nm) and (req_data.get(f.SIG.nm) or
signature):
try:
# if not identifier:
identifier = identifier or self._extract_identifier(req_data)
# if not signature:
signature = signature or self._extract_signature(req_data)
signatures = {identifier: signature}
except Exception as ex:
if ex in (MissingSignature, EmptySignature, MissingIdentifier,
EmptyIdentifier):
ex = ex(req_data.get(f.IDENTIFIER.nm), req_data.get(f.SIG.nm))
raise ex
else:
signatures = req_data.get(f.SIGS.nm, None)
return self.authenticate_multi(to_serialize, signatures=signatures,
threshold=threshold, verifier=verifier) | def function[authenticate, parameter[self, req_data, identifier, signature, threshold, verifier]]:
constant[
Prepares the data to be serialised for signing and then verifies the
signature
:param req_data:
:param identifier:
:param signature:
:param verifier:
:return:
]
variable[to_serialize] assign[=] <ast.DictComp object at 0x7da2054a70a0>
if <ast.BoolOp object at 0x7da2054a63b0> begin[:]
<ast.Raise object at 0x7da2054a5930>
if <ast.BoolOp object at 0x7da2054a6ad0> begin[:]
<ast.Try object at 0x7da2054a6a40>
return[call[name[self].authenticate_multi, parameter[name[to_serialize]]]] | keyword[def] identifier[authenticate] ( identifier[self] , identifier[req_data] , identifier[identifier] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[signature] : identifier[Optional] [ identifier[str] ]= keyword[None] , identifier[threshold] : identifier[Optional] [ identifier[int] ]= keyword[None] ,
identifier[verifier] : identifier[Verifier] = identifier[DidVerifier] ):
literal[string]
identifier[to_serialize] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[req_data] . identifier[items] ()
keyword[if] identifier[k] keyword[not] keyword[in] identifier[self] . identifier[excluded_from_signing] }
keyword[if] identifier[req_data] . identifier[get] ( identifier[f] . identifier[SIG] . identifier[nm] ) keyword[is] keyword[None] keyword[and] identifier[req_data] . identifier[get] ( identifier[f] . identifier[SIGS] . identifier[nm] ) keyword[is] keyword[None] keyword[and] identifier[signature] keyword[is] keyword[None] :
keyword[raise] identifier[MissingSignature]
keyword[if] identifier[req_data] . identifier[get] ( identifier[f] . identifier[IDENTIFIER] . identifier[nm] ) keyword[and] ( identifier[req_data] . identifier[get] ( identifier[f] . identifier[SIG] . identifier[nm] ) keyword[or]
identifier[signature] ):
keyword[try] :
identifier[identifier] = identifier[identifier] keyword[or] identifier[self] . identifier[_extract_identifier] ( identifier[req_data] )
identifier[signature] = identifier[signature] keyword[or] identifier[self] . identifier[_extract_signature] ( identifier[req_data] )
identifier[signatures] ={ identifier[identifier] : identifier[signature] }
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] identifier[ex] keyword[in] ( identifier[MissingSignature] , identifier[EmptySignature] , identifier[MissingIdentifier] ,
identifier[EmptyIdentifier] ):
identifier[ex] = identifier[ex] ( identifier[req_data] . identifier[get] ( identifier[f] . identifier[IDENTIFIER] . identifier[nm] ), identifier[req_data] . identifier[get] ( identifier[f] . identifier[SIG] . identifier[nm] ))
keyword[raise] identifier[ex]
keyword[else] :
identifier[signatures] = identifier[req_data] . identifier[get] ( identifier[f] . identifier[SIGS] . identifier[nm] , keyword[None] )
keyword[return] identifier[self] . identifier[authenticate_multi] ( identifier[to_serialize] , identifier[signatures] = identifier[signatures] ,
identifier[threshold] = identifier[threshold] , identifier[verifier] = identifier[verifier] ) | def authenticate(self, req_data, identifier: Optional[str]=None, signature: Optional[str]=None, threshold: Optional[int]=None, verifier: Verifier=DidVerifier):
"""
Prepares the data to be serialised for signing and then verifies the
signature
:param req_data:
:param identifier:
:param signature:
:param verifier:
:return:
"""
to_serialize = {k: v for (k, v) in req_data.items() if k not in self.excluded_from_signing}
if req_data.get(f.SIG.nm) is None and req_data.get(f.SIGS.nm) is None and (signature is None):
raise MissingSignature # depends on [control=['if'], data=[]]
if req_data.get(f.IDENTIFIER.nm) and (req_data.get(f.SIG.nm) or signature):
try:
# if not identifier:
identifier = identifier or self._extract_identifier(req_data)
# if not signature:
signature = signature or self._extract_signature(req_data)
signatures = {identifier: signature} # depends on [control=['try'], data=[]]
except Exception as ex:
if ex in (MissingSignature, EmptySignature, MissingIdentifier, EmptyIdentifier):
ex = ex(req_data.get(f.IDENTIFIER.nm), req_data.get(f.SIG.nm)) # depends on [control=['if'], data=['ex']]
raise ex # depends on [control=['except'], data=['ex']] # depends on [control=['if'], data=[]]
else:
signatures = req_data.get(f.SIGS.nm, None)
return self.authenticate_multi(to_serialize, signatures=signatures, threshold=threshold, verifier=verifier) |
def mdl(ll, k, data):
"""
Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better...
"""
"""
N - no. of genes
n - no. of cells
k - no. of cell types
R - sum(Dataset) i.e. total no. of reads
function TotCost = TotBits(N,m,p,R,C)
# C is the cost from the cost function
TotCost = C + (N*m + m*p)*(log(R/(N*p)));
"""
N, m = data.shape
cost = ll + (N*m + m*k)*(np.log(data.sum()/(N*k)))
return cost | def function[mdl, parameter[ll, k, data]]:
constant[
Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better...
]
constant[
N - no. of genes
n - no. of cells
k - no. of cell types
R - sum(Dataset) i.e. total no. of reads
function TotCost = TotBits(N,m,p,R,C)
# C is the cost from the cost function
TotCost = C + (N*m + m*p)*(log(R/(N*p)));
]
<ast.Tuple object at 0x7da1b1a74dc0> assign[=] name[data].shape
variable[cost] assign[=] binary_operation[name[ll] + binary_operation[binary_operation[binary_operation[name[N] * name[m]] + binary_operation[name[m] * name[k]]] * call[name[np].log, parameter[binary_operation[call[name[data].sum, parameter[]] / binary_operation[name[N] * name[k]]]]]]]
return[name[cost]] | keyword[def] identifier[mdl] ( identifier[ll] , identifier[k] , identifier[data] ):
literal[string]
literal[string]
identifier[N] , identifier[m] = identifier[data] . identifier[shape]
identifier[cost] = identifier[ll] +( identifier[N] * identifier[m] + identifier[m] * identifier[k] )*( identifier[np] . identifier[log] ( identifier[data] . identifier[sum] ()/( identifier[N] * identifier[k] )))
keyword[return] identifier[cost] | def mdl(ll, k, data):
"""
Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better...
"""
'\n N - no. of genes\n n - no. of cells \n k - no. of cell types\n R - sum(Dataset) i.e. total no. of reads\n\n function TotCost = TotBits(N,m,p,R,C)\n # C is the cost from the cost function\n TotCost = C + (N*m + m*p)*(log(R/(N*p)));\n '
(N, m) = data.shape
cost = ll + (N * m + m * k) * np.log(data.sum() / (N * k))
return cost |
async def on_raw_730(self, message):
""" Someone we are monitoring just came online. """
for nick in message.params[1].split(','):
self._create_user(nick)
await self.on_user_online(nickname) | <ast.AsyncFunctionDef object at 0x7da20c6e6560> | keyword[async] keyword[def] identifier[on_raw_730] ( identifier[self] , identifier[message] ):
literal[string]
keyword[for] identifier[nick] keyword[in] identifier[message] . identifier[params] [ literal[int] ]. identifier[split] ( literal[string] ):
identifier[self] . identifier[_create_user] ( identifier[nick] )
keyword[await] identifier[self] . identifier[on_user_online] ( identifier[nickname] ) | async def on_raw_730(self, message):
""" Someone we are monitoring just came online. """
for nick in message.params[1].split(','):
self._create_user(nick)
await self.on_user_online(nickname) # depends on [control=['for'], data=['nick']] |
def read(self):
"""Read stdout and stdout pipes if process is no longer running."""
if self._process and self._process.poll() is not None:
ip = get_ipython()
err = ip.user_ns['error'].read().decode()
out = ip.user_ns['output'].read().decode()
else:
out = ''
err = ''
return out, err | def function[read, parameter[self]]:
constant[Read stdout and stdout pipes if process is no longer running.]
if <ast.BoolOp object at 0x7da1b14c5e10> begin[:]
variable[ip] assign[=] call[name[get_ipython], parameter[]]
variable[err] assign[=] call[call[call[name[ip].user_ns][constant[error]].read, parameter[]].decode, parameter[]]
variable[out] assign[=] call[call[call[name[ip].user_ns][constant[output]].read, parameter[]].decode, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b14c45b0>, <ast.Name object at 0x7da1b14c48e0>]]] | keyword[def] identifier[read] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_process] keyword[and] identifier[self] . identifier[_process] . identifier[poll] () keyword[is] keyword[not] keyword[None] :
identifier[ip] = identifier[get_ipython] ()
identifier[err] = identifier[ip] . identifier[user_ns] [ literal[string] ]. identifier[read] (). identifier[decode] ()
identifier[out] = identifier[ip] . identifier[user_ns] [ literal[string] ]. identifier[read] (). identifier[decode] ()
keyword[else] :
identifier[out] = literal[string]
identifier[err] = literal[string]
keyword[return] identifier[out] , identifier[err] | def read(self):
"""Read stdout and stdout pipes if process is no longer running."""
if self._process and self._process.poll() is not None:
ip = get_ipython()
err = ip.user_ns['error'].read().decode()
out = ip.user_ns['output'].read().decode() # depends on [control=['if'], data=[]]
else:
out = ''
err = ''
return (out, err) |
def get_kwargs(self, args):
"""
Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse.
"""
# Now we need to figure out which arguments the final function
# actually needs
kwargs = {}
argspec = inspect.getargspec(self._func)
required = set(argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
for arg_name in argspec.args:
try:
kwargs[arg_name] = getattr(args, arg_name)
except AttributeError:
if arg_name in required:
# If this happens, that's a programming failure
raise
# If the function accepts any keyword argument, add whatever
# remains
if argspec.keywords:
for key, value in args.__dict__.items():
if key in kwargs:
# Already handled
continue
kwargs[key] = value
return kwargs | def function[get_kwargs, parameter[self, args]]:
constant[
Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse.
]
variable[kwargs] assign[=] dictionary[[], []]
variable[argspec] assign[=] call[name[inspect].getargspec, parameter[name[self]._func]]
variable[required] assign[=] call[name[set], parameter[<ast.IfExp object at 0x7da18f8126e0>]]
for taget[name[arg_name]] in starred[name[argspec].args] begin[:]
<ast.Try object at 0x7da18f811c90>
if name[argspec].keywords begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f8119c0>, <ast.Name object at 0x7da18f810fd0>]]] in starred[call[name[args].__dict__.items, parameter[]]] begin[:]
if compare[name[key] in name[kwargs]] begin[:]
continue
call[name[kwargs]][name[key]] assign[=] name[value]
return[name[kwargs]] | keyword[def] identifier[get_kwargs] ( identifier[self] , identifier[args] ):
literal[string]
identifier[kwargs] ={}
identifier[argspec] = identifier[inspect] . identifier[getargspec] ( identifier[self] . identifier[_func] )
identifier[required] = identifier[set] ( identifier[argspec] . identifier[args] [:- identifier[len] ( identifier[argspec] . identifier[defaults] )]
keyword[if] identifier[argspec] . identifier[defaults] keyword[else] identifier[argspec] . identifier[args] )
keyword[for] identifier[arg_name] keyword[in] identifier[argspec] . identifier[args] :
keyword[try] :
identifier[kwargs] [ identifier[arg_name] ]= identifier[getattr] ( identifier[args] , identifier[arg_name] )
keyword[except] identifier[AttributeError] :
keyword[if] identifier[arg_name] keyword[in] identifier[required] :
keyword[raise]
keyword[if] identifier[argspec] . identifier[keywords] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[args] . identifier[__dict__] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[kwargs] :
keyword[continue]
identifier[kwargs] [ identifier[key] ]= identifier[value]
keyword[return] identifier[kwargs] | def get_kwargs(self, args):
"""
Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse.
"""
# Now we need to figure out which arguments the final function
# actually needs
kwargs = {}
argspec = inspect.getargspec(self._func)
required = set(argspec.args[:-len(argspec.defaults)] if argspec.defaults else argspec.args)
for arg_name in argspec.args:
try:
kwargs[arg_name] = getattr(args, arg_name) # depends on [control=['try'], data=[]]
except AttributeError:
if arg_name in required:
# If this happens, that's a programming failure
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['arg_name']]
# If the function accepts any keyword argument, add whatever
# remains
if argspec.keywords:
for (key, value) in args.__dict__.items():
if key in kwargs:
# Already handled
continue # depends on [control=['if'], data=[]]
kwargs[key] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return kwargs |
def histogram2D(xvalues, yvalues, bins=12, norm=1, c="g", alpha=1, fill=False):
"""
Build a 2D hexagonal histogram from a list of x and y values.
:param bool bins: nr of bins for the smaller range in x or y.
:param float norm: sets a scaling factor for the z axis.
:param bool fill: draw solid hexagons.
.. hint:: |histo2D| |histo2D.py|_
"""
xmin, xmax = np.min(xvalues), np.max(xvalues)
ymin, ymax = np.min(yvalues), np.max(yvalues)
dx, dy = xmax - xmin, ymax - ymin
if xmax - xmin < ymax - ymin:
n = bins
m = np.rint(dy / dx * n / 1.2 + 0.5).astype(int)
else:
m = bins
n = np.rint(dx / dy * m * 1.2 + 0.5).astype(int)
src = vtk.vtkPointSource()
src.SetNumberOfPoints(len(xvalues))
src.Update()
pointsPolydata = src.GetOutput()
values = list(zip(xvalues, yvalues))
zs = [[0.0]] * len(values)
values = np.append(values, zs, axis=1)
pointsPolydata.GetPoints().SetData(numpy_to_vtk(values, deep=True))
cloud = Actor(pointsPolydata)
c1 = vc.getColor(c)
c2 = np.array(c1) * 0.7
r = 0.47 / n * 1.2 * dx
hexs, binmax = [], 0
for i in range(n + 3):
for j in range(m + 2):
cyl = vtk.vtkCylinderSource()
cyl.SetResolution(6)
cyl.CappingOn()
cyl.SetRadius(0.5)
cyl.SetHeight(0.1)
cyl.Update()
t = vtk.vtkTransform()
if not i % 2:
p = (i / 1.33, j / 1.12, 0)
c = c1
else:
p = (i / 1.33, j / 1.12 + 0.443, 0)
c = c2
q = (p[0] / n * 1.2 * dx + xmin, p[1] / m * dy + ymin, 0)
ids = cloud.closestPoint(q, radius=r, returnIds=True)
ne = len(ids)
if fill:
t.Translate(p[0], p[1], ne / 2)
t.Scale(1, 1, ne * 5)
else:
t.Translate(p[0], p[1], ne)
t.RotateX(90) # put it along Z
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(cyl.GetOutput())
tf.SetTransform(t)
tf.Update()
h = Actor(tf.GetOutput(), c=c, alpha=alpha)
h.PickableOff()
hexs.append(h)
if ne > binmax:
binmax = ne
asse = Assembly(hexs)
asse.SetScale(1 / n * 1.2 * dx, 1 / m * dy, norm / binmax * (dx + dy) / 4)
asse.SetPosition(xmin, ymin, 0)
return asse | def function[histogram2D, parameter[xvalues, yvalues, bins, norm, c, alpha, fill]]:
constant[
Build a 2D hexagonal histogram from a list of x and y values.
:param bool bins: nr of bins for the smaller range in x or y.
:param float norm: sets a scaling factor for the z axis.
:param bool fill: draw solid hexagons.
.. hint:: |histo2D| |histo2D.py|_
]
<ast.Tuple object at 0x7da1b0608520> assign[=] tuple[[<ast.Call object at 0x7da1b0608fd0>, <ast.Call object at 0x7da1b060a230>]]
<ast.Tuple object at 0x7da1b06086d0> assign[=] tuple[[<ast.Call object at 0x7da1b060b310>, <ast.Call object at 0x7da1b060a8c0>]]
<ast.Tuple object at 0x7da1b060ae00> assign[=] tuple[[<ast.BinOp object at 0x7da1b060aa10>, <ast.BinOp object at 0x7da1b060a470>]]
if compare[binary_operation[name[xmax] - name[xmin]] less[<] binary_operation[name[ymax] - name[ymin]]] begin[:]
variable[n] assign[=] name[bins]
variable[m] assign[=] call[call[name[np].rint, parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[dy] / name[dx]] * name[n]] / constant[1.2]] + constant[0.5]]]].astype, parameter[name[int]]]
variable[src] assign[=] call[name[vtk].vtkPointSource, parameter[]]
call[name[src].SetNumberOfPoints, parameter[call[name[len], parameter[name[xvalues]]]]]
call[name[src].Update, parameter[]]
variable[pointsPolydata] assign[=] call[name[src].GetOutput, parameter[]]
variable[values] assign[=] call[name[list], parameter[call[name[zip], parameter[name[xvalues], name[yvalues]]]]]
variable[zs] assign[=] binary_operation[list[[<ast.List object at 0x7da1b060a560>]] * call[name[len], parameter[name[values]]]]
variable[values] assign[=] call[name[np].append, parameter[name[values], name[zs]]]
call[call[name[pointsPolydata].GetPoints, parameter[]].SetData, parameter[call[name[numpy_to_vtk], parameter[name[values]]]]]
variable[cloud] assign[=] call[name[Actor], parameter[name[pointsPolydata]]]
variable[c1] assign[=] call[name[vc].getColor, parameter[name[c]]]
variable[c2] assign[=] binary_operation[call[name[np].array, parameter[name[c1]]] * constant[0.7]]
variable[r] assign[=] binary_operation[binary_operation[binary_operation[constant[0.47] / name[n]] * constant[1.2]] * name[dx]]
<ast.Tuple object at 0x7da18c4cddb0> assign[=] tuple[[<ast.List object at 0x7da18c4ce170>, <ast.Constant object at 0x7da18c4cd660>]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[n] + constant[3]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[binary_operation[name[m] + constant[2]]]]] begin[:]
variable[cyl] assign[=] call[name[vtk].vtkCylinderSource, parameter[]]
call[name[cyl].SetResolution, parameter[constant[6]]]
call[name[cyl].CappingOn, parameter[]]
call[name[cyl].SetRadius, parameter[constant[0.5]]]
call[name[cyl].SetHeight, parameter[constant[0.1]]]
call[name[cyl].Update, parameter[]]
variable[t] assign[=] call[name[vtk].vtkTransform, parameter[]]
if <ast.UnaryOp object at 0x7da18f720310> begin[:]
variable[p] assign[=] tuple[[<ast.BinOp object at 0x7da18f722da0>, <ast.BinOp object at 0x7da18f722b30>, <ast.Constant object at 0x7da18f723df0>]]
variable[c] assign[=] name[c1]
variable[q] assign[=] tuple[[<ast.BinOp object at 0x7da18f7204c0>, <ast.BinOp object at 0x7da18f720dc0>, <ast.Constant object at 0x7da18f721480>]]
variable[ids] assign[=] call[name[cloud].closestPoint, parameter[name[q]]]
variable[ne] assign[=] call[name[len], parameter[name[ids]]]
if name[fill] begin[:]
call[name[t].Translate, parameter[call[name[p]][constant[0]], call[name[p]][constant[1]], binary_operation[name[ne] / constant[2]]]]
call[name[t].Scale, parameter[constant[1], constant[1], binary_operation[name[ne] * constant[5]]]]
call[name[t].RotateX, parameter[constant[90]]]
variable[tf] assign[=] call[name[vtk].vtkTransformPolyDataFilter, parameter[]]
call[name[tf].SetInputData, parameter[call[name[cyl].GetOutput, parameter[]]]]
call[name[tf].SetTransform, parameter[name[t]]]
call[name[tf].Update, parameter[]]
variable[h] assign[=] call[name[Actor], parameter[call[name[tf].GetOutput, parameter[]]]]
call[name[h].PickableOff, parameter[]]
call[name[hexs].append, parameter[name[h]]]
if compare[name[ne] greater[>] name[binmax]] begin[:]
variable[binmax] assign[=] name[ne]
variable[asse] assign[=] call[name[Assembly], parameter[name[hexs]]]
call[name[asse].SetScale, parameter[binary_operation[binary_operation[binary_operation[constant[1] / name[n]] * constant[1.2]] * name[dx]], binary_operation[binary_operation[constant[1] / name[m]] * name[dy]], binary_operation[binary_operation[binary_operation[name[norm] / name[binmax]] * binary_operation[name[dx] + name[dy]]] / constant[4]]]]
call[name[asse].SetPosition, parameter[name[xmin], name[ymin], constant[0]]]
return[name[asse]] | keyword[def] identifier[histogram2D] ( identifier[xvalues] , identifier[yvalues] , identifier[bins] = literal[int] , identifier[norm] = literal[int] , identifier[c] = literal[string] , identifier[alpha] = literal[int] , identifier[fill] = keyword[False] ):
literal[string]
identifier[xmin] , identifier[xmax] = identifier[np] . identifier[min] ( identifier[xvalues] ), identifier[np] . identifier[max] ( identifier[xvalues] )
identifier[ymin] , identifier[ymax] = identifier[np] . identifier[min] ( identifier[yvalues] ), identifier[np] . identifier[max] ( identifier[yvalues] )
identifier[dx] , identifier[dy] = identifier[xmax] - identifier[xmin] , identifier[ymax] - identifier[ymin]
keyword[if] identifier[xmax] - identifier[xmin] < identifier[ymax] - identifier[ymin] :
identifier[n] = identifier[bins]
identifier[m] = identifier[np] . identifier[rint] ( identifier[dy] / identifier[dx] * identifier[n] / literal[int] + literal[int] ). identifier[astype] ( identifier[int] )
keyword[else] :
identifier[m] = identifier[bins]
identifier[n] = identifier[np] . identifier[rint] ( identifier[dx] / identifier[dy] * identifier[m] * literal[int] + literal[int] ). identifier[astype] ( identifier[int] )
identifier[src] = identifier[vtk] . identifier[vtkPointSource] ()
identifier[src] . identifier[SetNumberOfPoints] ( identifier[len] ( identifier[xvalues] ))
identifier[src] . identifier[Update] ()
identifier[pointsPolydata] = identifier[src] . identifier[GetOutput] ()
identifier[values] = identifier[list] ( identifier[zip] ( identifier[xvalues] , identifier[yvalues] ))
identifier[zs] =[[ literal[int] ]]* identifier[len] ( identifier[values] )
identifier[values] = identifier[np] . identifier[append] ( identifier[values] , identifier[zs] , identifier[axis] = literal[int] )
identifier[pointsPolydata] . identifier[GetPoints] (). identifier[SetData] ( identifier[numpy_to_vtk] ( identifier[values] , identifier[deep] = keyword[True] ))
identifier[cloud] = identifier[Actor] ( identifier[pointsPolydata] )
identifier[c1] = identifier[vc] . identifier[getColor] ( identifier[c] )
identifier[c2] = identifier[np] . identifier[array] ( identifier[c1] )* literal[int]
identifier[r] = literal[int] / identifier[n] * literal[int] * identifier[dx]
identifier[hexs] , identifier[binmax] =[], literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] + literal[int] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[m] + literal[int] ):
identifier[cyl] = identifier[vtk] . identifier[vtkCylinderSource] ()
identifier[cyl] . identifier[SetResolution] ( literal[int] )
identifier[cyl] . identifier[CappingOn] ()
identifier[cyl] . identifier[SetRadius] ( literal[int] )
identifier[cyl] . identifier[SetHeight] ( literal[int] )
identifier[cyl] . identifier[Update] ()
identifier[t] = identifier[vtk] . identifier[vtkTransform] ()
keyword[if] keyword[not] identifier[i] % literal[int] :
identifier[p] =( identifier[i] / literal[int] , identifier[j] / literal[int] , literal[int] )
identifier[c] = identifier[c1]
keyword[else] :
identifier[p] =( identifier[i] / literal[int] , identifier[j] / literal[int] + literal[int] , literal[int] )
identifier[c] = identifier[c2]
identifier[q] =( identifier[p] [ literal[int] ]/ identifier[n] * literal[int] * identifier[dx] + identifier[xmin] , identifier[p] [ literal[int] ]/ identifier[m] * identifier[dy] + identifier[ymin] , literal[int] )
identifier[ids] = identifier[cloud] . identifier[closestPoint] ( identifier[q] , identifier[radius] = identifier[r] , identifier[returnIds] = keyword[True] )
identifier[ne] = identifier[len] ( identifier[ids] )
keyword[if] identifier[fill] :
identifier[t] . identifier[Translate] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[ne] / literal[int] )
identifier[t] . identifier[Scale] ( literal[int] , literal[int] , identifier[ne] * literal[int] )
keyword[else] :
identifier[t] . identifier[Translate] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[ne] )
identifier[t] . identifier[RotateX] ( literal[int] )
identifier[tf] = identifier[vtk] . identifier[vtkTransformPolyDataFilter] ()
identifier[tf] . identifier[SetInputData] ( identifier[cyl] . identifier[GetOutput] ())
identifier[tf] . identifier[SetTransform] ( identifier[t] )
identifier[tf] . identifier[Update] ()
identifier[h] = identifier[Actor] ( identifier[tf] . identifier[GetOutput] (), identifier[c] = identifier[c] , identifier[alpha] = identifier[alpha] )
identifier[h] . identifier[PickableOff] ()
identifier[hexs] . identifier[append] ( identifier[h] )
keyword[if] identifier[ne] > identifier[binmax] :
identifier[binmax] = identifier[ne]
identifier[asse] = identifier[Assembly] ( identifier[hexs] )
identifier[asse] . identifier[SetScale] ( literal[int] / identifier[n] * literal[int] * identifier[dx] , literal[int] / identifier[m] * identifier[dy] , identifier[norm] / identifier[binmax] *( identifier[dx] + identifier[dy] )/ literal[int] )
identifier[asse] . identifier[SetPosition] ( identifier[xmin] , identifier[ymin] , literal[int] )
keyword[return] identifier[asse] | def histogram2D(xvalues, yvalues, bins=12, norm=1, c='g', alpha=1, fill=False):
"""
Build a 2D hexagonal histogram from a list of x and y values.
:param bool bins: nr of bins for the smaller range in x or y.
:param float norm: sets a scaling factor for the z axis.
:param bool fill: draw solid hexagons.
.. hint:: |histo2D| |histo2D.py|_
"""
(xmin, xmax) = (np.min(xvalues), np.max(xvalues))
(ymin, ymax) = (np.min(yvalues), np.max(yvalues))
(dx, dy) = (xmax - xmin, ymax - ymin)
if xmax - xmin < ymax - ymin:
n = bins
m = np.rint(dy / dx * n / 1.2 + 0.5).astype(int) # depends on [control=['if'], data=[]]
else:
m = bins
n = np.rint(dx / dy * m * 1.2 + 0.5).astype(int)
src = vtk.vtkPointSource()
src.SetNumberOfPoints(len(xvalues))
src.Update()
pointsPolydata = src.GetOutput()
values = list(zip(xvalues, yvalues))
zs = [[0.0]] * len(values)
values = np.append(values, zs, axis=1)
pointsPolydata.GetPoints().SetData(numpy_to_vtk(values, deep=True))
cloud = Actor(pointsPolydata)
c1 = vc.getColor(c)
c2 = np.array(c1) * 0.7
r = 0.47 / n * 1.2 * dx
(hexs, binmax) = ([], 0)
for i in range(n + 3):
for j in range(m + 2):
cyl = vtk.vtkCylinderSource()
cyl.SetResolution(6)
cyl.CappingOn()
cyl.SetRadius(0.5)
cyl.SetHeight(0.1)
cyl.Update()
t = vtk.vtkTransform()
if not i % 2:
p = (i / 1.33, j / 1.12, 0)
c = c1 # depends on [control=['if'], data=[]]
else:
p = (i / 1.33, j / 1.12 + 0.443, 0)
c = c2
q = (p[0] / n * 1.2 * dx + xmin, p[1] / m * dy + ymin, 0)
ids = cloud.closestPoint(q, radius=r, returnIds=True)
ne = len(ids)
if fill:
t.Translate(p[0], p[1], ne / 2)
t.Scale(1, 1, ne * 5) # depends on [control=['if'], data=[]]
else:
t.Translate(p[0], p[1], ne)
t.RotateX(90) # put it along Z
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(cyl.GetOutput())
tf.SetTransform(t)
tf.Update()
h = Actor(tf.GetOutput(), c=c, alpha=alpha)
h.PickableOff()
hexs.append(h)
if ne > binmax:
binmax = ne # depends on [control=['if'], data=['ne', 'binmax']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
asse = Assembly(hexs)
asse.SetScale(1 / n * 1.2 * dx, 1 / m * dy, norm / binmax * (dx + dy) / 4)
asse.SetPosition(xmin, ymin, 0)
return asse |
def plexp_inv(P,xm,a):
"""
Inverse CDF for a piecewise PDF as defined in eqn. 3.10
of Clauset et al.
"""
C = 1/(-xm/(1 - a) - xm/a + math.exp(a)*xm/a)
Pxm = 1+C*(xm/(1-a))
pp = P
x = xm*(pp-1)*(1-a)/(C*xm)**(1/(1-a)) if pp >= Pxm else (math.log( ((C*xm/a)*math.exp(a)-pp)/(C*xm/a)) - a) * (-xm/a)
#x[P>=Pxm] = xm*( (P[P>=Pxm]-1) * (1-a)/(C*xm) )**(1/(1-a)) # powerlaw
#x[P<Pxm] = (math.log( (C*xm/a*math.exp(a)-P[P<Pxm])/(C*xm/a) ) - a) * (-xm/a) # exp
return x | def function[plexp_inv, parameter[P, xm, a]]:
constant[
Inverse CDF for a piecewise PDF as defined in eqn. 3.10
of Clauset et al.
]
variable[C] assign[=] binary_operation[constant[1] / binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b26af910> / binary_operation[constant[1] - name[a]]] - binary_operation[name[xm] / name[a]]] + binary_operation[binary_operation[call[name[math].exp, parameter[name[a]]] * name[xm]] / name[a]]]]
variable[Pxm] assign[=] binary_operation[constant[1] + binary_operation[name[C] * binary_operation[name[xm] / binary_operation[constant[1] - name[a]]]]]
variable[pp] assign[=] name[P]
variable[x] assign[=] <ast.IfExp object at 0x7da20e957c70>
return[name[x]] | keyword[def] identifier[plexp_inv] ( identifier[P] , identifier[xm] , identifier[a] ):
literal[string]
identifier[C] = literal[int] /(- identifier[xm] /( literal[int] - identifier[a] )- identifier[xm] / identifier[a] + identifier[math] . identifier[exp] ( identifier[a] )* identifier[xm] / identifier[a] )
identifier[Pxm] = literal[int] + identifier[C] *( identifier[xm] /( literal[int] - identifier[a] ))
identifier[pp] = identifier[P]
identifier[x] = identifier[xm] *( identifier[pp] - literal[int] )*( literal[int] - identifier[a] )/( identifier[C] * identifier[xm] )**( literal[int] /( literal[int] - identifier[a] )) keyword[if] identifier[pp] >= identifier[Pxm] keyword[else] ( identifier[math] . identifier[log] ((( identifier[C] * identifier[xm] / identifier[a] )* identifier[math] . identifier[exp] ( identifier[a] )- identifier[pp] )/( identifier[C] * identifier[xm] / identifier[a] ))- identifier[a] )*(- identifier[xm] / identifier[a] )
keyword[return] identifier[x] | def plexp_inv(P, xm, a):
"""
Inverse CDF for a piecewise PDF as defined in eqn. 3.10
of Clauset et al.
"""
C = 1 / (-xm / (1 - a) - xm / a + math.exp(a) * xm / a)
Pxm = 1 + C * (xm / (1 - a))
pp = P
x = xm * (pp - 1) * (1 - a) / (C * xm) ** (1 / (1 - a)) if pp >= Pxm else (math.log((C * xm / a * math.exp(a) - pp) / (C * xm / a)) - a) * (-xm / a)
#x[P>=Pxm] = xm*( (P[P>=Pxm]-1) * (1-a)/(C*xm) )**(1/(1-a)) # powerlaw
#x[P<Pxm] = (math.log( (C*xm/a*math.exp(a)-P[P<Pxm])/(C*xm/a) ) - a) * (-xm/a) # exp
return x |
def join(args):
"""
%prog join fastafile [phasefile]
Make AGP file for a bunch of sequences, and add gaps between, and then build
the joined fastafile. This is useful by itself, but with --oo option this
can convert the .oo (BAMBUS output) into AGP and a joined fasta.
Phasefile is optional, but must contain two columns - BAC and phase (0, 1, 2, 3).
"""
from jcvi.formats.agp import OO, Phases, build
from jcvi.formats.sizes import Sizes
p = OptionParser(join.__doc__)
p.add_option("--newid", default=None,
help="New sequence ID [default: `%default`]")
p.add_option("--gapsize", default=100, type="int",
help="Number of N's in between the sequences [default: %default]")
p.add_option("--gaptype", default="contig",
help="Gap type to use in the AGP file [default: %default]")
p.add_option("--evidence", default="",
help="Linkage evidence to report in the AGP file [default: %default]")
p.add_option("--oo", help="Use .oo file generated by bambus [default: %default]")
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
if nargs == 2:
fastafile, phasefile = args
phases = DictFile(phasefile)
phases = dict((a, Phases[int(b)]) for a, b in phases.items())
else:
fastafile, = args
phases = {}
sizes = Sizes(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
agpfile = prefix + ".agp"
newid = opts.newid
oo = opts.oo
o = OO(oo, sizes.mapping)
if oo:
seen = o.contigs
# The leftover contigs not in the oo file
logging.debug("A total of {0} contigs ({1} in `{2}`)".\
format(len(sizes), len(seen), oo))
for ctg, size in sizes.iter_sizes():
if ctg in seen:
continue
o.add(ctg, ctg, size)
else:
if newid:
for ctg, size in sizes.iter_sizes():
o.add(newid, ctg, size)
else:
for scaffold_number, (ctg, size) in enumerate(sizes.iter_sizes()):
object_id = "scaffold{0:03d}".format(scaffold_number + 1)
o.add(object_id, ctg, size)
fw = open(agpfile, "w")
o.write_AGP(fw, gapsize=opts.gapsize, gaptype=opts.gaptype,
evidence=opts.evidence, phases=phases)
fw.close()
joinedfastafile = prefix + ".joined.fasta"
build([agpfile, fastafile, joinedfastafile])
return joinedfastafile | def function[join, parameter[args]]:
constant[
%prog join fastafile [phasefile]
Make AGP file for a bunch of sequences, and add gaps between, and then build
the joined fastafile. This is useful by itself, but with --oo option this
can convert the .oo (BAMBUS output) into AGP and a joined fasta.
Phasefile is optional, but must contain two columns - BAC and phase (0, 1, 2, 3).
]
from relative_module[jcvi.formats.agp] import module[OO], module[Phases], module[build]
from relative_module[jcvi.formats.sizes] import module[Sizes]
variable[p] assign[=] call[name[OptionParser], parameter[name[join].__doc__]]
call[name[p].add_option, parameter[constant[--newid]]]
call[name[p].add_option, parameter[constant[--gapsize]]]
call[name[p].add_option, parameter[constant[--gaptype]]]
call[name[p].add_option, parameter[constant[--evidence]]]
call[name[p].add_option, parameter[constant[--oo]]]
<ast.Tuple object at 0x7da1b0962aa0> assign[=] call[name[p].parse_args, parameter[name[args]]]
variable[nargs] assign[=] call[name[len], parameter[name[args]]]
if compare[name[nargs] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0961810>, <ast.Constant object at 0x7da1b0962410>]]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b0963070>]]
if compare[name[nargs] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da1b0963610> assign[=] name[args]
variable[phases] assign[=] call[name[DictFile], parameter[name[phasefile]]]
variable[phases] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b0961bd0>]]
variable[sizes] assign[=] call[name[Sizes], parameter[name[fastafile]]]
variable[prefix] assign[=] call[call[name[fastafile].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[agpfile] assign[=] binary_operation[name[prefix] + constant[.agp]]
variable[newid] assign[=] name[opts].newid
variable[oo] assign[=] name[opts].oo
variable[o] assign[=] call[name[OO], parameter[name[oo], name[sizes].mapping]]
if name[oo] begin[:]
variable[seen] assign[=] name[o].contigs
call[name[logging].debug, parameter[call[constant[A total of {0} contigs ({1} in `{2}`)].format, parameter[call[name[len], parameter[name[sizes]]], call[name[len], parameter[name[seen]]], name[oo]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0961ed0>, <ast.Name object at 0x7da1b0961d50>]]] in starred[call[name[sizes].iter_sizes, parameter[]]] begin[:]
if compare[name[ctg] in name[seen]] begin[:]
continue
call[name[o].add, parameter[name[ctg], name[ctg], name[size]]]
variable[fw] assign[=] call[name[open], parameter[name[agpfile], constant[w]]]
call[name[o].write_AGP, parameter[name[fw]]]
call[name[fw].close, parameter[]]
variable[joinedfastafile] assign[=] binary_operation[name[prefix] + constant[.joined.fasta]]
call[name[build], parameter[list[[<ast.Name object at 0x7da18f7211e0>, <ast.Name object at 0x7da18f720e50>, <ast.Name object at 0x7da18f721540>]]]]
return[name[joinedfastafile]] | keyword[def] identifier[join] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[agp] keyword[import] identifier[OO] , identifier[Phases] , identifier[build]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[sizes] keyword[import] identifier[Sizes]
identifier[p] = identifier[OptionParser] ( identifier[join] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
identifier[nargs] = identifier[len] ( identifier[args] )
keyword[if] identifier[nargs] keyword[not] keyword[in] ( literal[int] , literal[int] ):
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
keyword[if] identifier[nargs] == literal[int] :
identifier[fastafile] , identifier[phasefile] = identifier[args]
identifier[phases] = identifier[DictFile] ( identifier[phasefile] )
identifier[phases] = identifier[dict] (( identifier[a] , identifier[Phases] [ identifier[int] ( identifier[b] )]) keyword[for] identifier[a] , identifier[b] keyword[in] identifier[phases] . identifier[items] ())
keyword[else] :
identifier[fastafile] ,= identifier[args]
identifier[phases] ={}
identifier[sizes] = identifier[Sizes] ( identifier[fastafile] )
identifier[prefix] = identifier[fastafile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[agpfile] = identifier[prefix] + literal[string]
identifier[newid] = identifier[opts] . identifier[newid]
identifier[oo] = identifier[opts] . identifier[oo]
identifier[o] = identifier[OO] ( identifier[oo] , identifier[sizes] . identifier[mapping] )
keyword[if] identifier[oo] :
identifier[seen] = identifier[o] . identifier[contigs]
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[sizes] ), identifier[len] ( identifier[seen] ), identifier[oo] ))
keyword[for] identifier[ctg] , identifier[size] keyword[in] identifier[sizes] . identifier[iter_sizes] ():
keyword[if] identifier[ctg] keyword[in] identifier[seen] :
keyword[continue]
identifier[o] . identifier[add] ( identifier[ctg] , identifier[ctg] , identifier[size] )
keyword[else] :
keyword[if] identifier[newid] :
keyword[for] identifier[ctg] , identifier[size] keyword[in] identifier[sizes] . identifier[iter_sizes] ():
identifier[o] . identifier[add] ( identifier[newid] , identifier[ctg] , identifier[size] )
keyword[else] :
keyword[for] identifier[scaffold_number] ,( identifier[ctg] , identifier[size] ) keyword[in] identifier[enumerate] ( identifier[sizes] . identifier[iter_sizes] ()):
identifier[object_id] = literal[string] . identifier[format] ( identifier[scaffold_number] + literal[int] )
identifier[o] . identifier[add] ( identifier[object_id] , identifier[ctg] , identifier[size] )
identifier[fw] = identifier[open] ( identifier[agpfile] , literal[string] )
identifier[o] . identifier[write_AGP] ( identifier[fw] , identifier[gapsize] = identifier[opts] . identifier[gapsize] , identifier[gaptype] = identifier[opts] . identifier[gaptype] ,
identifier[evidence] = identifier[opts] . identifier[evidence] , identifier[phases] = identifier[phases] )
identifier[fw] . identifier[close] ()
identifier[joinedfastafile] = identifier[prefix] + literal[string]
identifier[build] ([ identifier[agpfile] , identifier[fastafile] , identifier[joinedfastafile] ])
keyword[return] identifier[joinedfastafile] | def join(args):
"""
%prog join fastafile [phasefile]
Make AGP file for a bunch of sequences, and add gaps between, and then build
the joined fastafile. This is useful by itself, but with --oo option this
can convert the .oo (BAMBUS output) into AGP and a joined fasta.
Phasefile is optional, but must contain two columns - BAC and phase (0, 1, 2, 3).
"""
from jcvi.formats.agp import OO, Phases, build
from jcvi.formats.sizes import Sizes
p = OptionParser(join.__doc__)
p.add_option('--newid', default=None, help='New sequence ID [default: `%default`]')
p.add_option('--gapsize', default=100, type='int', help="Number of N's in between the sequences [default: %default]")
p.add_option('--gaptype', default='contig', help='Gap type to use in the AGP file [default: %default]')
p.add_option('--evidence', default='', help='Linkage evidence to report in the AGP file [default: %default]')
p.add_option('--oo', help='Use .oo file generated by bambus [default: %default]')
(opts, args) = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
if nargs == 2:
(fastafile, phasefile) = args
phases = DictFile(phasefile)
phases = dict(((a, Phases[int(b)]) for (a, b) in phases.items())) # depends on [control=['if'], data=[]]
else:
(fastafile,) = args
phases = {}
sizes = Sizes(fastafile)
prefix = fastafile.rsplit('.', 1)[0]
agpfile = prefix + '.agp'
newid = opts.newid
oo = opts.oo
o = OO(oo, sizes.mapping)
if oo:
seen = o.contigs
# The leftover contigs not in the oo file
logging.debug('A total of {0} contigs ({1} in `{2}`)'.format(len(sizes), len(seen), oo))
for (ctg, size) in sizes.iter_sizes():
if ctg in seen:
continue # depends on [control=['if'], data=[]]
o.add(ctg, ctg, size) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif newid:
for (ctg, size) in sizes.iter_sizes():
o.add(newid, ctg, size) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
for (scaffold_number, (ctg, size)) in enumerate(sizes.iter_sizes()):
object_id = 'scaffold{0:03d}'.format(scaffold_number + 1)
o.add(object_id, ctg, size) # depends on [control=['for'], data=[]]
fw = open(agpfile, 'w')
o.write_AGP(fw, gapsize=opts.gapsize, gaptype=opts.gaptype, evidence=opts.evidence, phases=phases)
fw.close()
joinedfastafile = prefix + '.joined.fasta'
build([agpfile, fastafile, joinedfastafile])
return joinedfastafile |
def _find_frame_imports(name, frame):
"""
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((astroid.Import, astroid.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None | def function[_find_frame_imports, parameter[name, frame]]:
constant[
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
]
variable[imports] assign[=] call[name[frame].nodes_of_class, parameter[tuple[[<ast.Attribute object at 0x7da1b03a48e0>, <ast.Attribute object at 0x7da1b03a5f30>]]]]
for taget[name[import_node]] in starred[name[imports]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b03a48b0>, <ast.Name object at 0x7da1b03a47c0>]]] in starred[name[import_node].names] begin[:]
if name[import_alias] begin[:]
if compare[name[import_alias] equal[==] name[name]] begin[:]
return[constant[True]]
return[constant[None]] | keyword[def] identifier[_find_frame_imports] ( identifier[name] , identifier[frame] ):
literal[string]
identifier[imports] = identifier[frame] . identifier[nodes_of_class] (( identifier[astroid] . identifier[Import] , identifier[astroid] . identifier[ImportFrom] ))
keyword[for] identifier[import_node] keyword[in] identifier[imports] :
keyword[for] identifier[import_name] , identifier[import_alias] keyword[in] identifier[import_node] . identifier[names] :
keyword[if] identifier[import_alias] :
keyword[if] identifier[import_alias] == identifier[name] :
keyword[return] keyword[True]
keyword[elif] identifier[import_name] keyword[and] identifier[import_name] == identifier[name] :
keyword[return] keyword[True]
keyword[return] keyword[None] | def _find_frame_imports(name, frame):
"""
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((astroid.Import, astroid.ImportFrom))
for import_node in imports:
for (import_name, import_alias) in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif import_name and import_name == name:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['import_node']]
return None |
def img2img_transformer_base_tpu():
"""Hparams for training img2img_transformer on tpu."""
hparams = img2img_transformer_base()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 8
hparams.num_encoder_layers = 4
hparams.shared_embedding_and_softmax_weights = False
return hparams | def function[img2img_transformer_base_tpu, parameter[]]:
constant[Hparams for training img2img_transformer on tpu.]
variable[hparams] assign[=] call[name[img2img_transformer_base], parameter[]]
call[name[update_hparams_for_tpu], parameter[name[hparams]]]
name[hparams].batch_size assign[=] constant[2]
name[hparams].num_heads assign[=] constant[4]
name[hparams].num_decoder_layers assign[=] constant[8]
name[hparams].num_encoder_layers assign[=] constant[4]
name[hparams].shared_embedding_and_softmax_weights assign[=] constant[False]
return[name[hparams]] | keyword[def] identifier[img2img_transformer_base_tpu] ():
literal[string]
identifier[hparams] = identifier[img2img_transformer_base] ()
identifier[update_hparams_for_tpu] ( identifier[hparams] )
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[num_heads] = literal[int]
identifier[hparams] . identifier[num_decoder_layers] = literal[int]
identifier[hparams] . identifier[num_encoder_layers] = literal[int]
identifier[hparams] . identifier[shared_embedding_and_softmax_weights] = keyword[False]
keyword[return] identifier[hparams] | def img2img_transformer_base_tpu():
"""Hparams for training img2img_transformer on tpu."""
hparams = img2img_transformer_base()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 8
hparams.num_encoder_layers = 4
hparams.shared_embedding_and_softmax_weights = False
return hparams |
def from_molecule(cls, mol, theory, charge=None, spin_multiplicity=None,
basis_set="6-31g", basis_set_option="cartesian",
title=None, operation="optimize", theory_directives=None,
alternate_directives=None):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
mol: Input molecule
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set to be used as string or a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
same basis set is used for all elements.
basis_set_option: cartesian (default) | spherical,
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations with DFT, you'd supply
{'cosmo': "cosmo"}.
"""
title = title if title is not None else "{} {} {}".format(
re.sub(r"\s", "", mol.formula), theory, operation)
charge = charge if charge is not None else mol.charge
nelectrons = - charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
charge, spin_multiplicity))
elif charge == mol.charge:
spin_multiplicity = mol.spin_multiplicity
else:
spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
elements = set(mol.composition.get_el_amt_dict().keys())
if isinstance(basis_set, str):
basis_set = {el: basis_set for el in elements}
basis_set_option = basis_set_option
return NwTask(charge, spin_multiplicity, basis_set,
basis_set_option=basis_set_option,
title=title, theory=theory, operation=operation,
theory_directives=theory_directives,
alternate_directives=alternate_directives) | def function[from_molecule, parameter[cls, mol, theory, charge, spin_multiplicity, basis_set, basis_set_option, title, operation, theory_directives, alternate_directives]]:
constant[
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
mol: Input molecule
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set to be used as string or a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
same basis set is used for all elements.
basis_set_option: cartesian (default) | spherical,
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations with DFT, you'd supply
{'cosmo': "cosmo"}.
]
variable[title] assign[=] <ast.IfExp object at 0x7da20c6c62c0>
variable[charge] assign[=] <ast.IfExp object at 0x7da20c6c7c10>
variable[nelectrons] assign[=] binary_operation[binary_operation[<ast.UnaryOp object at 0x7da20c6c44f0> + name[mol].charge] + name[mol].nelectrons]
if compare[name[spin_multiplicity] is_not constant[None]] begin[:]
variable[spin_multiplicity] assign[=] name[spin_multiplicity]
if compare[binary_operation[binary_operation[name[nelectrons] + name[spin_multiplicity]] <ast.Mod object at 0x7da2590d6920> constant[2]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da20c6c7ee0>
variable[elements] assign[=] call[name[set], parameter[call[call[name[mol].composition.get_el_amt_dict, parameter[]].keys, parameter[]]]]
if call[name[isinstance], parameter[name[basis_set], name[str]]] begin[:]
variable[basis_set] assign[=] <ast.DictComp object at 0x7da20c6c7fd0>
variable[basis_set_option] assign[=] name[basis_set_option]
return[call[name[NwTask], parameter[name[charge], name[spin_multiplicity], name[basis_set]]]] | keyword[def] identifier[from_molecule] ( identifier[cls] , identifier[mol] , identifier[theory] , identifier[charge] = keyword[None] , identifier[spin_multiplicity] = keyword[None] ,
identifier[basis_set] = literal[string] , identifier[basis_set_option] = literal[string] ,
identifier[title] = keyword[None] , identifier[operation] = literal[string] , identifier[theory_directives] = keyword[None] ,
identifier[alternate_directives] = keyword[None] ):
literal[string]
identifier[title] = identifier[title] keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] . identifier[format] (
identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[mol] . identifier[formula] ), identifier[theory] , identifier[operation] )
identifier[charge] = identifier[charge] keyword[if] identifier[charge] keyword[is] keyword[not] keyword[None] keyword[else] identifier[mol] . identifier[charge]
identifier[nelectrons] =- identifier[charge] + identifier[mol] . identifier[charge] + identifier[mol] . identifier[nelectrons]
keyword[if] identifier[spin_multiplicity] keyword[is] keyword[not] keyword[None] :
identifier[spin_multiplicity] = identifier[spin_multiplicity]
keyword[if] ( identifier[nelectrons] + identifier[spin_multiplicity] )% literal[int] != literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] (
identifier[charge] , identifier[spin_multiplicity] ))
keyword[elif] identifier[charge] == identifier[mol] . identifier[charge] :
identifier[spin_multiplicity] = identifier[mol] . identifier[spin_multiplicity]
keyword[else] :
identifier[spin_multiplicity] = literal[int] keyword[if] identifier[nelectrons] % literal[int] == literal[int] keyword[else] literal[int]
identifier[elements] = identifier[set] ( identifier[mol] . identifier[composition] . identifier[get_el_amt_dict] (). identifier[keys] ())
keyword[if] identifier[isinstance] ( identifier[basis_set] , identifier[str] ):
identifier[basis_set] ={ identifier[el] : identifier[basis_set] keyword[for] identifier[el] keyword[in] identifier[elements] }
identifier[basis_set_option] = identifier[basis_set_option]
keyword[return] identifier[NwTask] ( identifier[charge] , identifier[spin_multiplicity] , identifier[basis_set] ,
identifier[basis_set_option] = identifier[basis_set_option] ,
identifier[title] = identifier[title] , identifier[theory] = identifier[theory] , identifier[operation] = identifier[operation] ,
identifier[theory_directives] = identifier[theory_directives] ,
identifier[alternate_directives] = identifier[alternate_directives] ) | def from_molecule(cls, mol, theory, charge=None, spin_multiplicity=None, basis_set='6-31g', basis_set_option='cartesian', title=None, operation='optimize', theory_directives=None, alternate_directives=None):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
mol: Input molecule
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set to be used as string or a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
same basis set is used for all elements.
basis_set_option: cartesian (default) | spherical,
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations with DFT, you'd supply
{'cosmo': "cosmo"}.
"""
title = title if title is not None else '{} {} {}'.format(re.sub('\\s', '', mol.formula), theory, operation)
charge = charge if charge is not None else mol.charge
nelectrons = -charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError('Charge of {} and spin multiplicity of {} is not possible for this molecule'.format(charge, spin_multiplicity)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['spin_multiplicity']]
elif charge == mol.charge:
spin_multiplicity = mol.spin_multiplicity # depends on [control=['if'], data=[]]
else:
spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
elements = set(mol.composition.get_el_amt_dict().keys())
if isinstance(basis_set, str):
basis_set = {el: basis_set for el in elements} # depends on [control=['if'], data=[]]
basis_set_option = basis_set_option
return NwTask(charge, spin_multiplicity, basis_set, basis_set_option=basis_set_option, title=title, theory=theory, operation=operation, theory_directives=theory_directives, alternate_directives=alternate_directives) |
def parse(self, xmltext):
"""
Parse a string containing LEMS XML text.
@param xmltext: String containing LEMS XML formatted text.
@type xmltext: str
"""
xml = LEMSXMLNode(xe.XML(xmltext))
if xml.ltag != 'lems' and xml.ltag != 'neuroml':
raise ParseError('<Lems> expected as root element (or even <neuroml>), found: {0}'.format(xml.ltag))
'''
if xml.ltag == 'lems':
if 'description' in xml.lattrib:
self.description = xml.lattrib['description']
'''
self.process_nested_tags(xml) | def function[parse, parameter[self, xmltext]]:
constant[
Parse a string containing LEMS XML text.
@param xmltext: String containing LEMS XML formatted text.
@type xmltext: str
]
variable[xml] assign[=] call[name[LEMSXMLNode], parameter[call[name[xe].XML, parameter[name[xmltext]]]]]
if <ast.BoolOp object at 0x7da1b245db10> begin[:]
<ast.Raise object at 0x7da1b245c9d0>
constant[
if xml.ltag == 'lems':
if 'description' in xml.lattrib:
self.description = xml.lattrib['description']
]
call[name[self].process_nested_tags, parameter[name[xml]]] | keyword[def] identifier[parse] ( identifier[self] , identifier[xmltext] ):
literal[string]
identifier[xml] = identifier[LEMSXMLNode] ( identifier[xe] . identifier[XML] ( identifier[xmltext] ))
keyword[if] identifier[xml] . identifier[ltag] != literal[string] keyword[and] identifier[xml] . identifier[ltag] != literal[string] :
keyword[raise] identifier[ParseError] ( literal[string] . identifier[format] ( identifier[xml] . identifier[ltag] ))
literal[string]
identifier[self] . identifier[process_nested_tags] ( identifier[xml] ) | def parse(self, xmltext):
"""
Parse a string containing LEMS XML text.
@param xmltext: String containing LEMS XML formatted text.
@type xmltext: str
"""
xml = LEMSXMLNode(xe.XML(xmltext))
if xml.ltag != 'lems' and xml.ltag != 'neuroml':
raise ParseError('<Lems> expected as root element (or even <neuroml>), found: {0}'.format(xml.ltag)) # depends on [control=['if'], data=[]]
"\n if xml.ltag == 'lems':\n if 'description' in xml.lattrib:\n self.description = xml.lattrib['description']\n "
self.process_nested_tags(xml) |
def to_bytes(self, transparent=True, thumbnail_size=None, resampling=None, in_range='dtype', out_range='dtype',
format="png"):
"""
Convert to selected format (discarding geo).
Optionally also resizes.
Note: for color images returns interlaced.
:param transparent: if True - sets alpha channel for nodata pixels
:param thumbnail_size: if not None - resize to thumbnail size, e.g. 512
:param in_range: input intensity range
:param out_range: output intensity range
:param format : str, image format, default "png"
:param resampling: one of Resampling enums
:return bytes
"""
resampling = resampling if resampling is not None else Resampling.cubic
if self.num_bands < 3:
warnings.warn("Deprecation: to_png of less then three bands raster will be not be supported in next \
release, please use: .colorize('gray').to_png()", GeoRaster2Warning)
if self.num_bands > 3:
warnings.warn("Limiting %d bands raster to first three bands to generate png" % self.num_bands,
GeoRaster2Warning)
three_first_bands = self.band_names[:3]
raster = self.limit_to_bands(three_first_bands)
elif self.num_bands == 2:
warnings.warn("Limiting two bands raster to use the first band to generate png",
GeoRaster2Warning)
first_band = self.band_names[:1]
raster = self.limit_to_bands(first_band)
else:
raster = self
if raster.image.dtype != np.uint8:
warnings.warn("downscaling dtype to 'uint8' to convert to png",
GeoRaster2Warning)
thumbnail = raster.astype(np.uint8, in_range=in_range, out_range=out_range)
else:
thumbnail = raster.copy_with()
if thumbnail_size:
if thumbnail.width > thumbnail.height:
thumbnail = thumbnail.resize(dest_width=thumbnail_size, resampling=resampling)
else:
thumbnail = thumbnail.resize(dest_height=thumbnail_size, resampling=resampling)
img, mask = thumbnail.to_pillow_image(return_mask=True)
if transparent:
mask = np.array(mask)[:, :, np.newaxis]
mask = 255 - 255 * mask # inverse
if thumbnail.num_bands == 1:
img = np.stack([img, img, img], axis=2) # make grayscale into rgb. bypass, as mode=LA isn't supported
img = np.stack(tuple(np.split(np.asarray(img), 3, axis=2) + [mask]), axis=2) # re-arrange into RGBA
img = img[:, :, :, 0]
f = io.BytesIO()
imageio.imwrite(f, img, format)
image_data = f.getvalue()
return image_data | def function[to_bytes, parameter[self, transparent, thumbnail_size, resampling, in_range, out_range, format]]:
constant[
Convert to selected format (discarding geo).
Optionally also resizes.
Note: for color images returns interlaced.
:param transparent: if True - sets alpha channel for nodata pixels
:param thumbnail_size: if not None - resize to thumbnail size, e.g. 512
:param in_range: input intensity range
:param out_range: output intensity range
:param format : str, image format, default "png"
:param resampling: one of Resampling enums
:return bytes
]
variable[resampling] assign[=] <ast.IfExp object at 0x7da18bc73820>
if compare[name[self].num_bands less[<] constant[3]] begin[:]
call[name[warnings].warn, parameter[constant[Deprecation: to_png of less then three bands raster will be not be supported in next release, please use: .colorize('gray').to_png()], name[GeoRaster2Warning]]]
if compare[name[self].num_bands greater[>] constant[3]] begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Limiting %d bands raster to first three bands to generate png] <ast.Mod object at 0x7da2590d6920> name[self].num_bands], name[GeoRaster2Warning]]]
variable[three_first_bands] assign[=] call[name[self].band_names][<ast.Slice object at 0x7da18bc708e0>]
variable[raster] assign[=] call[name[self].limit_to_bands, parameter[name[three_first_bands]]]
if compare[name[raster].image.dtype not_equal[!=] name[np].uint8] begin[:]
call[name[warnings].warn, parameter[constant[downscaling dtype to 'uint8' to convert to png], name[GeoRaster2Warning]]]
variable[thumbnail] assign[=] call[name[raster].astype, parameter[name[np].uint8]]
if name[thumbnail_size] begin[:]
if compare[name[thumbnail].width greater[>] name[thumbnail].height] begin[:]
variable[thumbnail] assign[=] call[name[thumbnail].resize, parameter[]]
<ast.Tuple object at 0x7da18bc72a70> assign[=] call[name[thumbnail].to_pillow_image, parameter[]]
if name[transparent] begin[:]
variable[mask] assign[=] call[call[name[np].array, parameter[name[mask]]]][tuple[[<ast.Slice object at 0x7da18bc736d0>, <ast.Slice object at 0x7da18bc70550>, <ast.Attribute object at 0x7da18bc72a40>]]]
variable[mask] assign[=] binary_operation[constant[255] - binary_operation[constant[255] * name[mask]]]
if compare[name[thumbnail].num_bands equal[==] constant[1]] begin[:]
variable[img] assign[=] call[name[np].stack, parameter[list[[<ast.Name object at 0x7da18bc70e80>, <ast.Name object at 0x7da18bc710f0>, <ast.Name object at 0x7da18bc72020>]]]]
variable[img] assign[=] call[name[np].stack, parameter[call[name[tuple], parameter[binary_operation[call[name[np].split, parameter[call[name[np].asarray, parameter[name[img]]], constant[3]]] + list[[<ast.Name object at 0x7da2046228c0>]]]]]]]
variable[img] assign[=] call[name[img]][tuple[[<ast.Slice object at 0x7da2046200d0>, <ast.Slice object at 0x7da2046215d0>, <ast.Slice object at 0x7da204622740>, <ast.Constant object at 0x7da204623490>]]]
variable[f] assign[=] call[name[io].BytesIO, parameter[]]
call[name[imageio].imwrite, parameter[name[f], name[img], name[format]]]
variable[image_data] assign[=] call[name[f].getvalue, parameter[]]
return[name[image_data]] | keyword[def] identifier[to_bytes] ( identifier[self] , identifier[transparent] = keyword[True] , identifier[thumbnail_size] = keyword[None] , identifier[resampling] = keyword[None] , identifier[in_range] = literal[string] , identifier[out_range] = literal[string] ,
identifier[format] = literal[string] ):
literal[string]
identifier[resampling] = identifier[resampling] keyword[if] identifier[resampling] keyword[is] keyword[not] keyword[None] keyword[else] identifier[Resampling] . identifier[cubic]
keyword[if] identifier[self] . identifier[num_bands] < literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] , identifier[GeoRaster2Warning] )
keyword[if] identifier[self] . identifier[num_bands] > literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] % identifier[self] . identifier[num_bands] ,
identifier[GeoRaster2Warning] )
identifier[three_first_bands] = identifier[self] . identifier[band_names] [: literal[int] ]
identifier[raster] = identifier[self] . identifier[limit_to_bands] ( identifier[three_first_bands] )
keyword[elif] identifier[self] . identifier[num_bands] == literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] ,
identifier[GeoRaster2Warning] )
identifier[first_band] = identifier[self] . identifier[band_names] [: literal[int] ]
identifier[raster] = identifier[self] . identifier[limit_to_bands] ( identifier[first_band] )
keyword[else] :
identifier[raster] = identifier[self]
keyword[if] identifier[raster] . identifier[image] . identifier[dtype] != identifier[np] . identifier[uint8] :
identifier[warnings] . identifier[warn] ( literal[string] ,
identifier[GeoRaster2Warning] )
identifier[thumbnail] = identifier[raster] . identifier[astype] ( identifier[np] . identifier[uint8] , identifier[in_range] = identifier[in_range] , identifier[out_range] = identifier[out_range] )
keyword[else] :
identifier[thumbnail] = identifier[raster] . identifier[copy_with] ()
keyword[if] identifier[thumbnail_size] :
keyword[if] identifier[thumbnail] . identifier[width] > identifier[thumbnail] . identifier[height] :
identifier[thumbnail] = identifier[thumbnail] . identifier[resize] ( identifier[dest_width] = identifier[thumbnail_size] , identifier[resampling] = identifier[resampling] )
keyword[else] :
identifier[thumbnail] = identifier[thumbnail] . identifier[resize] ( identifier[dest_height] = identifier[thumbnail_size] , identifier[resampling] = identifier[resampling] )
identifier[img] , identifier[mask] = identifier[thumbnail] . identifier[to_pillow_image] ( identifier[return_mask] = keyword[True] )
keyword[if] identifier[transparent] :
identifier[mask] = identifier[np] . identifier[array] ( identifier[mask] )[:,:, identifier[np] . identifier[newaxis] ]
identifier[mask] = literal[int] - literal[int] * identifier[mask]
keyword[if] identifier[thumbnail] . identifier[num_bands] == literal[int] :
identifier[img] = identifier[np] . identifier[stack] ([ identifier[img] , identifier[img] , identifier[img] ], identifier[axis] = literal[int] )
identifier[img] = identifier[np] . identifier[stack] ( identifier[tuple] ( identifier[np] . identifier[split] ( identifier[np] . identifier[asarray] ( identifier[img] ), literal[int] , identifier[axis] = literal[int] )+[ identifier[mask] ]), identifier[axis] = literal[int] )
identifier[img] = identifier[img] [:,:,:, literal[int] ]
identifier[f] = identifier[io] . identifier[BytesIO] ()
identifier[imageio] . identifier[imwrite] ( identifier[f] , identifier[img] , identifier[format] )
identifier[image_data] = identifier[f] . identifier[getvalue] ()
keyword[return] identifier[image_data] | def to_bytes(self, transparent=True, thumbnail_size=None, resampling=None, in_range='dtype', out_range='dtype', format='png'):
"""
Convert to selected format (discarding geo).
Optionally also resizes.
Note: for color images returns interlaced.
:param transparent: if True - sets alpha channel for nodata pixels
:param thumbnail_size: if not None - resize to thumbnail size, e.g. 512
:param in_range: input intensity range
:param out_range: output intensity range
:param format : str, image format, default "png"
:param resampling: one of Resampling enums
:return bytes
"""
resampling = resampling if resampling is not None else Resampling.cubic
if self.num_bands < 3:
warnings.warn("Deprecation: to_png of less then three bands raster will be not be supported in next release, please use: .colorize('gray').to_png()", GeoRaster2Warning) # depends on [control=['if'], data=[]]
if self.num_bands > 3:
warnings.warn('Limiting %d bands raster to first three bands to generate png' % self.num_bands, GeoRaster2Warning)
three_first_bands = self.band_names[:3]
raster = self.limit_to_bands(three_first_bands) # depends on [control=['if'], data=[]]
elif self.num_bands == 2:
warnings.warn('Limiting two bands raster to use the first band to generate png', GeoRaster2Warning)
first_band = self.band_names[:1]
raster = self.limit_to_bands(first_band) # depends on [control=['if'], data=[]]
else:
raster = self
if raster.image.dtype != np.uint8:
warnings.warn("downscaling dtype to 'uint8' to convert to png", GeoRaster2Warning)
thumbnail = raster.astype(np.uint8, in_range=in_range, out_range=out_range) # depends on [control=['if'], data=[]]
else:
thumbnail = raster.copy_with()
if thumbnail_size:
if thumbnail.width > thumbnail.height:
thumbnail = thumbnail.resize(dest_width=thumbnail_size, resampling=resampling) # depends on [control=['if'], data=[]]
else:
thumbnail = thumbnail.resize(dest_height=thumbnail_size, resampling=resampling) # depends on [control=['if'], data=[]]
(img, mask) = thumbnail.to_pillow_image(return_mask=True)
if transparent:
mask = np.array(mask)[:, :, np.newaxis]
mask = 255 - 255 * mask # inverse
if thumbnail.num_bands == 1:
img = np.stack([img, img, img], axis=2) # make grayscale into rgb. bypass, as mode=LA isn't supported # depends on [control=['if'], data=[]]
img = np.stack(tuple(np.split(np.asarray(img), 3, axis=2) + [mask]), axis=2) # re-arrange into RGBA
img = img[:, :, :, 0] # depends on [control=['if'], data=[]]
f = io.BytesIO()
imageio.imwrite(f, img, format)
image_data = f.getvalue()
return image_data |
def _check_cache_minions(self,
expr,
delimiter,
greedy,
search_type,
regex_match=False,
exact_match=False):
'''
Helper function to search for minions in master caches If 'greedy',
then return accepted minions matched by the condition or those absent
from the cache. If not 'greedy' return the only minions have cache
data and matched by the condition.
'''
cache_enabled = self.opts.get('minion_data_cache', False)
def list_cached_minions():
return self.cache.list('minions')
if greedy:
minions = []
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)):
minions.append(fn_)
elif cache_enabled:
minions = list_cached_minions()
else:
return {'minions': [],
'missing': []}
if cache_enabled:
if greedy:
cminions = list_cached_minions()
else:
cminions = minions
if not cminions:
return {'minions': minions,
'missing': []}
minions = set(minions)
for id_ in cminions:
if greedy and id_ not in minions:
continue
mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')
if mdata is None:
if not greedy:
minions.remove(id_)
continue
search_results = mdata.get(search_type)
if not salt.utils.data.subdict_match(search_results,
expr,
delimiter=delimiter,
regex_match=regex_match,
exact_match=exact_match):
minions.remove(id_)
minions = list(minions)
return {'minions': minions,
'missing': []} | def function[_check_cache_minions, parameter[self, expr, delimiter, greedy, search_type, regex_match, exact_match]]:
constant[
Helper function to search for minions in master caches If 'greedy',
then return accepted minions matched by the condition or those absent
from the cache. If not 'greedy' return the only minions have cache
data and matched by the condition.
]
variable[cache_enabled] assign[=] call[name[self].opts.get, parameter[constant[minion_data_cache], constant[False]]]
def function[list_cached_minions, parameter[]]:
return[call[name[self].cache.list, parameter[constant[minions]]]]
if name[greedy] begin[:]
variable[minions] assign[=] list[[]]
for taget[name[fn_]] in starred[call[name[salt].utils.data.sorted_ignorecase, parameter[call[name[os].listdir, parameter[call[name[os].path.join, parameter[call[name[self].opts][constant[pki_dir]], name[self].acc]]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b21952d0> begin[:]
call[name[minions].append, parameter[name[fn_]]]
if name[cache_enabled] begin[:]
if name[greedy] begin[:]
variable[cminions] assign[=] call[name[list_cached_minions], parameter[]]
if <ast.UnaryOp object at 0x7da1b2196110> begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b21979d0>, <ast.Constant object at 0x7da1b2197970>], [<ast.Name object at 0x7da1b2197730>, <ast.List object at 0x7da1b21972b0>]]]
variable[minions] assign[=] call[name[set], parameter[name[minions]]]
for taget[name[id_]] in starred[name[cminions]] begin[:]
if <ast.BoolOp object at 0x7da1b2197940> begin[:]
continue
variable[mdata] assign[=] call[name[self].cache.fetch, parameter[call[constant[minions/{0}].format, parameter[name[id_]]], constant[data]]]
if compare[name[mdata] is constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b2195030> begin[:]
call[name[minions].remove, parameter[name[id_]]]
continue
variable[search_results] assign[=] call[name[mdata].get, parameter[name[search_type]]]
if <ast.UnaryOp object at 0x7da1b2035690> begin[:]
call[name[minions].remove, parameter[name[id_]]]
variable[minions] assign[=] call[name[list], parameter[name[minions]]]
return[dictionary[[<ast.Constant object at 0x7da1b20b88b0>, <ast.Constant object at 0x7da1b20bb280>], [<ast.Name object at 0x7da1b20b8ee0>, <ast.List object at 0x7da1b20b8520>]]] | keyword[def] identifier[_check_cache_minions] ( identifier[self] ,
identifier[expr] ,
identifier[delimiter] ,
identifier[greedy] ,
identifier[search_type] ,
identifier[regex_match] = keyword[False] ,
identifier[exact_match] = keyword[False] ):
literal[string]
identifier[cache_enabled] = identifier[self] . identifier[opts] . identifier[get] ( literal[string] , keyword[False] )
keyword[def] identifier[list_cached_minions] ():
keyword[return] identifier[self] . identifier[cache] . identifier[list] ( literal[string] )
keyword[if] identifier[greedy] :
identifier[minions] =[]
keyword[for] identifier[fn_] keyword[in] identifier[salt] . identifier[utils] . identifier[data] . identifier[sorted_ignorecase] ( identifier[os] . identifier[listdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[opts] [ literal[string] ], identifier[self] . identifier[acc] ))):
keyword[if] keyword[not] identifier[fn_] . identifier[startswith] ( literal[string] ) keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[opts] [ literal[string] ], identifier[self] . identifier[acc] , identifier[fn_] )):
identifier[minions] . identifier[append] ( identifier[fn_] )
keyword[elif] identifier[cache_enabled] :
identifier[minions] = identifier[list_cached_minions] ()
keyword[else] :
keyword[return] { literal[string] :[],
literal[string] :[]}
keyword[if] identifier[cache_enabled] :
keyword[if] identifier[greedy] :
identifier[cminions] = identifier[list_cached_minions] ()
keyword[else] :
identifier[cminions] = identifier[minions]
keyword[if] keyword[not] identifier[cminions] :
keyword[return] { literal[string] : identifier[minions] ,
literal[string] :[]}
identifier[minions] = identifier[set] ( identifier[minions] )
keyword[for] identifier[id_] keyword[in] identifier[cminions] :
keyword[if] identifier[greedy] keyword[and] identifier[id_] keyword[not] keyword[in] identifier[minions] :
keyword[continue]
identifier[mdata] = identifier[self] . identifier[cache] . identifier[fetch] ( literal[string] . identifier[format] ( identifier[id_] ), literal[string] )
keyword[if] identifier[mdata] keyword[is] keyword[None] :
keyword[if] keyword[not] identifier[greedy] :
identifier[minions] . identifier[remove] ( identifier[id_] )
keyword[continue]
identifier[search_results] = identifier[mdata] . identifier[get] ( identifier[search_type] )
keyword[if] keyword[not] identifier[salt] . identifier[utils] . identifier[data] . identifier[subdict_match] ( identifier[search_results] ,
identifier[expr] ,
identifier[delimiter] = identifier[delimiter] ,
identifier[regex_match] = identifier[regex_match] ,
identifier[exact_match] = identifier[exact_match] ):
identifier[minions] . identifier[remove] ( identifier[id_] )
identifier[minions] = identifier[list] ( identifier[minions] )
keyword[return] { literal[string] : identifier[minions] ,
literal[string] :[]} | def _check_cache_minions(self, expr, delimiter, greedy, search_type, regex_match=False, exact_match=False):
"""
Helper function to search for minions in master caches If 'greedy',
then return accepted minions matched by the condition or those absent
from the cache. If not 'greedy' return the only minions have cache
data and matched by the condition.
"""
cache_enabled = self.opts.get('minion_data_cache', False)
def list_cached_minions():
return self.cache.list('minions')
if greedy:
minions = []
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)):
minions.append(fn_) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fn_']] # depends on [control=['if'], data=[]]
elif cache_enabled:
minions = list_cached_minions() # depends on [control=['if'], data=[]]
else:
return {'minions': [], 'missing': []}
if cache_enabled:
if greedy:
cminions = list_cached_minions() # depends on [control=['if'], data=[]]
else:
cminions = minions
if not cminions:
return {'minions': minions, 'missing': []} # depends on [control=['if'], data=[]]
minions = set(minions)
for id_ in cminions:
if greedy and id_ not in minions:
continue # depends on [control=['if'], data=[]]
mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')
if mdata is None:
if not greedy:
minions.remove(id_) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
search_results = mdata.get(search_type)
if not salt.utils.data.subdict_match(search_results, expr, delimiter=delimiter, regex_match=regex_match, exact_match=exact_match):
minions.remove(id_) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['id_']]
minions = list(minions) # depends on [control=['if'], data=[]]
return {'minions': minions, 'missing': []} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.