code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _get_project_vcf(x, suffix=""):
    """Get our project VCF, either from the population or the variant batch file.

    Sources are tried in priority order: the population VCF first, then the
    batched variant file (ignoring its do-upload flag), and finally the
    per-sample variant file when the caller is the ensemble method.
    """
    out = _get_variant_file(x, ("population", "vcf"), suffix=suffix)
    if out:
        return out
    out = _get_variant_file(x, ("vrn_file_batch",), suffix=suffix, ignore_do_upload=True)
    if out:
        return out
    if x.get("variantcaller") == "ensemble":
        return _get_variant_file(x, ("vrn_file",), suffix=suffix)
    return out
constant[Get our project VCF, either from the population or the variant batch file.
]
variable[vcfs] assign[=] call[name[_get_variant_file], parameter[name[x], tuple[[<ast.Constant object at 0x7da1b26add80>, <ast.Constant object at 0x7da1b26ad750>]]]]
if <ast.UnaryOp object at 0x7da1b26ad660> begin[:]
variable[vcfs] assign[=] call[name[_get_variant_file], parameter[name[x], tuple[[<ast.Constant object at 0x7da1b26ae8f0>]]]]
if <ast.BoolOp object at 0x7da1b26ad0f0> begin[:]
variable[vcfs] assign[=] call[name[_get_variant_file], parameter[name[x], tuple[[<ast.Constant object at 0x7da1b26ac6d0>]]]]
return[name[vcfs]] | keyword[def] identifier[_get_project_vcf] ( identifier[x] , identifier[suffix] = literal[string] ):
literal[string]
identifier[vcfs] = identifier[_get_variant_file] ( identifier[x] ,( literal[string] , literal[string] ), identifier[suffix] = identifier[suffix] )
keyword[if] keyword[not] identifier[vcfs] :
identifier[vcfs] = identifier[_get_variant_file] ( identifier[x] ,( literal[string] ,), identifier[suffix] = identifier[suffix] , identifier[ignore_do_upload] = keyword[True] )
keyword[if] keyword[not] identifier[vcfs] keyword[and] identifier[x] . identifier[get] ( literal[string] )== literal[string] :
identifier[vcfs] = identifier[_get_variant_file] ( identifier[x] ,( literal[string] ,), identifier[suffix] = identifier[suffix] )
keyword[return] identifier[vcfs] | def _get_project_vcf(x, suffix=''):
"""Get our project VCF, either from the population or the variant batch file.
"""
vcfs = _get_variant_file(x, ('population', 'vcf'), suffix=suffix)
if not vcfs:
vcfs = _get_variant_file(x, ('vrn_file_batch',), suffix=suffix, ignore_do_upload=True)
if not vcfs and x.get('variantcaller') == 'ensemble':
vcfs = _get_variant_file(x, ('vrn_file',), suffix=suffix) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return vcfs |
def iter_parents(self, paths='', **kwargs):
    """Iterate _all_ parents of this commit.
    :param paths:
        Optional path or list of paths limiting the Commits to those that
        contain at least one of the paths
    :param kwargs: All arguments allowed by git-rev-list
    :return: Iterator yielding Commit objects which are parents of self """
    # rev-list starting at this commit would yield the commit itself first,
    # so a skip of 0 is coerced to 1 (and 1 is the default).
    skip = kwargs.get("skip", 1)
    kwargs['skip'] = 1 if skip == 0 else skip
    return self.iter_items(self.repo, self, paths, **kwargs)
constant[Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self ]
variable[skip] assign[=] call[name[kwargs].get, parameter[constant[skip], constant[1]]]
if compare[name[skip] equal[==] constant[0]] begin[:]
variable[skip] assign[=] constant[1]
call[name[kwargs]][constant[skip]] assign[=] name[skip]
return[call[name[self].iter_items, parameter[name[self].repo, name[self], name[paths]]]] | keyword[def] identifier[iter_parents] ( identifier[self] , identifier[paths] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[skip] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
keyword[if] identifier[skip] == literal[int] :
identifier[skip] = literal[int]
identifier[kwargs] [ literal[string] ]= identifier[skip]
keyword[return] identifier[self] . identifier[iter_items] ( identifier[self] . identifier[repo] , identifier[self] , identifier[paths] ,** identifier[kwargs] ) | def iter_parents(self, paths='', **kwargs):
"""Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self """
# skip ourselves
skip = kwargs.get('skip', 1)
if skip == 0: # skip ourselves
skip = 1 # depends on [control=['if'], data=['skip']]
kwargs['skip'] = skip
return self.iter_items(self.repo, self, paths, **kwargs) |
def get_order_line_item_by_id(cls, order_line_item_id, **kwargs):
    """Find OrderLineItem
    Return single instance of OrderLineItem by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_order_line_item_by_id(order_line_item_id, async=True)
    >>> result = thread.get()
    :param async bool
    :param str order_line_item_id: ID of orderLineItem to return (required)
    :return: OrderLineItem
    If the method is called asynchronously,
    returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info helper;
    # when the 'async' flag is set the helper returns the request thread
    # instead of the deserialized data, so one call covers both cases.
    return cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs)
constant[Find OrderLineItem
Return single instance of OrderLineItem by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_order_line_item_by_id(order_line_item_id, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to return (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._get_order_line_item_by_id_with_http_info, parameter[name[order_line_item_id]]]] | keyword[def] identifier[get_order_line_item_by_id] ( identifier[cls] , identifier[order_line_item_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_get_order_line_item_by_id_with_http_info] ( identifier[order_line_item_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_get_order_line_item_by_id_with_http_info] ( identifier[order_line_item_id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def get_order_line_item_by_id(cls, order_line_item_id, **kwargs):
"""Find OrderLineItem
Return single instance of OrderLineItem by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_order_line_item_by_id(order_line_item_id, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to return (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs)
return data |
def dataReceived(self, data):
    """Data received, react to it and respond if needed.
    """
    # Decode the raw bytes into a STOMP frame, let the state machine
    # decide on a reply, and write it back only when one was produced.
    frame = stomper.unpack_frame(data)
    reply = self.sm.react(frame)
    if reply:
        self.transport.write(reply)
constant[Data received, react to it and respond if needed.
]
variable[msg] assign[=] call[name[stomper].unpack_frame, parameter[name[data]]]
variable[returned] assign[=] call[name[self].sm.react, parameter[name[msg]]]
if name[returned] begin[:]
call[name[self].transport.write, parameter[name[returned]]] | keyword[def] identifier[dataReceived] ( identifier[self] , identifier[data] ):
literal[string]
identifier[msg] = identifier[stomper] . identifier[unpack_frame] ( identifier[data] )
identifier[returned] = identifier[self] . identifier[sm] . identifier[react] ( identifier[msg] )
keyword[if] identifier[returned] :
identifier[self] . identifier[transport] . identifier[write] ( identifier[returned] ) | def dataReceived(self, data):
"""Data received, react to it and respond if needed.
"""
# print "receiver dataReceived: <%s>" % data
msg = stomper.unpack_frame(data)
returned = self.sm.react(msg)
# print "receiver returned <%s>" % returned
if returned:
self.transport.write(returned) # depends on [control=['if'], data=[]] |
def patch(self, container: Container, patch: Patch) -> bool:
    """
    Attempts to apply a given patch to the source code for a program inside
    a given container. All patch applications are guaranteed to be atomic;
    if the patch fails to apply, no changes will be made to the relevant
    source code files.
    Returns:
        true if patch application was successful, and false if the attempt
        was unsuccessful.
    """
    endpoint = "containers/{}".format(container.uid)
    response = self.__api.patch(endpoint, str(patch))
    # The server answers 204 No Content on a successful application.
    return response.status_code == 204
constant[
Attempts to apply a given patch to the source code for a program inside
a given container. All patch applications are guaranteed to be atomic;
if the patch fails to apply, no changes will be made to the relevant
source code files.
Returns:
true if patch application was successful, and false if the attempt
was unsuccessful.
]
variable[path] assign[=] call[constant[containers/{}].format, parameter[name[container].uid]]
variable[payload] assign[=] call[name[str], parameter[name[patch]]]
variable[r] assign[=] call[name[self].__api.patch, parameter[name[path], name[payload]]]
return[compare[name[r].status_code equal[==] constant[204]]] | keyword[def] identifier[patch] ( identifier[self] , identifier[container] : identifier[Container] , identifier[patch] : identifier[Patch] )-> identifier[bool] :
literal[string]
identifier[path] = literal[string] . identifier[format] ( identifier[container] . identifier[uid] )
identifier[payload] = identifier[str] ( identifier[patch] )
identifier[r] = identifier[self] . identifier[__api] . identifier[patch] ( identifier[path] , identifier[payload] )
keyword[return] identifier[r] . identifier[status_code] == literal[int] | def patch(self, container: Container, patch: Patch) -> bool:
"""
Attempts to apply a given patch to the source code for a program inside
a given container. All patch applications are guaranteed to be atomic;
if the patch fails to apply, no changes will be made to the relevant
source code files.
Returns:
true if patch application was successful, and false if the attempt
was unsuccessful.
"""
path = 'containers/{}'.format(container.uid)
payload = str(patch)
r = self.__api.patch(path, payload)
return r.status_code == 204 |
def get_groups_with_perms(obj, attach_perms=False):
    """Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``."""
    ctype = get_content_type(obj)
    group_model = get_group_obj_perms_model(obj)
    if attach_perms:
        # Map each group to the list of permission codenames it holds on obj.
        perms_by_group = defaultdict(list)
        qs = group_model.objects.filter(
            group__in=get_groups_with_perms(obj)
        ).prefetch_related('group', 'permission')
        if group_model is GroupObjectPermission:
            qs = qs.filter(object_pk=obj.pk, content_type=ctype)
        else:
            qs = qs.filter(content_object_id=obj.pk)
        for entry in qs:
            perms_by_group[entry.group].append(entry.permission.codename)
        return dict(perms_by_group)
    # Without attached perms a single filtered queryset suffices.
    rel_name = group_model.group.field.related_query_name()
    if group_model.objects.is_generic():
        filters = {
            '%s__content_type' % rel_name: ctype,
            '%s__object_pk' % rel_name: obj.pk,
        }
    else:
        filters = {'%s__content_object' % rel_name: obj}
    return Group.objects.filter(**filters).distinct()
constant[Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``.]
variable[ctype] assign[=] call[name[get_content_type], parameter[name[obj]]]
variable[group_model] assign[=] call[name[get_group_obj_perms_model], parameter[name[obj]]]
if <ast.UnaryOp object at 0x7da1b1b69300> begin[:]
variable[group_rel_name] assign[=] call[name[group_model].group.field.related_query_name, parameter[]]
if call[name[group_model].objects.is_generic, parameter[]] begin[:]
variable[group_filters] assign[=] dictionary[[<ast.BinOp object at 0x7da1b1b695d0>, <ast.BinOp object at 0x7da1b1b6acb0>], [<ast.Name object at 0x7da1b1b6ad10>, <ast.Attribute object at 0x7da1b1b68e50>]]
return[call[call[name[Group].objects.filter, parameter[]].distinct, parameter[]]] | keyword[def] identifier[get_groups_with_perms] ( identifier[obj] , identifier[attach_perms] = keyword[False] ):
literal[string]
identifier[ctype] = identifier[get_content_type] ( identifier[obj] )
identifier[group_model] = identifier[get_group_obj_perms_model] ( identifier[obj] )
keyword[if] keyword[not] identifier[attach_perms] :
identifier[group_rel_name] = identifier[group_model] . identifier[group] . identifier[field] . identifier[related_query_name] ()
keyword[if] identifier[group_model] . identifier[objects] . identifier[is_generic] ():
identifier[group_filters] ={
literal[string] % identifier[group_rel_name] : identifier[ctype] ,
literal[string] % identifier[group_rel_name] : identifier[obj] . identifier[pk] ,
}
keyword[else] :
identifier[group_filters] ={ literal[string] % identifier[group_rel_name] : identifier[obj] }
keyword[return] identifier[Group] . identifier[objects] . identifier[filter] (** identifier[group_filters] ). identifier[distinct] ()
keyword[else] :
identifier[group_perms_mapping] = identifier[defaultdict] ( identifier[list] )
identifier[groups_with_perms] = identifier[get_groups_with_perms] ( identifier[obj] )
identifier[queryset] = identifier[group_model] . identifier[objects] . identifier[filter] ( identifier[group__in] = identifier[groups_with_perms] ). identifier[prefetch_related] ( literal[string] , literal[string] )
keyword[if] identifier[group_model] keyword[is] identifier[GroupObjectPermission] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[object_pk] = identifier[obj] . identifier[pk] , identifier[content_type] = identifier[ctype] )
keyword[else] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[content_object_id] = identifier[obj] . identifier[pk] )
keyword[for] identifier[group_perm] keyword[in] identifier[queryset] :
identifier[group_perms_mapping] [ identifier[group_perm] . identifier[group] ]. identifier[append] ( identifier[group_perm] . identifier[permission] . identifier[codename] )
keyword[return] identifier[dict] ( identifier[group_perms_mapping] ) | def get_groups_with_perms(obj, attach_perms=False):
"""Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``."""
ctype = get_content_type(obj)
group_model = get_group_obj_perms_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is the case
group_rel_name = group_model.group.field.related_query_name()
if group_model.objects.is_generic():
group_filters = {'%s__content_type' % group_rel_name: ctype, '%s__object_pk' % group_rel_name: obj.pk} # depends on [control=['if'], data=[]]
else:
group_filters = {'%s__content_object' % group_rel_name: obj}
return Group.objects.filter(**group_filters).distinct() # depends on [control=['if'], data=[]]
else:
group_perms_mapping = defaultdict(list)
groups_with_perms = get_groups_with_perms(obj)
queryset = group_model.objects.filter(group__in=groups_with_perms).prefetch_related('group', 'permission')
if group_model is GroupObjectPermission:
queryset = queryset.filter(object_pk=obj.pk, content_type=ctype) # depends on [control=['if'], data=[]]
else:
queryset = queryset.filter(content_object_id=obj.pk)
for group_perm in queryset:
group_perms_mapping[group_perm.group].append(group_perm.permission.codename) # depends on [control=['for'], data=['group_perm']]
return dict(group_perms_mapping) |
def set_published_date(self, date=None):
    """
    Set the published date of a IOC to the current date.
    User may specify the date they want to set as well.
    :param date: Date value to set the published date to. This should be in the xsdDate form.
     This defaults to the current date if it is not provided.
     xsdDate Form: YYYY-MM-DDTHH:MM:SS
    :return: True
    :raises: IOCParseError if date format is not valid.
    """
    # A falsy date (None or empty) is passed through unvalidated and lets
    # the writer fall back to the current date.
    if date and not re.match(DATE_REGEX, date):
        raise IOCParseError('Published date is not valid. Must be in the form YYYY-MM-DDTHH:MM:SS')
    ioc_et.set_root_published_date(self.root, date)
    return True
constant[
Set the published date of a IOC to the current date.
User may specify the date they want to set as well.
:param date: Date value to set the published date to. This should be in the xsdDate form.
This defaults to the current date if it is not provided.
xsdDate Form: YYYY-MM-DDTHH:MM:SS
:return: True
:raises: IOCParseError if date format is not valid.
]
if name[date] begin[:]
variable[match] assign[=] call[name[re].match, parameter[name[DATE_REGEX], name[date]]]
if <ast.UnaryOp object at 0x7da1b1021900> begin[:]
<ast.Raise object at 0x7da1b1023550>
call[name[ioc_et].set_root_published_date, parameter[name[self].root, name[date]]]
return[constant[True]] | keyword[def] identifier[set_published_date] ( identifier[self] , identifier[date] = keyword[None] ):
literal[string]
keyword[if] identifier[date] :
identifier[match] = identifier[re] . identifier[match] ( identifier[DATE_REGEX] , identifier[date] )
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[IOCParseError] ( literal[string] )
identifier[ioc_et] . identifier[set_root_published_date] ( identifier[self] . identifier[root] , identifier[date] )
keyword[return] keyword[True] | def set_published_date(self, date=None):
"""
Set the published date of a IOC to the current date.
User may specify the date they want to set as well.
:param date: Date value to set the published date to. This should be in the xsdDate form.
This defaults to the current date if it is not provided.
xsdDate Form: YYYY-MM-DDTHH:MM:SS
:return: True
:raises: IOCParseError if date format is not valid.
"""
if date:
match = re.match(DATE_REGEX, date)
if not match:
raise IOCParseError('Published date is not valid. Must be in the form YYYY-MM-DDTHH:MM:SS') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
ioc_et.set_root_published_date(self.root, date)
return True |
def generate_tile_coordinates_from_pixels(roi, scale, size):
    """Yields N x M rectangular tiles for a region of interest.
    Parameters
    ----------
    roi : GeoVector
        Region of interest
    scale : float
        Scale factor (think of it as pixel resolution)
    size : tuple
        Pixel size in (width, height) to be multiplied by the scale factor
    Yields
    ------
    ~telluric.vectors.GeoVector
    """
    for dim in size:
        if not isinstance(dim, int):
            raise ValueError("Pixel size must be a tuple of integers")
    tile_w = size[0] * scale
    tile_h = size[1] * scale
    minx, miny, maxx, maxy = roi.get_shape(roi.crs).bounds
    # Round the tile counts up so the grid fully covers the ROI, then
    # expand the ROI to an exact multiple of the tile size.
    n_cols = np.ceil((maxx - minx) / tile_w)
    n_rows = np.ceil((maxy - miny) / tile_h)
    expanded = GeoVector.from_bounds(
        xmin=minx, ymin=miny,
        xmax=minx + n_cols * tile_w, ymax=miny + n_rows * tile_h,
        crs=roi.crs
    )
    yield from generate_tile_coordinates(expanded, (n_cols, n_rows))
constant[Yields N x M rectangular tiles for a region of interest.
Parameters
----------
roi : GeoVector
Region of interest
scale : float
Scale factor (think of it as pixel resolution)
size : tuple
Pixel size in (width, height) to be multiplied by the scale factor
Yields
------
~telluric.vectors.GeoVector
]
if <ast.UnaryOp object at 0x7da2054a46a0> begin[:]
<ast.Raise object at 0x7da2054a52a0>
variable[width] assign[=] binary_operation[call[name[size]][constant[0]] * name[scale]]
variable[height] assign[=] binary_operation[call[name[size]][constant[1]] * name[scale]]
<ast.Tuple object at 0x7da2054a59f0> assign[=] call[name[roi].get_shape, parameter[name[roi].crs]].bounds
variable[num_w] assign[=] call[name[np].ceil, parameter[binary_operation[binary_operation[name[maxx] - name[minx]] / name[width]]]]
variable[num_h] assign[=] call[name[np].ceil, parameter[binary_operation[binary_operation[name[maxy] - name[miny]] / name[height]]]]
variable[new_roi] assign[=] call[name[GeoVector].from_bounds, parameter[]]
<ast.YieldFrom object at 0x7da2054a7790> | keyword[def] identifier[generate_tile_coordinates_from_pixels] ( identifier[roi] , identifier[scale] , identifier[size] ):
literal[string]
keyword[if] keyword[not] identifier[all] ( identifier[isinstance] ( identifier[coord] , identifier[int] ) keyword[for] identifier[coord] keyword[in] identifier[size] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[width] = identifier[size] [ literal[int] ]* identifier[scale]
identifier[height] = identifier[size] [ literal[int] ]* identifier[scale]
identifier[minx] , identifier[miny] , identifier[maxx] , identifier[maxy] = identifier[roi] . identifier[get_shape] ( identifier[roi] . identifier[crs] ). identifier[bounds]
identifier[num_w] = identifier[np] . identifier[ceil] (( identifier[maxx] - identifier[minx] )/ identifier[width] )
identifier[num_h] = identifier[np] . identifier[ceil] (( identifier[maxy] - identifier[miny] )/ identifier[height] )
identifier[new_roi] = identifier[GeoVector] . identifier[from_bounds] (
identifier[xmin] = identifier[minx] , identifier[ymin] = identifier[miny] ,
identifier[xmax] = identifier[minx] + identifier[num_w] * identifier[width] , identifier[ymax] = identifier[miny] + identifier[num_h] * identifier[height] ,
identifier[crs] = identifier[roi] . identifier[crs]
)
keyword[yield] keyword[from] identifier[generate_tile_coordinates] ( identifier[new_roi] ,( identifier[num_w] , identifier[num_h] )) | def generate_tile_coordinates_from_pixels(roi, scale, size):
"""Yields N x M rectangular tiles for a region of interest.
Parameters
----------
roi : GeoVector
Region of interest
scale : float
Scale factor (think of it as pixel resolution)
size : tuple
Pixel size in (width, height) to be multiplied by the scale factor
Yields
------
~telluric.vectors.GeoVector
"""
if not all((isinstance(coord, int) for coord in size)):
raise ValueError('Pixel size must be a tuple of integers') # depends on [control=['if'], data=[]]
width = size[0] * scale
height = size[1] * scale
(minx, miny, maxx, maxy) = roi.get_shape(roi.crs).bounds
num_w = np.ceil((maxx - minx) / width)
num_h = np.ceil((maxy - miny) / height)
new_roi = GeoVector.from_bounds(xmin=minx, ymin=miny, xmax=minx + num_w * width, ymax=miny + num_h * height, crs=roi.crs)
yield from generate_tile_coordinates(new_roi, (num_w, num_h)) |
def is_float_array(val):
    """
    Checks whether a variable is a numpy float array.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a numpy float array. Otherwise False.
    """
    # Guard first: only numpy arrays expose a dtype worth inspecting.
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.floating)
constant[
Checks whether a variable is a numpy float array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy float array. Otherwise False.
]
return[<ast.BoolOp object at 0x7da1b0237f40>] | keyword[def] identifier[is_float_array] ( identifier[val] ):
literal[string]
keyword[return] identifier[is_np_array] ( identifier[val] ) keyword[and] identifier[issubclass] ( identifier[val] . identifier[dtype] . identifier[type] , identifier[np] . identifier[floating] ) | def is_float_array(val):
"""
Checks whether a variable is a numpy float array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy float array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.floating) |
def mavlink_packet(self, m):
    '''handle mavlink packets'''
    mtype = m.get_type()
    if mtype == 'SYSTEM_TIME':
        if self.system_time_settings.verbose:
            print("ST: Received from (%u/%u): %s" %
                  (m.get_srcSystem(), m.get_srcComponent(), m))
    if mtype == 'TIMESYNC':
        if m.tc1 == 0:
            # The peer is requesting a timesync response; always answer
            # (the original guard `if True or verbose` was a tautology).
            time_ns = time.time() * 1000000000
            time_ns += 1234
            if self.system_time_settings.verbose:
                print("ST: received timesync; sending response: %u" %
                      (time_ns))
            self.master.mav.timesync_send(time_ns, m.ts1)
        elif m.ts1 == self.last_sent_ts1:
            # This is the response to a request we originated; report latency.
            now_ns = time.time() * 1000000000
            now_ns += 1234
            if self.system_time_settings.verbose:
                print("ST: timesync response: sysid=%u latency=%fms" %
                      (m.get_srcSystem(),
                       (now_ns - self.last_sent_ts1) / 1000000.0))
constant[handle mavlink packets]
if compare[call[name[m].get_type, parameter[]] equal[==] constant[SYSTEM_TIME]] begin[:]
if name[self].system_time_settings.verbose begin[:]
call[name[print], parameter[binary_operation[constant[ST: Received from (%u/%u): %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b1720430>, <ast.Call object at 0x7da1b17220e0>, <ast.Name object at 0x7da1b1720640>]]]]]
if compare[call[name[m].get_type, parameter[]] equal[==] constant[TIMESYNC]] begin[:]
if compare[name[m].tc1 equal[==] constant[0]] begin[:]
variable[time_ns] assign[=] binary_operation[call[name[time].time, parameter[]] * constant[1000000000]]
<ast.AugAssign object at 0x7da1b17210c0>
if <ast.BoolOp object at 0x7da1b1722fe0> begin[:]
if name[self].system_time_settings.verbose begin[:]
call[name[print], parameter[binary_operation[constant[ST: received timesync; sending response: %u] <ast.Mod object at 0x7da2590d6920> name[time_ns]]]]
call[name[self].master.mav.timesync_send, parameter[name[time_ns], name[m].ts1]] | keyword[def] identifier[mavlink_packet] ( identifier[self] , identifier[m] ):
literal[string]
keyword[if] identifier[m] . identifier[get_type] ()== literal[string] :
keyword[if] identifier[self] . identifier[system_time_settings] . identifier[verbose] :
identifier[print] ( literal[string] %
( identifier[m] . identifier[get_srcSystem] (), identifier[m] . identifier[get_srcComponent] (), identifier[m] ))
keyword[if] identifier[m] . identifier[get_type] ()== literal[string] :
keyword[if] identifier[m] . identifier[tc1] == literal[int] :
identifier[time_ns] = identifier[time] . identifier[time] ()* literal[int]
identifier[time_ns] += literal[int]
keyword[if] keyword[True] keyword[or] identifier[self] . identifier[system_time_settings] . identifier[verbose] :
keyword[if] identifier[self] . identifier[system_time_settings] . identifier[verbose] :
identifier[print] ( literal[string] %
( identifier[time_ns] ))
identifier[self] . identifier[master] . identifier[mav] . identifier[timesync_send] ( identifier[time_ns] ,
identifier[m] . identifier[ts1] )
keyword[else] :
keyword[if] identifier[m] . identifier[ts1] == identifier[self] . identifier[last_sent_ts1] :
identifier[now_ns] = identifier[time] . identifier[time] ()* literal[int]
identifier[now_ns] += literal[int]
keyword[if] identifier[self] . identifier[system_time_settings] . identifier[verbose] :
identifier[print] ( literal[string] %
( identifier[m] . identifier[get_srcSystem] (),
( identifier[now_ns] - identifier[self] . identifier[last_sent_ts1] )/ literal[int] )) | def mavlink_packet(self, m):
"""handle mavlink packets"""
if m.get_type() == 'SYSTEM_TIME':
if self.system_time_settings.verbose:
print('ST: Received from (%u/%u): %s' % (m.get_srcSystem(), m.get_srcComponent(), m)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if m.get_type() == 'TIMESYNC':
if m.tc1 == 0:
# this is a request for a timesync response
time_ns = time.time() * 1000000000
time_ns += 1234
if True or self.system_time_settings.verbose:
if self.system_time_settings.verbose:
print('ST: received timesync; sending response: %u' % time_ns) # depends on [control=['if'], data=[]]
self.master.mav.timesync_send(time_ns, m.ts1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif m.ts1 == self.last_sent_ts1:
# we sent this one!
now_ns = time.time() * 1000000000
now_ns += 1234
if self.system_time_settings.verbose:
print('ST: timesync response: sysid=%u latency=%fms' % (m.get_srcSystem(), (now_ns - self.last_sent_ts1) / 1000000.0)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def has_edge(self, p_from, p_to):
    """ Returns True when the graph has the given edge. """
    if p_from not in self._edges:
        return False
    return p_to in self._edges[p_from]
constant[ Returns True when the graph has the given edge. ]
return[<ast.BoolOp object at 0x7da20c7ca380>] | keyword[def] identifier[has_edge] ( identifier[self] , identifier[p_from] , identifier[p_to] ):
literal[string]
keyword[return] identifier[p_from] keyword[in] identifier[self] . identifier[_edges] keyword[and] identifier[p_to] keyword[in] identifier[self] . identifier[_edges] [ identifier[p_from] ] | def has_edge(self, p_from, p_to):
""" Returns True when the graph has the given edge. """
return p_from in self._edges and p_to in self._edges[p_from] |
def init_providers(self, provider, kwargs):
    """
    Inits main and fallback provider if relevant
    :param provider: Provider name to use
    :param kwargs: Additional kwargs
    :raises ValueError: If provider name or fallback names are not valid providers, a :exc:`ValueError` will
     be raised
    """
    self.provider = notifiers.get_notifier(provider, strict=True)
    if not kwargs.get("fallback"):
        return
    # A fallback provider was requested; pop its config out of kwargs.
    self.fallback = notifiers.get_notifier(kwargs.pop("fallback"), strict=True)
    self.fallback_defaults = kwargs.pop("fallback_defaults", {})
constant[
Inits main and fallback provider if relevant
:param provider: Provider name to use
:param kwargs: Additional kwargs
:raises ValueError: If provider name or fallback names are not valid providers, a :exc:`ValueError` will
be raised
]
name[self].provider assign[=] call[name[notifiers].get_notifier, parameter[name[provider]]]
if call[name[kwargs].get, parameter[constant[fallback]]] begin[:]
name[self].fallback assign[=] call[name[notifiers].get_notifier, parameter[call[name[kwargs].pop, parameter[constant[fallback]]]]]
name[self].fallback_defaults assign[=] call[name[kwargs].pop, parameter[constant[fallback_defaults], dictionary[[], []]]] | keyword[def] identifier[init_providers] ( identifier[self] , identifier[provider] , identifier[kwargs] ):
literal[string]
identifier[self] . identifier[provider] = identifier[notifiers] . identifier[get_notifier] ( identifier[provider] , identifier[strict] = keyword[True] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[self] . identifier[fallback] = identifier[notifiers] . identifier[get_notifier] ( identifier[kwargs] . identifier[pop] ( literal[string] ), identifier[strict] = keyword[True] )
identifier[self] . identifier[fallback_defaults] = identifier[kwargs] . identifier[pop] ( literal[string] ,{}) | def init_providers(self, provider, kwargs):
"""
Inits main and fallback provider if relevant
:param provider: Provider name to use
:param kwargs: Additional kwargs
:raises ValueError: If provider name or fallback names are not valid providers, a :exc:`ValueError` will
be raised
"""
self.provider = notifiers.get_notifier(provider, strict=True)
if kwargs.get('fallback'):
self.fallback = notifiers.get_notifier(kwargs.pop('fallback'), strict=True)
self.fallback_defaults = kwargs.pop('fallback_defaults', {}) # depends on [control=['if'], data=[]] |
def end(self):
    """Finish the HTML document.

    Closes any list item / list tags left open by earlier output, appends
    the navigation links, and closes the underlying output file.
    """
    out = self.html
    # A pending URL means an open <li>; a pending comic means an open inner <ul>.
    if self.lastUrl is not None:
        out.write(u'</li>\n')
    if self.lastComic is not None:
        out.write(u'</ul>\n')
    # The outermost list is always open at this point.
    out.write(u'</ul>\n')
    self.addNavLinks()
    out.close()
self.html.close() | def function[end, parameter[self]]:
constant[End HTML output.]
if compare[name[self].lastUrl is_not constant[None]] begin[:]
call[name[self].html.write, parameter[constant[</li>
]]]
if compare[name[self].lastComic is_not constant[None]] begin[:]
call[name[self].html.write, parameter[constant[</ul>
]]]
call[name[self].html.write, parameter[constant[</ul>
]]]
call[name[self].addNavLinks, parameter[]]
call[name[self].html.close, parameter[]] | keyword[def] identifier[end] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[lastUrl] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[html] . identifier[write] ( literal[string] )
keyword[if] identifier[self] . identifier[lastComic] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[html] . identifier[write] ( literal[string] )
identifier[self] . identifier[html] . identifier[write] ( literal[string] )
identifier[self] . identifier[addNavLinks] ()
identifier[self] . identifier[html] . identifier[close] () | def end(self):
"""End HTML output."""
if self.lastUrl is not None:
self.html.write(u'</li>\n') # depends on [control=['if'], data=[]]
if self.lastComic is not None:
self.html.write(u'</ul>\n') # depends on [control=['if'], data=[]]
self.html.write(u'</ul>\n')
self.addNavLinks()
self.html.close() |
def op_token(self, display_name, opt):
    """Create a session-scoped operational token.

    The token is annotated with a display name and metadata derived from
    *opt*, uses the lease duration from *opt*, and is expected to be
    revoked at the end of the session.

    :param display_name: human-readable name attached to the token
    :param opt: options object providing at least a ``lease`` attribute
    :return: the client token string from Vault's auth response
    :raises aomi.exceptions.AomiCredentials: when Vault denies permission
    """
    token_args = {
        'display_name': display_name,
        'lease': opt.lease,
        'meta': token_meta(opt)
    }
    try:
        token = self.create_token(**token_args)
    except (hvac.exceptions.InvalidRequest,
            hvac.exceptions.Forbidden) as err:
        # Translate a Vault permission failure into our own credential
        # error; any other Vault error propagates unchanged.
        if err.errors[0] == 'permission denied':
            raise aomi.exceptions.AomiCredentials(
                "Permission denied creating operational token")
        raise
    LOG.debug("Created operational token with lease of %s", opt.lease)
    return token['auth']['client_token']
constant[Return a properly annotated token for our use. This
token will be revoked at the end of the session. The token
will have some decent amounts of metadata tho.]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b68b80>, <ast.Constant object at 0x7da1b1b69090>, <ast.Constant object at 0x7da1b1b69b40>], [<ast.Attribute object at 0x7da1b1b68820>, <ast.Name object at 0x7da1b1b684c0>, <ast.Call object at 0x7da1b1b68a90>]]
<ast.Try object at 0x7da1b1b6bf40>
call[name[LOG].debug, parameter[constant[Created operational token with lease of %s], name[opt].lease]]
return[call[call[name[token]][constant[auth]]][constant[client_token]]] | keyword[def] identifier[op_token] ( identifier[self] , identifier[display_name] , identifier[opt] ):
literal[string]
identifier[args] ={
literal[string] : identifier[opt] . identifier[lease] ,
literal[string] : identifier[display_name] ,
literal[string] : identifier[token_meta] ( identifier[opt] )
}
keyword[try] :
identifier[token] = identifier[self] . identifier[create_token] (** identifier[args] )
keyword[except] ( identifier[hvac] . identifier[exceptions] . identifier[InvalidRequest] ,
identifier[hvac] . identifier[exceptions] . identifier[Forbidden] ) keyword[as] identifier[vault_exception] :
keyword[if] identifier[vault_exception] . identifier[errors] [ literal[int] ]== literal[string] :
identifier[emsg] = literal[string]
keyword[raise] identifier[aomi] . identifier[exceptions] . identifier[AomiCredentials] ( identifier[emsg] )
keyword[else] :
keyword[raise]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[opt] . identifier[lease] )
keyword[return] identifier[token] [ literal[string] ][ literal[string] ] | def op_token(self, display_name, opt):
"""Return a properly annotated token for our use. This
token will be revoked at the end of the session. The token
will have some decent amounts of metadata tho."""
args = {'lease': opt.lease, 'display_name': display_name, 'meta': token_meta(opt)}
try:
token = self.create_token(**args) # depends on [control=['try'], data=[]]
except (hvac.exceptions.InvalidRequest, hvac.exceptions.Forbidden) as vault_exception:
if vault_exception.errors[0] == 'permission denied':
emsg = 'Permission denied creating operational token'
raise aomi.exceptions.AomiCredentials(emsg) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['vault_exception']]
LOG.debug('Created operational token with lease of %s', opt.lease)
return token['auth']['client_token'] |
def getFeatureById(self, featureId):
    """Look up a single feature row by its GFF3 feature id.

    :param featureId: the FeatureID as found in GFF3 records
    :return: dictionary representing a feature object,
        or None if no match is found.
    """
    cursor = self._dbconn.execute(
        "SELECT * FROM FEATURE WHERE id = ?", (featureId,))
    row = cursor.fetchone()
    return None if row is None else sqlite_backend.sqliteRowToDict(row)
constant[
Fetch feature by featureID.
:param featureId: the FeatureID as found in GFF3 records
:return: dictionary representing a feature object,
or None if no match is found.
]
variable[sql] assign[=] constant[SELECT * FROM FEATURE WHERE id = ?]
variable[query] assign[=] call[name[self]._dbconn.execute, parameter[name[sql], tuple[[<ast.Name object at 0x7da18bccb310>]]]]
variable[ret] assign[=] call[name[query].fetchone, parameter[]]
if compare[name[ret] is constant[None]] begin[:]
return[constant[None]]
return[call[name[sqlite_backend].sqliteRowToDict, parameter[name[ret]]]] | keyword[def] identifier[getFeatureById] ( identifier[self] , identifier[featureId] ):
literal[string]
identifier[sql] = literal[string]
identifier[query] = identifier[self] . identifier[_dbconn] . identifier[execute] ( identifier[sql] ,( identifier[featureId] ,))
identifier[ret] = identifier[query] . identifier[fetchone] ()
keyword[if] identifier[ret] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] identifier[sqlite_backend] . identifier[sqliteRowToDict] ( identifier[ret] ) | def getFeatureById(self, featureId):
"""
Fetch feature by featureID.
:param featureId: the FeatureID as found in GFF3 records
:return: dictionary representing a feature object,
or None if no match is found.
"""
sql = 'SELECT * FROM FEATURE WHERE id = ?'
query = self._dbconn.execute(sql, (featureId,))
ret = query.fetchone()
if ret is None:
return None # depends on [control=['if'], data=[]]
return sqlite_backend.sqliteRowToDict(ret) |
def visitEbnfSuffix(self, ctx: jsgParser.EbnfSuffixContext):
    """Record cardinality bounds for an EBNF suffix.

    Grammar: ``ebnfSuffix: QMARK | STAR | PLUS | OBRACE INT (COMMA (INT|STAR)?)? CBRACE``

    Sets ``self.min`` / ``self.max`` (``None`` meaning unbounded) and keeps
    the raw suffix text in ``self._ebnftext``.
    """
    self._ebnftext = ctx.getText()
    if ctx.INT():
        # Brace form: {n}, {n,} or {n,m}
        self.min = int(ctx.INT(0).getText())
        if not ctx.COMMA():
            self.max = self.min          # {n} -> exactly n
        elif len(ctx.INT()) > 1:
            self.max = int(ctx.INT(1).getText())   # {n,m}
        else:
            self.max = None              # {n,} -> no upper bound
    elif ctx.QMARK():
        self.min, self.max = 0, 1
    elif ctx.STAR():
        self.min, self.max = 0, None
    elif ctx.PLUS():
        self.min, self.max = 1, None
    else:
        raise NotImplementedError("Unknown ebnf construct: {}".format(self._ebnftext))
constant[ ebnfSuffix: QMARK | STAR | PLUS | OBRACE INT (COMMA (INT|STAR)?)? CBRACE ]
name[self]._ebnftext assign[=] call[name[ctx].getText, parameter[]]
if call[name[ctx].INT, parameter[]] begin[:]
name[self].min assign[=] call[name[int], parameter[call[call[name[ctx].INT, parameter[constant[0]]].getText, parameter[]]]]
if call[name[ctx].COMMA, parameter[]] begin[:]
if compare[call[name[len], parameter[call[name[ctx].INT, parameter[]]]] greater[>] constant[1]] begin[:]
name[self].max assign[=] call[name[int], parameter[call[call[name[ctx].INT, parameter[constant[1]]].getText, parameter[]]]] | keyword[def] identifier[visitEbnfSuffix] ( identifier[self] , identifier[ctx] : identifier[jsgParser] . identifier[EbnfSuffixContext] ):
literal[string]
identifier[self] . identifier[_ebnftext] = identifier[ctx] . identifier[getText] ()
keyword[if] identifier[ctx] . identifier[INT] ():
identifier[self] . identifier[min] = identifier[int] ( identifier[ctx] . identifier[INT] ( literal[int] ). identifier[getText] ())
keyword[if] identifier[ctx] . identifier[COMMA] ():
keyword[if] identifier[len] ( identifier[ctx] . identifier[INT] ())> literal[int] :
identifier[self] . identifier[max] = identifier[int] ( identifier[ctx] . identifier[INT] ( literal[int] ). identifier[getText] ())
keyword[else] :
identifier[self] . identifier[max] = keyword[None]
keyword[else] :
identifier[self] . identifier[max] = identifier[self] . identifier[min]
keyword[elif] identifier[ctx] . identifier[QMARK] ():
identifier[self] . identifier[min] = literal[int]
identifier[self] . identifier[max] = literal[int]
keyword[elif] identifier[ctx] . identifier[STAR] ():
identifier[self] . identifier[min] = literal[int]
identifier[self] . identifier[max] = keyword[None]
keyword[elif] identifier[ctx] . identifier[PLUS] ():
identifier[self] . identifier[min] = literal[int]
identifier[self] . identifier[max] = keyword[None]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[self] . identifier[_ebnftext] )) | def visitEbnfSuffix(self, ctx: jsgParser.EbnfSuffixContext):
""" ebnfSuffix: QMARK | STAR | PLUS | OBRACE INT (COMMA (INT|STAR)?)? CBRACE """
self._ebnftext = ctx.getText()
if ctx.INT():
self.min = int(ctx.INT(0).getText())
if ctx.COMMA():
if len(ctx.INT()) > 1:
self.max = int(ctx.INT(1).getText()) # depends on [control=['if'], data=[]]
else:
self.max = None # depends on [control=['if'], data=[]]
else:
self.max = self.min # depends on [control=['if'], data=[]]
elif ctx.QMARK():
self.min = 0
self.max = 1 # depends on [control=['if'], data=[]]
elif ctx.STAR():
self.min = 0
self.max = None # depends on [control=['if'], data=[]]
elif ctx.PLUS():
self.min = 1
self.max = None # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Unknown ebnf construct: {}'.format(self._ebnftext)) |
def login(self, params):
    """Dispatch a login attempt based on the supplied credentials.

    Prefers a password login, then a resume-token login; anything else
    is reported as an authentication failure.
    """
    if 'password' in params:
        return self.login_with_password(params)
    if 'resume' in params:
        return self.login_with_resume_token(params)
    # Neither credential kind present: record the failure (no return value).
    self.auth_failed(**params)
constant[Login either with resume token or password.]
if compare[constant[password] in name[params]] begin[:]
return[call[name[self].login_with_password, parameter[name[params]]]] | keyword[def] identifier[login] ( identifier[self] , identifier[params] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[params] :
keyword[return] identifier[self] . identifier[login_with_password] ( identifier[params] )
keyword[elif] literal[string] keyword[in] identifier[params] :
keyword[return] identifier[self] . identifier[login_with_resume_token] ( identifier[params] )
keyword[else] :
identifier[self] . identifier[auth_failed] (** identifier[params] ) | def login(self, params):
"""Login either with resume token or password."""
if 'password' in params:
return self.login_with_password(params) # depends on [control=['if'], data=['params']]
elif 'resume' in params:
return self.login_with_resume_token(params) # depends on [control=['if'], data=['params']]
else:
self.auth_failed(**params) |
def parallel_quad (func, a, b, par_args=(), simple_args=(), parallel=True, **kwargs):
    """A parallelized version of :func:`scipy.integrate.quad`.

    Arguments are:

    func
      The function to integrate, called as ``f(x, [*par_args...], [*simple_args...])``.
    a
      The lower limit(s) of integration.
    b
      The upper limits(s) of integration.
    par_args
      Tuple of additional parallelized arguments.
    simple_args
      Tuple of additional arguments passed identically to every invocation.
    parallel
      Controls parallelization; default uses all available cores. See
      :func:`pwkit.parallel.make_parallel_helper`.
    kwargs
      Passed to :func:`scipy.integrate.quad`. Don't set *full_output* to True.

    Returns: integrals and errors; see below.

    Computes many integrals in parallel. The values *a*, *b*, and the items of
    *par_args* should all be numeric, and may be N-dimensional Numpy arrays.
    They are all broadcast to a common shape, and one integral is performed
    for each element in the resulting array. If this common shape is (X,Y,Z),
    the return value has shape (2,X,Y,Z), where the subarray [0,...] contains
    the computed integrals and the subarray [1,...] contains the absolute
    error estimates. If *a*, *b*, and the items in *par_args* are all scalars,
    the return value has shape (2,).

    The *simple_args* are passed to each integrand function identically for
    each integration. They do not need to be Pickle-able.

    Example::

      >>> parallel_quad (lambda x, u, v, q: u * x + v,
                         0,       # a
                         [3, 4],  # b
                         (np.arange (6).reshape ((3,2)), np.arange (3).reshape ((3,1))),  # par_args
                         ('hello',),)

    Computes six integrals and returns an array of shape ``(2,3,2)``. The
    functions that are evaluated are::

      [[ 0*x + 0, 1*x + 0 ],
       [ 2*x + 1, 3*x + 1 ],
       [ 4*x + 2, 5*x + 2 ]]

    and the bounds of the integrals are::

      [[ (0, 3), (0, 4) ],
       [ (0, 3), (0, 4) ],
       [ (0, 3), (0, 4) ]]

    In all cases the unused fourth parameter *q* is ``'hello'``.
    """
    from scipy.integrate import quad
    from .parallel import make_parallel_helper
    # Resolve the parallelization strategy (serial, threads, processes, ...) once.
    phelp = make_parallel_helper (parallel)
    if not isinstance (par_args, tuple):
        raise ValueError ('par_args must be a tuple')
    if not isinstance (simple_args, tuple):
        raise ValueError ('simple_args must be a tuple')
    # Broadcast the limits and the parallelized arguments to one common shape;
    # each element of that shape corresponds to one integral to perform.
    bc_raw = np.broadcast_arrays (a, b, *par_args)
    # 1-d-capable views so that scalar and N-d inputs can be iterated uniformly.
    bc_1d = tuple (np.atleast_1d (a) for a in bc_raw)
    def gen_var_args ():
        # Yield one (a, b, *par_args) tuple of scalars per integral, in flat order.
        for i in range (bc_1d[0].size):
            yield tuple (x.flat[i] for x in bc_1d)
    def helper (i, _, var_args):
        # Worker: run a single quadrature with this element's limits and arguments.
        a, b = var_args[:2]
        return quad (func, a, b, var_args[2:] + simple_args, **kwargs)
    with phelp.get_ppmap () as ppmap:
        result_list = ppmap (helper, None, gen_var_args ())
    # All-scalar inputs: return a plain shape-(2,) array of (integral, abs-error).
    if bc_raw[0].ndim == 0:
        return np.asarray (result_list[0])
    # Otherwise pack the results as shape (2,) + broadcast shape:
    # [0, ...] holds the integrals, [1, ...] the absolute error estimates,
    # in the same flat order in which gen_var_args produced them.
    result_arr = np.empty ((2,) + bc_raw[0].shape)
    for i in range (bc_1d[0].size):
        result_arr[0].flat[i], result_arr[1].flat[i] = result_list[i]
    return result_arr
constant[A parallelized version of :func:`scipy.integrate.quad`.
Arguments are:
func
The function to integrate, called as ``f(x, [*par_args...], [*simple_args...])``.
a
The lower limit(s) of integration.
b
The upper limits(s) of integration.
par_args
Tuple of additional parallelized arguments.
simple_args
Tuple of additional arguments passed identically to every invocation.
parallel
Controls parallelization; default uses all available cores. See
:func:`pwkit.parallel.make_parallel_helper`.
kwargs
Passed to :func:`scipy.integrate.quad`. Don't set *full_output* to True.
Returns: integrals and errors; see below.
Computes many integrals in parallel. The values *a*, *b*, and the items of
*par_args* should all be numeric, and may be N-dimensional Numpy arrays.
They are all broadcast to a common shape, and one integral is performed
for each element in the resulting array. If this common shape is (X,Y,Z),
the return value has shape (2,X,Y,Z), where the subarray [0,...] contains
the computed integrals and the subarray [1,...] contains the absolute
error estimates. If *a*, *b*, and the items in *par_args* are all scalars,
the return value has shape (2,).
The *simple_args* are passed to each integrand function identically for each
integration. They do not need to be Pickle-able.
Example::
>>> parallel_quad (lambda x, u, v, q: u * x + v,
0, # a
[3, 4], # b
(np.arange (6).reshape ((3,2)), np.arange (3).reshape ((3,1))), # par_args
('hello',),)
Computes six integrals and returns an array of shape ``(2,3,2)``. The
functions that are evaluated are::
[[ 0*x + 0, 1*x + 0 ],
[ 2*x + 1, 3*x + 1 ],
[ 4*x + 2, 5*x + 2 ]]
and the bounds of the integrals are::
[[ (0, 3), (0, 4) ],
[ (0, 3), (0, 4) ],
[ (0, 3), (0, 4) ]]
In all cases the unused fourth parameter *q* is ``'hello'``.
]
from relative_module[scipy.integrate] import module[quad]
from relative_module[parallel] import module[make_parallel_helper]
variable[phelp] assign[=] call[name[make_parallel_helper], parameter[name[parallel]]]
if <ast.UnaryOp object at 0x7da1b2778a90> begin[:]
<ast.Raise object at 0x7da1b2778cd0>
if <ast.UnaryOp object at 0x7da1b2778a30> begin[:]
<ast.Raise object at 0x7da1b27784c0>
variable[bc_raw] assign[=] call[name[np].broadcast_arrays, parameter[name[a], name[b], <ast.Starred object at 0x7da1b27780d0>]]
variable[bc_1d] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b2778370>]]
def function[gen_var_args, parameter[]]:
for taget[name[i]] in starred[call[name[range], parameter[call[name[bc_1d]][constant[0]].size]]] begin[:]
<ast.Yield object at 0x7da1b2778790>
def function[helper, parameter[i, _, var_args]]:
<ast.Tuple object at 0x7da2054a6770> assign[=] call[name[var_args]][<ast.Slice object at 0x7da2054a7370>]
return[call[name[quad], parameter[name[func], name[a], name[b], binary_operation[call[name[var_args]][<ast.Slice object at 0x7da2054a79d0>] + name[simple_args]]]]]
with call[name[phelp].get_ppmap, parameter[]] begin[:]
variable[result_list] assign[=] call[name[ppmap], parameter[name[helper], constant[None], call[name[gen_var_args], parameter[]]]]
if compare[call[name[bc_raw]][constant[0]].ndim equal[==] constant[0]] begin[:]
return[call[name[np].asarray, parameter[call[name[result_list]][constant[0]]]]]
variable[result_arr] assign[=] call[name[np].empty, parameter[binary_operation[tuple[[<ast.Constant object at 0x7da2054a6920>]] + call[name[bc_raw]][constant[0]].shape]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[bc_1d]][constant[0]].size]]] begin[:]
<ast.Tuple object at 0x7da204566920> assign[=] call[name[result_list]][name[i]]
return[name[result_arr]] | keyword[def] identifier[parallel_quad] ( identifier[func] , identifier[a] , identifier[b] , identifier[par_args] =(), identifier[simple_args] =(), identifier[parallel] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[scipy] . identifier[integrate] keyword[import] identifier[quad]
keyword[from] . identifier[parallel] keyword[import] identifier[make_parallel_helper]
identifier[phelp] = identifier[make_parallel_helper] ( identifier[parallel] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[par_args] , identifier[tuple] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[simple_args] , identifier[tuple] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[bc_raw] = identifier[np] . identifier[broadcast_arrays] ( identifier[a] , identifier[b] ,* identifier[par_args] )
identifier[bc_1d] = identifier[tuple] ( identifier[np] . identifier[atleast_1d] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[bc_raw] )
keyword[def] identifier[gen_var_args] ():
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[bc_1d] [ literal[int] ]. identifier[size] ):
keyword[yield] identifier[tuple] ( identifier[x] . identifier[flat] [ identifier[i] ] keyword[for] identifier[x] keyword[in] identifier[bc_1d] )
keyword[def] identifier[helper] ( identifier[i] , identifier[_] , identifier[var_args] ):
identifier[a] , identifier[b] = identifier[var_args] [: literal[int] ]
keyword[return] identifier[quad] ( identifier[func] , identifier[a] , identifier[b] , identifier[var_args] [ literal[int] :]+ identifier[simple_args] ,** identifier[kwargs] )
keyword[with] identifier[phelp] . identifier[get_ppmap] () keyword[as] identifier[ppmap] :
identifier[result_list] = identifier[ppmap] ( identifier[helper] , keyword[None] , identifier[gen_var_args] ())
keyword[if] identifier[bc_raw] [ literal[int] ]. identifier[ndim] == literal[int] :
keyword[return] identifier[np] . identifier[asarray] ( identifier[result_list] [ literal[int] ])
identifier[result_arr] = identifier[np] . identifier[empty] (( literal[int] ,)+ identifier[bc_raw] [ literal[int] ]. identifier[shape] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[bc_1d] [ literal[int] ]. identifier[size] ):
identifier[result_arr] [ literal[int] ]. identifier[flat] [ identifier[i] ], identifier[result_arr] [ literal[int] ]. identifier[flat] [ identifier[i] ]= identifier[result_list] [ identifier[i] ]
keyword[return] identifier[result_arr] | def parallel_quad(func, a, b, par_args=(), simple_args=(), parallel=True, **kwargs):
"""A parallelized version of :func:`scipy.integrate.quad`.
Arguments are:
func
The function to integrate, called as ``f(x, [*par_args...], [*simple_args...])``.
a
The lower limit(s) of integration.
b
The upper limits(s) of integration.
par_args
Tuple of additional parallelized arguments.
simple_args
Tuple of additional arguments passed identically to every invocation.
parallel
Controls parallelization; default uses all available cores. See
:func:`pwkit.parallel.make_parallel_helper`.
kwargs
Passed to :func:`scipy.integrate.quad`. Don't set *full_output* to True.
Returns: integrals and errors; see below.
Computes many integrals in parallel. The values *a*, *b*, and the items of
*par_args* should all be numeric, and may be N-dimensional Numpy arrays.
They are all broadcast to a common shape, and one integral is performed
for each element in the resulting array. If this common shape is (X,Y,Z),
the return value has shape (2,X,Y,Z), where the subarray [0,...] contains
the computed integrals and the subarray [1,...] contains the absolute
error estimates. If *a*, *b*, and the items in *par_args* are all scalars,
the return value has shape (2,).
The *simple_args* are passed to each integrand function identically for each
integration. They do not need to be Pickle-able.
Example::
>>> parallel_quad (lambda x, u, v, q: u * x + v,
0, # a
[3, 4], # b
(np.arange (6).reshape ((3,2)), np.arange (3).reshape ((3,1))), # par_args
('hello',),)
Computes six integrals and returns an array of shape ``(2,3,2)``. The
functions that are evaluated are::
[[ 0*x + 0, 1*x + 0 ],
[ 2*x + 1, 3*x + 1 ],
[ 4*x + 2, 5*x + 2 ]]
and the bounds of the integrals are::
[[ (0, 3), (0, 4) ],
[ (0, 3), (0, 4) ],
[ (0, 3), (0, 4) ]]
In all cases the unused fourth parameter *q* is ``'hello'``.
"""
from scipy.integrate import quad
from .parallel import make_parallel_helper
phelp = make_parallel_helper(parallel)
if not isinstance(par_args, tuple):
raise ValueError('par_args must be a tuple') # depends on [control=['if'], data=[]]
if not isinstance(simple_args, tuple):
raise ValueError('simple_args must be a tuple') # depends on [control=['if'], data=[]]
bc_raw = np.broadcast_arrays(a, b, *par_args)
bc_1d = tuple((np.atleast_1d(a) for a in bc_raw))
def gen_var_args():
for i in range(bc_1d[0].size):
yield tuple((x.flat[i] for x in bc_1d)) # depends on [control=['for'], data=['i']]
def helper(i, _, var_args):
(a, b) = var_args[:2]
return quad(func, a, b, var_args[2:] + simple_args, **kwargs)
with phelp.get_ppmap() as ppmap:
result_list = ppmap(helper, None, gen_var_args()) # depends on [control=['with'], data=['ppmap']]
if bc_raw[0].ndim == 0:
return np.asarray(result_list[0]) # depends on [control=['if'], data=[]]
result_arr = np.empty((2,) + bc_raw[0].shape)
for i in range(bc_1d[0].size):
(result_arr[0].flat[i], result_arr[1].flat[i]) = result_list[i] # depends on [control=['for'], data=['i']]
return result_arr |
def join_path_to_filelist(path, filelist):
    """Prefix every entry of *filelist* with *path*.

    Parameters
    ----------
    path: str
    filelist: list of str
        Entries are coerced with ``str`` before joining.

    Returns
    -------
    list of filepaths
    """
    return [op.join(path, str(entry)) for entry in filelist]
constant[Joins path to each line in filelist
Parameters
----------
path: str
filelist: list of str
Returns
-------
list of filepaths
]
return[<ast.ListComp object at 0x7da1b004c6a0>] | keyword[def] identifier[join_path_to_filelist] ( identifier[path] , identifier[filelist] ):
literal[string]
keyword[return] [ identifier[op] . identifier[join] ( identifier[path] , identifier[str] ( identifier[item] )) keyword[for] identifier[item] keyword[in] identifier[filelist] ] | def join_path_to_filelist(path, filelist):
"""Joins path to each line in filelist
Parameters
----------
path: str
filelist: list of str
Returns
-------
list of filepaths
"""
return [op.join(path, str(item)) for item in filelist] |
def get_departures(self, station):
    """Fetch the current departure times for *station*.

    Queries the NS API endpoint
    ``http://webservices.ns.nl/ns-api-avt?station=${name or station code}``
    and returns the parsed departures.

    :param station: station name or abbreviation to look up
    """
    endpoint = 'http://webservices.ns.nl/ns-api-avt?station=' + station
    response = self._request('GET', endpoint)
    return self.parse_departures(response)
constant[
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
]
variable[url] assign[=] binary_operation[constant[http://webservices.ns.nl/ns-api-avt?station=] + name[station]]
variable[raw_departures] assign[=] call[name[self]._request, parameter[constant[GET], name[url]]]
return[call[name[self].parse_departures, parameter[name[raw_departures]]]] | keyword[def] identifier[get_departures] ( identifier[self] , identifier[station] ):
literal[string]
identifier[url] = literal[string] + identifier[station]
identifier[raw_departures] = identifier[self] . identifier[_request] ( literal[string] , identifier[url] )
keyword[return] identifier[self] . identifier[parse_departures] ( identifier[raw_departures] ) | def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures) |
def _set_line_speed(self, v, load=False):
    """
    Setter method for line_speed, mapped from YANG variable /interface/management/line_speed (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_line_speed is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_line_speed() directly.
    YANG Description: The line-speed characteristics for this management
    interface.
    """
    # If the incoming value carries a union-type converter, unwrap it first
    # so the raw value is validated below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the schema-aware dynamic class; this validates
        # it against the generated YANG container model for line-speed.
        t = YANGDynClass(v,base=line_speed.line_speed, is_container='container', presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The line-speed characteristics for this management \ninterface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a schema mismatch with structured context (expected type and
        # the generated-type spec) for the binding's error reporting.
        raise ValueError({
          'error-string': """line_speed must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=line_speed.line_speed, is_container='container', presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The line-speed characteristics for this management \ninterface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
        })
    # Store the validated value under the name-mangled private attribute.
    self.__line_speed = t
    # Notify the containing tree of the change when a change hook exists.
    if hasattr(self, '_set'):
      self._set()
constant[
Setter method for line_speed, mapped from YANG variable /interface/management/line_speed (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_line_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_line_speed() directly.
YANG Description: The line-speed characteristics for this management
interface.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18dc05a20>
name[self].__line_speed assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_line_speed] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[line_speed] . identifier[line_speed] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__line_speed] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_line_speed(self, v, load=False):
"""
Setter method for line_speed, mapped from YANG variable /interface/management/line_speed (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_line_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_line_speed() directly.
YANG Description: The line-speed characteristics for this management
interface.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=line_speed.line_speed, is_container='container', presence=False, yang_name='line-speed', rest_name='line-speed', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The line-speed characteristics for this management \ninterface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'line_speed must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=line_speed.line_speed, is_container=\'container\', presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'The line-speed characteristics for this management \ninterface.\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__line_speed = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def get_start_state(self, set_final_outcome=False):
    """Get the start state of the container state.

    :param set_final_outcome: if the final_outcome of the state should be set if the income directly connects to
                              an outcome
    :return: the start state
    """
    # A start state forced from outside (e.g. a "start from state" run) overrides the statically
    # configured start state: if one of our children is on the engine's start_state_paths list,
    # that child wins.
    if self.get_path() in state_machine_execution_engine.start_state_paths:
        for state_id, state in self.states.items():
            if state.get_path() in state_machine_execution_engine.start_state_paths:
                # NOTE(review): this removes *this container's* path from start_state_paths,
                # not the matching child's path — confirm that is the intended bookkeeping.
                state_machine_execution_engine.start_state_paths.remove(self.get_path())
                # Remember that the start state was overridden for this execution.
                self._start_state_modified = True
                return state
    if self.start_state_id is None:
        return None
    # It is possible to connect the income directly with an outcome; in that case the
    # container itself acts as its own start state.
    if self.start_state_id == self.state_id:
        if set_final_outcome:
            for transition_id in self.transitions:
                # The transition whose from_state is None is the one that originates at the
                # income; its target outcome becomes the container's final outcome.
                if self.transitions[transition_id].from_state is None:
                    to_outcome_id = self.transitions[transition_id].to_outcome
                    self.final_outcome = self.outcomes[to_outcome_id]
                    break
        return self
    return self.states[self.start_state_id]
constant[Get the start state of the container state
:param set_final_outcome: if the final_outcome of the state should be set if the income directly connects to
an outcome
:return: the start state
]
if compare[call[name[self].get_path, parameter[]] in name[state_machine_execution_engine].start_state_paths] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1a3c3a0>, <ast.Name object at 0x7da1b1a3cac0>]]] in starred[call[name[self].states.items, parameter[]]] begin[:]
if compare[call[name[state].get_path, parameter[]] in name[state_machine_execution_engine].start_state_paths] begin[:]
call[name[state_machine_execution_engine].start_state_paths.remove, parameter[call[name[self].get_path, parameter[]]]]
name[self]._start_state_modified assign[=] constant[True]
return[name[state]]
if compare[name[self].start_state_id is constant[None]] begin[:]
return[constant[None]]
if compare[name[self].start_state_id equal[==] name[self].state_id] begin[:]
if name[set_final_outcome] begin[:]
for taget[name[transition_id]] in starred[name[self].transitions] begin[:]
if compare[call[name[self].transitions][name[transition_id]].from_state is constant[None]] begin[:]
variable[to_outcome_id] assign[=] call[name[self].transitions][name[transition_id]].to_outcome
name[self].final_outcome assign[=] call[name[self].outcomes][name[to_outcome_id]]
break
return[name[self]]
return[call[name[self].states][name[self].start_state_id]] | keyword[def] identifier[get_start_state] ( identifier[self] , identifier[set_final_outcome] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[get_path] () keyword[in] identifier[state_machine_execution_engine] . identifier[start_state_paths] :
keyword[for] identifier[state_id] , identifier[state] keyword[in] identifier[self] . identifier[states] . identifier[items] ():
keyword[if] identifier[state] . identifier[get_path] () keyword[in] identifier[state_machine_execution_engine] . identifier[start_state_paths] :
identifier[state_machine_execution_engine] . identifier[start_state_paths] . identifier[remove] ( identifier[self] . identifier[get_path] ())
identifier[self] . identifier[_start_state_modified] = keyword[True]
keyword[return] identifier[state]
keyword[if] identifier[self] . identifier[start_state_id] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[self] . identifier[start_state_id] == identifier[self] . identifier[state_id] :
keyword[if] identifier[set_final_outcome] :
keyword[for] identifier[transition_id] keyword[in] identifier[self] . identifier[transitions] :
keyword[if] identifier[self] . identifier[transitions] [ identifier[transition_id] ]. identifier[from_state] keyword[is] keyword[None] :
identifier[to_outcome_id] = identifier[self] . identifier[transitions] [ identifier[transition_id] ]. identifier[to_outcome]
identifier[self] . identifier[final_outcome] = identifier[self] . identifier[outcomes] [ identifier[to_outcome_id] ]
keyword[break]
keyword[return] identifier[self]
keyword[return] identifier[self] . identifier[states] [ identifier[self] . identifier[start_state_id] ] | def get_start_state(self, set_final_outcome=False):
"""Get the start state of the container state
:param set_final_outcome: if the final_outcome of the state should be set if the income directly connects to
an outcome
:return: the start state
"""
# overwrite the start state in the case that a specific start state is specific e.g. by start_from_state
if self.get_path() in state_machine_execution_engine.start_state_paths:
for (state_id, state) in self.states.items():
if state.get_path() in state_machine_execution_engine.start_state_paths:
state_machine_execution_engine.start_state_paths.remove(self.get_path())
self._start_state_modified = True
return state # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if self.start_state_id is None:
return None # depends on [control=['if'], data=[]]
# It is possible to connect the income directly with an outcome
if self.start_state_id == self.state_id:
if set_final_outcome:
for transition_id in self.transitions:
# the transition of which the from state is None is the transition that directly connects the income
if self.transitions[transition_id].from_state is None:
to_outcome_id = self.transitions[transition_id].to_outcome
self.final_outcome = self.outcomes[to_outcome_id]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['transition_id']] # depends on [control=['if'], data=[]]
return self # depends on [control=['if'], data=[]]
return self.states[self.start_state_id] |
def insert(self, *args, **kw):
    """Insert a record in the database.

    Parameters can be positional or keyword arguments. If positional
    they must be in the same order as in the create() method.
    If some of the fields are missing the value is set to None (via the
    database's own column defaults).

    Returns the record identifier (rowid of the newly inserted row).
    """
    if args:
        # Map positional values onto the field names, in create() order.
        kw = dict(zip(self.fields, args))
    # Snapshot the items once so the column list and the value list are
    # guaranteed to be in matching order.
    items = list(kw.items())
    columns = ",".join(name for name, _ in items)
    placeholders = ",".join("?" * len(items))
    sql = "INSERT INTO %s (%s) VALUES (%s)" % (self.name, columns, placeholders)
    # Pass a real sequence: under Python 3, dict.values() is a view and
    # sqlite3 rejects it ("parameters are of unsupported type").
    self.cursor.execute(sql, [value for _, value in items])
    # return last row id
    return self.cursor.lastrowid
constant[Insert a record in the database
Parameters can be positional or keyword arguments. If positional
they must be in the same order as in the create() method
If some of the fields are missing the value is set to None
Returns the record identifier
]
if name[args] begin[:]
variable[kw] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b0ab9630>]]
variable[ks] assign[=] call[name[kw].keys, parameter[]]
variable[s1] assign[=] call[constant[,].join, parameter[name[ks]]]
variable[qm] assign[=] call[constant[,].join, parameter[binary_operation[list[[<ast.Constant object at 0x7da1b0aba0b0>]] * call[name[len], parameter[name[ks]]]]]]
variable[sql] assign[=] binary_operation[constant[INSERT INTO %s (%s) VALUES (%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0abacb0>, <ast.Name object at 0x7da1b0abb0a0>, <ast.Name object at 0x7da1b0ab8d60>]]]
call[name[self].cursor.execute, parameter[name[sql], call[name[kw].values, parameter[]]]]
return[name[self].cursor.lastrowid] | keyword[def] identifier[insert] ( identifier[self] ,* identifier[args] ,** identifier[kw] ):
literal[string]
keyword[if] identifier[args] :
identifier[kw] = identifier[dict] ([( identifier[f] , identifier[arg] ) keyword[for] identifier[f] , identifier[arg] keyword[in] identifier[zip] ( identifier[self] . identifier[fields] , identifier[args] )])
identifier[ks] = identifier[kw] . identifier[keys] ()
identifier[s1] = literal[string] . identifier[join] ( identifier[ks] )
identifier[qm] = literal[string] . identifier[join] ([ literal[string] ]* identifier[len] ( identifier[ks] ))
identifier[sql] = literal[string] %( identifier[self] . identifier[name] , identifier[s1] , identifier[qm] )
identifier[self] . identifier[cursor] . identifier[execute] ( identifier[sql] , identifier[kw] . identifier[values] ())
keyword[return] identifier[self] . identifier[cursor] . identifier[lastrowid] | def insert(self, *args, **kw):
"""Insert a record in the database
Parameters can be positional or keyword arguments. If positional
they must be in the same order as in the create() method
If some of the fields are missing the value is set to None
Returns the record identifier
"""
if args:
kw = dict([(f, arg) for (f, arg) in zip(self.fields, args)]) # depends on [control=['if'], data=[]]
ks = kw.keys()
s1 = ','.join(ks)
qm = ','.join(['?'] * len(ks))
sql = 'INSERT INTO %s (%s) VALUES (%s)' % (self.name, s1, qm)
self.cursor.execute(sql, kw.values()) # return last row id
return self.cursor.lastrowid |
def download_from_s3(context):
    """Fetch an object from S3 via the context's download manager.

    Args:
        context: Must expose ``solid_config['target_file']`` and a
            ``download_manager`` resource with a ``download_file_contents``
            method.

    Returns:
        str: The path to the downloaded object.
    """
    manager = context.resources.download_manager
    return manager.download_file_contents(context, context.solid_config['target_file'])
constant[Download an object from s3.
Args:
info (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
str:
The path to the downloaded object.
]
variable[target_file] assign[=] call[name[context].solid_config][constant[target_file]]
return[call[name[context].resources.download_manager.download_file_contents, parameter[name[context], name[target_file]]]] | keyword[def] identifier[download_from_s3] ( identifier[context] ):
literal[string]
identifier[target_file] = identifier[context] . identifier[solid_config] [ literal[string] ]
keyword[return] identifier[context] . identifier[resources] . identifier[download_manager] . identifier[download_file_contents] ( identifier[context] , identifier[target_file] ) | def download_from_s3(context):
"""Download an object from s3.
Args:
info (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
str:
The path to the downloaded object.
"""
target_file = context.solid_config['target_file']
return context.resources.download_manager.download_file_contents(context, target_file) |
def EdgeDetect(alpha=0, name=None, deterministic=False, random_state=None):
    """
    Augmenter that overlays an edge-detected version of each image onto
    the original image.

    Edges are found with a 3x3 Laplacian kernel; the result is blended with
    the identity kernel according to ``alpha``.

    dtype support::

        See ``imgaug.augmenters.convolutional.Convolve``.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Visibility of the edge image. At 0, only the original image is
        visible, at 1.0 only the edge-detected version is visible.

            * If an int or float, exactly that value will be used.
            * If a tuple ``(a, b)``, a random value from the range
              ``a <= x <= b`` will be sampled per image.
            * If a list, then a random value will be sampled from that list
              per image.
            * If a StochasticParameter, a value will be sampled from the
              parameter per image.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = EdgeDetect(alpha=(0.0, 1.0))

    detects edges in an image and overlays the result with a variable alpha
    in the range ``0.0 <= a <= 1.0`` over the old image.

    """
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=(0, 1.0),
        tuple_to_uniform=True, list_to_choice=True)

    def _matrix_generator(_image, nb_channels, random_state_func):
        # Sample the per-image blending factor and blend the identity kernel
        # with the Laplacian edge kernel accordingly.
        sampled_alpha = alpha_param.draw_sample(random_state=random_state_func)
        ia.do_assert(0 <= sampled_alpha <= 1.0)
        identity_kernel = np.array([
            [0, 0, 0],
            [0, 1, 0],
            [0, 0, 0]
        ], dtype=np.float32)
        laplacian_kernel = np.array([
            [0, 1, 0],
            [1, -4, 1],
            [0, 1, 0]
        ], dtype=np.float32)
        blended = (1 - sampled_alpha) * identity_kernel + sampled_alpha * laplacian_kernel
        return [blended] * nb_channels

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    return Convolve(_matrix_generator, name=name,
                    deterministic=deterministic, random_state=random_state)
constant[
Augmenter that detects all edges in images, marks them in
a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the sharpened image. At 0, only the original image is
visible, at 1.0 only its sharpened version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = EdgeDetect(alpha=(0.0, 1.0))
detects edges in an image and overlays the result with a variable alpha
in the range ``0.0 <= a <= 1.0`` over the old image.
]
variable[alpha_param] assign[=] call[name[iap].handle_continuous_param, parameter[name[alpha], constant[alpha]]]
def function[create_matrices, parameter[_image, nb_channels, random_state_func]]:
variable[alpha_sample] assign[=] call[name[alpha_param].draw_sample, parameter[]]
call[name[ia].do_assert, parameter[compare[constant[0] less_or_equal[<=] name[alpha_sample]]]]
variable[matrix_nochange] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da1b0210c10>, <ast.List object at 0x7da1b0212830>, <ast.List object at 0x7da1b025f040>]]]]
variable[matrix_effect] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da1b025e9b0>, <ast.List object at 0x7da1b025e260>, <ast.List object at 0x7da1b025f3a0>]]]]
variable[matrix] assign[=] binary_operation[binary_operation[binary_operation[constant[1] - name[alpha_sample]] * name[matrix_nochange]] + binary_operation[name[alpha_sample] * name[matrix_effect]]]
return[binary_operation[list[[<ast.Name object at 0x7da1b025dc90>]] * name[nb_channels]]]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] binary_operation[constant[Unnamed%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b025d870>]]]
return[call[name[Convolve], parameter[name[create_matrices]]]] | keyword[def] identifier[EdgeDetect] ( identifier[alpha] = literal[int] , identifier[name] = keyword[None] , identifier[deterministic] = keyword[False] , identifier[random_state] = keyword[None] ):
literal[string]
identifier[alpha_param] = identifier[iap] . identifier[handle_continuous_param] ( identifier[alpha] , literal[string] , identifier[value_range] =( literal[int] , literal[int] ), identifier[tuple_to_uniform] = keyword[True] ,
identifier[list_to_choice] = keyword[True] )
keyword[def] identifier[create_matrices] ( identifier[_image] , identifier[nb_channels] , identifier[random_state_func] ):
identifier[alpha_sample] = identifier[alpha_param] . identifier[draw_sample] ( identifier[random_state] = identifier[random_state_func] )
identifier[ia] . identifier[do_assert] ( literal[int] <= identifier[alpha_sample] <= literal[int] )
identifier[matrix_nochange] = identifier[np] . identifier[array] ([
[ literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] ]
], identifier[dtype] = identifier[np] . identifier[float32] )
identifier[matrix_effect] = identifier[np] . identifier[array] ([
[ literal[int] , literal[int] , literal[int] ],
[ literal[int] ,- literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] ]
], identifier[dtype] = identifier[np] . identifier[float32] )
identifier[matrix] =( literal[int] - identifier[alpha_sample] )* identifier[matrix_nochange] + identifier[alpha_sample] * identifier[matrix_effect]
keyword[return] [ identifier[matrix] ]* identifier[nb_channels]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = literal[string] %( identifier[ia] . identifier[caller_name] (),)
keyword[return] identifier[Convolve] ( identifier[create_matrices] , identifier[name] = identifier[name] , identifier[deterministic] = identifier[deterministic] , identifier[random_state] = identifier[random_state] ) | def EdgeDetect(alpha=0, name=None, deterministic=False, random_state=None):
"""
Augmenter that detects all edges in images, marks them in
a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the sharpened image. At 0, only the original image is
visible, at 1.0 only its sharpened version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = EdgeDetect(alpha=(0.0, 1.0))
detects edges in an image and overlays the result with a variable alpha
in the range ``0.0 <= a <= 1.0`` over the old image.
"""
alpha_param = iap.handle_continuous_param(alpha, 'alpha', value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
def create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
matrix_nochange = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.float32)
matrix_effect = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float32)
matrix = (1 - alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = 'Unnamed%s' % (ia.caller_name(),) # depends on [control=['if'], data=['name']]
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) |
def make_config(self, data: dict):
    """Validate ``data`` and build a MIP config from it."""
    self.validate_config(data)
    return self.prepare_config(data)
constant[Make a MIP config.]
call[name[self].validate_config, parameter[name[data]]]
variable[config_data] assign[=] call[name[self].prepare_config, parameter[name[data]]]
return[name[config_data]] | keyword[def] identifier[make_config] ( identifier[self] , identifier[data] : identifier[dict] ):
literal[string]
identifier[self] . identifier[validate_config] ( identifier[data] )
identifier[config_data] = identifier[self] . identifier[prepare_config] ( identifier[data] )
keyword[return] identifier[config_data] | def make_config(self, data: dict):
"""Make a MIP config."""
self.validate_config(data)
config_data = self.prepare_config(data)
return config_data |
def method_view(injector):
    """Build a Flask ``MethodView``-based dispatching view from an injector class."""
    view_class = create_handler(MethodView)
    apply_http_methods(view_class, injector)
    return injector.let(as_view=view_class.as_view)
constant[Create Flask method based dispatching view from injector class.]
variable[handler] assign[=] call[name[create_handler], parameter[name[MethodView]]]
call[name[apply_http_methods], parameter[name[handler], name[injector]]]
return[call[name[injector].let, parameter[]]] | keyword[def] identifier[method_view] ( identifier[injector] ):
literal[string]
identifier[handler] = identifier[create_handler] ( identifier[MethodView] )
identifier[apply_http_methods] ( identifier[handler] , identifier[injector] )
keyword[return] identifier[injector] . identifier[let] ( identifier[as_view] = identifier[handler] . identifier[as_view] ) | def method_view(injector):
"""Create Flask method based dispatching view from injector class."""
handler = create_handler(MethodView)
apply_http_methods(handler, injector)
return injector.let(as_view=handler.as_view) |
def moralize(self):
    """
    Build the moral graph (an UndirectedGraph) of the network by removing
    all immoralities.

    A v-structure X->Z<-Y is an immorality if there is no directed edge
    between X and Y; moralization "marries" such parent pairs.

    Examples
    --------
    >>> from pgmpy.models import DynamicBayesianNetwork as DBN
    >>> dbn = DBN([(('D',0), ('G',0)), (('I',0), ('G',0))])
    >>> moral_graph = dbn.moralize()
    >>> moral_graph.edges()
    [(('G', 0), ('I', 0)),
     (('G', 0), ('D', 0)),
     (('D', 1), ('I', 1)),
     (('D', 1), ('G', 1)),
     (('I', 0), ('D', 0)),
     (('G', 1), ('I', 1))]
    """
    undirected = self.to_undirected()
    # Connect every pair of parents of each node ("marry" the parents).
    for vertex in super(DynamicBayesianNetwork, self).nodes():
        parent_pairs = combinations(self.get_parents(vertex), 2)
        undirected.add_edges_from(parent_pairs)
    return undirected
constant[
Removes all the immoralities in the Network and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN([(('D',0), ('G',0)), (('I',0), ('G',0))])
>>> moral_graph = dbn.moralize()
>>> moral_graph.edges()
[(('G', 0), ('I', 0)),
(('G', 0), ('D', 0)),
(('D', 1), ('I', 1)),
(('D', 1), ('G', 1)),
(('I', 0), ('D', 0)),
(('G', 1), ('I', 1))]
]
variable[moral_graph] assign[=] call[name[self].to_undirected, parameter[]]
for taget[name[node]] in starred[call[call[name[super], parameter[name[DynamicBayesianNetwork], name[self]]].nodes, parameter[]]] begin[:]
call[name[moral_graph].add_edges_from, parameter[call[name[combinations], parameter[call[name[self].get_parents, parameter[name[node]]], constant[2]]]]]
return[name[moral_graph]] | keyword[def] identifier[moralize] ( identifier[self] ):
literal[string]
identifier[moral_graph] = identifier[self] . identifier[to_undirected] ()
keyword[for] identifier[node] keyword[in] identifier[super] ( identifier[DynamicBayesianNetwork] , identifier[self] ). identifier[nodes] ():
identifier[moral_graph] . identifier[add_edges_from] ( identifier[combinations] (
identifier[self] . identifier[get_parents] ( identifier[node] ), literal[int] ))
keyword[return] identifier[moral_graph] | def moralize(self):
"""
Removes all the immoralities in the Network and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN([(('D',0), ('G',0)), (('I',0), ('G',0))])
>>> moral_graph = dbn.moralize()
>>> moral_graph.edges()
[(('G', 0), ('I', 0)),
(('G', 0), ('D', 0)),
(('D', 1), ('I', 1)),
(('D', 1), ('G', 1)),
(('I', 0), ('D', 0)),
(('G', 1), ('I', 1))]
"""
moral_graph = self.to_undirected()
for node in super(DynamicBayesianNetwork, self).nodes():
moral_graph.add_edges_from(combinations(self.get_parents(node), 2)) # depends on [control=['for'], data=['node']]
return moral_graph |
def hash160(self, is_compressed=None):
    """Return the hash160 digest of this key's SEC serialization.

    The digest is computed lazily on first access and cached per
    compression flavor. When ``is_compressed`` is None, the key's own
    compression setting determines which flavor is returned.
    """
    if is_compressed is None:
        is_compressed = self.is_compressed()
    if is_compressed:
        cached = self._hash160_compressed
        if cached is None:
            cached = hash160(self.sec(is_compressed=is_compressed))
            self._hash160_compressed = cached
        return cached
    cached = self._hash160_uncompressed
    if cached is None:
        cached = hash160(self.sec(is_compressed=is_compressed))
        self._hash160_uncompressed = cached
    return cached
constant[
Return the hash160 representation of this key, if available.
]
if compare[name[is_compressed] is constant[None]] begin[:]
variable[is_compressed] assign[=] call[name[self].is_compressed, parameter[]]
if name[is_compressed] begin[:]
if compare[name[self]._hash160_compressed is constant[None]] begin[:]
name[self]._hash160_compressed assign[=] call[name[hash160], parameter[call[name[self].sec, parameter[]]]]
return[name[self]._hash160_compressed]
if compare[name[self]._hash160_uncompressed is constant[None]] begin[:]
name[self]._hash160_uncompressed assign[=] call[name[hash160], parameter[call[name[self].sec, parameter[]]]]
return[name[self]._hash160_uncompressed] | keyword[def] identifier[hash160] ( identifier[self] , identifier[is_compressed] = keyword[None] ):
literal[string]
keyword[if] identifier[is_compressed] keyword[is] keyword[None] :
identifier[is_compressed] = identifier[self] . identifier[is_compressed] ()
keyword[if] identifier[is_compressed] :
keyword[if] identifier[self] . identifier[_hash160_compressed] keyword[is] keyword[None] :
identifier[self] . identifier[_hash160_compressed] = identifier[hash160] ( identifier[self] . identifier[sec] ( identifier[is_compressed] = identifier[is_compressed] ))
keyword[return] identifier[self] . identifier[_hash160_compressed]
keyword[if] identifier[self] . identifier[_hash160_uncompressed] keyword[is] keyword[None] :
identifier[self] . identifier[_hash160_uncompressed] = identifier[hash160] ( identifier[self] . identifier[sec] ( identifier[is_compressed] = identifier[is_compressed] ))
keyword[return] identifier[self] . identifier[_hash160_uncompressed] | def hash160(self, is_compressed=None):
"""
Return the hash160 representation of this key, if available.
"""
if is_compressed is None:
is_compressed = self.is_compressed() # depends on [control=['if'], data=['is_compressed']]
if is_compressed:
if self._hash160_compressed is None:
self._hash160_compressed = hash160(self.sec(is_compressed=is_compressed)) # depends on [control=['if'], data=[]]
return self._hash160_compressed # depends on [control=['if'], data=[]]
if self._hash160_uncompressed is None:
self._hash160_uncompressed = hash160(self.sec(is_compressed=is_compressed)) # depends on [control=['if'], data=[]]
return self._hash160_uncompressed |
def _store_generic_inference_results(self,
                                     results_dict,
                                     all_params,
                                     all_names):
    """
    Store the model inference values that are common to all choice models.
    This includes things like index coefficients, gradients, hessians,
    asymptotic covariance matrices, t-values, p-values, and robust versions
    of these values.

    Parameters
    ----------
    results_dict : dict.
        The estimation result dictionary that is output from
        scipy.optimize.minimize. In addition to the standard keys which are
        included, it should also contain the following keys:
        `["utility_coefs", "final_gradient", "final_hessian",
          "fisher_info"]`.
        The "final_gradient", "final_hessian", and "fisher_info" values
        should be the gradient, hessian, and Fisher-Information Matrix of
        the log likelihood, evaluated at the final parameter vector.
    all_params : list of 1D ndarrays.
        Should contain the various types of parameters that were actually
        estimated.
    all_names : list of strings.
        Should contain names of each estimated parameter.

    Returns
    -------
    None. Stores all results on the model instance.

    Notes
    -----
    The statements below are order-dependent: each `_store_inferential_results`
    call sets an attribute on `self` (e.g. `self.hessian`, `self.cov`) that
    later steps read.
    """
    # Store the utility coefficients
    self._store_inferential_results(results_dict["utility_coefs"],
                                    index_names=self.ind_var_names,
                                    attribute_name="coefs",
                                    series_name="coefficients")

    # Store the gradient
    self._store_inferential_results(results_dict["final_gradient"],
                                    index_names=all_names,
                                    attribute_name="gradient",
                                    series_name="gradient")

    # Store the hessian
    self._store_inferential_results(results_dict["final_hessian"],
                                    index_names=all_names,
                                    attribute_name="hessian",
                                    column_names=all_names)

    # Store the variance-covariance matrix: -inverse(Hessian) is the usual
    # asymptotic covariance estimate for maximum-likelihood parameters.
    self._store_inferential_results(-1 * scipy.linalg.inv(self.hessian),
                                    index_names=all_names,
                                    attribute_name="cov",
                                    column_names=all_names)

    # Store ALL of the estimated parameters (concatenated into one vector,
    # matching the order of all_names)
    self._store_inferential_results(np.concatenate(all_params, axis=0),
                                    index_names=all_names,
                                    attribute_name="params",
                                    series_name="parameters")

    # Store the standard errors (square roots of the covariance diagonal)
    self._store_inferential_results(np.sqrt(np.diag(self.cov)),
                                    index_names=all_names,
                                    attribute_name="standard_errors",
                                    series_name="std_err")

    # Store the t-stats of the estimated parameters
    self.tvalues = self.params / self.standard_errors
    self.tvalues.name = "t_stats"

    # Store the (two-sided) p-values: 2 * P(Z > |t|) under a standard normal
    p_vals = 2 * scipy.stats.norm.sf(np.abs(self.tvalues))
    self._store_inferential_results(p_vals,
                                    index_names=all_names,
                                    attribute_name="pvalues",
                                    series_name="p_values")

    # Store the fischer information matrix of estimated coefficients
    self._store_inferential_results(results_dict["fisher_info"],
                                    index_names=all_names,
                                    attribute_name="fisher_information",
                                    column_names=all_names)

    # Store the 'robust' (sandwich-style) variance-covariance matrix, built
    # from the hessian and the Fisher information stored above.
    robust_covariance = calc_asymptotic_covariance(self.hessian,
                                                   self.fisher_information)
    self._store_inferential_results(robust_covariance,
                                    index_names=all_names,
                                    attribute_name="robust_cov",
                                    column_names=all_names)

    # Store the 'robust' standard errors
    self._store_inferential_results(np.sqrt(np.diag(self.robust_cov)),
                                    index_names=all_names,
                                    attribute_name="robust_std_errs",
                                    series_name="robust_std_err")

    # Store the 'robust' t-stats of the estimated coefficients
    self.robust_t_stats = self.params / self.robust_std_errs
    self.robust_t_stats.name = "robust_t_stats"

    # Store the 'robust' (two-sided) p-values
    one_sided_p_vals = scipy.stats.norm.sf(np.abs(self.robust_t_stats))
    self._store_inferential_results(2 * one_sided_p_vals,
                                    index_names=all_names,
                                    attribute_name="robust_p_vals",
                                    series_name="robust_p_values")
    return None
constant[
Store the model inference values that are common to all choice models.
This includes things like index coefficients, gradients, hessians,
asymptotic covariance matrices, t-values, p-values, and robust versions
of these values.
Parameters
----------
results_dict : dict.
The estimation result dictionary that is output from
scipy.optimize.minimize. In addition to the standard keys which are
included, it should also contain the following keys:
`["utility_coefs", "final_gradient", "final_hessian",
"fisher_info"]`.
The "final_gradient", "final_hessian", and "fisher_info" values
should be the gradient, hessian, and Fisher-Information Matrix of
the log likelihood, evaluated at the final parameter vector.
all_params : list of 1D ndarrays.
Should contain the various types of parameters that were actually
estimated.
all_names : list of strings.
Should contain names of each estimated parameter.
Returns
-------
None. Stores all results on the model instance.
]
call[name[self]._store_inferential_results, parameter[call[name[results_dict]][constant[utility_coefs]]]]
call[name[self]._store_inferential_results, parameter[call[name[results_dict]][constant[final_gradient]]]]
call[name[self]._store_inferential_results, parameter[call[name[results_dict]][constant[final_hessian]]]]
call[name[self]._store_inferential_results, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b15cd1e0> * call[name[scipy].linalg.inv, parameter[name[self].hessian]]]]]
call[name[self]._store_inferential_results, parameter[call[name[np].concatenate, parameter[name[all_params]]]]]
call[name[self]._store_inferential_results, parameter[call[name[np].sqrt, parameter[call[name[np].diag, parameter[name[self].cov]]]]]]
name[self].tvalues assign[=] binary_operation[name[self].params / name[self].standard_errors]
name[self].tvalues.name assign[=] constant[t_stats]
variable[p_vals] assign[=] binary_operation[constant[2] * call[name[scipy].stats.norm.sf, parameter[call[name[np].abs, parameter[name[self].tvalues]]]]]
call[name[self]._store_inferential_results, parameter[name[p_vals]]]
call[name[self]._store_inferential_results, parameter[call[name[results_dict]][constant[fisher_info]]]]
variable[robust_covariance] assign[=] call[name[calc_asymptotic_covariance], parameter[name[self].hessian, name[self].fisher_information]]
call[name[self]._store_inferential_results, parameter[name[robust_covariance]]]
call[name[self]._store_inferential_results, parameter[call[name[np].sqrt, parameter[call[name[np].diag, parameter[name[self].robust_cov]]]]]]
name[self].robust_t_stats assign[=] binary_operation[name[self].params / name[self].robust_std_errs]
name[self].robust_t_stats.name assign[=] constant[robust_t_stats]
variable[one_sided_p_vals] assign[=] call[name[scipy].stats.norm.sf, parameter[call[name[np].abs, parameter[name[self].robust_t_stats]]]]
call[name[self]._store_inferential_results, parameter[binary_operation[constant[2] * name[one_sided_p_vals]]]]
return[constant[None]] | keyword[def] identifier[_store_generic_inference_results] ( identifier[self] ,
identifier[results_dict] ,
identifier[all_params] ,
identifier[all_names] ):
literal[string]
identifier[self] . identifier[_store_inferential_results] ( identifier[results_dict] [ literal[string] ],
identifier[index_names] = identifier[self] . identifier[ind_var_names] ,
identifier[attribute_name] = literal[string] ,
identifier[series_name] = literal[string] )
identifier[self] . identifier[_store_inferential_results] ( identifier[results_dict] [ literal[string] ],
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[series_name] = literal[string] )
identifier[self] . identifier[_store_inferential_results] ( identifier[results_dict] [ literal[string] ],
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[column_names] = identifier[all_names] )
identifier[self] . identifier[_store_inferential_results] (- literal[int] * identifier[scipy] . identifier[linalg] . identifier[inv] ( identifier[self] . identifier[hessian] ),
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[column_names] = identifier[all_names] )
identifier[self] . identifier[_store_inferential_results] ( identifier[np] . identifier[concatenate] ( identifier[all_params] , identifier[axis] = literal[int] ),
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[series_name] = literal[string] )
identifier[self] . identifier[_store_inferential_results] ( identifier[np] . identifier[sqrt] ( identifier[np] . identifier[diag] ( identifier[self] . identifier[cov] )),
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[series_name] = literal[string] )
identifier[self] . identifier[tvalues] = identifier[self] . identifier[params] / identifier[self] . identifier[standard_errors]
identifier[self] . identifier[tvalues] . identifier[name] = literal[string]
identifier[p_vals] = literal[int] * identifier[scipy] . identifier[stats] . identifier[norm] . identifier[sf] ( identifier[np] . identifier[abs] ( identifier[self] . identifier[tvalues] ))
identifier[self] . identifier[_store_inferential_results] ( identifier[p_vals] ,
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[series_name] = literal[string] )
identifier[self] . identifier[_store_inferential_results] ( identifier[results_dict] [ literal[string] ],
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[column_names] = identifier[all_names] )
identifier[robust_covariance] = identifier[calc_asymptotic_covariance] ( identifier[self] . identifier[hessian] ,
identifier[self] . identifier[fisher_information] )
identifier[self] . identifier[_store_inferential_results] ( identifier[robust_covariance] ,
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[column_names] = identifier[all_names] )
identifier[self] . identifier[_store_inferential_results] ( identifier[np] . identifier[sqrt] ( identifier[np] . identifier[diag] ( identifier[self] . identifier[robust_cov] )),
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[series_name] = literal[string] )
identifier[self] . identifier[robust_t_stats] = identifier[self] . identifier[params] / identifier[self] . identifier[robust_std_errs]
identifier[self] . identifier[robust_t_stats] . identifier[name] = literal[string]
identifier[one_sided_p_vals] = identifier[scipy] . identifier[stats] . identifier[norm] . identifier[sf] ( identifier[np] . identifier[abs] ( identifier[self] . identifier[robust_t_stats] ))
identifier[self] . identifier[_store_inferential_results] ( literal[int] * identifier[one_sided_p_vals] ,
identifier[index_names] = identifier[all_names] ,
identifier[attribute_name] = literal[string] ,
identifier[series_name] = literal[string] )
keyword[return] keyword[None] | def _store_generic_inference_results(self, results_dict, all_params, all_names):
"""
Store the model inference values that are common to all choice models.
This includes things like index coefficients, gradients, hessians,
asymptotic covariance matrices, t-values, p-values, and robust versions
of these values.
Parameters
----------
results_dict : dict.
The estimation result dictionary that is output from
scipy.optimize.minimize. In addition to the standard keys which are
included, it should also contain the following keys:
`["utility_coefs", "final_gradient", "final_hessian",
"fisher_info"]`.
The "final_gradient", "final_hessian", and "fisher_info" values
should be the gradient, hessian, and Fisher-Information Matrix of
the log likelihood, evaluated at the final parameter vector.
all_params : list of 1D ndarrays.
Should contain the various types of parameters that were actually
estimated.
all_names : list of strings.
Should contain names of each estimated parameter.
Returns
-------
None. Stores all results on the model instance.
"""
# Store the utility coefficients
self._store_inferential_results(results_dict['utility_coefs'], index_names=self.ind_var_names, attribute_name='coefs', series_name='coefficients')
# Store the gradient
self._store_inferential_results(results_dict['final_gradient'], index_names=all_names, attribute_name='gradient', series_name='gradient')
# Store the hessian
self._store_inferential_results(results_dict['final_hessian'], index_names=all_names, attribute_name='hessian', column_names=all_names)
# Store the variance-covariance matrix
self._store_inferential_results(-1 * scipy.linalg.inv(self.hessian), index_names=all_names, attribute_name='cov', column_names=all_names)
# Store ALL of the estimated parameters
self._store_inferential_results(np.concatenate(all_params, axis=0), index_names=all_names, attribute_name='params', series_name='parameters')
# Store the standard errors
self._store_inferential_results(np.sqrt(np.diag(self.cov)), index_names=all_names, attribute_name='standard_errors', series_name='std_err')
# Store the t-stats of the estimated parameters
self.tvalues = self.params / self.standard_errors
self.tvalues.name = 't_stats'
# Store the p-values
p_vals = 2 * scipy.stats.norm.sf(np.abs(self.tvalues))
self._store_inferential_results(p_vals, index_names=all_names, attribute_name='pvalues', series_name='p_values')
# Store the fischer information matrix of estimated coefficients
self._store_inferential_results(results_dict['fisher_info'], index_names=all_names, attribute_name='fisher_information', column_names=all_names)
# Store the 'robust' variance-covariance matrix
robust_covariance = calc_asymptotic_covariance(self.hessian, self.fisher_information)
self._store_inferential_results(robust_covariance, index_names=all_names, attribute_name='robust_cov', column_names=all_names)
# Store the 'robust' standard errors
self._store_inferential_results(np.sqrt(np.diag(self.robust_cov)), index_names=all_names, attribute_name='robust_std_errs', series_name='robust_std_err')
# Store the 'robust' t-stats of the estimated coefficients
self.robust_t_stats = self.params / self.robust_std_errs
self.robust_t_stats.name = 'robust_t_stats'
# Store the 'robust' p-values
one_sided_p_vals = scipy.stats.norm.sf(np.abs(self.robust_t_stats))
self._store_inferential_results(2 * one_sided_p_vals, index_names=all_names, attribute_name='robust_p_vals', series_name='robust_p_values')
return None |
def _handle_start_dag(self, request):
    """ The handler for the start_dag request.
    Queues a new dag built from the incoming request's payload.
    Args:
        request (Request): Reference to a request object containing the
                           incoming request. The payload has to contain the
                           following fields:
                            'name': the name of the dag that should be started
                            'data': the data that is passed onto the start tasks
    Returns:
        Response: A response object containing the following fields:
            - dag_name: The name of the started dag. Success is reported
              as the dag name being non-None.
    """
    payload = request.payload
    queued_name = self._queue_dag(name=payload['name'],
                                  data=payload['data'])
    return Response(success=queued_name is not None,
                    uid=request.uid,
                    payload={'dag_name': queued_name})
constant[ The handler for the start_dag request.
The start_dag request creates a new dag and adds it to the queue.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be started
'data': the data that is passed onto the start tasks
Returns:
Response: A response object containing the following fields:
- dag_name: The name of the started dag.
]
variable[dag_name] assign[=] call[name[self]._queue_dag, parameter[]]
return[call[name[Response], parameter[]]] | keyword[def] identifier[_handle_start_dag] ( identifier[self] , identifier[request] ):
literal[string]
identifier[dag_name] = identifier[self] . identifier[_queue_dag] ( identifier[name] = identifier[request] . identifier[payload] [ literal[string] ],
identifier[data] = identifier[request] . identifier[payload] [ literal[string] ])
keyword[return] identifier[Response] ( identifier[success] = identifier[dag_name] keyword[is] keyword[not] keyword[None] , identifier[uid] = identifier[request] . identifier[uid] ,
identifier[payload] ={ literal[string] : identifier[dag_name] }) | def _handle_start_dag(self, request):
""" The handler for the start_dag request.
The start_dag request creates a new dag and adds it to the queue.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be started
'data': the data that is passed onto the start tasks
Returns:
Response: A response object containing the following fields:
- dag_name: The name of the started dag.
"""
dag_name = self._queue_dag(name=request.payload['name'], data=request.payload['data'])
return Response(success=dag_name is not None, uid=request.uid, payload={'dag_name': dag_name}) |
def as_dict(self):
    """A dictionary representation of the :class:`.ObjectValidationResults`
    instance.
    Keys:
        * ``'result'``: The validation results (``True`` or ``False``)
        * ``'errors'``: A list of validation errors.
    Returns:
        A dictionary representation of an instance of this class.
    """
    # Start from the base-class representation and attach errors only
    # when there are any.
    result = super(ObjectValidationResults, self).as_dict()
    errors = self.errors
    if errors:
        serialized = []
        for error in errors:
            serialized.append(error.as_dict())
        result['errors'] = serialized
    return result
constant[A dictionary representation of the :class:`.ObjectValidationResults`
instance.
Keys:
* ``'result'``: The validation results (``True`` or ``False``)
* ``'errors'``: A list of validation errors.
Returns:
A dictionary representation of an instance of this class.
]
variable[d] assign[=] call[call[name[super], parameter[name[ObjectValidationResults], name[self]]].as_dict, parameter[]]
if name[self].errors begin[:]
call[name[d]][constant[errors]] assign[=] <ast.ListComp object at 0x7da1b0f0f1c0>
return[name[d]] | keyword[def] identifier[as_dict] ( identifier[self] ):
literal[string]
identifier[d] = identifier[super] ( identifier[ObjectValidationResults] , identifier[self] ). identifier[as_dict] ()
keyword[if] identifier[self] . identifier[errors] :
identifier[d] [ literal[string] ]=[ identifier[x] . identifier[as_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[errors] ]
keyword[return] identifier[d] | def as_dict(self):
"""A dictionary representation of the :class:`.ObjectValidationResults`
instance.
Keys:
* ``'result'``: The validation results (``True`` or ``False``)
* ``'errors'``: A list of validation errors.
Returns:
A dictionary representation of an instance of this class.
"""
d = super(ObjectValidationResults, self).as_dict()
if self.errors:
d['errors'] = [x.as_dict() for x in self.errors] # depends on [control=['if'], data=[]]
return d |
def downsample(self, sample_factor=2):
    """
    Downsample series by an integer factor by averaging.

    Parameters
    ----------
    sample_factor : positive integer, optional, default=2
        Factor for downsampling.

    Returns
    -------
    A new series where each value is the mean of ``sample_factor``
    consecutive values; trailing values that do not fill a complete
    window are dropped.

    Raises
    ------
    ValueError
        If ``sample_factor`` is not positive.
    """
    # Reject zero as well as negatives: the original `< 0` guard let 0
    # through to a ZeroDivisionError below.
    if sample_factor <= 0:
        raise ValueError('Factor for subsampling must be positive, got %g' % sample_factor)
    # Number of complete windows; the trailing remainder is discarded.
    newlength = floor(len(self.index) / sample_factor)
    # Truncate to a whole number of windows, then average within each window.
    func = lambda v: v[0:int(newlength * sample_factor)].reshape(-1, sample_factor).mean(axis=1)
    newindex = arange(newlength)
    return self.map(func, index=newindex)
constant[
Downsample series by an integer factor by averaging.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling.
]
if compare[name[sample_factor] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da18f810d60>
variable[newlength] assign[=] call[name[floor], parameter[binary_operation[call[name[len], parameter[name[self].index]] / name[sample_factor]]]]
variable[func] assign[=] <ast.Lambda object at 0x7da18f812fb0>
variable[newindex] assign[=] call[name[arange], parameter[name[newlength]]]
return[call[name[self].map, parameter[name[func]]]] | keyword[def] identifier[downsample] ( identifier[self] , identifier[sample_factor] = literal[int] ):
literal[string]
keyword[if] identifier[sample_factor] < literal[int] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[sample_factor] )
identifier[newlength] = identifier[floor] ( identifier[len] ( identifier[self] . identifier[index] )/ identifier[sample_factor] )
identifier[func] = keyword[lambda] identifier[v] : identifier[v] [ literal[int] : identifier[int] ( identifier[newlength] * identifier[sample_factor] )]. identifier[reshape] (- literal[int] , identifier[sample_factor] ). identifier[mean] ( identifier[axis] = literal[int] )
identifier[newindex] = identifier[arange] ( identifier[newlength] )
keyword[return] identifier[self] . identifier[map] ( identifier[func] , identifier[index] = identifier[newindex] ) | def downsample(self, sample_factor=2):
"""
Downsample series by an integer factor by averaging.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling.
"""
if sample_factor < 0:
raise Exception('Factor for subsampling must be postive, got %g' % sample_factor) # depends on [control=['if'], data=['sample_factor']]
newlength = floor(len(self.index) / sample_factor)
func = lambda v: v[0:int(newlength * sample_factor)].reshape(-1, sample_factor).mean(axis=1)
newindex = arange(newlength)
return self.map(func, index=newindex) |
def append(self, other, inplace=False, **kwargs):
    """
    Append any input which can be converted to MAGICCData to self.
    Parameters
    ----------
    other : MAGICCData, pd.DataFrame, pd.Series, str
        Source of data to append.
    inplace : bool
        If True, append ``other`` inplace, otherwise return a new ``MAGICCData``
        instance.
    **kwargs
        Passed to ``MAGICCData`` constructor (only used if ``MAGICCData`` is not a
        ``MAGICCData`` instance).
    """
    # Coerce anything that is not already MAGICCData before appending.
    if not isinstance(other, MAGICCData):
        other = MAGICCData(other, **kwargs)
    if not inplace:
        # Build a fresh instance whose metadata starts as a deep copy of
        # ours, then let other's metadata win on key collisions.
        combined = super().append(other, inplace=False)
        combined.metadata = deepcopy(self.metadata)
        combined.metadata.update(other.metadata)
        return combined
    # In-place: mutate self and merge other's metadata into ours.
    super().append(other, inplace=True)
    self.metadata.update(other.metadata)
constant[
Append any input which can be converted to MAGICCData to self.
Parameters
----------
other : MAGICCData, pd.DataFrame, pd.Series, str
Source of data to append.
inplace : bool
If True, append ``other`` inplace, otherwise return a new ``MAGICCData``
instance.
**kwargs
Passed to ``MAGICCData`` constructor (only used if ``MAGICCData`` is not a
``MAGICCData`` instance).
]
if <ast.UnaryOp object at 0x7da20c9918a0> begin[:]
variable[other] assign[=] call[name[MAGICCData], parameter[name[other]]]
if name[inplace] begin[:]
call[call[name[super], parameter[]].append, parameter[name[other]]]
call[name[self].metadata.update, parameter[name[other].metadata]] | keyword[def] identifier[append] ( identifier[self] , identifier[other] , identifier[inplace] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[MAGICCData] ):
identifier[other] = identifier[MAGICCData] ( identifier[other] ,** identifier[kwargs] )
keyword[if] identifier[inplace] :
identifier[super] (). identifier[append] ( identifier[other] , identifier[inplace] = identifier[inplace] )
identifier[self] . identifier[metadata] . identifier[update] ( identifier[other] . identifier[metadata] )
keyword[else] :
identifier[res] = identifier[super] (). identifier[append] ( identifier[other] , identifier[inplace] = identifier[inplace] )
identifier[res] . identifier[metadata] = identifier[deepcopy] ( identifier[self] . identifier[metadata] )
identifier[res] . identifier[metadata] . identifier[update] ( identifier[other] . identifier[metadata] )
keyword[return] identifier[res] | def append(self, other, inplace=False, **kwargs):
"""
Append any input which can be converted to MAGICCData to self.
Parameters
----------
other : MAGICCData, pd.DataFrame, pd.Series, str
Source of data to append.
inplace : bool
If True, append ``other`` inplace, otherwise return a new ``MAGICCData``
instance.
**kwargs
Passed to ``MAGICCData`` constructor (only used if ``MAGICCData`` is not a
``MAGICCData`` instance).
"""
if not isinstance(other, MAGICCData):
other = MAGICCData(other, **kwargs) # depends on [control=['if'], data=[]]
if inplace:
super().append(other, inplace=inplace)
self.metadata.update(other.metadata) # depends on [control=['if'], data=[]]
else:
res = super().append(other, inplace=inplace)
res.metadata = deepcopy(self.metadata)
res.metadata.update(other.metadata)
return res |
def notify(title,
           message,
           secret,
           endpoint=None,
           level=3,
           link=None,
           retcode=None):
    """
    Required parameter:
    * ``secret`` - The Pushjet service secret token, created with
    http://docs.pushjet.io/docs/creating-a-new-service
    Optional parameters:
    * ``endpoint`` - custom Pushjet API endpoint
    (defaults to https://api.pushjet.io)
    * ``level`` - The importance level from 1(low) to 5(high)
    * ``link``
    """
    # Assemble the form payload; 'link' is only sent when provided.
    payload = {
        'secret': secret,
        'title': title,
        'message': message,
        'level': level,
    }
    if link:
        payload['link'] = link
    if endpoint is None:
        endpoint = 'https://api.pushjet.io'
    response = requests.post(endpoint + '/message',
                             data=payload,
                             headers={'User-Agent': USER_AGENT})
    response.raise_for_status()
constant[
Required parameter:
* ``secret`` - The Pushjet service secret token, created with
http://docs.pushjet.io/docs/creating-a-new-service
Optional parameters:
* ``endpoint`` - custom Pushjet API endpoint
(defaults to https://api.pushjet.io)
* ``level`` - The importance level from 1(low) to 5(high)
* ``link``
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1ecf130>, <ast.Constant object at 0x7da1b1ece290>, <ast.Constant object at 0x7da1b1ecf2b0>, <ast.Constant object at 0x7da1b1ecf880>], [<ast.Name object at 0x7da1b1ecdd50>, <ast.Name object at 0x7da1b1eceaa0>, <ast.Name object at 0x7da1b1ecd270>, <ast.Name object at 0x7da1b1ecd030>]]
if name[link] begin[:]
call[name[data]][constant[link]] assign[=] name[link]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b1ece6e0>], [<ast.Name object at 0x7da1b1ece1d0>]]
if compare[name[endpoint] is constant[None]] begin[:]
variable[endpoint] assign[=] constant[https://api.pushjet.io]
variable[resp] assign[=] call[name[requests].post, parameter[binary_operation[name[endpoint] + constant[/message]]]]
call[name[resp].raise_for_status, parameter[]] | keyword[def] identifier[notify] ( identifier[title] ,
identifier[message] ,
identifier[secret] ,
identifier[endpoint] = keyword[None] ,
identifier[level] = literal[int] ,
identifier[link] = keyword[None] ,
identifier[retcode] = keyword[None] ):
literal[string]
identifier[data] ={
literal[string] : identifier[title] ,
literal[string] : identifier[message] ,
literal[string] : identifier[level] ,
literal[string] : identifier[secret] ,
}
keyword[if] identifier[link] :
identifier[data] [ literal[string] ]= identifier[link]
identifier[headers] ={ literal[string] : identifier[USER_AGENT] }
keyword[if] identifier[endpoint] keyword[is] keyword[None] :
identifier[endpoint] = literal[string]
identifier[resp] = identifier[requests] . identifier[post] ( identifier[endpoint] + literal[string] , identifier[data] = identifier[data] , identifier[headers] = identifier[headers] )
identifier[resp] . identifier[raise_for_status] () | def notify(title, message, secret, endpoint=None, level=3, link=None, retcode=None):
"""
Required parameter:
* ``secret`` - The Pushjet service secret token, created with
http://docs.pushjet.io/docs/creating-a-new-service
Optional parameters:
* ``endpoint`` - custom Pushjet API endpoint
(defaults to https://api.pushjet.io)
* ``level`` - The importance level from 1(low) to 5(high)
* ``link``
"""
data = {'title': title, 'message': message, 'level': level, 'secret': secret}
if link:
data['link'] = link # depends on [control=['if'], data=[]]
headers = {'User-Agent': USER_AGENT}
if endpoint is None:
endpoint = 'https://api.pushjet.io' # depends on [control=['if'], data=['endpoint']]
resp = requests.post(endpoint + '/message', data=data, headers=headers)
resp.raise_for_status() |
def execute_perceval_job(backend, backend_args, qitems, task_id, category,
                         archive_args=None, max_retries=MAX_JOB_RETRIES):
    """Execute a Perceval job on RQ.
    The items fetched during the process will be stored in a
    Redis queue named `queue`.
    Setting the parameter `archive_path`, raw data will be stored
    with the archive manager. The contents from the archive can
    be retrieved setting the parameter `fetch_from_archive` to `True`,
    too. Take into account this behaviour will be only available
    when the backend supports the use of the archive. If archiving
    is not supported, an `AttributeError` exception will be raised.
    :param backend: backend to execute
    :param backend_args: dict of arguments for running the backend
    :param qitems: name of the RQ queue used to store the items
    :param task_id: identifier of the task linked to this job
    :param category: category of the items to retrieve
    :param archive_args: archive arguments
    :param max_retries: maximum number of attempts this job can execute
        before failing
    :returns: a `JobResult` instance
    :raises NotFoundError: raised when the backend is not found
    :raises AttributeError: raised when archiving is not supported but
        any of the archive parameters were set
    """
    # Identify this execution by the RQ job currently running it.
    rq_job = rq.get_current_job()
    job = PercevalJob(rq_job.id, task_id, backend, category,
                      rq_job.connection, qitems)
    logger.debug("Running job #%s (task: %s) (%s) (cat:%s)",
                 job.job_id, task_id, backend, category)
    # Fail fast if archive parameters were given but the backend cannot archive.
    if not job.has_archiving() and archive_args:
        raise AttributeError("archive attributes set but archive is not supported")
    # Retry loop: a failed run is re-attempted in resume mode (when the
    # backend supports resuming) until max_retries failures accumulate.
    run_job = True
    resume = False
    failures = 0
    while run_job:
        try:
            job.run(backend_args, archive_args=archive_args, resume=resume)
        except AttributeError as e:
            # Misuse of archive arguments is a caller error; never retried.
            raise e
        except Exception as e:
            logger.debug("Error running job %s (%s) - %s",
                         job.job_id, backend, str(e))
            failures += 1
            # Give up when resuming is unsupported or the retry budget is spent.
            if not job.has_resuming() or failures >= max_retries:
                logger.error("Cancelling job #%s (task: %s) (%s)",
                             job.job_id, task_id, backend)
                raise e
            logger.warning("Resuming job #%s (task: %s) (%s) due to a failure (n %s, max %s)",
                           job.job_id, task_id, backend, failures, max_retries)
            resume = True
        else:
            # No failure, do not retry
            run_job = False
    result = job.result
    logger.debug("Job #%s (task: %s) completed (%s) - %s items (%s) fetched",
                 result.job_id, task_id, result.backend, str(result.nitems), result.category)
    return result
constant[Execute a Perceval job on RQ.
The items fetched during the process will be stored in a
Redis queue named `queue`.
Setting the parameter `archive_path`, raw data will be stored
with the archive manager. The contents from the archive can
be retrieved setting the pameter `fetch_from_archive` to `True`,
too. Take into account this behaviour will be only available
when the backend supports the use of the archive. If archiving
is not supported, an `AttributeError` exception will be raised.
:param backend: backend to execute
:param bakend_args: dict of arguments for running the backend
:param qitems: name of the RQ queue used to store the items
:param task_id: identifier of the task linked to this job
:param category: category of the items to retrieve
:param archive_args: archive arguments
:param max_retries: maximum number of attempts this job can execute
before failing
:returns: a `JobResult` instance
:raises NotFoundError: raised when the backend is not found
:raises AttributeError: raised when archiving is not supported but
any of the archive parameters were set
]
variable[rq_job] assign[=] call[name[rq].get_current_job, parameter[]]
variable[job] assign[=] call[name[PercevalJob], parameter[name[rq_job].id, name[task_id], name[backend], name[category], name[rq_job].connection, name[qitems]]]
call[name[logger].debug, parameter[constant[Running job #%s (task: %s) (%s) (cat:%s)], name[job].job_id, name[task_id], name[backend], name[category]]]
if <ast.BoolOp object at 0x7da2041d83d0> begin[:]
<ast.Raise object at 0x7da2041dab00>
variable[run_job] assign[=] constant[True]
variable[resume] assign[=] constant[False]
variable[failures] assign[=] constant[0]
while name[run_job] begin[:]
<ast.Try object at 0x7da2041dace0>
variable[result] assign[=] name[job].result
call[name[logger].debug, parameter[constant[Job #%s (task: %s) completed (%s) - %s items (%s) fetched], name[result].job_id, name[task_id], name[result].backend, call[name[str], parameter[name[result].nitems]], name[result].category]]
return[name[result]] | keyword[def] identifier[execute_perceval_job] ( identifier[backend] , identifier[backend_args] , identifier[qitems] , identifier[task_id] , identifier[category] ,
identifier[archive_args] = keyword[None] , identifier[max_retries] = identifier[MAX_JOB_RETRIES] ):
literal[string]
identifier[rq_job] = identifier[rq] . identifier[get_current_job] ()
identifier[job] = identifier[PercevalJob] ( identifier[rq_job] . identifier[id] , identifier[task_id] , identifier[backend] , identifier[category] ,
identifier[rq_job] . identifier[connection] , identifier[qitems] )
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[job] . identifier[job_id] , identifier[task_id] , identifier[backend] , identifier[category] )
keyword[if] keyword[not] identifier[job] . identifier[has_archiving] () keyword[and] identifier[archive_args] :
keyword[raise] identifier[AttributeError] ( literal[string] )
identifier[run_job] = keyword[True]
identifier[resume] = keyword[False]
identifier[failures] = literal[int]
keyword[while] identifier[run_job] :
keyword[try] :
identifier[job] . identifier[run] ( identifier[backend_args] , identifier[archive_args] = identifier[archive_args] , identifier[resume] = identifier[resume] )
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[raise] identifier[e]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[job] . identifier[job_id] , identifier[backend] , identifier[str] ( identifier[e] ))
identifier[failures] += literal[int]
keyword[if] keyword[not] identifier[job] . identifier[has_resuming] () keyword[or] identifier[failures] >= identifier[max_retries] :
identifier[logger] . identifier[error] ( literal[string] ,
identifier[job] . identifier[job_id] , identifier[task_id] , identifier[backend] )
keyword[raise] identifier[e]
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[job] . identifier[job_id] , identifier[task_id] , identifier[backend] , identifier[failures] , identifier[max_retries] )
identifier[resume] = keyword[True]
keyword[else] :
identifier[run_job] = keyword[False]
identifier[result] = identifier[job] . identifier[result]
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[result] . identifier[job_id] , identifier[task_id] , identifier[result] . identifier[backend] , identifier[str] ( identifier[result] . identifier[nitems] ), identifier[result] . identifier[category] )
keyword[return] identifier[result] | def execute_perceval_job(backend, backend_args, qitems, task_id, category, archive_args=None, max_retries=MAX_JOB_RETRIES):
"""Execute a Perceval job on RQ.
The items fetched during the process will be stored in a
Redis queue named `queue`.
Setting the parameter `archive_path`, raw data will be stored
with the archive manager. The contents from the archive can
be retrieved setting the pameter `fetch_from_archive` to `True`,
too. Take into account this behaviour will be only available
when the backend supports the use of the archive. If archiving
is not supported, an `AttributeError` exception will be raised.
:param backend: backend to execute
:param bakend_args: dict of arguments for running the backend
:param qitems: name of the RQ queue used to store the items
:param task_id: identifier of the task linked to this job
:param category: category of the items to retrieve
:param archive_args: archive arguments
:param max_retries: maximum number of attempts this job can execute
before failing
:returns: a `JobResult` instance
:raises NotFoundError: raised when the backend is not found
:raises AttributeError: raised when archiving is not supported but
any of the archive parameters were set
"""
rq_job = rq.get_current_job()
job = PercevalJob(rq_job.id, task_id, backend, category, rq_job.connection, qitems)
logger.debug('Running job #%s (task: %s) (%s) (cat:%s)', job.job_id, task_id, backend, category)
if not job.has_archiving() and archive_args:
raise AttributeError('archive attributes set but archive is not supported') # depends on [control=['if'], data=[]]
run_job = True
resume = False
failures = 0
while run_job:
try:
job.run(backend_args, archive_args=archive_args, resume=resume) # depends on [control=['try'], data=[]]
except AttributeError as e:
raise e # depends on [control=['except'], data=['e']]
except Exception as e:
logger.debug('Error running job %s (%s) - %s', job.job_id, backend, str(e))
failures += 1
if not job.has_resuming() or failures >= max_retries:
logger.error('Cancelling job #%s (task: %s) (%s)', job.job_id, task_id, backend)
raise e # depends on [control=['if'], data=[]]
logger.warning('Resuming job #%s (task: %s) (%s) due to a failure (n %s, max %s)', job.job_id, task_id, backend, failures, max_retries)
resume = True # depends on [control=['except'], data=['e']]
else:
# No failure, do not retry
run_job = False # depends on [control=['while'], data=[]]
result = job.result
logger.debug('Job #%s (task: %s) completed (%s) - %s items (%s) fetched', result.job_id, task_id, result.backend, str(result.nitems), result.category)
return result |
def connect(self, addr):
    '''
    Call adb connect
    Return true when connect success
    '''
    # Default to adb's standard TCP port when none was given.
    target = addr if ':' in addr else addr + ':5555'
    result = self.run_cmd('connect', target)
    # adb prints "unable to connect ..." on failure but still exits 0,
    # so success is detected from the command output.
    return 'unable to connect' not in result
constant[
Call adb connect
Return true when connect success
]
if compare[call[name[addr].find, parameter[constant[:]]] equal[==] <ast.UnaryOp object at 0x7da18ede6140>] begin[:]
<ast.AugAssign object at 0x7da18ede7b50>
variable[output] assign[=] call[name[self].run_cmd, parameter[constant[connect], name[addr]]]
return[compare[constant[unable to connect] <ast.NotIn object at 0x7da2590d7190> name[output]]] | keyword[def] identifier[connect] ( identifier[self] , identifier[addr] ):
literal[string]
keyword[if] identifier[addr] . identifier[find] ( literal[string] )==- literal[int] :
identifier[addr] += literal[string]
identifier[output] = identifier[self] . identifier[run_cmd] ( literal[string] , identifier[addr] )
keyword[return] literal[string] keyword[not] keyword[in] identifier[output] | def connect(self, addr):
"""
Call adb connect
Return true when connect success
"""
if addr.find(':') == -1:
addr += ':5555' # depends on [control=['if'], data=[]]
output = self.run_cmd('connect', addr)
return 'unable to connect' not in output |
def _get_metadata(self, file_id, metadata_fields=''):
''' a helper method for retrieving the metadata of a file '''
title = '%s._get_metadata' % self.__class__.__name__
# construct fields arg
if not metadata_fields:
metadata_fields = ','.join(self.object_file.keys())
else:
field_list = metadata_fields.split(',')
for field in field_list:
if not field in self.object_file.keys():
raise ValueError('%s(metadata_fields="%s") is not a valid drive file field' % (title, field))
# send request
try:
metadata_details = self.drive.get(fileId=file_id, fields=metadata_fields).execute()
except:
raise DriveConnectionError(title)
return metadata_details | def function[_get_metadata, parameter[self, file_id, metadata_fields]]:
constant[ a helper method for retrieving the metadata of a file ]
variable[title] assign[=] binary_operation[constant[%s._get_metadata] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__]
if <ast.UnaryOp object at 0x7da20e955510> begin[:]
variable[metadata_fields] assign[=] call[constant[,].join, parameter[call[name[self].object_file.keys, parameter[]]]]
<ast.Try object at 0x7da20e957580>
return[name[metadata_details]] | keyword[def] identifier[_get_metadata] ( identifier[self] , identifier[file_id] , identifier[metadata_fields] = literal[string] ):
literal[string]
identifier[title] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__]
keyword[if] keyword[not] identifier[metadata_fields] :
identifier[metadata_fields] = literal[string] . identifier[join] ( identifier[self] . identifier[object_file] . identifier[keys] ())
keyword[else] :
identifier[field_list] = identifier[metadata_fields] . identifier[split] ( literal[string] )
keyword[for] identifier[field] keyword[in] identifier[field_list] :
keyword[if] keyword[not] identifier[field] keyword[in] identifier[self] . identifier[object_file] . identifier[keys] ():
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[title] , identifier[field] ))
keyword[try] :
identifier[metadata_details] = identifier[self] . identifier[drive] . identifier[get] ( identifier[fileId] = identifier[file_id] , identifier[fields] = identifier[metadata_fields] ). identifier[execute] ()
keyword[except] :
keyword[raise] identifier[DriveConnectionError] ( identifier[title] )
keyword[return] identifier[metadata_details] | def _get_metadata(self, file_id, metadata_fields=''):
""" a helper method for retrieving the metadata of a file """
title = '%s._get_metadata' % self.__class__.__name__
# construct fields arg
if not metadata_fields:
metadata_fields = ','.join(self.object_file.keys()) # depends on [control=['if'], data=[]]
else:
field_list = metadata_fields.split(',')
for field in field_list:
if not field in self.object_file.keys():
raise ValueError('%s(metadata_fields="%s") is not a valid drive file field' % (title, field)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
# send request
try:
metadata_details = self.drive.get(fileId=file_id, fields=metadata_fields).execute() # depends on [control=['try'], data=[]]
except:
raise DriveConnectionError(title) # depends on [control=['except'], data=[]]
return metadata_details |
def client(host='localhost', port=2379,
           ca_cert=None, cert_key=None, cert_cert=None,
           timeout=None, protocol="http"):
    """Build and return a configured Etcd3Client instance."""
    # Collect every connection option and forward it unchanged to the
    # client class.
    options = {
        'host': host,
        'port': port,
        'ca_cert': ca_cert,
        'cert_key': cert_key,
        'cert_cert': cert_cert,
        'timeout': timeout,
        'protocol': protocol,
    }
    return Etcd3Client(**options)
constant[Return an instance of an Etcd3Client.]
return[call[name[Etcd3Client], parameter[]]] | keyword[def] identifier[client] ( identifier[host] = literal[string] , identifier[port] = literal[int] ,
identifier[ca_cert] = keyword[None] , identifier[cert_key] = keyword[None] , identifier[cert_cert] = keyword[None] ,
identifier[timeout] = keyword[None] , identifier[protocol] = literal[string] ):
literal[string]
keyword[return] identifier[Etcd3Client] ( identifier[host] = identifier[host] ,
identifier[port] = identifier[port] ,
identifier[ca_cert] = identifier[ca_cert] ,
identifier[cert_key] = identifier[cert_key] ,
identifier[cert_cert] = identifier[cert_cert] ,
identifier[timeout] = identifier[timeout] ,
identifier[protocol] = identifier[protocol] ) | def client(host='localhost', port=2379, ca_cert=None, cert_key=None, cert_cert=None, timeout=None, protocol='http'):
"""Return an instance of an Etcd3Client."""
return Etcd3Client(host=host, port=port, ca_cert=ca_cert, cert_key=cert_key, cert_cert=cert_cert, timeout=timeout, protocol=protocol) |
def _main():
    """Parse options and run checks on Python source."""
    import signal
    # Handle "Broken pipe" gracefully
    try:
        signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
    except AttributeError:
        pass  # not supported on Windows
    # Build the style guide from command-line arguments.
    style_guide = StyleGuide(parse_argv=True)
    options = style_guide.options
    if options.doctest or options.testsuite:
        # Test-suite mode: run the bundled self-tests instead of user files.
        from testsuite.support import run_tests
        report = run_tests(style_guide)
    else:
        report = style_guide.check_files()
    # Optional report sections, each gated by its own flag.
    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        report.print_results()
    # Non-zero exit status when any error was reported; optionally echo
    # the error count to stderr first.
    if report.total_errors:
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        sys.exit(1)
constant[Parse options and run checks on Python source.]
import module[signal]
<ast.Try object at 0x7da1b07795d0>
variable[style_guide] assign[=] call[name[StyleGuide], parameter[]]
variable[options] assign[=] name[style_guide].options
if <ast.BoolOp object at 0x7da1b08dd570> begin[:]
from relative_module[testsuite.support] import module[run_tests]
variable[report] assign[=] call[name[run_tests], parameter[name[style_guide]]]
if name[options].statistics begin[:]
call[name[report].print_statistics, parameter[]]
if name[options].benchmark begin[:]
call[name[report].print_benchmark, parameter[]]
if <ast.BoolOp object at 0x7da1b08dc850> begin[:]
call[name[report].print_results, parameter[]]
if name[report].total_errors begin[:]
if name[options].count begin[:]
call[name[sys].stderr.write, parameter[binary_operation[call[name[str], parameter[name[report].total_errors]] + constant[
]]]]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[_main] ():
literal[string]
keyword[import] identifier[signal]
keyword[try] :
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGPIPE] , keyword[lambda] identifier[signum] , identifier[frame] : identifier[sys] . identifier[exit] ( literal[int] ))
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[style_guide] = identifier[StyleGuide] ( identifier[parse_argv] = keyword[True] )
identifier[options] = identifier[style_guide] . identifier[options]
keyword[if] identifier[options] . identifier[doctest] keyword[or] identifier[options] . identifier[testsuite] :
keyword[from] identifier[testsuite] . identifier[support] keyword[import] identifier[run_tests]
identifier[report] = identifier[run_tests] ( identifier[style_guide] )
keyword[else] :
identifier[report] = identifier[style_guide] . identifier[check_files] ()
keyword[if] identifier[options] . identifier[statistics] :
identifier[report] . identifier[print_statistics] ()
keyword[if] identifier[options] . identifier[benchmark] :
identifier[report] . identifier[print_benchmark] ()
keyword[if] identifier[options] . identifier[testsuite] keyword[and] keyword[not] identifier[options] . identifier[quiet] :
identifier[report] . identifier[print_results] ()
keyword[if] identifier[report] . identifier[total_errors] :
keyword[if] identifier[options] . identifier[count] :
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[str] ( identifier[report] . identifier[total_errors] )+ literal[string] )
identifier[sys] . identifier[exit] ( literal[int] ) | def _main():
"""Parse options and run checks on Python source."""
import signal
# Handle "Broken pipe" gracefully
try:
signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1)) # depends on [control=['try'], data=[]]
except AttributeError:
pass # not supported on Windows # depends on [control=['except'], data=[]]
style_guide = StyleGuide(parse_argv=True)
options = style_guide.options
if options.doctest or options.testsuite:
from testsuite.support import run_tests
report = run_tests(style_guide) # depends on [control=['if'], data=[]]
else:
report = style_guide.check_files()
if options.statistics:
report.print_statistics() # depends on [control=['if'], data=[]]
if options.benchmark:
report.print_benchmark() # depends on [control=['if'], data=[]]
if options.testsuite and (not options.quiet):
report.print_results() # depends on [control=['if'], data=[]]
if report.total_errors:
if options.count:
sys.stderr.write(str(report.total_errors) + '\n') # depends on [control=['if'], data=[]]
sys.exit(1) # depends on [control=['if'], data=[]] |
def factorize(self, A):
    """
    Factorizes A.
    Parameters
    ----------
    A : matrix
        For symmetric systems, should contain only lower diagonal part.
    """
    matrix = csc_matrix(A)
    if self.prop == self.SYMMETRIC:
        # Mirror the lower triangle into the upper one; subtracting
        # triu keeps the diagonal from being counted twice.
        matrix = (matrix + matrix.T) - triu(matrix)
    self.lu = self.umfpack.splu(matrix)
constant[
Factorizes A.
Parameters
----------
A : matrix
For symmetric systems, should contain only lower diagonal part.
]
variable[A] assign[=] call[name[csc_matrix], parameter[name[A]]]
if compare[name[self].prop equal[==] name[self].SYMMETRIC] begin[:]
variable[A] assign[=] binary_operation[binary_operation[name[A] + name[A].T] - call[name[triu], parameter[name[A]]]]
name[self].lu assign[=] call[name[self].umfpack.splu, parameter[name[A]]] | keyword[def] identifier[factorize] ( identifier[self] , identifier[A] ):
literal[string]
identifier[A] = identifier[csc_matrix] ( identifier[A] )
keyword[if] identifier[self] . identifier[prop] == identifier[self] . identifier[SYMMETRIC] :
identifier[A] =( identifier[A] + identifier[A] . identifier[T] )- identifier[triu] ( identifier[A] )
identifier[self] . identifier[lu] = identifier[self] . identifier[umfpack] . identifier[splu] ( identifier[A] ) | def factorize(self, A):
"""
Factorizes A.
Parameters
----------
A : matrix
For symmetric systems, should contain only lower diagonal part.
"""
A = csc_matrix(A)
if self.prop == self.SYMMETRIC:
A = A + A.T - triu(A) # depends on [control=['if'], data=[]]
self.lu = self.umfpack.splu(A) |
def flavor_list(request):
    """Utility method to retrieve a list of flavors."""
    try:
        flavors = api.nova.flavor_list(request)
    except Exception:
        # Surface the failure to the user and fall back to an empty list.
        exceptions.handle(request,
                          _('Unable to retrieve instance flavors.'))
        flavors = []
    return flavors
return [] | def function[flavor_list, parameter[request]]:
constant[Utility method to retrieve a list of flavors.]
<ast.Try object at 0x7da1b18dc610> | keyword[def] identifier[flavor_list] ( identifier[request] ):
literal[string]
keyword[try] :
keyword[return] identifier[api] . identifier[nova] . identifier[flavor_list] ( identifier[request] )
keyword[except] identifier[Exception] :
identifier[exceptions] . identifier[handle] ( identifier[request] ,
identifier[_] ( literal[string] ))
keyword[return] [] | def flavor_list(request):
"""Utility method to retrieve a list of flavors."""
try:
return api.nova.flavor_list(request) # depends on [control=['try'], data=[]]
except Exception:
exceptions.handle(request, _('Unable to retrieve instance flavors.'))
return [] # depends on [control=['except'], data=[]] |
def raw(self) -> str:
    """
    Return Revocation raw document string
    :return:
    """
    # Inline-built revocations only carry a pubkey, not a full identity,
    # so the complete document cannot be reconstructed from them.
    if not isinstance(self.identity, Identity):
        raise MalformedDocumentError("Can not return full revocation document created from inline")
    identity = self.identity
    lines = (
        "Version: {0}".format(self.version),
        "Type: Revocation",
        "Currency: {0}".format(self.currency),
        "Issuer: {0}".format(identity.pubkey),
        "IdtyUniqueID: {0}".format(identity.uid),
        "IdtyTimestamp: {0}".format(identity.timestamp),
        "IdtySignature: {0}".format(identity.signatures[0]),
    )
    # Trailing newline matches the original template's final line break.
    return "\n".join(lines) + "\n"
constant[
Return Revocation raw document string
:return:
]
if <ast.UnaryOp object at 0x7da18f00e6e0> begin[:]
<ast.Raise object at 0x7da18f00eaa0>
return[call[constant[Version: {version}
Type: Revocation
Currency: {currency}
Issuer: {pubkey}
IdtyUniqueID: {uid}
IdtyTimestamp: {timestamp}
IdtySignature: {signature}
].format, parameter[]]] | keyword[def] identifier[raw] ( identifier[self] )-> identifier[str] :
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[identity] , identifier[Identity] ):
keyword[raise] identifier[MalformedDocumentError] ( literal[string] )
keyword[return] literal[string] . identifier[format] ( identifier[version] = identifier[self] . identifier[version] ,
identifier[currency] = identifier[self] . identifier[currency] ,
identifier[pubkey] = identifier[self] . identifier[identity] . identifier[pubkey] ,
identifier[uid] = identifier[self] . identifier[identity] . identifier[uid] ,
identifier[timestamp] = identifier[self] . identifier[identity] . identifier[timestamp] ,
identifier[signature] = identifier[self] . identifier[identity] . identifier[signatures] [ literal[int] ]) | def raw(self) -> str:
"""
Return Revocation raw document string
:return:
"""
if not isinstance(self.identity, Identity):
raise MalformedDocumentError('Can not return full revocation document created from inline') # depends on [control=['if'], data=[]]
return 'Version: {version}\nType: Revocation\nCurrency: {currency}\nIssuer: {pubkey}\nIdtyUniqueID: {uid}\nIdtyTimestamp: {timestamp}\nIdtySignature: {signature}\n'.format(version=self.version, currency=self.currency, pubkey=self.identity.pubkey, uid=self.identity.uid, timestamp=self.identity.timestamp, signature=self.identity.signatures[0]) |
def sg_prod(tensor, opt):
    r"""Computes the product of elements across axis of a tensor.
    See `tf.reduce_prod()` in tensorflow.
    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis : A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.
    Returns:
      A `Tensor`.
    """
    # Thin wrapper forwarding the chained options straight to TensorFlow.
    # NOTE(review): `keep_dims` is the TF1-era spelling (later renamed
    # `keepdims`) -- confirm against the TF version this project pins.
    return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
constant[Computes the product of elements across axis of a tensor.
See `tf.reduce_prod()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
]
return[call[name[tf].reduce_prod, parameter[name[tensor]]]] | keyword[def] identifier[sg_prod] ( identifier[tensor] , identifier[opt] ):
literal[string]
keyword[return] identifier[tf] . identifier[reduce_prod] ( identifier[tensor] , identifier[axis] = identifier[opt] . identifier[axis] , identifier[keep_dims] = identifier[opt] . identifier[keep_dims] , identifier[name] = identifier[opt] . identifier[name] ) | def sg_prod(tensor, opt):
"""Computes the product of elements across axis of a tensor.
See `tf.reduce_prod()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) |
def certificate_issuer(self, value):
    """
    An asn1crypto.x509.Certificate object of the issuer of the certificate.
    This should only be set if the OCSP responder is not the issuer of
    the certificate, but instead a special certificate only for OCSP
    responses.
    """
    if value is not None:
        # Accept either wrapper type; unwrap oscrypto certificates to the
        # underlying asn1crypto object before storing.
        if isinstance(value, asymmetric.Certificate):
            value = value.asn1
        elif not isinstance(value, x509.Certificate):
            raise TypeError(_pretty_message(
                '''
                certificate_issuer must be an instance of
                asn1crypto.x509.Certificate or
                oscrypto.asymmetric.Certificate, not %s
                ''',
                _type_name(value)
            ))
    self._certificate_issuer = value
constant[
An asn1crypto.x509.Certificate object of the issuer of the certificate.
This should only be set if the OCSP responder is not the issuer of
the certificate, but instead a special certificate only for OCSP
responses.
]
if compare[name[value] is_not constant[None]] begin[:]
variable[is_oscrypto] assign[=] call[name[isinstance], parameter[name[value], name[asymmetric].Certificate]]
if <ast.BoolOp object at 0x7da1b1910910> begin[:]
<ast.Raise object at 0x7da1b1912d40>
if name[is_oscrypto] begin[:]
variable[value] assign[=] name[value].asn1
name[self]._certificate_issuer assign[=] name[value] | keyword[def] identifier[certificate_issuer] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[is_oscrypto] = identifier[isinstance] ( identifier[value] , identifier[asymmetric] . identifier[Certificate] )
keyword[if] keyword[not] identifier[is_oscrypto] keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[x509] . identifier[Certificate] ):
keyword[raise] identifier[TypeError] ( identifier[_pretty_message] (
literal[string] ,
identifier[_type_name] ( identifier[value] )
))
keyword[if] identifier[is_oscrypto] :
identifier[value] = identifier[value] . identifier[asn1]
identifier[self] . identifier[_certificate_issuer] = identifier[value] | def certificate_issuer(self, value):
"""
An asn1crypto.x509.Certificate object of the issuer of the certificate.
This should only be set if the OCSP responder is not the issuer of
the certificate, but instead a special certificate only for OCSP
responses.
"""
if value is not None:
is_oscrypto = isinstance(value, asymmetric.Certificate)
if not is_oscrypto and (not isinstance(value, x509.Certificate)):
raise TypeError(_pretty_message('\n certificate_issuer must be an instance of\n asn1crypto.x509.Certificate or\n oscrypto.asymmetric.Certificate, not %s\n ', _type_name(value))) # depends on [control=['if'], data=[]]
if is_oscrypto:
value = value.asn1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']]
self._certificate_issuer = value |
def remove(self, session_id):
    """Remove session object from the container
    `session_id`
        Session identifier
    """
    session = self._items.get(session_id)
    if session is None:
        return False
    # Mark the session as evicted before firing its delete hook.
    session.promoted = -1
    session.on_delete(True)
    del self._items[session_id]
    return True
constant[Remove session object from the container
`session_id`
Session identifier
]
variable[session] assign[=] call[name[self]._items.get, parameter[name[session_id], constant[None]]]
if compare[name[session] is_not constant[None]] begin[:]
name[session].promoted assign[=] <ast.UnaryOp object at 0x7da1b0a02f80>
call[name[session].on_delete, parameter[constant[True]]]
<ast.Delete object at 0x7da1b0a008e0>
return[constant[True]]
return[constant[False]] | keyword[def] identifier[remove] ( identifier[self] , identifier[session_id] ):
literal[string]
identifier[session] = identifier[self] . identifier[_items] . identifier[get] ( identifier[session_id] , keyword[None] )
keyword[if] identifier[session] keyword[is] keyword[not] keyword[None] :
identifier[session] . identifier[promoted] =- literal[int]
identifier[session] . identifier[on_delete] ( keyword[True] )
keyword[del] identifier[self] . identifier[_items] [ identifier[session_id] ]
keyword[return] keyword[True]
keyword[return] keyword[False] | def remove(self, session_id):
"""Remove session object from the container
`session_id`
Session identifier
"""
session = self._items.get(session_id, None)
if session is not None:
session.promoted = -1
session.on_delete(True)
del self._items[session_id]
return True # depends on [control=['if'], data=['session']]
return False |
def get_instance(self, payload):
    """
    Build an instance of AssetInstance
    :param dict payload: Payload response from the API
    :returns: twilio.rest.serverless.v1.service.asset.AssetInstance
    :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance
    """
    # The service sid comes from the list context's resolved solution.
    service_sid = self._solution['service_sid']
    return AssetInstance(self._version, payload, service_sid=service_sid)
constant[
Build an instance of AssetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.serverless.v1.service.asset.AssetInstance
:rtype: twilio.rest.serverless.v1.service.asset.AssetInstance
]
return[call[name[AssetInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[get_instance] ( identifier[self] , identifier[payload] ):
literal[string]
keyword[return] identifier[AssetInstance] ( identifier[self] . identifier[_version] , identifier[payload] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ],) | def get_instance(self, payload):
"""
Build an instance of AssetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.serverless.v1.service.asset.AssetInstance
:rtype: twilio.rest.serverless.v1.service.asset.AssetInstance
"""
return AssetInstance(self._version, payload, service_sid=self._solution['service_sid']) |
def _pretty_size(size):
'''
Print sizes in a similar fashion as eclean
'''
units = [' G', ' M', ' K', ' B']
while units and size >= 1000:
size = size / 1024.0
units.pop()
return '{0}{1}'.format(round(size, 1), units[-1]) | def function[_pretty_size, parameter[size]]:
constant[
Print sizes in a similar fashion as eclean
]
variable[units] assign[=] list[[<ast.Constant object at 0x7da1b1f2a050>, <ast.Constant object at 0x7da1b1f299f0>, <ast.Constant object at 0x7da1b1f28700>, <ast.Constant object at 0x7da1b1f2baf0>]]
while <ast.BoolOp object at 0x7da1b1f29300> begin[:]
variable[size] assign[=] binary_operation[name[size] / constant[1024.0]]
call[name[units].pop, parameter[]]
return[call[constant[{0}{1}].format, parameter[call[name[round], parameter[name[size], constant[1]]], call[name[units]][<ast.UnaryOp object at 0x7da1b1cde260>]]]] | keyword[def] identifier[_pretty_size] ( identifier[size] ):
literal[string]
identifier[units] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[while] identifier[units] keyword[and] identifier[size] >= literal[int] :
identifier[size] = identifier[size] / literal[int]
identifier[units] . identifier[pop] ()
keyword[return] literal[string] . identifier[format] ( identifier[round] ( identifier[size] , literal[int] ), identifier[units] [- literal[int] ]) | def _pretty_size(size):
"""
Print sizes in a similar fashion as eclean
"""
units = [' G', ' M', ' K', ' B']
while units and size >= 1000:
size = size / 1024.0
units.pop() # depends on [control=['while'], data=[]]
return '{0}{1}'.format(round(size, 1), units[-1]) |
def generate_documentation(self, app_name, **kwargs):
    """Generate documentation for this specification.
    Documentation is generated in Markdown format. An example
    of the generated documentation can be found at:
    https://github.com/loganasherjones/yapconf/blob/master/example/doc.md
    Args:
        app_name (str): The name of your application.
    Keyword Args:
        output_file_name (str): If provided, will write to this file.
        encoding (str): The encoding to use for the output file. Default
            is utf-8.
    Returns:
        A string representation of the documentation.
    """
    doc_string = generate_markdown_doc(app_name, self)
    # Writing to disk is optional; the rendered string is always returned.
    output_file = kwargs.get('output_file_name')
    if output_file:
        encoding = kwargs.get('encoding', 'utf-8')
        with open(output_file, 'w', encoding=encoding) as doc_file:
            doc_file.write(doc_string)
    return doc_string
constant[Generate documentation for this specification.
Documentation is generated in Markdown format. An example
of the generated documentation can be found at:
https://github.com/loganasherjones/yapconf/blob/master/example/doc.md
Args:
app_name (str): The name of your application.
Keyword Args:
output_file_name (str): If provided, will write to this file.
encoding (str): The encoding to use for the output file. Default
is utf-8.
Returns:
A string representation of the documentation.
]
variable[output_file] assign[=] call[name[kwargs].get, parameter[constant[output_file_name]]]
variable[encoding] assign[=] call[name[kwargs].get, parameter[constant[encoding], constant[utf-8]]]
variable[doc_string] assign[=] call[name[generate_markdown_doc], parameter[name[app_name], name[self]]]
if name[output_file] begin[:]
with call[name[open], parameter[name[output_file], constant[w]]] begin[:]
call[name[doc_file].write, parameter[name[doc_string]]]
return[name[doc_string]] | keyword[def] identifier[generate_documentation] ( identifier[self] , identifier[app_name] ,** identifier[kwargs] ):
literal[string]
identifier[output_file] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[encoding] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[doc_string] = identifier[generate_markdown_doc] ( identifier[app_name] , identifier[self] )
keyword[if] identifier[output_file] :
keyword[with] identifier[open] ( identifier[output_file] , literal[string] , identifier[encoding] = identifier[encoding] ) keyword[as] identifier[doc_file] :
identifier[doc_file] . identifier[write] ( identifier[doc_string] )
keyword[return] identifier[doc_string] | def generate_documentation(self, app_name, **kwargs):
"""Generate documentation for this specification.
Documentation is generated in Markdown format. An example
of the generated documentation can be found at:
https://github.com/loganasherjones/yapconf/blob/master/example/doc.md
Args:
app_name (str): The name of your application.
Keyword Args:
output_file_name (str): If provided, will write to this file.
encoding (str): The encoding to use for the output file. Default
is utf-8.
Returns:
A string representation of the documentation.
"""
output_file = kwargs.get('output_file_name')
encoding = kwargs.get('encoding', 'utf-8')
doc_string = generate_markdown_doc(app_name, self)
if output_file:
with open(output_file, 'w', encoding=encoding) as doc_file:
doc_file.write(doc_string) # depends on [control=['with'], data=['doc_file']] # depends on [control=['if'], data=[]]
return doc_string |
def truncate(string, maxchar):
    """
    Truncate a string to a maximum number of characters.
    If the string is longer than maxchar, then remove excess
    characters and append an ellipses.
    Arguments:
        string (str): String to truncate.
        maxchar (int): Maximum length of string in characters. Must be >= 4.
    Returns:
        str: Of length <= maxchar.
    Raises:
        TruncateError: In case of an error.
    """
    if maxchar < 4:
        raise TruncateError("Maxchar must be > 3")
    if len(string) > maxchar:
        # Reserve three characters for the appended ellipsis.
        return string[:maxchar - 3] + "..."
    return string
constant[
Truncate a string to a maximum number of characters.
If the string is longer than maxchar, then remove excess
characters and append an ellipses.
Arguments:
string (str): String to truncate.
maxchar (int): Maximum length of string in characters. Must be >= 4.
Returns:
str: Of length <= maxchar.
Raises:
TruncateError: In case of an error.
]
if compare[name[maxchar] less[<] constant[4]] begin[:]
<ast.Raise object at 0x7da1b085e350>
if compare[call[name[len], parameter[name[string]]] less_or_equal[<=] name[maxchar]] begin[:]
return[name[string]] | keyword[def] identifier[truncate] ( identifier[string] , identifier[maxchar] ):
literal[string]
keyword[if] identifier[maxchar] < literal[int] :
keyword[raise] identifier[TruncateError] ( literal[string] )
keyword[if] identifier[len] ( identifier[string] )<= identifier[maxchar] :
keyword[return] identifier[string]
keyword[else] :
keyword[return] identifier[string] [: identifier[maxchar] - literal[int] ]+ literal[string] | def truncate(string, maxchar):
"""
Truncate a string to a maximum number of characters.
If the string is longer than maxchar, then remove excess
characters and append an ellipses.
Arguments:
string (str): String to truncate.
maxchar (int): Maximum length of string in characters. Must be >= 4.
Returns:
str: Of length <= maxchar.
Raises:
TruncateError: In case of an error.
"""
if maxchar < 4:
raise TruncateError('Maxchar must be > 3') # depends on [control=['if'], data=[]]
if len(string) <= maxchar:
return string # depends on [control=['if'], data=[]]
else:
return string[:maxchar - 3] + '...' |
def sentiment(text):
"""
Returns a float for sentiment strength based on the input text.
Positive values are positive valence, negative value are negative valence.
"""
sentiment.valence_dict = load_valence_dict() if sentiment.valence_dict is None else sentiment_valence_dict
wordsAndEmoticons = str(text).split() #doesn't separate words from adjacent punctuation (keeps emoticons & contractions)
text_mod = regex_remove_punctuation.sub('', text) # removes punctuation (but loses emoticons & contractions)
wordsOnly = str(text_mod).split()
# get rid of empty items or single letter "words" like 'a' and 'I' from wordsOnly
for word in wordsOnly:
if len(word) <= 1:
wordsOnly.remove(word)
# now remove adjacent & redundant punctuation from [wordsAndEmoticons] while keeping emoticons and contractions
puncList = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
"!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]
for word in wordsOnly:
for p in puncList:
pword = p + word
x1 = wordsAndEmoticons.count(pword)
while x1 > 0:
i = wordsAndEmoticons.index(pword)
wordsAndEmoticons.remove(pword)
wordsAndEmoticons.insert(i, word)
x1 = wordsAndEmoticons.count(pword)
wordp = word + p
x2 = wordsAndEmoticons.count(wordp)
while x2 > 0:
i = wordsAndEmoticons.index(wordp)
wordsAndEmoticons.remove(wordp)
wordsAndEmoticons.insert(i, word)
x2 = wordsAndEmoticons.count(wordp)
# get rid of residual empty items or single letter "words" like 'a' and 'I' from wordsAndEmoticons
for word in wordsAndEmoticons:
if len(word) <= 1:
wordsAndEmoticons.remove(word)
# remove stopwords from [wordsAndEmoticons]
#stopwords = [str(word).strip() for word in open('stopwords.txt')]
#for word in wordsAndEmoticons:
# if word in stopwords:
# wordsAndEmoticons.remove(word)
# check for negation
negate = ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
"ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
"dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
"don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
"neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
"oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
"oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
"without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]
def negated(list, nWords=[], includeNT=True):
nWords.extend(negate)
for word in nWords:
if word in list:
return True
if includeNT:
for word in list:
if "n't" in word:
return True
if "least" in list:
i = list.index("least")
if i > 0 and list[i-1] != "at":
return True
return False
def normalize(score, alpha=15):
# normalize the score to be between -1 and 1 using an alpha that approximates the max expected value
normScore = score/math.sqrt( ((score*score) + alpha) )
return normScore
def wildCardMatch(patternWithWildcard, listOfStringsToMatchAgainst):
listOfMatches = fnmatch.filter(listOfStringsToMatchAgainst, patternWithWildcard)
return listOfMatches
def isALLCAP_differential(wordList):
countALLCAPS= 0
for w in wordList:
if str(w).isupper():
countALLCAPS += 1
cap_differential = len(wordList) - countALLCAPS
if cap_differential > 0 and cap_differential < len(wordList):
isDiff = True
else: isDiff = False
return isDiff
    # True when the text mixes ALLCAPS and normally-cased words (ALLCAPS = emphasis).
    isCap_diff = isALLCAP_differential(wordsAndEmoticons)
    b_incr = 0.293 #(empirically derived mean sentiment intensity rating increase for booster words)
    b_decr = -0.293
    # booster/dampener 'intensifiers' or 'degree adverbs' http://en.wiktionary.org/wiki/Category:English_degree_adverbs
    booster_dict = {"absolutely": b_incr, "amazingly": b_incr, "awfully": b_incr, "completely": b_incr, "considerably": b_incr,
                    "decidedly": b_incr, "deeply": b_incr, "effing": b_incr, "enormously": b_incr,
                    "entirely": b_incr, "especially": b_incr, "exceptionally": b_incr, "extremely": b_incr,
                    "fabulously": b_incr, "flipping": b_incr, "flippin": b_incr,
                    "fricking": b_incr, "frickin": b_incr, "frigging": b_incr, "friggin": b_incr, "fully": b_incr, "fucking": b_incr,
                    "greatly": b_incr, "hella": b_incr, "highly": b_incr, "hugely": b_incr, "incredibly": b_incr,
                    "intensely": b_incr, "majorly": b_incr, "more": b_incr, "most": b_incr, "particularly": b_incr,
                    "purely": b_incr, "quite": b_incr, "really": b_incr, "remarkably": b_incr,
                    "so": b_incr, "substantially": b_incr,
                    "thoroughly": b_incr, "totally": b_incr, "tremendously": b_incr,
                    "uber": b_incr, "unbelievably": b_incr, "unusually": b_incr, "utterly": b_incr,
                    "very": b_incr,
                    "almost": b_decr, "barely": b_decr, "hardly": b_decr, "just enough": b_decr,
                    "kind of": b_decr, "kinda": b_decr, "kindof": b_decr, "kind-of": b_decr,
                    "less": b_decr, "little": b_decr, "marginally": b_decr, "occasionally": b_decr, "partly": b_decr,
                    "scarcely": b_decr, "slightly": b_decr, "somewhat": b_decr,
                    "sort of": b_decr, "sorta": b_decr, "sortof": b_decr, "sort-of": b_decr}
    # Per-token valence loop: each token contributes one entry to sentiments.
    sentiments = []
    for item in wordsAndEmoticons:
        v = 0
        # NOTE(review): .index() returns the FIRST occurrence, so a repeated
        # token is always scored using the first occurrence's position/context.
        i = wordsAndEmoticons.index(item)
        # Boosters themselves (and the bigram "kind of") only modify their
        # neighbors; they contribute a neutral 0 and skip the valence lookup.
        if (i < len(wordsAndEmoticons)-1 and str(item).lower() == "kind" and \
            str(wordsAndEmoticons[i+1]).lower() == "of") or str(item).lower() in booster_dict:
            sentiments.append(v)
            continue
        item_lowercase = str(item).lower()
        if item_lowercase in sentiment.valence_dict:
            #get the sentiment valence
            v = float(sentiment.valence_dict[item_lowercase])
            #check if sentiment laden word is in ALLCAPS (while others aren't)
            c_incr = 0.733 #(empirically derived mean sentiment intensity rating increase for using ALLCAPs to emphasize a word)
            if str(item).isupper() and isCap_diff:
                if v > 0: v += c_incr
                else: v -= c_incr
            #check if the preceding words increase, decrease, or negate/nullify the valence
def scalar_inc_dec(word, valence):
scalar = 0.0
word_lower = str(word).lower()
if word_lower in booster_dict:
scalar = booster_dict[word_lower]
if valence < 0: scalar *= -1
#check if booster/dampener word is in ALLCAPS (while others aren't)
if str(word).isupper() and isCap_diff:
if valence > 0: scalar += c_incr
else: scalar -= c_incr
return scalar
            # Scale factor applied when a preceding word negates the valence.
            n_scalar = -0.74
            # Look back up to three preceding words for boosters/dampeners and
            # negators; farther words contribute progressively less (x0.95, x0.9).
            if i > 0 and str(wordsAndEmoticons[i-1]).lower() not in sentiment.valence_dict:
                s1 = scalar_inc_dec(wordsAndEmoticons[i-1], v)
                v = v+s1
                if negated([wordsAndEmoticons[i-1]]): v = v*n_scalar
            if i > 1 and str(wordsAndEmoticons[i-2]).lower() not in sentiment.valence_dict:
                s2 = scalar_inc_dec(wordsAndEmoticons[i-2], v)
                if s2 != 0: s2 = s2*0.95
                v = v+s2
                # check for special use of 'never' as valence modifier instead of negation
                if wordsAndEmoticons[i-2] == "never" and (wordsAndEmoticons[i-1] == "so" or wordsAndEmoticons[i-1] == "this"):
                    v = v*1.5
                # otherwise, check for negation/nullification
                elif negated([wordsAndEmoticons[i-2]]): v = v*n_scalar
            if i > 2 and str(wordsAndEmoticons[i-3]).lower() not in sentiment.valence_dict:
                s3 = scalar_inc_dec(wordsAndEmoticons[i-3], v)
                if s3 != 0: s3 = s3*0.9
                v = v+s3
                # check for special use of 'never' as valence modifier instead of negation
                if wordsAndEmoticons[i-3] == "never" and \
                   (wordsAndEmoticons[i-2] == "so" or wordsAndEmoticons[i-2] == "this") or \
                   (wordsAndEmoticons[i-1] == "so" or wordsAndEmoticons[i-1] == "this"):
                    v = v*1.25
                # otherwise, check for negation/nullification
                elif negated([wordsAndEmoticons[i-3]]): v = v*n_scalar
            # check for special case idioms using a sentiment-laden keyword known to SAGE
            special_case_idioms = {"the shit": 3, "the bomb": 3, "bad ass": 1.5, "yeah right": -2,
                                   "cut the mustard": 2, "kiss of death": -1.5, "hand to mouth": -2}
            # future work: consider other sentiment-laden idioms
            #other_idioms = {"back handed": -2, "blow smoke": -2, "blowing smoke": -2, "upper hand": 1, "break a leg": 2,
            #               "cooking with gas": 2, "in the black": 2, "in the red": -2, "on the ball": 2,"under the weather": -2}
            onezero = "{} {}".format(str(wordsAndEmoticons[i-1]), str(wordsAndEmoticons[i]))
            # NOTE(review): the format string below has only TWO placeholders
            # for THREE arguments -- str.format silently ignores the extra, so
            # the current word is dropped and three-word idioms never match
            # via twoonezero. Should be "{} {} {}".
            twoonezero = "{} {}".format(str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]), str(wordsAndEmoticons[i]))
            twoone = "{} {}".format(str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]))
            threetwoone = "{} {} {}".format(str(wordsAndEmoticons[i-3]), str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]))
            threetwo = "{} {}".format(str(wordsAndEmoticons[i-3]), str(wordsAndEmoticons[i-2]))
            if onezero in special_case_idioms: v = special_case_idioms[onezero]
            elif twoonezero in special_case_idioms: v = special_case_idioms[twoonezero]
            elif twoone in special_case_idioms: v = special_case_idioms[twoone]
            elif threetwoone in special_case_idioms: v = special_case_idioms[threetwoone]
            elif threetwo in special_case_idioms: v = special_case_idioms[threetwo]
            if len(wordsAndEmoticons)-1 > i:
                zeroone = "{} {}".format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i+1]))
                if zeroone in special_case_idioms: v = special_case_idioms[zeroone]
            if len(wordsAndEmoticons)-1 > i+1:
                # NOTE(review): same two-placeholder / three-argument issue as
                # twoonezero above -- wordsAndEmoticons[i+2] is dropped.
                zeroonetwo = "{} {}".format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i+1]), str(wordsAndEmoticons[i+2]))
                if zeroonetwo in special_case_idioms: v = special_case_idioms[zeroonetwo]
            # check for booster/dampener bi-grams such as 'sort of' or 'kind of'
            if threetwo in booster_dict or twoone in booster_dict:
                v = v+b_decr
            # check for negation case using "least"
            if i > 1 and str(wordsAndEmoticons[i-1]).lower() not in sentiment.valence_dict \
                and str(wordsAndEmoticons[i-1]).lower() == "least":
                if (str(wordsAndEmoticons[i-2]).lower() != "at" and str(wordsAndEmoticons[i-2]).lower() != "very"):
                    v = v*n_scalar
            elif i > 0 and str(wordsAndEmoticons[i-1]).lower() not in sentiment.valence_dict \
                and str(wordsAndEmoticons[i-1]).lower() == "least":
                v = v*n_scalar
        sentiments.append(v)
    # check for modification in sentiment due to contrastive conjunction 'but':
    # clauses before 'but' are down-weighted (x0.5), clauses after up-weighted (x1.5).
    if 'but' in wordsAndEmoticons or 'BUT' in wordsAndEmoticons:
        try: bi = wordsAndEmoticons.index('but')
        except: bi = wordsAndEmoticons.index('BUT')
        # NOTE(review): sentiments.index(s) finds the FIRST equal value, so
        # duplicate scores are always rescaled at the first matching position.
        for s in sentiments:
            si = sentiments.index(s)
            if si < bi:
                sentiments.pop(si)
                sentiments.insert(si, s*0.5)
            elif si > bi:
                sentiments.pop(si)
                sentiments.insert(si, s*1.5)
    if sentiments:
        sum_s = float(sum(sentiments))
        #print sentiments, sum_s
        # check for added emphasis resulting from exclamation points (up to 4 of them)
        ep_count = str(text).count("!")
        if ep_count > 4: ep_count = 4
        ep_amplifier = ep_count*0.292 #(empirically derived mean sentiment intensity rating increase for exclamation points)
        if sum_s > 0: sum_s += ep_amplifier
        elif sum_s < 0: sum_s -= ep_amplifier
        # check for added emphasis resulting from question marks (2 or 3+)
        qm_count = str(text).count("?")
        qm_amplifier = 0
        if qm_count > 1:
            if qm_count <= 3: qm_amplifier = qm_count*0.18
            else: qm_amplifier = 0.96
            if sum_s > 0: sum_s += qm_amplifier
            elif sum_s < 0: sum_s -= qm_amplifier
        # overall (compound) score, squashed into [-1, 1]
        compound = normalize(sum_s)
        # want separate positive versus negative sentiment scores
        pos_sum = 0.0
        neg_sum = 0.0
        neu_count = 0
        for sentiment_score in sentiments:
            if sentiment_score > 0:
                pos_sum += (float(sentiment_score) +1) # compensates for neutral words that are counted as 1
            if sentiment_score < 0:
                neg_sum += (float(sentiment_score) -1) # when used with math.fabs(), compensates for neutrals
            if sentiment_score == 0:
                neu_count += 1
        # credit the punctuation emphasis to whichever polarity dominates
        if pos_sum > math.fabs(neg_sum): pos_sum += (ep_amplifier+qm_amplifier)
        elif pos_sum < math.fabs(neg_sum): neg_sum -= (ep_amplifier+qm_amplifier)
        total = pos_sum + math.fabs(neg_sum) + neu_count
        pos = math.fabs(pos_sum / total)
        neg = math.fabs(neg_sum / total)
        neu = math.fabs(neu_count / total)
    else:
        compound, pos, neg, neu = 0., 0., 0., 0.
    # final result: neg/neu/pos are proportions; compound is in [-1, 1]
    s = {"neg" : round(neg, 3),
         "neu" : round(neu, 3),
         "pos" : round(pos, 3),
         "compound" : round(compound, 4)}
    return s
constant[
Returns a float for sentiment strength based on the input text.
Positive values are positive valence, negative value are negative valence.
]
name[sentiment].valence_dict assign[=] <ast.IfExp object at 0x7da18dc07b50>
variable[wordsAndEmoticons] assign[=] call[call[name[str], parameter[name[text]]].split, parameter[]]
variable[text_mod] assign[=] call[name[regex_remove_punctuation].sub, parameter[constant[], name[text]]]
variable[wordsOnly] assign[=] call[call[name[str], parameter[name[text_mod]]].split, parameter[]]
for taget[name[word]] in starred[name[wordsOnly]] begin[:]
if compare[call[name[len], parameter[name[word]]] less_or_equal[<=] constant[1]] begin[:]
call[name[wordsOnly].remove, parameter[name[word]]]
variable[puncList] assign[=] list[[<ast.Constant object at 0x7da18dc07970>, <ast.Constant object at 0x7da18dc06140>, <ast.Constant object at 0x7da18dc07a00>, <ast.Constant object at 0x7da18dc04790>, <ast.Constant object at 0x7da18dc07790>, <ast.Constant object at 0x7da18dc07580>, <ast.Constant object at 0x7da18dc046a0>, <ast.Constant object at 0x7da18dc04dc0>, <ast.Constant object at 0x7da18dc07c70>, <ast.Constant object at 0x7da18dc04340>, <ast.Constant object at 0x7da18dc05c00>, <ast.Constant object at 0x7da18dc06530>, <ast.Constant object at 0x7da18dc05450>, <ast.Constant object at 0x7da18dc06080>, <ast.Constant object at 0x7da18dc07bb0>, <ast.Constant object at 0x7da18dc071c0>, <ast.Constant object at 0x7da18dc06a70>]]
for taget[name[word]] in starred[name[wordsOnly]] begin[:]
for taget[name[p]] in starred[name[puncList]] begin[:]
variable[pword] assign[=] binary_operation[name[p] + name[word]]
variable[x1] assign[=] call[name[wordsAndEmoticons].count, parameter[name[pword]]]
while compare[name[x1] greater[>] constant[0]] begin[:]
variable[i] assign[=] call[name[wordsAndEmoticons].index, parameter[name[pword]]]
call[name[wordsAndEmoticons].remove, parameter[name[pword]]]
call[name[wordsAndEmoticons].insert, parameter[name[i], name[word]]]
variable[x1] assign[=] call[name[wordsAndEmoticons].count, parameter[name[pword]]]
variable[wordp] assign[=] binary_operation[name[word] + name[p]]
variable[x2] assign[=] call[name[wordsAndEmoticons].count, parameter[name[wordp]]]
while compare[name[x2] greater[>] constant[0]] begin[:]
variable[i] assign[=] call[name[wordsAndEmoticons].index, parameter[name[wordp]]]
call[name[wordsAndEmoticons].remove, parameter[name[wordp]]]
call[name[wordsAndEmoticons].insert, parameter[name[i], name[word]]]
variable[x2] assign[=] call[name[wordsAndEmoticons].count, parameter[name[wordp]]]
for taget[name[word]] in starred[name[wordsAndEmoticons]] begin[:]
if compare[call[name[len], parameter[name[word]]] less_or_equal[<=] constant[1]] begin[:]
call[name[wordsAndEmoticons].remove, parameter[name[word]]]
variable[negate] assign[=] list[[<ast.Constant object at 0x7da18dc05180>, <ast.Constant object at 0x7da18dc04b20>, <ast.Constant object at 0x7da18dc07430>, <ast.Constant object at 0x7da18dc05750>, <ast.Constant object at 0x7da18dc05b70>, <ast.Constant object at 0x7da18dc054e0>, <ast.Constant object at 0x7da18dc07af0>, <ast.Constant object at 0x7da18dc04af0>, <ast.Constant object at 0x7da18dc05a80>, <ast.Constant object at 0x7da18dc075b0>, <ast.Constant object at 0x7da18dc079a0>, <ast.Constant object at 0x7da18dc05ea0>, <ast.Constant object at 0x7da18dc064a0>, <ast.Constant object at 0x7da18dc04e20>, <ast.Constant object at 0x7da18dc07eb0>, <ast.Constant object at 0x7da18dc04910>, <ast.Constant object at 0x7da18dc05150>, <ast.Constant object at 0x7da18dc05fc0>, <ast.Constant object at 0x7da18dc047c0>, <ast.Constant object at 0x7da18dc070d0>, <ast.Constant object at 0x7da18dc05030>, <ast.Constant object at 0x7da18dc06c20>, <ast.Constant object at 0x7da18dc05540>, <ast.Constant object at 0x7da18dc05420>, <ast.Constant object at 0x7da18dc04160>, <ast.Constant object at 0x7da18dc07610>, <ast.Constant object at 0x7da18dc07e50>, <ast.Constant object at 0x7da18dc07280>, <ast.Constant object at 0x7da18dc042b0>, <ast.Constant object at 0x7da18dc06d10>, <ast.Constant object at 0x7da18dc058d0>, <ast.Constant object at 0x7da18dc04fa0>, <ast.Constant object at 0x7da18dc04610>, <ast.Constant object at 0x7da18dc07ee0>, <ast.Constant object at 0x7da18dc05ff0>, <ast.Constant object at 0x7da18dc077f0>, <ast.Constant object at 0x7da18dc07100>, <ast.Constant object at 0x7da18dc05780>, <ast.Constant object at 0x7da18dc04640>, <ast.Constant object at 0x7da18dc04c70>, <ast.Constant object at 0x7da18dc07ac0>, <ast.Constant object at 0x7da18dc05b10>, <ast.Constant object at 0x7da18dc04cd0>, <ast.Constant object at 0x7da18dc07f70>, <ast.Constant object at 0x7da18dc05570>, <ast.Constant object at 0x7da18dc051b0>, <ast.Constant object at 0x7da18dc05ae0>, <ast.Constant object at 
0x7da18dc05ba0>, <ast.Constant object at 0x7da18dc06f50>, <ast.Constant object at 0x7da18dc069e0>, <ast.Constant object at 0x7da18dc06830>, <ast.Constant object at 0x7da18dc05720>, <ast.Constant object at 0x7da18dc04670>, <ast.Constant object at 0x7da18dc06bc0>, <ast.Constant object at 0x7da18dc062c0>, <ast.Constant object at 0x7da18dc065f0>, <ast.Constant object at 0x7da18dc046d0>, <ast.Constant object at 0x7da18dc04130>, <ast.Constant object at 0x7da18dc06a40>]]
def function[negated, parameter[list, nWords, includeNT]]:
call[name[nWords].extend, parameter[name[negate]]]
for taget[name[word]] in starred[name[nWords]] begin[:]
if compare[name[word] in name[list]] begin[:]
return[constant[True]]
if name[includeNT] begin[:]
for taget[name[word]] in starred[name[list]] begin[:]
if compare[constant[n't] in name[word]] begin[:]
return[constant[True]]
if compare[constant[least] in name[list]] begin[:]
variable[i] assign[=] call[name[list].index, parameter[constant[least]]]
if <ast.BoolOp object at 0x7da18dc05d80> begin[:]
return[constant[True]]
return[constant[False]]
def function[normalize, parameter[score, alpha]]:
variable[normScore] assign[=] binary_operation[name[score] / call[name[math].sqrt, parameter[binary_operation[binary_operation[name[score] * name[score]] + name[alpha]]]]]
return[name[normScore]]
def function[wildCardMatch, parameter[patternWithWildcard, listOfStringsToMatchAgainst]]:
variable[listOfMatches] assign[=] call[name[fnmatch].filter, parameter[name[listOfStringsToMatchAgainst], name[patternWithWildcard]]]
return[name[listOfMatches]]
def function[isALLCAP_differential, parameter[wordList]]:
variable[countALLCAPS] assign[=] constant[0]
for taget[name[w]] in starred[name[wordList]] begin[:]
if call[call[name[str], parameter[name[w]]].isupper, parameter[]] begin[:]
<ast.AugAssign object at 0x7da18fe90d60>
variable[cap_differential] assign[=] binary_operation[call[name[len], parameter[name[wordList]]] - name[countALLCAPS]]
if <ast.BoolOp object at 0x7da18fe91720> begin[:]
variable[isDiff] assign[=] constant[True]
return[name[isDiff]]
variable[isCap_diff] assign[=] call[name[isALLCAP_differential], parameter[name[wordsAndEmoticons]]]
variable[b_incr] assign[=] constant[0.293]
variable[b_decr] assign[=] <ast.UnaryOp object at 0x7da18fe91a20>
variable[booster_dict] assign[=] dictionary[[<ast.Constant object at 0x7da18fe92ce0>, <ast.Constant object at 0x7da18fe92b90>, <ast.Constant object at 0x7da18fe92d10>, <ast.Constant object at 0x7da18fe917b0>, <ast.Constant object at 0x7da18fe91420>, <ast.Constant object at 0x7da18fe91d50>, <ast.Constant object at 0x7da18fe91900>, <ast.Constant object at 0x7da18fe90160>, <ast.Constant object at 0x7da18fe902b0>, <ast.Constant object at 0x7da18fe90040>, <ast.Constant object at 0x7da18fe92260>, <ast.Constant object at 0x7da18fe93a30>, <ast.Constant object at 0x7da18fe90190>, <ast.Constant object at 0x7da18fe90730>, <ast.Constant object at 0x7da18fe91870>, <ast.Constant object at 0x7da18fe90490>, <ast.Constant object at 0x7da18fe90a00>, <ast.Constant object at 0x7da18fe92500>, <ast.Constant object at 0x7da18fe91990>, <ast.Constant object at 0x7da18fe91cc0>, <ast.Constant object at 0x7da18fe90100>, <ast.Constant object at 0x7da18fe91090>, <ast.Constant object at 0x7da18fe918a0>, <ast.Constant object at 0x7da18fe91ed0>, <ast.Constant object at 0x7da18fe91120>, <ast.Constant object at 0x7da18fe92680>, <ast.Constant object at 0x7da18fe90d90>, <ast.Constant object at 0x7da18fe93ee0>, <ast.Constant object at 0x7da18fe93310>, <ast.Constant object at 0x7da18fe935b0>, <ast.Constant object at 0x7da18fe90ca0>, <ast.Constant object at 0x7da18fe90430>, <ast.Constant object at 0x7da18fe928f0>, <ast.Constant object at 0x7da18fe90520>, <ast.Constant object at 0x7da18fe93b80>, <ast.Constant object at 0x7da18fe915a0>, <ast.Constant object at 0x7da18fe93b20>, <ast.Constant object at 0x7da18fe91d20>, <ast.Constant object at 0x7da18fe91de0>, <ast.Constant object at 0x7da18fe91a50>, <ast.Constant object at 0x7da18fe92440>, <ast.Constant object at 0x7da18fe90d30>, <ast.Constant object at 0x7da18fe919c0>, <ast.Constant object at 0x7da18fe907c0>, <ast.Constant object at 0x7da18fe93be0>, <ast.Constant object at 0x7da18fe93c70>, <ast.Constant object at 0x7da18fe916c0>, <ast.Constant object at 
0x7da18fe90a30>, <ast.Constant object at 0x7da18fe93070>, <ast.Constant object at 0x7da18fe933d0>, <ast.Constant object at 0x7da18fe916f0>, <ast.Constant object at 0x7da18fe92410>, <ast.Constant object at 0x7da18fe93940>, <ast.Constant object at 0x7da18fe93130>, <ast.Constant object at 0x7da18fe93fd0>, <ast.Constant object at 0x7da18fe93670>, <ast.Constant object at 0x7da18fe90370>, <ast.Constant object at 0x7da18fe923b0>, <ast.Constant object at 0x7da18fe91f30>, <ast.Constant object at 0x7da18fe90a90>, <ast.Constant object at 0x7da18fe924a0>, <ast.Constant object at 0x7da18fe92a10>, <ast.Constant object at 0x7da18fe92c20>, <ast.Constant object at 0x7da18fe906d0>, <ast.Constant object at 0x7da18fe913f0>, <ast.Constant object at 0x7da18fe918d0>], [<ast.Name object at 0x7da18fe90580>, <ast.Name object at 0x7da18fe92c80>, <ast.Name object at 0x7da18fe90ac0>, <ast.Name object at 0x7da18fe93a60>, <ast.Name object at 0x7da18fe919f0>, <ast.Name object at 0x7da18fe93220>, <ast.Name object at 0x7da18fe90790>, <ast.Name object at 0x7da18fe912a0>, <ast.Name object at 0x7da18fe917e0>, <ast.Name object at 0x7da18fe91840>, <ast.Name object at 0x7da18fe932b0>, <ast.Name object at 0x7da18fe93550>, <ast.Name object at 0x7da18fe920e0>, <ast.Name object at 0x7da18fe936a0>, <ast.Name object at 0x7da18fe93d00>, <ast.Name object at 0x7da18fe90850>, <ast.Name object at 0x7da18fe90b80>, <ast.Name object at 0x7da18fe91750>, <ast.Name object at 0x7da18fe909d0>, <ast.Name object at 0x7da18fe90cd0>, <ast.Name object at 0x7da18fe91630>, <ast.Name object at 0x7da18fe91f90>, <ast.Name object at 0x7da18fe93f40>, <ast.Name object at 0x7da18fe92770>, <ast.Name object at 0x7da18fe91030>, <ast.Name object at 0x7da18fe937f0>, <ast.Name object at 0x7da18fe91450>, <ast.Name object at 0x7da18fe90460>, <ast.Name object at 0x7da18fe93640>, <ast.Name object at 0x7da18fe935e0>, <ast.Name object at 0x7da18fe930d0>, <ast.Name object at 0x7da18fe91fc0>, <ast.Name object at 0x7da18fe91ab0>, <ast.Name object at 
0x7da18fe93040>, <ast.Name object at 0x7da18fe90070>, <ast.Name object at 0x7da18fe93b50>, <ast.Name object at 0x7da18fe928c0>, <ast.Name object at 0x7da18fe91b70>, <ast.Name object at 0x7da18fe934c0>, <ast.Name object at 0x7da18fe90ee0>, <ast.Name object at 0x7da18fe93f10>, <ast.Name object at 0x7da18fe92110>, <ast.Name object at 0x7da18fe90c70>, <ast.Name object at 0x7da18fe93af0>, <ast.Name object at 0x7da18fe914e0>, <ast.Name object at 0x7da18fe92080>, <ast.Name object at 0x7da18fe938e0>, <ast.Name object at 0x7da18fe91a80>, <ast.Name object at 0x7da18fe90130>, <ast.Name object at 0x7da18fe931c0>, <ast.Name object at 0x7da18fe90fd0>, <ast.Name object at 0x7da18fe93fa0>, <ast.Name object at 0x7da18fe922c0>, <ast.Name object at 0x7da18fe90c10>, <ast.Name object at 0x7da18fe93850>, <ast.Name object at 0x7da18fe909a0>, <ast.Name object at 0x7da18fe91780>, <ast.Name object at 0x7da18fe90760>, <ast.Name object at 0x7da18fe93250>, <ast.Name object at 0x7da18fe92ad0>, <ast.Name object at 0x7da18fe926e0>, <ast.Name object at 0x7da18fe93970>, <ast.Name object at 0x7da18fe92bc0>, <ast.Name object at 0x7da18fe91540>, <ast.Name object at 0x7da18fe91e40>, <ast.Name object at 0x7da18fe911b0>]]
variable[sentiments] assign[=] list[[]]
for taget[name[item]] in starred[name[wordsAndEmoticons]] begin[:]
variable[v] assign[=] constant[0]
variable[i] assign[=] call[name[wordsAndEmoticons].index, parameter[name[item]]]
if <ast.BoolOp object at 0x7da18fe90e20> begin[:]
call[name[sentiments].append, parameter[name[v]]]
continue
variable[item_lowercase] assign[=] call[call[name[str], parameter[name[item]]].lower, parameter[]]
if compare[name[item_lowercase] in name[sentiment].valence_dict] begin[:]
variable[v] assign[=] call[name[float], parameter[call[name[sentiment].valence_dict][name[item_lowercase]]]]
variable[c_incr] assign[=] constant[0.733]
if <ast.BoolOp object at 0x7da18fe92d70> begin[:]
if compare[name[v] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18fe92320>
def function[scalar_inc_dec, parameter[word, valence]]:
variable[scalar] assign[=] constant[0.0]
variable[word_lower] assign[=] call[call[name[str], parameter[name[word]]].lower, parameter[]]
if compare[name[word_lower] in name[booster_dict]] begin[:]
variable[scalar] assign[=] call[name[booster_dict]][name[word_lower]]
if compare[name[valence] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18fe90df0>
if <ast.BoolOp object at 0x7da18fe93730> begin[:]
if compare[name[valence] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18fe90f70>
return[name[scalar]]
variable[n_scalar] assign[=] <ast.UnaryOp object at 0x7da18f7211e0>
if <ast.BoolOp object at 0x7da18f720e20> begin[:]
variable[s1] assign[=] call[name[scalar_inc_dec], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[1]]], name[v]]]
variable[v] assign[=] binary_operation[name[v] + name[s1]]
if call[name[negated], parameter[list[[<ast.Subscript object at 0x7da18f7224a0>]]]] begin[:]
variable[v] assign[=] binary_operation[name[v] * name[n_scalar]]
if <ast.BoolOp object at 0x7da18f7202b0> begin[:]
variable[s2] assign[=] call[name[scalar_inc_dec], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[2]]], name[v]]]
if compare[name[s2] not_equal[!=] constant[0]] begin[:]
variable[s2] assign[=] binary_operation[name[s2] * constant[0.95]]
variable[v] assign[=] binary_operation[name[v] + name[s2]]
if <ast.BoolOp object at 0x7da18f721ba0> begin[:]
variable[v] assign[=] binary_operation[name[v] * constant[1.5]]
if <ast.BoolOp object at 0x7da18f722230> begin[:]
variable[s3] assign[=] call[name[scalar_inc_dec], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[3]]], name[v]]]
if compare[name[s3] not_equal[!=] constant[0]] begin[:]
variable[s3] assign[=] binary_operation[name[s3] * constant[0.9]]
variable[v] assign[=] binary_operation[name[v] + name[s3]]
if <ast.BoolOp object at 0x7da18f723490> begin[:]
variable[v] assign[=] binary_operation[name[v] * constant[1.25]]
variable[special_case_idioms] assign[=] dictionary[[<ast.Constant object at 0x7da18f720fd0>, <ast.Constant object at 0x7da18f721090>, <ast.Constant object at 0x7da18f7201c0>, <ast.Constant object at 0x7da18f721420>, <ast.Constant object at 0x7da18f7214e0>, <ast.Constant object at 0x7da18f722530>, <ast.Constant object at 0x7da18f723ca0>], [<ast.Constant object at 0x7da18f7202e0>, <ast.Constant object at 0x7da18f721180>, <ast.Constant object at 0x7da18f722c50>, <ast.UnaryOp object at 0x7da18f722cb0>, <ast.Constant object at 0x7da18f722620>, <ast.UnaryOp object at 0x7da18f720100>, <ast.UnaryOp object at 0x7da18f721e40>]]
variable[onezero] assign[=] call[constant[{} {}].format, parameter[call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[1]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][name[i]]]]]]
variable[twoonezero] assign[=] call[constant[{} {}].format, parameter[call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[2]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[1]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][name[i]]]]]]
variable[twoone] assign[=] call[constant[{} {}].format, parameter[call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[2]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[1]]]]]]]
variable[threetwoone] assign[=] call[constant[{} {} {}].format, parameter[call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[3]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[2]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[1]]]]]]]
variable[threetwo] assign[=] call[constant[{} {}].format, parameter[call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[3]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] - constant[2]]]]]]]
if compare[name[onezero] in name[special_case_idioms]] begin[:]
variable[v] assign[=] call[name[special_case_idioms]][name[onezero]]
if compare[binary_operation[call[name[len], parameter[name[wordsAndEmoticons]]] - constant[1]] greater[>] name[i]] begin[:]
variable[zeroone] assign[=] call[constant[{} {}].format, parameter[call[name[str], parameter[call[name[wordsAndEmoticons]][name[i]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] + constant[1]]]]]]]
if compare[name[zeroone] in name[special_case_idioms]] begin[:]
variable[v] assign[=] call[name[special_case_idioms]][name[zeroone]]
if compare[binary_operation[call[name[len], parameter[name[wordsAndEmoticons]]] - constant[1]] greater[>] binary_operation[name[i] + constant[1]]] begin[:]
variable[zeroonetwo] assign[=] call[constant[{} {}].format, parameter[call[name[str], parameter[call[name[wordsAndEmoticons]][name[i]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] + constant[1]]]]], call[name[str], parameter[call[name[wordsAndEmoticons]][binary_operation[name[i] + constant[2]]]]]]]
if compare[name[zeroonetwo] in name[special_case_idioms]] begin[:]
variable[v] assign[=] call[name[special_case_idioms]][name[zeroonetwo]]
if <ast.BoolOp object at 0x7da18ede5810> begin[:]
variable[v] assign[=] binary_operation[name[v] + name[b_decr]]
if <ast.BoolOp object at 0x7da18ede4550> begin[:]
if <ast.BoolOp object at 0x7da18ede7cd0> begin[:]
variable[v] assign[=] binary_operation[name[v] * name[n_scalar]]
call[name[sentiments].append, parameter[name[v]]]
if <ast.BoolOp object at 0x7da18ede60e0> begin[:]
<ast.Try object at 0x7da18ede4e50>
for taget[name[s]] in starred[name[sentiments]] begin[:]
variable[si] assign[=] call[name[sentiments].index, parameter[name[s]]]
if compare[name[si] less[<] name[bi]] begin[:]
call[name[sentiments].pop, parameter[name[si]]]
call[name[sentiments].insert, parameter[name[si], binary_operation[name[s] * constant[0.5]]]]
if name[sentiments] begin[:]
variable[sum_s] assign[=] call[name[float], parameter[call[name[sum], parameter[name[sentiments]]]]]
variable[ep_count] assign[=] call[call[name[str], parameter[name[text]]].count, parameter[constant[!]]]
if compare[name[ep_count] greater[>] constant[4]] begin[:]
variable[ep_count] assign[=] constant[4]
variable[ep_amplifier] assign[=] binary_operation[name[ep_count] * constant[0.292]]
if compare[name[sum_s] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c796aa0>
variable[qm_count] assign[=] call[call[name[str], parameter[name[text]]].count, parameter[constant[?]]]
variable[qm_amplifier] assign[=] constant[0]
if compare[name[qm_count] greater[>] constant[1]] begin[:]
if compare[name[qm_count] less_or_equal[<=] constant[3]] begin[:]
variable[qm_amplifier] assign[=] binary_operation[name[qm_count] * constant[0.18]]
if compare[name[sum_s] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c794e80>
variable[compound] assign[=] call[name[normalize], parameter[name[sum_s]]]
variable[pos_sum] assign[=] constant[0.0]
variable[neg_sum] assign[=] constant[0.0]
variable[neu_count] assign[=] constant[0]
for taget[name[sentiment_score]] in starred[name[sentiments]] begin[:]
if compare[name[sentiment_score] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c7942b0>
if compare[name[sentiment_score] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c794730>
if compare[name[sentiment_score] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20e9b06a0>
if compare[name[pos_sum] greater[>] call[name[math].fabs, parameter[name[neg_sum]]]] begin[:]
<ast.AugAssign object at 0x7da20e9b20b0>
variable[total] assign[=] binary_operation[binary_operation[name[pos_sum] + call[name[math].fabs, parameter[name[neg_sum]]]] + name[neu_count]]
variable[pos] assign[=] call[name[math].fabs, parameter[binary_operation[name[pos_sum] / name[total]]]]
variable[neg] assign[=] call[name[math].fabs, parameter[binary_operation[name[neg_sum] / name[total]]]]
variable[neu] assign[=] call[name[math].fabs, parameter[binary_operation[name[neu_count] / name[total]]]]
variable[s] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b3070>, <ast.Constant object at 0x7da20e9b1060>, <ast.Constant object at 0x7da20e9b0430>, <ast.Constant object at 0x7da20e9b2b30>], [<ast.Call object at 0x7da20e9b2830>, <ast.Call object at 0x7da20e9b1e70>, <ast.Call object at 0x7da20e9b27d0>, <ast.Call object at 0x7da20e9b16f0>]]
return[name[s]] | keyword[def] identifier[sentiment] ( identifier[text] ):
literal[string]
identifier[sentiment] . identifier[valence_dict] = identifier[load_valence_dict] () keyword[if] identifier[sentiment] . identifier[valence_dict] keyword[is] keyword[None] keyword[else] identifier[sentiment_valence_dict]
identifier[wordsAndEmoticons] = identifier[str] ( identifier[text] ). identifier[split] ()
identifier[text_mod] = identifier[regex_remove_punctuation] . identifier[sub] ( literal[string] , identifier[text] )
identifier[wordsOnly] = identifier[str] ( identifier[text_mod] ). identifier[split] ()
keyword[for] identifier[word] keyword[in] identifier[wordsOnly] :
keyword[if] identifier[len] ( identifier[word] )<= literal[int] :
identifier[wordsOnly] . identifier[remove] ( identifier[word] )
identifier[puncList] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[word] keyword[in] identifier[wordsOnly] :
keyword[for] identifier[p] keyword[in] identifier[puncList] :
identifier[pword] = identifier[p] + identifier[word]
identifier[x1] = identifier[wordsAndEmoticons] . identifier[count] ( identifier[pword] )
keyword[while] identifier[x1] > literal[int] :
identifier[i] = identifier[wordsAndEmoticons] . identifier[index] ( identifier[pword] )
identifier[wordsAndEmoticons] . identifier[remove] ( identifier[pword] )
identifier[wordsAndEmoticons] . identifier[insert] ( identifier[i] , identifier[word] )
identifier[x1] = identifier[wordsAndEmoticons] . identifier[count] ( identifier[pword] )
identifier[wordp] = identifier[word] + identifier[p]
identifier[x2] = identifier[wordsAndEmoticons] . identifier[count] ( identifier[wordp] )
keyword[while] identifier[x2] > literal[int] :
identifier[i] = identifier[wordsAndEmoticons] . identifier[index] ( identifier[wordp] )
identifier[wordsAndEmoticons] . identifier[remove] ( identifier[wordp] )
identifier[wordsAndEmoticons] . identifier[insert] ( identifier[i] , identifier[word] )
identifier[x2] = identifier[wordsAndEmoticons] . identifier[count] ( identifier[wordp] )
keyword[for] identifier[word] keyword[in] identifier[wordsAndEmoticons] :
keyword[if] identifier[len] ( identifier[word] )<= literal[int] :
identifier[wordsAndEmoticons] . identifier[remove] ( identifier[word] )
identifier[negate] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[def] identifier[negated] ( identifier[list] , identifier[nWords] =[], identifier[includeNT] = keyword[True] ):
identifier[nWords] . identifier[extend] ( identifier[negate] )
keyword[for] identifier[word] keyword[in] identifier[nWords] :
keyword[if] identifier[word] keyword[in] identifier[list] :
keyword[return] keyword[True]
keyword[if] identifier[includeNT] :
keyword[for] identifier[word] keyword[in] identifier[list] :
keyword[if] literal[string] keyword[in] identifier[word] :
keyword[return] keyword[True]
keyword[if] literal[string] keyword[in] identifier[list] :
identifier[i] = identifier[list] . identifier[index] ( literal[string] )
keyword[if] identifier[i] > literal[int] keyword[and] identifier[list] [ identifier[i] - literal[int] ]!= literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[def] identifier[normalize] ( identifier[score] , identifier[alpha] = literal[int] ):
identifier[normScore] = identifier[score] / identifier[math] . identifier[sqrt] ((( identifier[score] * identifier[score] )+ identifier[alpha] ))
keyword[return] identifier[normScore]
keyword[def] identifier[wildCardMatch] ( identifier[patternWithWildcard] , identifier[listOfStringsToMatchAgainst] ):
identifier[listOfMatches] = identifier[fnmatch] . identifier[filter] ( identifier[listOfStringsToMatchAgainst] , identifier[patternWithWildcard] )
keyword[return] identifier[listOfMatches]
keyword[def] identifier[isALLCAP_differential] ( identifier[wordList] ):
identifier[countALLCAPS] = literal[int]
keyword[for] identifier[w] keyword[in] identifier[wordList] :
keyword[if] identifier[str] ( identifier[w] ). identifier[isupper] ():
identifier[countALLCAPS] += literal[int]
identifier[cap_differential] = identifier[len] ( identifier[wordList] )- identifier[countALLCAPS]
keyword[if] identifier[cap_differential] > literal[int] keyword[and] identifier[cap_differential] < identifier[len] ( identifier[wordList] ):
identifier[isDiff] = keyword[True]
keyword[else] : identifier[isDiff] = keyword[False]
keyword[return] identifier[isDiff]
identifier[isCap_diff] = identifier[isALLCAP_differential] ( identifier[wordsAndEmoticons] )
identifier[b_incr] = literal[int]
identifier[b_decr] =- literal[int]
identifier[booster_dict] ={ literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] , literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_incr] ,
literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] ,
literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] ,
literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] ,
literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] ,
literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] , literal[string] : identifier[b_decr] }
identifier[sentiments] =[]
keyword[for] identifier[item] keyword[in] identifier[wordsAndEmoticons] :
identifier[v] = literal[int]
identifier[i] = identifier[wordsAndEmoticons] . identifier[index] ( identifier[item] )
keyword[if] ( identifier[i] < identifier[len] ( identifier[wordsAndEmoticons] )- literal[int] keyword[and] identifier[str] ( identifier[item] ). identifier[lower] ()== literal[string] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] + literal[int] ]). identifier[lower] ()== literal[string] ) keyword[or] identifier[str] ( identifier[item] ). identifier[lower] () keyword[in] identifier[booster_dict] :
identifier[sentiments] . identifier[append] ( identifier[v] )
keyword[continue]
identifier[item_lowercase] = identifier[str] ( identifier[item] ). identifier[lower] ()
keyword[if] identifier[item_lowercase] keyword[in] identifier[sentiment] . identifier[valence_dict] :
identifier[v] = identifier[float] ( identifier[sentiment] . identifier[valence_dict] [ identifier[item_lowercase] ])
identifier[c_incr] = literal[int]
keyword[if] identifier[str] ( identifier[item] ). identifier[isupper] () keyword[and] identifier[isCap_diff] :
keyword[if] identifier[v] > literal[int] : identifier[v] += identifier[c_incr]
keyword[else] : identifier[v] -= identifier[c_incr]
keyword[def] identifier[scalar_inc_dec] ( identifier[word] , identifier[valence] ):
identifier[scalar] = literal[int]
identifier[word_lower] = identifier[str] ( identifier[word] ). identifier[lower] ()
keyword[if] identifier[word_lower] keyword[in] identifier[booster_dict] :
identifier[scalar] = identifier[booster_dict] [ identifier[word_lower] ]
keyword[if] identifier[valence] < literal[int] : identifier[scalar] *=- literal[int]
keyword[if] identifier[str] ( identifier[word] ). identifier[isupper] () keyword[and] identifier[isCap_diff] :
keyword[if] identifier[valence] > literal[int] : identifier[scalar] += identifier[c_incr]
keyword[else] : identifier[scalar] -= identifier[c_incr]
keyword[return] identifier[scalar]
identifier[n_scalar] =- literal[int]
keyword[if] identifier[i] > literal[int] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] () keyword[not] keyword[in] identifier[sentiment] . identifier[valence_dict] :
identifier[s1] = identifier[scalar_inc_dec] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ], identifier[v] )
identifier[v] = identifier[v] + identifier[s1]
keyword[if] identifier[negated] ([ identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]]): identifier[v] = identifier[v] * identifier[n_scalar]
keyword[if] identifier[i] > literal[int] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] () keyword[not] keyword[in] identifier[sentiment] . identifier[valence_dict] :
identifier[s2] = identifier[scalar_inc_dec] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ], identifier[v] )
keyword[if] identifier[s2] != literal[int] : identifier[s2] = identifier[s2] * literal[int]
identifier[v] = identifier[v] + identifier[s2]
keyword[if] identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] keyword[and] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] keyword[or] identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] ):
identifier[v] = identifier[v] * literal[int]
keyword[elif] identifier[negated] ([ identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]]): identifier[v] = identifier[v] * identifier[n_scalar]
keyword[if] identifier[i] > literal[int] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] () keyword[not] keyword[in] identifier[sentiment] . identifier[valence_dict] :
identifier[s3] = identifier[scalar_inc_dec] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ], identifier[v] )
keyword[if] identifier[s3] != literal[int] : identifier[s3] = identifier[s3] * literal[int]
identifier[v] = identifier[v] + identifier[s3]
keyword[if] identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] keyword[and] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] keyword[or] identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] ) keyword[or] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] keyword[or] identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]== literal[string] ):
identifier[v] = identifier[v] * literal[int]
keyword[elif] identifier[negated] ([ identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]]): identifier[v] = identifier[v] * identifier[n_scalar]
identifier[special_case_idioms] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] :- literal[int] ,
literal[string] : literal[int] , literal[string] :- literal[int] , literal[string] :- literal[int] }
identifier[onezero] = literal[string] . identifier[format] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] ]))
identifier[twoonezero] = literal[string] . identifier[format] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] ]))
identifier[twoone] = literal[string] . identifier[format] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]))
identifier[threetwoone] = literal[string] . identifier[format] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]))
identifier[threetwo] = literal[string] . identifier[format] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]))
keyword[if] identifier[onezero] keyword[in] identifier[special_case_idioms] : identifier[v] = identifier[special_case_idioms] [ identifier[onezero] ]
keyword[elif] identifier[twoonezero] keyword[in] identifier[special_case_idioms] : identifier[v] = identifier[special_case_idioms] [ identifier[twoonezero] ]
keyword[elif] identifier[twoone] keyword[in] identifier[special_case_idioms] : identifier[v] = identifier[special_case_idioms] [ identifier[twoone] ]
keyword[elif] identifier[threetwoone] keyword[in] identifier[special_case_idioms] : identifier[v] = identifier[special_case_idioms] [ identifier[threetwoone] ]
keyword[elif] identifier[threetwo] keyword[in] identifier[special_case_idioms] : identifier[v] = identifier[special_case_idioms] [ identifier[threetwo] ]
keyword[if] identifier[len] ( identifier[wordsAndEmoticons] )- literal[int] > identifier[i] :
identifier[zeroone] = literal[string] . identifier[format] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] + literal[int] ]))
keyword[if] identifier[zeroone] keyword[in] identifier[special_case_idioms] : identifier[v] = identifier[special_case_idioms] [ identifier[zeroone] ]
keyword[if] identifier[len] ( identifier[wordsAndEmoticons] )- literal[int] > identifier[i] + literal[int] :
identifier[zeroonetwo] = literal[string] . identifier[format] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] + literal[int] ]), identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] + literal[int] ]))
keyword[if] identifier[zeroonetwo] keyword[in] identifier[special_case_idioms] : identifier[v] = identifier[special_case_idioms] [ identifier[zeroonetwo] ]
keyword[if] identifier[threetwo] keyword[in] identifier[booster_dict] keyword[or] identifier[twoone] keyword[in] identifier[booster_dict] :
identifier[v] = identifier[v] + identifier[b_decr]
keyword[if] identifier[i] > literal[int] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] () keyword[not] keyword[in] identifier[sentiment] . identifier[valence_dict] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] ()== literal[string] :
keyword[if] ( identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] ()!= literal[string] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] ()!= literal[string] ):
identifier[v] = identifier[v] * identifier[n_scalar]
keyword[elif] identifier[i] > literal[int] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] () keyword[not] keyword[in] identifier[sentiment] . identifier[valence_dict] keyword[and] identifier[str] ( identifier[wordsAndEmoticons] [ identifier[i] - literal[int] ]). identifier[lower] ()== literal[string] :
identifier[v] = identifier[v] * identifier[n_scalar]
identifier[sentiments] . identifier[append] ( identifier[v] )
keyword[if] literal[string] keyword[in] identifier[wordsAndEmoticons] keyword[or] literal[string] keyword[in] identifier[wordsAndEmoticons] :
keyword[try] : identifier[bi] = identifier[wordsAndEmoticons] . identifier[index] ( literal[string] )
keyword[except] : identifier[bi] = identifier[wordsAndEmoticons] . identifier[index] ( literal[string] )
keyword[for] identifier[s] keyword[in] identifier[sentiments] :
identifier[si] = identifier[sentiments] . identifier[index] ( identifier[s] )
keyword[if] identifier[si] < identifier[bi] :
identifier[sentiments] . identifier[pop] ( identifier[si] )
identifier[sentiments] . identifier[insert] ( identifier[si] , identifier[s] * literal[int] )
keyword[elif] identifier[si] > identifier[bi] :
identifier[sentiments] . identifier[pop] ( identifier[si] )
identifier[sentiments] . identifier[insert] ( identifier[si] , identifier[s] * literal[int] )
keyword[if] identifier[sentiments] :
identifier[sum_s] = identifier[float] ( identifier[sum] ( identifier[sentiments] ))
identifier[ep_count] = identifier[str] ( identifier[text] ). identifier[count] ( literal[string] )
keyword[if] identifier[ep_count] > literal[int] : identifier[ep_count] = literal[int]
identifier[ep_amplifier] = identifier[ep_count] * literal[int]
keyword[if] identifier[sum_s] > literal[int] : identifier[sum_s] += identifier[ep_amplifier]
keyword[elif] identifier[sum_s] < literal[int] : identifier[sum_s] -= identifier[ep_amplifier]
identifier[qm_count] = identifier[str] ( identifier[text] ). identifier[count] ( literal[string] )
identifier[qm_amplifier] = literal[int]
keyword[if] identifier[qm_count] > literal[int] :
keyword[if] identifier[qm_count] <= literal[int] : identifier[qm_amplifier] = identifier[qm_count] * literal[int]
keyword[else] : identifier[qm_amplifier] = literal[int]
keyword[if] identifier[sum_s] > literal[int] : identifier[sum_s] += identifier[qm_amplifier]
keyword[elif] identifier[sum_s] < literal[int] : identifier[sum_s] -= identifier[qm_amplifier]
identifier[compound] = identifier[normalize] ( identifier[sum_s] )
identifier[pos_sum] = literal[int]
identifier[neg_sum] = literal[int]
identifier[neu_count] = literal[int]
keyword[for] identifier[sentiment_score] keyword[in] identifier[sentiments] :
keyword[if] identifier[sentiment_score] > literal[int] :
identifier[pos_sum] +=( identifier[float] ( identifier[sentiment_score] )+ literal[int] )
keyword[if] identifier[sentiment_score] < literal[int] :
identifier[neg_sum] +=( identifier[float] ( identifier[sentiment_score] )- literal[int] )
keyword[if] identifier[sentiment_score] == literal[int] :
identifier[neu_count] += literal[int]
keyword[if] identifier[pos_sum] > identifier[math] . identifier[fabs] ( identifier[neg_sum] ): identifier[pos_sum] +=( identifier[ep_amplifier] + identifier[qm_amplifier] )
keyword[elif] identifier[pos_sum] < identifier[math] . identifier[fabs] ( identifier[neg_sum] ): identifier[neg_sum] -=( identifier[ep_amplifier] + identifier[qm_amplifier] )
identifier[total] = identifier[pos_sum] + identifier[math] . identifier[fabs] ( identifier[neg_sum] )+ identifier[neu_count]
identifier[pos] = identifier[math] . identifier[fabs] ( identifier[pos_sum] / identifier[total] )
identifier[neg] = identifier[math] . identifier[fabs] ( identifier[neg_sum] / identifier[total] )
identifier[neu] = identifier[math] . identifier[fabs] ( identifier[neu_count] / identifier[total] )
keyword[else] :
identifier[compound] , identifier[pos] , identifier[neg] , identifier[neu] = literal[int] , literal[int] , literal[int] , literal[int]
identifier[s] ={ literal[string] : identifier[round] ( identifier[neg] , literal[int] ),
literal[string] : identifier[round] ( identifier[neu] , literal[int] ),
literal[string] : identifier[round] ( identifier[pos] , literal[int] ),
literal[string] : identifier[round] ( identifier[compound] , literal[int] )}
keyword[return] identifier[s] | def sentiment(text):
"""
Returns a float for sentiment strength based on the input text.
Positive values are positive valence, negative value are negative valence.
"""
sentiment.valence_dict = load_valence_dict() if sentiment.valence_dict is None else sentiment_valence_dict
wordsAndEmoticons = str(text).split() #doesn't separate words from adjacent punctuation (keeps emoticons & contractions)
text_mod = regex_remove_punctuation.sub('', text) # removes punctuation (but loses emoticons & contractions)
wordsOnly = str(text_mod).split() # get rid of empty items or single letter "words" like 'a' and 'I' from wordsOnly
for word in wordsOnly:
if len(word) <= 1:
wordsOnly.remove(word) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # now remove adjacent & redundant punctuation from [wordsAndEmoticons] while keeping emoticons and contractions
puncList = ['.', '!', '?', ',', ';', ':', '-', "'", '"', '!!', '!!!', '??', '???', '?!?', '!?!', '?!?!', '!?!?']
for word in wordsOnly:
for p in puncList:
pword = p + word
x1 = wordsAndEmoticons.count(pword)
while x1 > 0:
i = wordsAndEmoticons.index(pword)
wordsAndEmoticons.remove(pword)
wordsAndEmoticons.insert(i, word)
x1 = wordsAndEmoticons.count(pword) # depends on [control=['while'], data=['x1']]
wordp = word + p
x2 = wordsAndEmoticons.count(wordp)
while x2 > 0:
i = wordsAndEmoticons.index(wordp)
wordsAndEmoticons.remove(wordp)
wordsAndEmoticons.insert(i, word)
x2 = wordsAndEmoticons.count(wordp) # depends on [control=['while'], data=['x2']] # depends on [control=['for'], data=['p']] # depends on [control=['for'], data=['word']] # get rid of residual empty items or single letter "words" like 'a' and 'I' from wordsAndEmoticons
for word in wordsAndEmoticons:
if len(word) <= 1:
wordsAndEmoticons.remove(word) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # remove stopwords from [wordsAndEmoticons]
#stopwords = [str(word).strip() for word in open('stopwords.txt')]
#for word in wordsAndEmoticons:
# if word in stopwords:
# wordsAndEmoticons.remove(word)
# check for negation
negate = ['aint', 'arent', 'cannot', 'cant', 'couldnt', 'darent', 'didnt', 'doesnt', "ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't", 'dont', 'hadnt', 'hasnt', 'havent', 'isnt', 'mightnt', 'mustnt', 'neither', "don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't", 'neednt', "needn't", 'never', 'none', 'nope', 'nor', 'not', 'nothing', 'nowhere', 'oughtnt', 'shant', 'shouldnt', 'uhuh', 'wasnt', 'werent', "oughtn't", "shan't", "shouldn't", 'uh-uh', "wasn't", "weren't", 'without', 'wont', 'wouldnt', "won't", "wouldn't", 'rarely', 'seldom', 'despite']
def negated(list, nWords=[], includeNT=True):
nWords.extend(negate)
for word in nWords:
if word in list:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']]
if includeNT:
for word in list:
if "n't" in word:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]]
if 'least' in list:
i = list.index('least')
if i > 0 and list[i - 1] != 'at':
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['list']]
return False
def normalize(score, alpha=15): # normalize the score to be between -1 and 1 using an alpha that approximates the max expected value
normScore = score / math.sqrt(score * score + alpha)
return normScore
def wildCardMatch(patternWithWildcard, listOfStringsToMatchAgainst):
listOfMatches = fnmatch.filter(listOfStringsToMatchAgainst, patternWithWildcard)
return listOfMatches
def isALLCAP_differential(wordList):
countALLCAPS = 0
for w in wordList:
if str(w).isupper():
countALLCAPS += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']]
cap_differential = len(wordList) - countALLCAPS
if cap_differential > 0 and cap_differential < len(wordList):
isDiff = True # depends on [control=['if'], data=[]]
else:
isDiff = False
return isDiff
isCap_diff = isALLCAP_differential(wordsAndEmoticons)
b_incr = 0.293 #(empirically derived mean sentiment intensity rating increase for booster words)
b_decr = -0.293 # booster/dampener 'intensifiers' or 'degree adverbs' http://en.wiktionary.org/wiki/Category:English_degree_adverbs
booster_dict = {'absolutely': b_incr, 'amazingly': b_incr, 'awfully': b_incr, 'completely': b_incr, 'considerably': b_incr, 'decidedly': b_incr, 'deeply': b_incr, 'effing': b_incr, 'enormously': b_incr, 'entirely': b_incr, 'especially': b_incr, 'exceptionally': b_incr, 'extremely': b_incr, 'fabulously': b_incr, 'flipping': b_incr, 'flippin': b_incr, 'fricking': b_incr, 'frickin': b_incr, 'frigging': b_incr, 'friggin': b_incr, 'fully': b_incr, 'fucking': b_incr, 'greatly': b_incr, 'hella': b_incr, 'highly': b_incr, 'hugely': b_incr, 'incredibly': b_incr, 'intensely': b_incr, 'majorly': b_incr, 'more': b_incr, 'most': b_incr, 'particularly': b_incr, 'purely': b_incr, 'quite': b_incr, 'really': b_incr, 'remarkably': b_incr, 'so': b_incr, 'substantially': b_incr, 'thoroughly': b_incr, 'totally': b_incr, 'tremendously': b_incr, 'uber': b_incr, 'unbelievably': b_incr, 'unusually': b_incr, 'utterly': b_incr, 'very': b_incr, 'almost': b_decr, 'barely': b_decr, 'hardly': b_decr, 'just enough': b_decr, 'kind of': b_decr, 'kinda': b_decr, 'kindof': b_decr, 'kind-of': b_decr, 'less': b_decr, 'little': b_decr, 'marginally': b_decr, 'occasionally': b_decr, 'partly': b_decr, 'scarcely': b_decr, 'slightly': b_decr, 'somewhat': b_decr, 'sort of': b_decr, 'sorta': b_decr, 'sortof': b_decr, 'sort-of': b_decr}
sentiments = []
for item in wordsAndEmoticons:
v = 0
i = wordsAndEmoticons.index(item)
if i < len(wordsAndEmoticons) - 1 and str(item).lower() == 'kind' and (str(wordsAndEmoticons[i + 1]).lower() == 'of') or str(item).lower() in booster_dict:
sentiments.append(v)
continue # depends on [control=['if'], data=[]]
item_lowercase = str(item).lower()
if item_lowercase in sentiment.valence_dict: #get the sentiment valence
v = float(sentiment.valence_dict[item_lowercase]) #check if sentiment laden word is in ALLCAPS (while others aren't)
c_incr = 0.733 #(empirically derived mean sentiment intensity rating increase for using ALLCAPs to emphasize a word)
if str(item).isupper() and isCap_diff:
if v > 0:
v += c_incr # depends on [control=['if'], data=['v']]
else:
v -= c_incr # depends on [control=['if'], data=[]] #check if the preceding words increase, decrease, or negate/nullify the valence
def scalar_inc_dec(word, valence):
scalar = 0.0
word_lower = str(word).lower()
if word_lower in booster_dict:
scalar = booster_dict[word_lower]
if valence < 0:
scalar *= -1 # depends on [control=['if'], data=[]] #check if booster/dampener word is in ALLCAPS (while others aren't)
if str(word).isupper() and isCap_diff:
if valence > 0:
scalar += c_incr # depends on [control=['if'], data=[]]
else:
scalar -= c_incr # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['word_lower', 'booster_dict']]
return scalar
n_scalar = -0.74
if i > 0 and str(wordsAndEmoticons[i - 1]).lower() not in sentiment.valence_dict:
s1 = scalar_inc_dec(wordsAndEmoticons[i - 1], v)
v = v + s1
if negated([wordsAndEmoticons[i - 1]]):
v = v * n_scalar # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if i > 1 and str(wordsAndEmoticons[i - 2]).lower() not in sentiment.valence_dict:
s2 = scalar_inc_dec(wordsAndEmoticons[i - 2], v)
if s2 != 0:
s2 = s2 * 0.95 # depends on [control=['if'], data=['s2']]
v = v + s2 # check for special use of 'never' as valence modifier instead of negation
if wordsAndEmoticons[i - 2] == 'never' and (wordsAndEmoticons[i - 1] == 'so' or wordsAndEmoticons[i - 1] == 'this'):
v = v * 1.5 # depends on [control=['if'], data=[]] # otherwise, check for negation/nullification
elif negated([wordsAndEmoticons[i - 2]]):
v = v * n_scalar # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if i > 2 and str(wordsAndEmoticons[i - 3]).lower() not in sentiment.valence_dict:
s3 = scalar_inc_dec(wordsAndEmoticons[i - 3], v)
if s3 != 0:
s3 = s3 * 0.9 # depends on [control=['if'], data=['s3']]
v = v + s3 # check for special use of 'never' as valence modifier instead of negation
if wordsAndEmoticons[i - 3] == 'never' and (wordsAndEmoticons[i - 2] == 'so' or wordsAndEmoticons[i - 2] == 'this') or (wordsAndEmoticons[i - 1] == 'so' or wordsAndEmoticons[i - 1] == 'this'):
v = v * 1.25 # depends on [control=['if'], data=[]] # otherwise, check for negation/nullification
elif negated([wordsAndEmoticons[i - 3]]):
v = v * n_scalar # depends on [control=['if'], data=[]] # check for special case idioms using a sentiment-laden keyword known to SAGE
special_case_idioms = {'the shit': 3, 'the bomb': 3, 'bad ass': 1.5, 'yeah right': -2, 'cut the mustard': 2, 'kiss of death': -1.5, 'hand to mouth': -2} # future work: consider other sentiment-laden idioms
#other_idioms = {"back handed": -2, "blow smoke": -2, "blowing smoke": -2, "upper hand": 1, "break a leg": 2,
# "cooking with gas": 2, "in the black": 2, "in the red": -2, "on the ball": 2,"under the weather": -2}
onezero = '{} {}'.format(str(wordsAndEmoticons[i - 1]), str(wordsAndEmoticons[i]))
twoonezero = '{} {}'.format(str(wordsAndEmoticons[i - 2]), str(wordsAndEmoticons[i - 1]), str(wordsAndEmoticons[i]))
twoone = '{} {}'.format(str(wordsAndEmoticons[i - 2]), str(wordsAndEmoticons[i - 1]))
threetwoone = '{} {} {}'.format(str(wordsAndEmoticons[i - 3]), str(wordsAndEmoticons[i - 2]), str(wordsAndEmoticons[i - 1]))
threetwo = '{} {}'.format(str(wordsAndEmoticons[i - 3]), str(wordsAndEmoticons[i - 2]))
if onezero in special_case_idioms:
v = special_case_idioms[onezero] # depends on [control=['if'], data=['onezero', 'special_case_idioms']]
elif twoonezero in special_case_idioms:
v = special_case_idioms[twoonezero] # depends on [control=['if'], data=['twoonezero', 'special_case_idioms']]
elif twoone in special_case_idioms:
v = special_case_idioms[twoone] # depends on [control=['if'], data=['twoone', 'special_case_idioms']]
elif threetwoone in special_case_idioms:
v = special_case_idioms[threetwoone] # depends on [control=['if'], data=['threetwoone', 'special_case_idioms']]
elif threetwo in special_case_idioms:
v = special_case_idioms[threetwo] # depends on [control=['if'], data=['threetwo', 'special_case_idioms']]
if len(wordsAndEmoticons) - 1 > i:
zeroone = '{} {}'.format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i + 1]))
if zeroone in special_case_idioms:
v = special_case_idioms[zeroone] # depends on [control=['if'], data=['zeroone', 'special_case_idioms']] # depends on [control=['if'], data=['i']]
if len(wordsAndEmoticons) - 1 > i + 1:
zeroonetwo = '{} {}'.format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i + 1]), str(wordsAndEmoticons[i + 2]))
if zeroonetwo in special_case_idioms:
v = special_case_idioms[zeroonetwo] # depends on [control=['if'], data=['zeroonetwo', 'special_case_idioms']] # depends on [control=['if'], data=[]] # check for booster/dampener bi-grams such as 'sort of' or 'kind of'
if threetwo in booster_dict or twoone in booster_dict:
v = v + b_decr # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # check for negation case using "least"
if i > 1 and str(wordsAndEmoticons[i - 1]).lower() not in sentiment.valence_dict and (str(wordsAndEmoticons[i - 1]).lower() == 'least'):
if str(wordsAndEmoticons[i - 2]).lower() != 'at' and str(wordsAndEmoticons[i - 2]).lower() != 'very':
v = v * n_scalar # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif i > 0 and str(wordsAndEmoticons[i - 1]).lower() not in sentiment.valence_dict and (str(wordsAndEmoticons[i - 1]).lower() == 'least'):
v = v * n_scalar # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['item_lowercase']]
sentiments.append(v) # depends on [control=['for'], data=['item']] # check for modification in sentiment due to contrastive conjunction 'but'
if 'but' in wordsAndEmoticons or 'BUT' in wordsAndEmoticons:
try:
bi = wordsAndEmoticons.index('but') # depends on [control=['try'], data=[]]
except:
bi = wordsAndEmoticons.index('BUT') # depends on [control=['except'], data=[]]
for s in sentiments:
si = sentiments.index(s)
if si < bi:
sentiments.pop(si)
sentiments.insert(si, s * 0.5) # depends on [control=['if'], data=['si']]
elif si > bi:
sentiments.pop(si)
sentiments.insert(si, s * 1.5) # depends on [control=['if'], data=['si']] # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]]
if sentiments:
sum_s = float(sum(sentiments)) #print sentiments, sum_s
# check for added emphasis resulting from exclamation points (up to 4 of them)
ep_count = str(text).count('!')
if ep_count > 4:
ep_count = 4 # depends on [control=['if'], data=['ep_count']]
ep_amplifier = ep_count * 0.292 #(empirically derived mean sentiment intensity rating increase for exclamation points)
if sum_s > 0:
sum_s += ep_amplifier # depends on [control=['if'], data=['sum_s']]
elif sum_s < 0:
sum_s -= ep_amplifier # depends on [control=['if'], data=['sum_s']] # check for added emphasis resulting from question marks (2 or 3+)
qm_count = str(text).count('?')
qm_amplifier = 0
if qm_count > 1:
if qm_count <= 3:
qm_amplifier = qm_count * 0.18 # depends on [control=['if'], data=['qm_count']]
else:
qm_amplifier = 0.96
if sum_s > 0:
sum_s += qm_amplifier # depends on [control=['if'], data=['sum_s']]
elif sum_s < 0:
sum_s -= qm_amplifier # depends on [control=['if'], data=['sum_s']] # depends on [control=['if'], data=['qm_count']]
compound = normalize(sum_s) # want separate positive versus negative sentiment scores
pos_sum = 0.0
neg_sum = 0.0
neu_count = 0
for sentiment_score in sentiments:
if sentiment_score > 0:
pos_sum += float(sentiment_score) + 1 # compensates for neutral words that are counted as 1 # depends on [control=['if'], data=['sentiment_score']]
if sentiment_score < 0:
neg_sum += float(sentiment_score) - 1 # when used with math.fabs(), compensates for neutrals # depends on [control=['if'], data=['sentiment_score']]
if sentiment_score == 0:
neu_count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sentiment_score']]
if pos_sum > math.fabs(neg_sum):
pos_sum += ep_amplifier + qm_amplifier # depends on [control=['if'], data=['pos_sum']]
elif pos_sum < math.fabs(neg_sum):
neg_sum -= ep_amplifier + qm_amplifier # depends on [control=['if'], data=[]]
total = pos_sum + math.fabs(neg_sum) + neu_count
pos = math.fabs(pos_sum / total)
neg = math.fabs(neg_sum / total)
neu = math.fabs(neu_count / total) # depends on [control=['if'], data=[]]
else:
(compound, pos, neg, neu) = (0.0, 0.0, 0.0, 0.0)
s = {'neg': round(neg, 3), 'neu': round(neu, 3), 'pos': round(pos, 3), 'compound': round(compound, 4)}
return s |
def delete_task(self, id, client=None):
"""Deletes a task from the current task queue.
If the task isn't found (backend 404), raises a
:class:`gcloud.exceptions.NotFound`.
:type id: string
:param id: A task name to delete.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current taskqueue.
:raises: :class:`gcloud.exceptions.NotFound`
"""
client = self._require_client(client)
task = Task(taskqueue=self, id=id)
# We intentionally pass `_target_object=None` since a DELETE
# request has no response value (whether in a standard request or
# in a batch request).
client.connection.api_request(method='DELETE', path=task.path, _target_object=None) | def function[delete_task, parameter[self, id, client]]:
constant[Deletes a task from the current task queue.
If the task isn't found (backend 404), raises a
:class:`gcloud.exceptions.NotFound`.
:type id: string
:param id: A task name to delete.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current taskqueue.
:raises: :class:`gcloud.exceptions.NotFound`
]
variable[client] assign[=] call[name[self]._require_client, parameter[name[client]]]
variable[task] assign[=] call[name[Task], parameter[]]
call[name[client].connection.api_request, parameter[]] | keyword[def] identifier[delete_task] ( identifier[self] , identifier[id] , identifier[client] = keyword[None] ):
literal[string]
identifier[client] = identifier[self] . identifier[_require_client] ( identifier[client] )
identifier[task] = identifier[Task] ( identifier[taskqueue] = identifier[self] , identifier[id] = identifier[id] )
identifier[client] . identifier[connection] . identifier[api_request] ( identifier[method] = literal[string] , identifier[path] = identifier[task] . identifier[path] , identifier[_target_object] = keyword[None] ) | def delete_task(self, id, client=None):
"""Deletes a task from the current task queue.
If the task isn't found (backend 404), raises a
:class:`gcloud.exceptions.NotFound`.
:type id: string
:param id: A task name to delete.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current taskqueue.
:raises: :class:`gcloud.exceptions.NotFound`
"""
client = self._require_client(client)
task = Task(taskqueue=self, id=id)
# We intentionally pass `_target_object=None` since a DELETE
# request has no response value (whether in a standard request or
# in a batch request).
client.connection.api_request(method='DELETE', path=task.path, _target_object=None) |
def get_object_from_session(entity, key):
"""Get an object from the database given an entity and the session key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
"""
def object_from_session_function(service, message):
"""Actual implementation of get_object_from_session function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
id_ = get_value_from_session(key)(service, message)
result = service.session.query(entity).get(id_)
if not result:
raise SelenolInvalidArgumentException(key, id_)
return result
return object_from_session_function | def function[get_object_from_session, parameter[entity, key]]:
constant[Get an object from the database given an entity and the session key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
]
def function[object_from_session_function, parameter[service, message]]:
constant[Actual implementation of get_object_from_session function.
:param service: SelenolService object.
:param message: SelenolMessage request.
]
variable[id_] assign[=] call[call[name[get_value_from_session], parameter[name[key]]], parameter[name[service], name[message]]]
variable[result] assign[=] call[call[name[service].session.query, parameter[name[entity]]].get, parameter[name[id_]]]
if <ast.UnaryOp object at 0x7da1b168cd00> begin[:]
<ast.Raise object at 0x7da1b168d240>
return[name[result]]
return[name[object_from_session_function]] | keyword[def] identifier[get_object_from_session] ( identifier[entity] , identifier[key] ):
literal[string]
keyword[def] identifier[object_from_session_function] ( identifier[service] , identifier[message] ):
literal[string]
identifier[id_] = identifier[get_value_from_session] ( identifier[key] )( identifier[service] , identifier[message] )
identifier[result] = identifier[service] . identifier[session] . identifier[query] ( identifier[entity] ). identifier[get] ( identifier[id_] )
keyword[if] keyword[not] identifier[result] :
keyword[raise] identifier[SelenolInvalidArgumentException] ( identifier[key] , identifier[id_] )
keyword[return] identifier[result]
keyword[return] identifier[object_from_session_function] | def get_object_from_session(entity, key):
"""Get an object from the database given an entity and the session key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
"""
def object_from_session_function(service, message):
"""Actual implementation of get_object_from_session function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
id_ = get_value_from_session(key)(service, message)
result = service.session.query(entity).get(id_)
if not result:
raise SelenolInvalidArgumentException(key, id_) # depends on [control=['if'], data=[]]
return result
return object_from_session_function |
def parse_singular_alphabetic_character(t, tag_name):
'''Parses the sole alphabetic character value with name tag_name in tag t. Heavy-handed with the asserts.'''
pos = t.getElementsByTagName(tag_name)
assert(len(pos) == 1)
pos = pos[0]
assert(len(pos.childNodes) == 1)
v = pos.childNodes[0].data
assert(len(v) == 1 and v >= 'A' and 'v' <= 'z') # no floats allowed
return v | def function[parse_singular_alphabetic_character, parameter[t, tag_name]]:
constant[Parses the sole alphabetic character value with name tag_name in tag t. Heavy-handed with the asserts.]
variable[pos] assign[=] call[name[t].getElementsByTagName, parameter[name[tag_name]]]
assert[compare[call[name[len], parameter[name[pos]]] equal[==] constant[1]]]
variable[pos] assign[=] call[name[pos]][constant[0]]
assert[compare[call[name[len], parameter[name[pos].childNodes]] equal[==] constant[1]]]
variable[v] assign[=] call[name[pos].childNodes][constant[0]].data
assert[<ast.BoolOp object at 0x7da18f00cb20>]
return[name[v]] | keyword[def] identifier[parse_singular_alphabetic_character] ( identifier[t] , identifier[tag_name] ):
literal[string]
identifier[pos] = identifier[t] . identifier[getElementsByTagName] ( identifier[tag_name] )
keyword[assert] ( identifier[len] ( identifier[pos] )== literal[int] )
identifier[pos] = identifier[pos] [ literal[int] ]
keyword[assert] ( identifier[len] ( identifier[pos] . identifier[childNodes] )== literal[int] )
identifier[v] = identifier[pos] . identifier[childNodes] [ literal[int] ]. identifier[data]
keyword[assert] ( identifier[len] ( identifier[v] )== literal[int] keyword[and] identifier[v] >= literal[string] keyword[and] literal[string] <= literal[string] )
keyword[return] identifier[v] | def parse_singular_alphabetic_character(t, tag_name):
"""Parses the sole alphabetic character value with name tag_name in tag t. Heavy-handed with the asserts."""
pos = t.getElementsByTagName(tag_name)
assert len(pos) == 1
pos = pos[0]
assert len(pos.childNodes) == 1
v = pos.childNodes[0].data
assert len(v) == 1 and v >= 'A' and ('v' <= 'z') # no floats allowed
return v |
def get(self):
"""
Return the HTTP code status.
:return: The matched and formatted status code.
:rtype: str|int|None
"""
if PyFunceble.HTTP_CODE["active"]:
# The http status code extraction is activated.
# We get the http status code.
http_code = self._access()
# We initiate a variable which will save the list of allowed
# http status code.
list_of_valid_http_code = []
for codes in [
PyFunceble.HTTP_CODE["list"]["up"],
PyFunceble.HTTP_CODE["list"]["potentially_down"],
PyFunceble.HTTP_CODE["list"]["potentially_up"],
]:
# We loop throught the list of http status code.
# We extend the list of valid with the currently read
# codes.
list_of_valid_http_code.extend(codes)
if http_code not in list_of_valid_http_code or http_code is None:
# * The extracted http code is not in the list of valid http code.
# or
# * The extracted http code is equal to `None`.
# We return 3 star in order to mention that we were not eable to extract
# the http status code.
return "*" * 3
# * The extracted http code is in the list of valid http code.
# or
# * The extracted http code is not equal to `None`.
# We return the extracted http status code.
return http_code
# The http status code extraction is activated.
# We return None.
return None | def function[get, parameter[self]]:
constant[
Return the HTTP code status.
:return: The matched and formatted status code.
:rtype: str|int|None
]
if call[name[PyFunceble].HTTP_CODE][constant[active]] begin[:]
variable[http_code] assign[=] call[name[self]._access, parameter[]]
variable[list_of_valid_http_code] assign[=] list[[]]
for taget[name[codes]] in starred[list[[<ast.Subscript object at 0x7da18c4ce2c0>, <ast.Subscript object at 0x7da18c4ce860>, <ast.Subscript object at 0x7da18c4cccd0>]]] begin[:]
call[name[list_of_valid_http_code].extend, parameter[name[codes]]]
if <ast.BoolOp object at 0x7da18c4cf490> begin[:]
return[binary_operation[constant[*] * constant[3]]]
return[name[http_code]]
return[constant[None]] | keyword[def] identifier[get] ( identifier[self] ):
literal[string]
keyword[if] identifier[PyFunceble] . identifier[HTTP_CODE] [ literal[string] ]:
identifier[http_code] = identifier[self] . identifier[_access] ()
identifier[list_of_valid_http_code] =[]
keyword[for] identifier[codes] keyword[in] [
identifier[PyFunceble] . identifier[HTTP_CODE] [ literal[string] ][ literal[string] ],
identifier[PyFunceble] . identifier[HTTP_CODE] [ literal[string] ][ literal[string] ],
identifier[PyFunceble] . identifier[HTTP_CODE] [ literal[string] ][ literal[string] ],
]:
identifier[list_of_valid_http_code] . identifier[extend] ( identifier[codes] )
keyword[if] identifier[http_code] keyword[not] keyword[in] identifier[list_of_valid_http_code] keyword[or] identifier[http_code] keyword[is] keyword[None] :
keyword[return] literal[string] * literal[int]
keyword[return] identifier[http_code]
keyword[return] keyword[None] | def get(self):
"""
Return the HTTP code status.
:return: The matched and formatted status code.
:rtype: str|int|None
"""
if PyFunceble.HTTP_CODE['active']:
# The http status code extraction is activated.
# We get the http status code.
http_code = self._access()
# We initiate a variable which will save the list of allowed
# http status code.
list_of_valid_http_code = []
for codes in [PyFunceble.HTTP_CODE['list']['up'], PyFunceble.HTTP_CODE['list']['potentially_down'], PyFunceble.HTTP_CODE['list']['potentially_up']]:
# We loop throught the list of http status code.
# We extend the list of valid with the currently read
# codes.
list_of_valid_http_code.extend(codes) # depends on [control=['for'], data=['codes']]
if http_code not in list_of_valid_http_code or http_code is None:
# * The extracted http code is not in the list of valid http code.
# or
# * The extracted http code is equal to `None`.
# We return 3 star in order to mention that we were not eable to extract
# the http status code.
return '*' * 3 # depends on [control=['if'], data=[]]
# * The extracted http code is in the list of valid http code.
# or
# * The extracted http code is not equal to `None`.
# We return the extracted http status code.
return http_code # depends on [control=['if'], data=[]]
# The http status code extraction is activated.
# We return None.
return None |
def _parse_study(self, fname, node_types):
"""Parse study or assay row oriented file around the supplied base node.
"""
if not os.path.exists(os.path.join(self._dir, fname)):
return None
nodes = {}
with open(os.path.join(self._dir, fname), "rU") as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
header = self._swap_synonyms(next(reader))
hgroups = self._collapse_header(header)
htypes = self._characterize_header(header, hgroups)
for node_type in node_types:
try:
name_index = header.index(node_type)
except ValueError:
name_index = None
if name_index is None:
#print "Could not find standard header name: %s in %s" \
# % (node_type, header)
continue
in_handle.seek(0, 0)
for line in reader:
name = line[name_index]
#to deal with same name used for different node types (e.g. Source Name and Sample Name using the same string)
node_index = self._build_node_index(node_type,name)
#skip the header line and empty lines
if name in header:
continue
if (not name):
continue
try:
node = nodes[node_index]
except KeyError:
#print("creating node ", name, " index", node_index)
node = NodeRecord(name, node_type)
node.metadata = collections.defaultdict(set)
nodes[node_index] = node
attrs = self._line_keyvals(line, header, hgroups, htypes, node.metadata)
nodes[node_index].metadata = attrs
return dict([(k, self._finalize_metadata(v)) for k, v in nodes.items()]) | def function[_parse_study, parameter[self, fname, node_types]]:
constant[Parse study or assay row oriented file around the supplied base node.
]
if <ast.UnaryOp object at 0x7da204344280> begin[:]
return[constant[None]]
variable[nodes] assign[=] dictionary[[], []]
with call[name[open], parameter[call[name[os].path.join, parameter[name[self]._dir, name[fname]]], constant[rU]]] begin[:]
variable[reader] assign[=] call[name[csv].reader, parameter[name[in_handle]]]
variable[header] assign[=] call[name[self]._swap_synonyms, parameter[call[name[next], parameter[name[reader]]]]]
variable[hgroups] assign[=] call[name[self]._collapse_header, parameter[name[header]]]
variable[htypes] assign[=] call[name[self]._characterize_header, parameter[name[header], name[hgroups]]]
for taget[name[node_type]] in starred[name[node_types]] begin[:]
<ast.Try object at 0x7da204344d00>
if compare[name[name_index] is constant[None]] begin[:]
continue
call[name[in_handle].seek, parameter[constant[0], constant[0]]]
for taget[name[line]] in starred[name[reader]] begin[:]
variable[name] assign[=] call[name[line]][name[name_index]]
variable[node_index] assign[=] call[name[self]._build_node_index, parameter[name[node_type], name[name]]]
if compare[name[name] in name[header]] begin[:]
continue
if <ast.UnaryOp object at 0x7da204344fd0> begin[:]
continue
<ast.Try object at 0x7da204346320>
return[call[name[dict], parameter[<ast.ListComp object at 0x7da204347c10>]]] | keyword[def] identifier[_parse_study] ( identifier[self] , identifier[fname] , identifier[node_types] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_dir] , identifier[fname] )):
keyword[return] keyword[None]
identifier[nodes] ={}
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_dir] , identifier[fname] ), literal[string] ) keyword[as] identifier[in_handle] :
identifier[reader] = identifier[csv] . identifier[reader] ( identifier[in_handle] , identifier[dialect] = literal[string] )
identifier[header] = identifier[self] . identifier[_swap_synonyms] ( identifier[next] ( identifier[reader] ))
identifier[hgroups] = identifier[self] . identifier[_collapse_header] ( identifier[header] )
identifier[htypes] = identifier[self] . identifier[_characterize_header] ( identifier[header] , identifier[hgroups] )
keyword[for] identifier[node_type] keyword[in] identifier[node_types] :
keyword[try] :
identifier[name_index] = identifier[header] . identifier[index] ( identifier[node_type] )
keyword[except] identifier[ValueError] :
identifier[name_index] = keyword[None]
keyword[if] identifier[name_index] keyword[is] keyword[None] :
keyword[continue]
identifier[in_handle] . identifier[seek] ( literal[int] , literal[int] )
keyword[for] identifier[line] keyword[in] identifier[reader] :
identifier[name] = identifier[line] [ identifier[name_index] ]
identifier[node_index] = identifier[self] . identifier[_build_node_index] ( identifier[node_type] , identifier[name] )
keyword[if] identifier[name] keyword[in] identifier[header] :
keyword[continue]
keyword[if] ( keyword[not] identifier[name] ):
keyword[continue]
keyword[try] :
identifier[node] = identifier[nodes] [ identifier[node_index] ]
keyword[except] identifier[KeyError] :
identifier[node] = identifier[NodeRecord] ( identifier[name] , identifier[node_type] )
identifier[node] . identifier[metadata] = identifier[collections] . identifier[defaultdict] ( identifier[set] )
identifier[nodes] [ identifier[node_index] ]= identifier[node]
identifier[attrs] = identifier[self] . identifier[_line_keyvals] ( identifier[line] , identifier[header] , identifier[hgroups] , identifier[htypes] , identifier[node] . identifier[metadata] )
identifier[nodes] [ identifier[node_index] ]. identifier[metadata] = identifier[attrs]
keyword[return] identifier[dict] ([( identifier[k] , identifier[self] . identifier[_finalize_metadata] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[nodes] . identifier[items] ()]) | def _parse_study(self, fname, node_types):
"""Parse study or assay row oriented file around the supplied base node.
"""
if not os.path.exists(os.path.join(self._dir, fname)):
return None # depends on [control=['if'], data=[]]
nodes = {}
with open(os.path.join(self._dir, fname), 'rU') as in_handle:
reader = csv.reader(in_handle, dialect='excel-tab')
header = self._swap_synonyms(next(reader))
hgroups = self._collapse_header(header)
htypes = self._characterize_header(header, hgroups)
for node_type in node_types:
try:
name_index = header.index(node_type) # depends on [control=['try'], data=[]]
except ValueError:
name_index = None # depends on [control=['except'], data=[]]
if name_index is None:
#print "Could not find standard header name: %s in %s" \
# % (node_type, header)
continue # depends on [control=['if'], data=[]]
in_handle.seek(0, 0)
for line in reader:
name = line[name_index]
#to deal with same name used for different node types (e.g. Source Name and Sample Name using the same string)
node_index = self._build_node_index(node_type, name)
#skip the header line and empty lines
if name in header:
continue # depends on [control=['if'], data=[]]
if not name:
continue # depends on [control=['if'], data=[]]
try:
node = nodes[node_index] # depends on [control=['try'], data=[]]
except KeyError:
#print("creating node ", name, " index", node_index)
node = NodeRecord(name, node_type)
node.metadata = collections.defaultdict(set)
nodes[node_index] = node
attrs = self._line_keyvals(line, header, hgroups, htypes, node.metadata)
nodes[node_index].metadata = attrs # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['for'], data=['node_type']] # depends on [control=['with'], data=['in_handle']]
return dict([(k, self._finalize_metadata(v)) for (k, v) in nodes.items()]) |
def _get_cols(fields, schema):
""" Get column metadata for Google Charts based on field list and schema. """
typemap = {
'STRING': 'string',
'INT64': 'number',
'INTEGER': 'number',
'FLOAT': 'number',
'FLOAT64': 'number',
'BOOL': 'boolean',
'BOOLEAN': 'boolean',
'DATE': 'date',
'TIME': 'timeofday',
'DATETIME': 'datetime',
'TIMESTAMP': 'datetime'
}
cols = []
for col in fields:
if schema:
f = schema[col]
t = 'string' if f.mode == 'REPEATED' else typemap.get(f.data_type, 'string')
cols.append({'id': f.name, 'label': f.name, 'type': t})
else:
# This will only happen if we had no rows to infer a schema from, so the type
# is not really important, except that GCharts will choke if we pass such a schema
# to a chart if it is string x string so we default to number.
cols.append({'id': col, 'label': col, 'type': 'number'})
return cols | def function[_get_cols, parameter[fields, schema]]:
constant[ Get column metadata for Google Charts based on field list and schema. ]
variable[typemap] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344a60>, <ast.Constant object at 0x7da1b2344a00>, <ast.Constant object at 0x7da1b23479d0>, <ast.Constant object at 0x7da1b23475e0>, <ast.Constant object at 0x7da1b2347c10>, <ast.Constant object at 0x7da1b2344bb0>, <ast.Constant object at 0x7da1b2345090>, <ast.Constant object at 0x7da1b23479a0>, <ast.Constant object at 0x7da1b2345300>, <ast.Constant object at 0x7da1b2346020>, <ast.Constant object at 0x7da1b2346fb0>], [<ast.Constant object at 0x7da1b23471f0>, <ast.Constant object at 0x7da1b2344370>, <ast.Constant object at 0x7da1b2346d40>, <ast.Constant object at 0x7da1b23441c0>, <ast.Constant object at 0x7da1b2347a90>, <ast.Constant object at 0x7da1b23453c0>, <ast.Constant object at 0x7da1b2344250>, <ast.Constant object at 0x7da1b2346290>, <ast.Constant object at 0x7da1b2346110>, <ast.Constant object at 0x7da1b2347b50>, <ast.Constant object at 0x7da1b2347cd0>]]
variable[cols] assign[=] list[[]]
for taget[name[col]] in starred[name[fields]] begin[:]
if name[schema] begin[:]
variable[f] assign[=] call[name[schema]][name[col]]
variable[t] assign[=] <ast.IfExp object at 0x7da1b2346c80>
call[name[cols].append, parameter[dictionary[[<ast.Constant object at 0x7da18dc06500>, <ast.Constant object at 0x7da18dc04550>, <ast.Constant object at 0x7da18dc06aa0>], [<ast.Attribute object at 0x7da18dc04430>, <ast.Attribute object at 0x7da18dc059f0>, <ast.Name object at 0x7da18dc07eb0>]]]]
return[name[cols]] | keyword[def] identifier[_get_cols] ( identifier[fields] , identifier[schema] ):
literal[string]
identifier[typemap] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[cols] =[]
keyword[for] identifier[col] keyword[in] identifier[fields] :
keyword[if] identifier[schema] :
identifier[f] = identifier[schema] [ identifier[col] ]
identifier[t] = literal[string] keyword[if] identifier[f] . identifier[mode] == literal[string] keyword[else] identifier[typemap] . identifier[get] ( identifier[f] . identifier[data_type] , literal[string] )
identifier[cols] . identifier[append] ({ literal[string] : identifier[f] . identifier[name] , literal[string] : identifier[f] . identifier[name] , literal[string] : identifier[t] })
keyword[else] :
identifier[cols] . identifier[append] ({ literal[string] : identifier[col] , literal[string] : identifier[col] , literal[string] : literal[string] })
keyword[return] identifier[cols] | def _get_cols(fields, schema):
""" Get column metadata for Google Charts based on field list and schema. """
typemap = {'STRING': 'string', 'INT64': 'number', 'INTEGER': 'number', 'FLOAT': 'number', 'FLOAT64': 'number', 'BOOL': 'boolean', 'BOOLEAN': 'boolean', 'DATE': 'date', 'TIME': 'timeofday', 'DATETIME': 'datetime', 'TIMESTAMP': 'datetime'}
cols = []
for col in fields:
if schema:
f = schema[col]
t = 'string' if f.mode == 'REPEATED' else typemap.get(f.data_type, 'string')
cols.append({'id': f.name, 'label': f.name, 'type': t}) # depends on [control=['if'], data=[]]
else:
# This will only happen if we had no rows to infer a schema from, so the type
# is not really important, except that GCharts will choke if we pass such a schema
# to a chart if it is string x string so we default to number.
cols.append({'id': col, 'label': col, 'type': 'number'}) # depends on [control=['for'], data=['col']]
return cols |
def _check_cmd(call):
'''
Check the output of the cmd.run_all function call.
'''
if call['retcode'] != 0:
comment = ''
std_err = call.get('stderr')
std_out = call.get('stdout')
if std_err:
comment += std_err
if std_out:
comment += std_out
raise CommandExecutionError('Error running command: {0}'.format(comment))
return call | def function[_check_cmd, parameter[call]]:
constant[
Check the output of the cmd.run_all function call.
]
if compare[call[name[call]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
variable[comment] assign[=] constant[]
variable[std_err] assign[=] call[name[call].get, parameter[constant[stderr]]]
variable[std_out] assign[=] call[name[call].get, parameter[constant[stdout]]]
if name[std_err] begin[:]
<ast.AugAssign object at 0x7da20c6c4fa0>
if name[std_out] begin[:]
<ast.AugAssign object at 0x7da20c6c77c0>
<ast.Raise object at 0x7da20c6c5600>
return[name[call]] | keyword[def] identifier[_check_cmd] ( identifier[call] ):
literal[string]
keyword[if] identifier[call] [ literal[string] ]!= literal[int] :
identifier[comment] = literal[string]
identifier[std_err] = identifier[call] . identifier[get] ( literal[string] )
identifier[std_out] = identifier[call] . identifier[get] ( literal[string] )
keyword[if] identifier[std_err] :
identifier[comment] += identifier[std_err]
keyword[if] identifier[std_out] :
identifier[comment] += identifier[std_out]
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[comment] ))
keyword[return] identifier[call] | def _check_cmd(call):
"""
Check the output of the cmd.run_all function call.
"""
if call['retcode'] != 0:
comment = ''
std_err = call.get('stderr')
std_out = call.get('stdout')
if std_err:
comment += std_err # depends on [control=['if'], data=[]]
if std_out:
comment += std_out # depends on [control=['if'], data=[]]
raise CommandExecutionError('Error running command: {0}'.format(comment)) # depends on [control=['if'], data=[]]
return call |
def keelhaul(rest):
"Inflict great pain and embarassment on some(one|thing)"
keelee = rest
karma.Karma.store.change(keelee, -1)
return (
"/me straps %s to a dirty rope, tosses 'em overboard and pulls "
"with great speed. Yarrr!" % keelee) | def function[keelhaul, parameter[rest]]:
constant[Inflict great pain and embarassment on some(one|thing)]
variable[keelee] assign[=] name[rest]
call[name[karma].Karma.store.change, parameter[name[keelee], <ast.UnaryOp object at 0x7da1b0399990>]]
return[binary_operation[constant[/me straps %s to a dirty rope, tosses 'em overboard and pulls with great speed. Yarrr!] <ast.Mod object at 0x7da2590d6920> name[keelee]]] | keyword[def] identifier[keelhaul] ( identifier[rest] ):
literal[string]
identifier[keelee] = identifier[rest]
identifier[karma] . identifier[Karma] . identifier[store] . identifier[change] ( identifier[keelee] ,- literal[int] )
keyword[return] (
literal[string]
literal[string] % identifier[keelee] ) | def keelhaul(rest):
"""Inflict great pain and embarassment on some(one|thing)"""
keelee = rest
karma.Karma.store.change(keelee, -1)
return "/me straps %s to a dirty rope, tosses 'em overboard and pulls with great speed. Yarrr!" % keelee |
def try_create_dir(self, path):
"""
Try to create a directory if it doesn't exist and raise error if there is a non-directory with the same name.
:param path: str path to the directory
"""
if not os.path.exists(path):
os.mkdir(path)
elif not os.path.isdir(path):
ValueError("Unable to create directory:" + path + " because a file already exists with the same name.") | def function[try_create_dir, parameter[self, path]]:
constant[
Try to create a directory if it doesn't exist and raise error if there is a non-directory with the same name.
:param path: str path to the directory
]
if <ast.UnaryOp object at 0x7da1b1a5d720> begin[:]
call[name[os].mkdir, parameter[name[path]]] | keyword[def] identifier[try_create_dir] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[os] . identifier[mkdir] ( identifier[path] )
keyword[elif] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
identifier[ValueError] ( literal[string] + identifier[path] + literal[string] ) | def try_create_dir(self, path):
"""
Try to create a directory if it doesn't exist and raise error if there is a non-directory with the same name.
:param path: str path to the directory
"""
if not os.path.exists(path):
os.mkdir(path) # depends on [control=['if'], data=[]]
elif not os.path.isdir(path):
ValueError('Unable to create directory:' + path + ' because a file already exists with the same name.') # depends on [control=['if'], data=[]] |
def output_file(self):
"""
If only one output file return it. Otherwise raise an exception.
"""
out_files = self.output_files
if len(out_files) != 1:
err_msg = "output_file property is only valid if there is a single"
err_msg += " output file. Here there are "
err_msg += "%d output files." %(len(out_files))
raise ValueError(err_msg)
return out_files[0] | def function[output_file, parameter[self]]:
constant[
If only one output file return it. Otherwise raise an exception.
]
variable[out_files] assign[=] name[self].output_files
if compare[call[name[len], parameter[name[out_files]]] not_equal[!=] constant[1]] begin[:]
variable[err_msg] assign[=] constant[output_file property is only valid if there is a single]
<ast.AugAssign object at 0x7da2041d9690>
<ast.AugAssign object at 0x7da2041db6d0>
<ast.Raise object at 0x7da2041dbac0>
return[call[name[out_files]][constant[0]]] | keyword[def] identifier[output_file] ( identifier[self] ):
literal[string]
identifier[out_files] = identifier[self] . identifier[output_files]
keyword[if] identifier[len] ( identifier[out_files] )!= literal[int] :
identifier[err_msg] = literal[string]
identifier[err_msg] += literal[string]
identifier[err_msg] += literal[string] %( identifier[len] ( identifier[out_files] ))
keyword[raise] identifier[ValueError] ( identifier[err_msg] )
keyword[return] identifier[out_files] [ literal[int] ] | def output_file(self):
"""
If only one output file return it. Otherwise raise an exception.
"""
out_files = self.output_files
if len(out_files) != 1:
err_msg = 'output_file property is only valid if there is a single'
err_msg += ' output file. Here there are '
err_msg += '%d output files.' % len(out_files)
raise ValueError(err_msg) # depends on [control=['if'], data=[]]
return out_files[0] |
def args_from_interpreter_flags():
"""
Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions.
"""
flag_opt_map = {
'debug': 'd',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
'optimize': 'O',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag, 0)
if v > 0:
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args | def function[args_from_interpreter_flags, parameter[]]:
constant[
Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions.
]
variable[flag_opt_map] assign[=] dictionary[[<ast.Constant object at 0x7da1b12b62c0>, <ast.Constant object at 0x7da1b12b5870>, <ast.Constant object at 0x7da1b12b7c70>, <ast.Constant object at 0x7da1b12b55a0>, <ast.Constant object at 0x7da1b12b6050>, <ast.Constant object at 0x7da1b12b57e0>, <ast.Constant object at 0x7da1b12b5e70>, <ast.Constant object at 0x7da1b12b4790>, <ast.Constant object at 0x7da1b12b4430>], [<ast.Constant object at 0x7da1b12b60b0>, <ast.Constant object at 0x7da1b12b4460>, <ast.Constant object at 0x7da1b12b5540>, <ast.Constant object at 0x7da1b12b5ea0>, <ast.Constant object at 0x7da1b12b5990>, <ast.Constant object at 0x7da1b12b5510>, <ast.Constant object at 0x7da1b12b48b0>, <ast.Constant object at 0x7da1b12b55d0>, <ast.Constant object at 0x7da1b12b40a0>]]
variable[args] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b12b6830>, <ast.Name object at 0x7da1b11392a0>]]] in starred[call[name[flag_opt_map].items, parameter[]]] begin[:]
variable[v] assign[=] call[name[getattr], parameter[name[sys].flags, name[flag], constant[0]]]
if compare[name[v] greater[>] constant[0]] begin[:]
call[name[args].append, parameter[binary_operation[constant[-] + binary_operation[name[opt] * name[v]]]]]
for taget[name[opt]] in starred[name[sys].warnoptions] begin[:]
call[name[args].append, parameter[binary_operation[constant[-W] + name[opt]]]]
return[name[args]] | keyword[def] identifier[args_from_interpreter_flags] ():
literal[string]
identifier[flag_opt_map] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[args] =[]
keyword[for] identifier[flag] , identifier[opt] keyword[in] identifier[flag_opt_map] . identifier[items] ():
identifier[v] = identifier[getattr] ( identifier[sys] . identifier[flags] , identifier[flag] , literal[int] )
keyword[if] identifier[v] > literal[int] :
identifier[args] . identifier[append] ( literal[string] + identifier[opt] * identifier[v] )
keyword[for] identifier[opt] keyword[in] identifier[sys] . identifier[warnoptions] :
identifier[args] . identifier[append] ( literal[string] + identifier[opt] )
keyword[return] identifier[args] | def args_from_interpreter_flags():
"""
Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions.
"""
flag_opt_map = {'debug': 'd', 'dont_write_bytecode': 'B', 'no_user_site': 's', 'no_site': 'S', 'ignore_environment': 'E', 'verbose': 'v', 'bytes_warning': 'b', 'quiet': 'q', 'optimize': 'O'}
args = []
for (flag, opt) in flag_opt_map.items():
v = getattr(sys.flags, flag, 0)
if v > 0:
args.append('-' + opt * v) # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=[]]
for opt in sys.warnoptions:
args.append('-W' + opt) # depends on [control=['for'], data=['opt']]
return args |
def multi_replace(str_, search_list, repl_list):
r"""
Performs multiple replace functions foreach item in search_list and
repl_list.
Args:
str_ (str): string to search
search_list (list): list of search strings
repl_list (list or str): one or multiple replace strings
Returns:
str: str_
CommandLine:
python -m utool.util_str --exec-multi_replace
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> str_ = 'foo. bar: baz; spam-eggs --- eggs+spam'
>>> search_list = ['.', ':', '---']
>>> repl_list = '@'
>>> str_ = multi_replace(str_, search_list, repl_list)
>>> result = ('str_ = %s' % (str(str_),))
>>> print(result)
str_ = foo@ bar@ baz; spam-eggs @ eggs+spam
"""
if isinstance(repl_list, six.string_types):
repl_list_ = [repl_list] * len(search_list)
else:
repl_list_ = repl_list
newstr = str_
assert len(search_list) == len(repl_list_), 'bad lens'
for search, repl in zip(search_list, repl_list_):
newstr = newstr.replace(search, repl)
return newstr | def function[multi_replace, parameter[str_, search_list, repl_list]]:
constant[
Performs multiple replace functions foreach item in search_list and
repl_list.
Args:
str_ (str): string to search
search_list (list): list of search strings
repl_list (list or str): one or multiple replace strings
Returns:
str: str_
CommandLine:
python -m utool.util_str --exec-multi_replace
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> str_ = 'foo. bar: baz; spam-eggs --- eggs+spam'
>>> search_list = ['.', ':', '---']
>>> repl_list = '@'
>>> str_ = multi_replace(str_, search_list, repl_list)
>>> result = ('str_ = %s' % (str(str_),))
>>> print(result)
str_ = foo@ bar@ baz; spam-eggs @ eggs+spam
]
if call[name[isinstance], parameter[name[repl_list], name[six].string_types]] begin[:]
variable[repl_list_] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b24c5300>]] * call[name[len], parameter[name[search_list]]]]
variable[newstr] assign[=] name[str_]
assert[compare[call[name[len], parameter[name[search_list]]] equal[==] call[name[len], parameter[name[repl_list_]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b24c73d0>, <ast.Name object at 0x7da1b24c49d0>]]] in starred[call[name[zip], parameter[name[search_list], name[repl_list_]]]] begin[:]
variable[newstr] assign[=] call[name[newstr].replace, parameter[name[search], name[repl]]]
return[name[newstr]] | keyword[def] identifier[multi_replace] ( identifier[str_] , identifier[search_list] , identifier[repl_list] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[repl_list] , identifier[six] . identifier[string_types] ):
identifier[repl_list_] =[ identifier[repl_list] ]* identifier[len] ( identifier[search_list] )
keyword[else] :
identifier[repl_list_] = identifier[repl_list]
identifier[newstr] = identifier[str_]
keyword[assert] identifier[len] ( identifier[search_list] )== identifier[len] ( identifier[repl_list_] ), literal[string]
keyword[for] identifier[search] , identifier[repl] keyword[in] identifier[zip] ( identifier[search_list] , identifier[repl_list_] ):
identifier[newstr] = identifier[newstr] . identifier[replace] ( identifier[search] , identifier[repl] )
keyword[return] identifier[newstr] | def multi_replace(str_, search_list, repl_list):
"""
Performs multiple replace functions foreach item in search_list and
repl_list.
Args:
str_ (str): string to search
search_list (list): list of search strings
repl_list (list or str): one or multiple replace strings
Returns:
str: str_
CommandLine:
python -m utool.util_str --exec-multi_replace
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> str_ = 'foo. bar: baz; spam-eggs --- eggs+spam'
>>> search_list = ['.', ':', '---']
>>> repl_list = '@'
>>> str_ = multi_replace(str_, search_list, repl_list)
>>> result = ('str_ = %s' % (str(str_),))
>>> print(result)
str_ = foo@ bar@ baz; spam-eggs @ eggs+spam
"""
if isinstance(repl_list, six.string_types):
repl_list_ = [repl_list] * len(search_list) # depends on [control=['if'], data=[]]
else:
repl_list_ = repl_list
newstr = str_
assert len(search_list) == len(repl_list_), 'bad lens'
for (search, repl) in zip(search_list, repl_list_):
newstr = newstr.replace(search, repl) # depends on [control=['for'], data=[]]
return newstr |
def value(self):
""" Return current value for the metric """
if self.buffer:
return np.quantile(self.buffer, self.quantile)
else:
return 0.0 | def function[value, parameter[self]]:
constant[ Return current value for the metric ]
if name[self].buffer begin[:]
return[call[name[np].quantile, parameter[name[self].buffer, name[self].quantile]]] | keyword[def] identifier[value] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[buffer] :
keyword[return] identifier[np] . identifier[quantile] ( identifier[self] . identifier[buffer] , identifier[self] . identifier[quantile] )
keyword[else] :
keyword[return] literal[int] | def value(self):
""" Return current value for the metric """
if self.buffer:
return np.quantile(self.buffer, self.quantile) # depends on [control=['if'], data=[]]
else:
return 0.0 |
def describe(vpc_id=None, vpc_name=None, region=None, key=None,
keyid=None, profile=None):
'''
Given a VPC ID describe its properties.
Returns a dictionary of interesting properties.
.. versionchanged:: 2015.8.0
Added vpc_name argument
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe vpc_id=vpc-123456
salt myminion boto_vpc.describe vpc_name=myvpc
'''
if not any((vpc_id, vpc_name)):
raise SaltInvocationError('A valid vpc id or name needs to be specified.')
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
except BotoServerError as err:
boto_err = __utils__['boto.get_error'](err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return None.
return {'vpc': None}
return {'error': boto_err}
if not vpc_id:
return {'vpc': None}
filter_parameters = {'vpc_ids': vpc_id}
try:
vpcs = conn.get_all_vpcs(**filter_parameters)
except BotoServerError as err:
return {'error': __utils__['boto.get_error'](err)}
if vpcs:
vpc = vpcs[0] # Found!
log.debug('Found VPC: %s', vpc.id)
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
'dhcp_options_id', 'instance_tenancy')
_r = dict([(k, getattr(vpc, k)) for k in keys])
_r.update({'region': getattr(vpc, 'region').name})
return {'vpc': _r}
else:
return {'vpc': None} | def function[describe, parameter[vpc_id, vpc_name, region, key, keyid, profile]]:
constant[
Given a VPC ID describe its properties.
Returns a dictionary of interesting properties.
.. versionchanged:: 2015.8.0
Added vpc_name argument
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe vpc_id=vpc-123456
salt myminion boto_vpc.describe vpc_name=myvpc
]
if <ast.UnaryOp object at 0x7da1b20edba0> begin[:]
<ast.Raise object at 0x7da1b20edcc0>
<ast.Try object at 0x7da1b20edd80>
if <ast.UnaryOp object at 0x7da1b20ed180> begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b20ed0c0>], [<ast.Constant object at 0x7da1b20ed090>]]]
variable[filter_parameters] assign[=] dictionary[[<ast.Constant object at 0x7da1b20ecfa0>], [<ast.Name object at 0x7da1b20ecf70>]]
<ast.Try object at 0x7da1b20ecf40>
if name[vpcs] begin[:]
variable[vpc] assign[=] call[name[vpcs]][constant[0]]
call[name[log].debug, parameter[constant[Found VPC: %s], name[vpc].id]]
variable[keys] assign[=] tuple[[<ast.Constant object at 0x7da20c6ab100>, <ast.Constant object at 0x7da20c6ab850>, <ast.Constant object at 0x7da20c6ab6d0>, <ast.Constant object at 0x7da20c6aa3e0>, <ast.Constant object at 0x7da20c6a9b10>, <ast.Constant object at 0x7da20c6a9d20>, <ast.Constant object at 0x7da20c6abf70>]]
variable[_r] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da20c6a9cf0>]]
call[name[_r].update, parameter[dictionary[[<ast.Constant object at 0x7da20c6ab790>], [<ast.Attribute object at 0x7da20c6aaef0>]]]]
return[dictionary[[<ast.Constant object at 0x7da20c6ab550>], [<ast.Name object at 0x7da20c6a9e10>]]] | keyword[def] identifier[describe] ( identifier[vpc_id] = keyword[None] , identifier[vpc_name] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] ,
identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[any] (( identifier[vpc_id] , identifier[vpc_name] )):
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
keyword[try] :
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[vpc_id] = identifier[check_vpc] ( identifier[vpc_id] , identifier[vpc_name] , identifier[region] , identifier[key] , identifier[keyid] , identifier[profile] )
keyword[except] identifier[BotoServerError] keyword[as] identifier[err] :
identifier[boto_err] = identifier[__utils__] [ literal[string] ]( identifier[err] )
keyword[if] identifier[boto_err] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] )== literal[string] :
keyword[return] { literal[string] : keyword[None] }
keyword[return] { literal[string] : identifier[boto_err] }
keyword[if] keyword[not] identifier[vpc_id] :
keyword[return] { literal[string] : keyword[None] }
identifier[filter_parameters] ={ literal[string] : identifier[vpc_id] }
keyword[try] :
identifier[vpcs] = identifier[conn] . identifier[get_all_vpcs] (** identifier[filter_parameters] )
keyword[except] identifier[BotoServerError] keyword[as] identifier[err] :
keyword[return] { literal[string] : identifier[__utils__] [ literal[string] ]( identifier[err] )}
keyword[if] identifier[vpcs] :
identifier[vpc] = identifier[vpcs] [ literal[int] ]
identifier[log] . identifier[debug] ( literal[string] , identifier[vpc] . identifier[id] )
identifier[keys] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] )
identifier[_r] = identifier[dict] ([( identifier[k] , identifier[getattr] ( identifier[vpc] , identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[keys] ])
identifier[_r] . identifier[update] ({ literal[string] : identifier[getattr] ( identifier[vpc] , literal[string] ). identifier[name] })
keyword[return] { literal[string] : identifier[_r] }
keyword[else] :
keyword[return] { literal[string] : keyword[None] } | def describe(vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None):
"""
Given a VPC ID describe its properties.
Returns a dictionary of interesting properties.
.. versionchanged:: 2015.8.0
Added vpc_name argument
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe vpc_id=vpc-123456
salt myminion boto_vpc.describe vpc_name=myvpc
"""
if not any((vpc_id, vpc_name)):
raise SaltInvocationError('A valid vpc id or name needs to be specified.') # depends on [control=['if'], data=[]]
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile) # depends on [control=['try'], data=[]]
except BotoServerError as err:
boto_err = __utils__['boto.get_error'](err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return None.
return {'vpc': None} # depends on [control=['if'], data=[]]
return {'error': boto_err} # depends on [control=['except'], data=['err']]
if not vpc_id:
return {'vpc': None} # depends on [control=['if'], data=[]]
filter_parameters = {'vpc_ids': vpc_id}
try:
vpcs = conn.get_all_vpcs(**filter_parameters) # depends on [control=['try'], data=[]]
except BotoServerError as err:
return {'error': __utils__['boto.get_error'](err)} # depends on [control=['except'], data=['err']]
if vpcs:
vpc = vpcs[0] # Found!
log.debug('Found VPC: %s', vpc.id)
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags', 'dhcp_options_id', 'instance_tenancy')
_r = dict([(k, getattr(vpc, k)) for k in keys])
_r.update({'region': getattr(vpc, 'region').name})
return {'vpc': _r} # depends on [control=['if'], data=[]]
else:
return {'vpc': None} |
def get_changes(self, extracted_name, similar=False, global_=False):
"""Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global.
"""
info = _ExtractInfo(
self.project, self.resource, self.start_offset, self.end_offset,
extracted_name, variable=self.kind == 'variable',
similar=similar, make_global=global_)
new_contents = _ExtractPerformer(info).extract()
changes = ChangeSet('Extract %s <%s>' % (self.kind,
extracted_name))
changes.add_change(ChangeContents(self.resource, new_contents))
return changes | def function[get_changes, parameter[self, extracted_name, similar, global_]]:
constant[Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global.
]
variable[info] assign[=] call[name[_ExtractInfo], parameter[name[self].project, name[self].resource, name[self].start_offset, name[self].end_offset, name[extracted_name]]]
variable[new_contents] assign[=] call[call[name[_ExtractPerformer], parameter[name[info]]].extract, parameter[]]
variable[changes] assign[=] call[name[ChangeSet], parameter[binary_operation[constant[Extract %s <%s>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da204620550>, <ast.Name object at 0x7da204621630>]]]]]
call[name[changes].add_change, parameter[call[name[ChangeContents], parameter[name[self].resource, name[new_contents]]]]]
return[name[changes]] | keyword[def] identifier[get_changes] ( identifier[self] , identifier[extracted_name] , identifier[similar] = keyword[False] , identifier[global_] = keyword[False] ):
literal[string]
identifier[info] = identifier[_ExtractInfo] (
identifier[self] . identifier[project] , identifier[self] . identifier[resource] , identifier[self] . identifier[start_offset] , identifier[self] . identifier[end_offset] ,
identifier[extracted_name] , identifier[variable] = identifier[self] . identifier[kind] == literal[string] ,
identifier[similar] = identifier[similar] , identifier[make_global] = identifier[global_] )
identifier[new_contents] = identifier[_ExtractPerformer] ( identifier[info] ). identifier[extract] ()
identifier[changes] = identifier[ChangeSet] ( literal[string] %( identifier[self] . identifier[kind] ,
identifier[extracted_name] ))
identifier[changes] . identifier[add_change] ( identifier[ChangeContents] ( identifier[self] . identifier[resource] , identifier[new_contents] ))
keyword[return] identifier[changes] | def get_changes(self, extracted_name, similar=False, global_=False):
"""Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global.
"""
info = _ExtractInfo(self.project, self.resource, self.start_offset, self.end_offset, extracted_name, variable=self.kind == 'variable', similar=similar, make_global=global_)
new_contents = _ExtractPerformer(info).extract()
changes = ChangeSet('Extract %s <%s>' % (self.kind, extracted_name))
changes.add_change(ChangeContents(self.resource, new_contents))
return changes |
def record(self):
# type: () -> bytes
'''
A method to generate the string representing this UDF File Set
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF File Set Descriptor.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Set Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16,
self.recording_date.record(), 3, 3, 1, 1,
self.file_set_num, 0, self.log_vol_char_set,
self.log_vol_ident, self.file_set_char_set,
self.file_set_ident, self.copyright_file_ident,
self.abstract_file_ident, self.root_dir_icb.record(),
self.domain_ident.record(), b'\x00' * 16,
b'\x00' * 48)[16:]
return self.desc_tag.record(rec) + rec | def function[record, parameter[self]]:
constant[
A method to generate the string representing this UDF File Set
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF File Set Descriptor.
]
if <ast.UnaryOp object at 0x7da1b0fcc3d0> begin[:]
<ast.Raise object at 0x7da1b0fcdf00>
variable[rec] assign[=] call[call[name[struct].pack, parameter[name[self].FMT, binary_operation[constant[b'\x00'] * constant[16]], call[name[self].recording_date.record, parameter[]], constant[3], constant[3], constant[1], constant[1], name[self].file_set_num, constant[0], name[self].log_vol_char_set, name[self].log_vol_ident, name[self].file_set_char_set, name[self].file_set_ident, name[self].copyright_file_ident, name[self].abstract_file_ident, call[name[self].root_dir_icb.record, parameter[]], call[name[self].domain_ident.record, parameter[]], binary_operation[constant[b'\x00'] * constant[16]], binary_operation[constant[b'\x00'] * constant[48]]]]][<ast.Slice object at 0x7da1b0fcd750>]
return[binary_operation[call[name[self].desc_tag.record, parameter[name[rec]]] + name[rec]]] | keyword[def] identifier[record] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
identifier[rec] = identifier[struct] . identifier[pack] ( identifier[self] . identifier[FMT] , literal[string] * literal[int] ,
identifier[self] . identifier[recording_date] . identifier[record] (), literal[int] , literal[int] , literal[int] , literal[int] ,
identifier[self] . identifier[file_set_num] , literal[int] , identifier[self] . identifier[log_vol_char_set] ,
identifier[self] . identifier[log_vol_ident] , identifier[self] . identifier[file_set_char_set] ,
identifier[self] . identifier[file_set_ident] , identifier[self] . identifier[copyright_file_ident] ,
identifier[self] . identifier[abstract_file_ident] , identifier[self] . identifier[root_dir_icb] . identifier[record] (),
identifier[self] . identifier[domain_ident] . identifier[record] (), literal[string] * literal[int] ,
literal[string] * literal[int] )[ literal[int] :]
keyword[return] identifier[self] . identifier[desc_tag] . identifier[record] ( identifier[rec] )+ identifier[rec] | def record(self):
# type: () -> bytes
'\n A method to generate the string representing this UDF File Set\n Descriptor.\n\n Parameters:\n None.\n Returns:\n A string representing this UDF File Set Descriptor.\n '
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Set Descriptor not initialized') # depends on [control=['if'], data=[]]
rec = struct.pack(self.FMT, b'\x00' * 16, self.recording_date.record(), 3, 3, 1, 1, self.file_set_num, 0, self.log_vol_char_set, self.log_vol_ident, self.file_set_char_set, self.file_set_ident, self.copyright_file_ident, self.abstract_file_ident, self.root_dir_icb.record(), self.domain_ident.record(), b'\x00' * 16, b'\x00' * 48)[16:]
return self.desc_tag.record(rec) + rec |
def do_video(self, args):
    """Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter"""
    # Look up the subcommand handler that argparse attached to the namespace.
    subcommand = getattr(args, 'func', None)
    if subcommand is None:
        # No subcommand given -- show the help text for 'video' instead.
        self.do_help('video')
    else:
        # Dispatch to the selected subcommand handler.
        subcommand(self, args)
constant[Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter]
variable[func] assign[=] call[name[getattr], parameter[name[args], constant[func], constant[None]]]
if compare[name[func] is_not constant[None]] begin[:]
call[name[func], parameter[name[self], name[args]]] | keyword[def] identifier[do_video] ( identifier[self] , identifier[args] ):
literal[string]
identifier[func] = identifier[getattr] ( identifier[args] , literal[string] , keyword[None] )
keyword[if] identifier[func] keyword[is] keyword[not] keyword[None] :
identifier[func] ( identifier[self] , identifier[args] )
keyword[else] :
identifier[self] . identifier[do_help] ( literal[string] ) | def do_video(self, args):
"""Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter"""
func = getattr(args, 'func', None)
if func is not None:
# Call whatever subcommand function was selected
func(self, args) # depends on [control=['if'], data=['func']]
else:
# No subcommand was provided, so call help
self.do_help('video') |
def write(self, psd_data_or_future, time_start, time_stop, samples):
    """Write PSD of one frequency hop as one CSV row to ``self.output``.

    The row format is:
    date, time, f_start, f_stop, step, samples, pwr_0, pwr_1, ...

    :param psd_data_or_future: either a ``(freqs, powers)`` pair or a
        Future resolving to one (detected by the presence of ``.result()``)
    :param time_start: hop start time -- currently unused, kept for
        interface compatibility with other writers
    :param time_stop: hop stop time; its date/time stamp the row
    :param samples: number of samples the PSD was computed from
    """
    try:
        # Wait for the result when a Future was handed in
        f_array, pwr_array = psd_data_or_future.result()
    except AttributeError:
        # Plain (freqs, powers) tuple was passed directly
        f_array, pwr_array = psd_data_or_future

    try:
        # Frequency bin width; f_stop is reported one step past the last bin
        step = f_array[1] - f_array[0]
        row = [
            time_stop.strftime('%Y-%m-%d'), time_stop.strftime('%H:%M:%S'),
            f_array[0], f_array[-1] + step, step, samples
        ]
        row += list(pwr_array)
        self.output.write('{}\n'.format(', '.join(str(x) for x in row)))
        self.output.flush()
    except Exception:
        # Best-effort writer: log the failure and keep scanning
        # (previously bound the exception to an unused name `e`)
        logging.exception('Error writing to output file:')
constant[Write PSD of one frequency hop]
<ast.Try object at 0x7da204620670>
<ast.Try object at 0x7da204621930> | keyword[def] identifier[write] ( identifier[self] , identifier[psd_data_or_future] , identifier[time_start] , identifier[time_stop] , identifier[samples] ):
literal[string]
keyword[try] :
identifier[f_array] , identifier[pwr_array] = identifier[psd_data_or_future] . identifier[result] ()
keyword[except] identifier[AttributeError] :
identifier[f_array] , identifier[pwr_array] = identifier[psd_data_or_future]
keyword[try] :
identifier[step] = identifier[f_array] [ literal[int] ]- identifier[f_array] [ literal[int] ]
identifier[row] =[
identifier[time_stop] . identifier[strftime] ( literal[string] ), identifier[time_stop] . identifier[strftime] ( literal[string] ),
identifier[f_array] [ literal[int] ], identifier[f_array] [- literal[int] ]+ identifier[step] , identifier[step] , identifier[samples]
]
identifier[row] += identifier[list] ( identifier[pwr_array] )
identifier[self] . identifier[output] . identifier[write] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[row] )))
identifier[self] . identifier[output] . identifier[flush] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[exception] ( literal[string] ) | def write(self, psd_data_or_future, time_start, time_stop, samples):
"""Write PSD of one frequency hop"""
try:
# Wait for result of future
(f_array, pwr_array) = psd_data_or_future.result() # depends on [control=['try'], data=[]]
except AttributeError:
(f_array, pwr_array) = psd_data_or_future # depends on [control=['except'], data=[]]
try:
step = f_array[1] - f_array[0]
row = [time_stop.strftime('%Y-%m-%d'), time_stop.strftime('%H:%M:%S'), f_array[0], f_array[-1] + step, step, samples]
row += list(pwr_array)
self.output.write('{}\n'.format(', '.join((str(x) for x in row))))
self.output.flush() # depends on [control=['try'], data=[]]
except Exception as e:
logging.exception('Error writing to output file:') # depends on [control=['except'], data=[]] |
def _site_percolation(network, pmask):
    r"""
    This private method is called by 'find_clusters'
    """
    # A throat conducts only when the pores at BOTH of its ends are
    # occupied, so fancy-index the pore mask by the throat endpoints.
    endpoint_occupancy = pmask[network['throat.conns']]
    tmask = sp.all(endpoint_occupancy, axis=1)

    # Cluster the percolating throats with scipy's sparse graph tools
    adj = network.create_adjacency_matrix(weights=tmask, fmt='csr',
                                          drop_zeros=True)
    labels = sprs.csgraph.connected_components(csgraph=adj,
                                               directed=False)[1]

    # Shift labels so non-invaded pores end up as -1; this also assigns a
    # cluster number to single isolated invaded pores
    p_clusters = (labels + 1) * pmask - 1

    # A throat inherits a cluster label only when both end pores agree;
    # otherwise it is marked non-invaded (-1)
    end_labels = labels[network['throat.conns']]
    agree = end_labels[:, 0] == end_labels[:, 1]
    t_clusters = end_labels[:, 0]
    t_clusters[~agree] = -1
    return (p_clusters, t_clusters)
constant[
This private method is called by 'find_clusters'
]
variable[conns] assign[=] call[name[sp].copy, parameter[call[name[network]][constant[throat.conns]]]]
call[name[conns]][tuple[[<ast.Slice object at 0x7da18f00d1b0>, <ast.Constant object at 0x7da18f00d960>]]] assign[=] call[name[pmask]][call[name[conns]][tuple[[<ast.Slice object at 0x7da18f00e5f0>, <ast.Constant object at 0x7da18f00ce50>]]]]
call[name[conns]][tuple[[<ast.Slice object at 0x7da18f00ed40>, <ast.Constant object at 0x7da18f00c0a0>]]] assign[=] call[name[pmask]][call[name[conns]][tuple[[<ast.Slice object at 0x7da18f00dea0>, <ast.Constant object at 0x7da18f00dd50>]]]]
variable[tmask] assign[=] call[name[sp].all, parameter[name[conns]]]
variable[csr] assign[=] call[name[network].create_adjacency_matrix, parameter[]]
variable[clusters] assign[=] call[call[name[sprs].csgraph.connected_components, parameter[]]][constant[1]]
variable[p_clusters] assign[=] binary_operation[binary_operation[binary_operation[name[clusters] + constant[1]] * name[pmask]] - constant[1]]
variable[t_clusters] assign[=] call[name[clusters]][call[name[network]][constant[throat.conns]]]
variable[ind] assign[=] compare[call[name[t_clusters]][tuple[[<ast.Slice object at 0x7da207f99db0>, <ast.Constant object at 0x7da207f994e0>]]] equal[==] call[name[t_clusters]][tuple[[<ast.Slice object at 0x7da207f99510>, <ast.Constant object at 0x7da207f99ba0>]]]]
variable[t_clusters] assign[=] call[name[t_clusters]][tuple[[<ast.Slice object at 0x7da1b26ac7f0>, <ast.Constant object at 0x7da1b26af8b0>]]]
call[name[t_clusters]][<ast.UnaryOp object at 0x7da1b26af9d0>] assign[=] <ast.UnaryOp object at 0x7da1b26ad810>
return[tuple[[<ast.Name object at 0x7da1b26ad900>, <ast.Name object at 0x7da1b26ae5c0>]]] | keyword[def] identifier[_site_percolation] ( identifier[network] , identifier[pmask] ):
literal[string]
identifier[conns] = identifier[sp] . identifier[copy] ( identifier[network] [ literal[string] ])
identifier[conns] [:, literal[int] ]= identifier[pmask] [ identifier[conns] [:, literal[int] ]]
identifier[conns] [:, literal[int] ]= identifier[pmask] [ identifier[conns] [:, literal[int] ]]
identifier[tmask] = identifier[sp] . identifier[all] ( identifier[conns] , identifier[axis] = literal[int] )
identifier[csr] = identifier[network] . identifier[create_adjacency_matrix] ( identifier[weights] = identifier[tmask] , identifier[fmt] = literal[string] ,
identifier[drop_zeros] = keyword[True] )
identifier[clusters] = identifier[sprs] . identifier[csgraph] . identifier[connected_components] ( identifier[csgraph] = identifier[csr] ,
identifier[directed] = keyword[False] )[ literal[int] ]
identifier[p_clusters] =( identifier[clusters] + literal[int] )*( identifier[pmask] )- literal[int]
identifier[t_clusters] = identifier[clusters] [ identifier[network] [ literal[string] ]]
identifier[ind] =( identifier[t_clusters] [:, literal[int] ]== identifier[t_clusters] [:, literal[int] ])
identifier[t_clusters] = identifier[t_clusters] [:, literal[int] ]
identifier[t_clusters] [~ identifier[ind] ]=- literal[int]
keyword[return] ( identifier[p_clusters] , identifier[t_clusters] ) | def _site_percolation(network, pmask):
"""
This private method is called by 'find_clusters'
"""
# Find throats that produce site percolation
conns = sp.copy(network['throat.conns'])
conns[:, 0] = pmask[conns[:, 0]]
conns[:, 1] = pmask[conns[:, 1]]
# Only if both pores are True is the throat set to True
tmask = sp.all(conns, axis=1)
# Perform the clustering using scipy.csgraph
csr = network.create_adjacency_matrix(weights=tmask, fmt='csr', drop_zeros=True)
clusters = sprs.csgraph.connected_components(csgraph=csr, directed=False)[1]
# Adjust cluster numbers such that non-invaded pores are labelled -1
# Note: The following line also takes care of assigning cluster numbers
# to single isolated invaded pores
p_clusters = (clusters + 1) * pmask - 1
# Label invaded throats with their neighboring pore's label
t_clusters = clusters[network['throat.conns']]
ind = t_clusters[:, 0] == t_clusters[:, 1]
t_clusters = t_clusters[:, 0]
# Label non-invaded throats with -1
t_clusters[~ind] = -1
return (p_clusters, t_clusters) |
def load_mpii_pose_dataset(path='data', is_16_pos_only=False):
    """Load MPII Human Pose Dataset.

    Parameters
    -----------
    path : str
        The path that the data is downloaded to.
    is_16_pos_only : boolean
        If True, only return the peoples contain 16 pose keypoints. (Usually be used for single person pose estimation)

    Returns
    ----------
    img_train_list : list of str
        The image directories of training data.
    ann_train_list : list of dict
        The annotations of training data.
    img_test_list : list of str
        The image directories of testing data.
    ann_test_list : list of dict
        The annotations of testing data.

    Examples
    --------
    >>> import pprint
    >>> import tensorlayer as tl
    >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()
    >>> image = tl.vis.read_image(img_train_list[0])
    >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')
    >>> pprint.pprint(ann_train_list[0])

    References
    -----------
    - `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__
    - `MPII Human Pose Models. CVPR 16 <http://pose.mpi-inf.mpg.de>`__
    - `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__
    - `MPII Keyponts and ID <http://human-pose.mpi-inf.mpg.de/#download>`__

    """
    path = os.path.join(path, 'mpii_human_pose')
    logging.info("Load or Download MPII Human Pose > {}".format(path))

    # Download + extract the annotation archive (skipped when already present)
    url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
    tar_filename = "mpii_human_pose_v1_u12_2.zip"
    extracted_filename = "mpii_human_pose_v1_u12_2"
    if folder_exists(os.path.join(path, extracted_filename)) is False:
        logging.info("[MPII] (annotation) {} is nonexistent in {}".format(extracted_filename, path))
        maybe_download_and_extract(tar_filename, path, url, extract=True)
        del_file(os.path.join(path, tar_filename))

    # Download + extract the image archive (skipped when already present)
    url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
    tar_filename = "mpii_human_pose_v1.tar.gz"
    extracted_filename2 = "images"
    if folder_exists(os.path.join(path, extracted_filename2)) is False:
        # fixed: report the image folder name, not the annotation folder name
        logging.info("[MPII] (images) {} is nonexistent in {}".format(extracted_filename2, path))
        maybe_download_and_extract(tar_filename, path, url, extract=True)
        del_file(os.path.join(path, tar_filename))

    # parse annotation, format see http://human-pose.mpi-inf.mpg.de/#download
    import scipy.io as sio

    logging.info("reading annotations from mat file ...")

    ann_train_list = []
    ann_test_list = []
    img_train_list = []
    img_test_list = []

    def save_joints():
        """Populate the four lists above from the release .mat annotations."""
        mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat"))

        # iterate over all images together with their train/test flag
        for anno, train_flag in zip(mat['RELEASE']['annolist'][0, 0][0], mat['RELEASE']['img_train'][0, 0][0]):
            img_fn = anno['image']['name'][0, 0][0]
            train_flag = int(train_flag)

            if train_flag:
                img_train_list.append(img_fn)
                ann_train_list.append([])
            else:
                img_test_list.append(img_fn)
                ann_test_list.append([])

            # NOTE: the old pre-computed `head_rect = zip(x1s, y1s, x2s, y2s)`
            # was never consumed (it was recomputed per-person below), so it
            # has been removed.
            if 'annopoints' in str(anno['annorect'].dtype):
                annopoints = anno['annorect']['annopoints'][0]
                head_x1s = anno['annorect']['x1'][0]
                head_y1s = anno['annorect']['y1'][0]
                head_x2s = anno['annorect']['x2'][0]
                head_y2s = anno['annorect']['y2'][0]

                for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(annopoints, head_x1s, head_y1s,
                                                                         head_x2s, head_y2s):
                    # skip people without annotated keypoints
                    if annopoint.size:
                        head_rect = [
                            float(head_x1[0, 0]),
                            float(head_y1[0, 0]),
                            float(head_x2[0, 0]),
                            float(head_y2[0, 0])
                        ]

                        # joint coordinates keyed by MPII joint id
                        annopoint = annopoint['point'][0, 0]
                        j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]]
                        xs = [x_[0, 0] for x_ in annopoint['x'][0]]
                        ys = [y_[0, 0] for y_ in annopoint['y'][0]]
                        joint_pos = {}
                        for _j_id, (_x, _y) in zip(j_id, zip(xs, ys)):
                            joint_pos[int(_j_id)] = [float(_x), float(_y)]

                        # per-joint visibility (may be absent from the record)
                        if 'is_visible' in str(annopoint.dtype):
                            vis = [v[0] if v.size > 0 else [0] for v in annopoint['is_visible'][0]]
                            vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)])
                        else:
                            vis = None

                        # keep everyone, or only people with all 16 keypoints
                        if (not is_16_pos_only) or (len(joint_pos) == 16):
                            data = {
                                'filename': img_fn,
                                'train': train_flag,
                                'head_rect': head_rect,
                                'is_visible': vis,
                                'joint_pos': joint_pos
                            }
                            if train_flag:
                                ann_train_list[-1].append(data)
                            else:
                                ann_test_list[-1].append(data)

    save_joints()

    ## read images dir
    logging.info("reading images list ...")
    img_dir = os.path.join(path, extracted_filename2)
    _img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='\\.jpg', printable=False)
    _img_set = set(_img_list)  # O(1) membership instead of O(n) list scans

    # Drop annotation entries whose image file is missing on disk.
    # NOTE: the previous implementation deleted entries by index while
    # iterating the same list (which skips the element following every
    # removal) and, for the *test* split, mistakenly deleted from the
    # train lists. Both bugs are fixed by rebuilding the lists instead.
    _imgs, _anns = [], []
    for im, ann in zip(img_train_list, ann_train_list):
        if im in _img_set:
            _imgs.append(im)
            _anns.append(ann)
        else:
            print('missing training image {} in {} (remove from img(ann)_train_list)'.format(im, img_dir))
    img_train_list, ann_train_list = _imgs, _anns

    _imgs, _anns = [], []
    for im, ann in zip(img_test_list, ann_test_list):
        if im in _img_set:
            _imgs.append(im)
            _anns.append(ann)
        else:
            print('missing testing image {} in {} (remove from img(ann)_test_list)'.format(im, img_dir))
    img_test_list, ann_test_list = _imgs, _anns

    ## check annotation and images
    n_train_images = len(img_train_list)
    n_test_images = len(img_test_list)
    n_images = n_train_images + n_test_images
    logging.info("n_images: {} n_train_images: {} n_test_images: {}".format(n_images, n_train_images, n_test_images))
    n_train_ann = len(ann_train_list)
    n_test_ann = len(ann_test_list)
    n_ann = n_train_ann + n_test_ann
    logging.info("n_ann: {} n_train_ann: {} n_test_ann: {}".format(n_ann, n_train_ann, n_test_ann))
    n_train_people = len(sum(ann_train_list, []))
    n_test_people = len(sum(ann_test_list, []))
    n_people = n_train_people + n_test_people
    logging.info("n_people: {} n_train_people: {} n_test_people: {}".format(n_people, n_train_people, n_test_people))

    # prepend the image directory so callers get directly usable paths
    img_train_list = [os.path.join(img_dir, fn) for fn in img_train_list]
    img_test_list = [os.path.join(img_dir, fn) for fn in img_test_list]

    return img_train_list, ann_train_list, img_test_list, ann_test_list
constant[Load MPII Human Pose Dataset.
Parameters
-----------
path : str
The path that the data is downloaded to.
is_16_pos_only : boolean
If True, only return the peoples contain 16 pose keypoints. (Usually be used for single person pose estimation)
Returns
----------
img_train_list : list of str
The image directories of training data.
ann_train_list : list of dict
The annotations of training data.
img_test_list : list of str
The image directories of testing data.
ann_test_list : list of dict
The annotations of testing data.
Examples
--------
>>> import pprint
>>> import tensorlayer as tl
>>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()
>>> image = tl.vis.read_image(img_train_list[0])
>>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')
>>> pprint.pprint(ann_train_list[0])
References
-----------
- `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__
- `MPII Human Pose Models. CVPR 16 <http://pose.mpi-inf.mpg.de>`__
- `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__
- `MPII Keyponts and ID <http://human-pose.mpi-inf.mpg.de/#download>`__
]
variable[path] assign[=] call[name[os].path.join, parameter[name[path], constant[mpii_human_pose]]]
call[name[logging].info, parameter[call[constant[Load or Download MPII Human Pose > {}].format, parameter[name[path]]]]]
variable[url] assign[=] constant[http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/]
variable[tar_filename] assign[=] constant[mpii_human_pose_v1_u12_2.zip]
variable[extracted_filename] assign[=] constant[mpii_human_pose_v1_u12_2]
if compare[call[name[folder_exists], parameter[call[name[os].path.join, parameter[name[path], name[extracted_filename]]]]] is constant[False]] begin[:]
call[name[logging].info, parameter[call[constant[[MPII] (annotation) {} is nonexistent in {}].format, parameter[name[extracted_filename], name[path]]]]]
call[name[maybe_download_and_extract], parameter[name[tar_filename], name[path], name[url]]]
call[name[del_file], parameter[call[name[os].path.join, parameter[name[path], name[tar_filename]]]]]
variable[url] assign[=] constant[http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/]
variable[tar_filename] assign[=] constant[mpii_human_pose_v1.tar.gz]
variable[extracted_filename2] assign[=] constant[images]
if compare[call[name[folder_exists], parameter[call[name[os].path.join, parameter[name[path], name[extracted_filename2]]]]] is constant[False]] begin[:]
call[name[logging].info, parameter[call[constant[[MPII] (images) {} is nonexistent in {}].format, parameter[name[extracted_filename], name[path]]]]]
call[name[maybe_download_and_extract], parameter[name[tar_filename], name[path], name[url]]]
call[name[del_file], parameter[call[name[os].path.join, parameter[name[path], name[tar_filename]]]]]
import module[scipy.io] as alias[sio]
call[name[logging].info, parameter[constant[reading annotations from mat file ...]]]
variable[ann_train_list] assign[=] list[[]]
variable[ann_test_list] assign[=] list[[]]
variable[img_train_list] assign[=] list[[]]
variable[img_test_list] assign[=] list[[]]
def function[save_joints, parameter[]]:
variable[mat] assign[=] call[name[sio].loadmat, parameter[call[name[os].path.join, parameter[name[path], name[extracted_filename], constant[mpii_human_pose_v1_u12_1.mat]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0045b70>, <ast.Tuple object at 0x7da1b0045e10>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[call[call[call[call[name[mat]][constant[RELEASE]]][constant[annolist]]][tuple[[<ast.Constant object at 0x7da1b0045660>, <ast.Constant object at 0x7da1b0045810>]]]][constant[0]], call[call[call[call[name[mat]][constant[RELEASE]]][constant[img_train]]][tuple[[<ast.Constant object at 0x7da1b0046470>, <ast.Constant object at 0x7da1b00464a0>]]]][constant[0]]]]]]] begin[:]
variable[img_fn] assign[=] call[call[call[call[name[anno]][constant[image]]][constant[name]]][tuple[[<ast.Constant object at 0x7da1b00467a0>, <ast.Constant object at 0x7da1b00468f0>]]]][constant[0]]
variable[train_flag] assign[=] call[name[int], parameter[name[train_flag]]]
if name[train_flag] begin[:]
call[name[img_train_list].append, parameter[name[img_fn]]]
call[name[ann_train_list].append, parameter[list[[]]]]
variable[head_rect] assign[=] list[[]]
if compare[constant[x1] in call[name[str], parameter[call[name[anno]][constant[annorect]].dtype]]] begin[:]
variable[head_rect] assign[=] call[name[zip], parameter[<ast.ListComp object at 0x7da1b0047850>, <ast.ListComp object at 0x7da1b0047d90>, <ast.ListComp object at 0x7da1b0047160>, <ast.ListComp object at 0x7da1b0046e00>]]
if compare[constant[annopoints] in call[name[str], parameter[call[name[anno]][constant[annorect]].dtype]]] begin[:]
variable[annopoints] assign[=] call[call[call[name[anno]][constant[annorect]]][constant[annopoints]]][constant[0]]
variable[head_x1s] assign[=] call[call[call[name[anno]][constant[annorect]]][constant[x1]]][constant[0]]
variable[head_y1s] assign[=] call[call[call[name[anno]][constant[annorect]]][constant[y1]]][constant[0]]
variable[head_x2s] assign[=] call[call[call[name[anno]][constant[annorect]]][constant[x2]]][constant[0]]
variable[head_y2s] assign[=] call[call[call[name[anno]][constant[annorect]]][constant[y2]]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da1b000bf10>, <ast.Name object at 0x7da1b000ba60>, <ast.Name object at 0x7da1b0009450>, <ast.Name object at 0x7da1b000a410>, <ast.Name object at 0x7da1b0008400>]]] in starred[call[name[zip], parameter[name[annopoints], name[head_x1s], name[head_y1s], name[head_x2s], name[head_y2s]]]] begin[:]
if name[annopoint].size begin[:]
variable[head_rect] assign[=] list[[<ast.Call object at 0x7da1b0009120>, <ast.Call object at 0x7da1b0008be0>, <ast.Call object at 0x7da1b000b280>, <ast.Call object at 0x7da1b0008e20>]]
variable[annopoint] assign[=] call[call[name[annopoint]][constant[point]]][tuple[[<ast.Constant object at 0x7da1b00083d0>, <ast.Constant object at 0x7da1b00086d0>]]]
variable[j_id] assign[=] <ast.ListComp object at 0x7da1b00085b0>
variable[x] assign[=] <ast.ListComp object at 0x7da1b0009870>
variable[y] assign[=] <ast.ListComp object at 0x7da1b00083a0>
variable[joint_pos] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0009030>, <ast.Tuple object at 0x7da1b000b610>]]] in starred[call[name[zip], parameter[name[j_id], call[name[zip], parameter[name[x], name[y]]]]]] begin[:]
call[name[joint_pos]][call[name[int], parameter[name[_j_id]]]] assign[=] list[[<ast.Call object at 0x7da1b000b700>, <ast.Call object at 0x7da1b000add0>]]
if compare[constant[is_visible] in call[name[str], parameter[name[annopoint].dtype]]] begin[:]
variable[vis] assign[=] <ast.ListComp object at 0x7da1b000b430>
variable[vis] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b000a230>]]
if <ast.BoolOp object at 0x7da1b000ba00> begin[:]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b000ece0>, <ast.Constant object at 0x7da1b000f610>, <ast.Constant object at 0x7da1b000efe0>, <ast.Constant object at 0x7da1b000eef0>, <ast.Constant object at 0x7da1b000e800>], [<ast.Name object at 0x7da1b000eb00>, <ast.Name object at 0x7da1b000edd0>, <ast.Name object at 0x7da1b000c910>, <ast.Name object at 0x7da1b000cbe0>, <ast.Name object at 0x7da1b000c970>]]
if name[train_flag] begin[:]
call[call[name[ann_train_list]][<ast.UnaryOp object at 0x7da1b000cd00>].append, parameter[name[data]]]
call[name[save_joints], parameter[]]
call[name[logging].info, parameter[constant[reading images list ...]]]
variable[img_dir] assign[=] call[name[os].path.join, parameter[name[path], name[extracted_filename2]]]
variable[_img_list] assign[=] call[name[load_file_list], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b000dd20>, <ast.Name object at 0x7da1b000ead0>]]] in starred[call[name[enumerate], parameter[name[img_train_list]]]] begin[:]
if compare[name[im] <ast.NotIn object at 0x7da2590d7190> name[_img_list]] begin[:]
call[name[print], parameter[call[constant[missing training image {} in {} (remove from img(ann)_train_list)].format, parameter[name[im], name[img_dir]]]]]
<ast.Delete object at 0x7da1b000fe20>
<ast.Delete object at 0x7da1b000dcc0>
for taget[tuple[[<ast.Name object at 0x7da1b000f2b0>, <ast.Name object at 0x7da1b000eb90>]]] in starred[call[name[enumerate], parameter[name[img_test_list]]]] begin[:]
if compare[name[im] <ast.NotIn object at 0x7da2590d7190> name[_img_list]] begin[:]
call[name[print], parameter[call[constant[missing testing image {} in {} (remove from img(ann)_test_list)].format, parameter[name[im], name[img_dir]]]]]
<ast.Delete object at 0x7da1b000d390>
<ast.Delete object at 0x7da1b000c850>
variable[n_train_images] assign[=] call[name[len], parameter[name[img_train_list]]]
variable[n_test_images] assign[=] call[name[len], parameter[name[img_test_list]]]
variable[n_images] assign[=] binary_operation[name[n_train_images] + name[n_test_images]]
call[name[logging].info, parameter[call[constant[n_images: {} n_train_images: {} n_test_images: {}].format, parameter[name[n_images], name[n_train_images], name[n_test_images]]]]]
variable[n_train_ann] assign[=] call[name[len], parameter[name[ann_train_list]]]
variable[n_test_ann] assign[=] call[name[len], parameter[name[ann_test_list]]]
variable[n_ann] assign[=] binary_operation[name[n_train_ann] + name[n_test_ann]]
call[name[logging].info, parameter[call[constant[n_ann: {} n_train_ann: {} n_test_ann: {}].format, parameter[name[n_ann], name[n_train_ann], name[n_test_ann]]]]]
variable[n_train_people] assign[=] call[name[len], parameter[call[name[sum], parameter[name[ann_train_list], list[[]]]]]]
variable[n_test_people] assign[=] call[name[len], parameter[call[name[sum], parameter[name[ann_test_list], list[[]]]]]]
variable[n_people] assign[=] binary_operation[name[n_train_people] + name[n_test_people]]
call[name[logging].info, parameter[call[constant[n_people: {} n_train_people: {} n_test_people: {}].format, parameter[name[n_people], name[n_train_people], name[n_test_people]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b02b5c30>, <ast.Name object at 0x7da1b02b7070>]]] in starred[call[name[enumerate], parameter[name[img_train_list]]]] begin[:]
call[name[img_train_list]][name[i]] assign[=] call[name[os].path.join, parameter[name[img_dir], name[value]]]
for taget[tuple[[<ast.Name object at 0x7da1b02b61a0>, <ast.Name object at 0x7da1b02b4f70>]]] in starred[call[name[enumerate], parameter[name[img_test_list]]]] begin[:]
call[name[img_test_list]][name[i]] assign[=] call[name[os].path.join, parameter[name[img_dir], name[value]]]
return[tuple[[<ast.Name object at 0x7da1b02b51b0>, <ast.Name object at 0x7da1b02b7e20>, <ast.Name object at 0x7da1b02b6890>, <ast.Name object at 0x7da1b02b4be0>]]] | keyword[def] identifier[load_mpii_pose_dataset] ( identifier[path] = literal[string] , identifier[is_16_pos_only] = keyword[False] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] )
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[path] ))
identifier[url] = literal[string]
identifier[tar_filename] = literal[string]
identifier[extracted_filename] = literal[string]
keyword[if] identifier[folder_exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[extracted_filename] )) keyword[is] keyword[False] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[extracted_filename] , identifier[path] ))
identifier[maybe_download_and_extract] ( identifier[tar_filename] , identifier[path] , identifier[url] , identifier[extract] = keyword[True] )
identifier[del_file] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[tar_filename] ))
identifier[url] = literal[string]
identifier[tar_filename] = literal[string]
identifier[extracted_filename2] = literal[string]
keyword[if] identifier[folder_exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[extracted_filename2] )) keyword[is] keyword[False] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[extracted_filename] , identifier[path] ))
identifier[maybe_download_and_extract] ( identifier[tar_filename] , identifier[path] , identifier[url] , identifier[extract] = keyword[True] )
identifier[del_file] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[tar_filename] ))
keyword[import] identifier[scipy] . identifier[io] keyword[as] identifier[sio]
identifier[logging] . identifier[info] ( literal[string] )
identifier[ann_train_list] =[]
identifier[ann_test_list] =[]
identifier[img_train_list] =[]
identifier[img_test_list] =[]
keyword[def] identifier[save_joints] ():
identifier[mat] = identifier[sio] . identifier[loadmat] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[extracted_filename] , literal[string] ))
keyword[for] identifier[_] ,( identifier[anno] , identifier[train_flag] ) keyword[in] identifier[enumerate] (
identifier[zip] ( identifier[mat] [ literal[string] ][ literal[string] ][ literal[int] , literal[int] ][ literal[int] ], identifier[mat] [ literal[string] ][ literal[string] ][ literal[int] , literal[int] ][ literal[int] ])):
identifier[img_fn] = identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] , literal[int] ][ literal[int] ]
identifier[train_flag] = identifier[int] ( identifier[train_flag] )
keyword[if] identifier[train_flag] :
identifier[img_train_list] . identifier[append] ( identifier[img_fn] )
identifier[ann_train_list] . identifier[append] ([])
keyword[else] :
identifier[img_test_list] . identifier[append] ( identifier[img_fn] )
identifier[ann_test_list] . identifier[append] ([])
identifier[head_rect] =[]
keyword[if] literal[string] keyword[in] identifier[str] ( identifier[anno] [ literal[string] ]. identifier[dtype] ):
identifier[head_rect] = identifier[zip] (
[ identifier[x1] [ literal[int] , literal[int] ] keyword[for] identifier[x1] keyword[in] identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]],[ identifier[y1] [ literal[int] , literal[int] ] keyword[for] identifier[y1] keyword[in] identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]],
[ identifier[x2] [ literal[int] , literal[int] ] keyword[for] identifier[x2] keyword[in] identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]],[ identifier[y2] [ literal[int] , literal[int] ] keyword[for] identifier[y2] keyword[in] identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]]
)
keyword[else] :
identifier[head_rect] =[]
keyword[if] literal[string] keyword[in] identifier[str] ( identifier[anno] [ literal[string] ]. identifier[dtype] ):
identifier[annopoints] = identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]
identifier[head_x1s] = identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]
identifier[head_y1s] = identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]
identifier[head_x2s] = identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]
identifier[head_y2s] = identifier[anno] [ literal[string] ][ literal[string] ][ literal[int] ]
keyword[for] identifier[annopoint] , identifier[head_x1] , identifier[head_y1] , identifier[head_x2] , identifier[head_y2] keyword[in] identifier[zip] ( identifier[annopoints] , identifier[head_x1s] , identifier[head_y1s] , identifier[head_x2s] ,
identifier[head_y2s] ):
keyword[if] identifier[annopoint] . identifier[size] :
identifier[head_rect] =[
identifier[float] ( identifier[head_x1] [ literal[int] , literal[int] ]),
identifier[float] ( identifier[head_y1] [ literal[int] , literal[int] ]),
identifier[float] ( identifier[head_x2] [ literal[int] , literal[int] ]),
identifier[float] ( identifier[head_y2] [ literal[int] , literal[int] ])
]
identifier[annopoint] = identifier[annopoint] [ literal[string] ][ literal[int] , literal[int] ]
identifier[j_id] =[ identifier[str] ( identifier[j_i] [ literal[int] , literal[int] ]) keyword[for] identifier[j_i] keyword[in] identifier[annopoint] [ literal[string] ][ literal[int] ]]
identifier[x] =[ identifier[x] [ literal[int] , literal[int] ] keyword[for] identifier[x] keyword[in] identifier[annopoint] [ literal[string] ][ literal[int] ]]
identifier[y] =[ identifier[y] [ literal[int] , literal[int] ] keyword[for] identifier[y] keyword[in] identifier[annopoint] [ literal[string] ][ literal[int] ]]
identifier[joint_pos] ={}
keyword[for] identifier[_j_id] ,( identifier[_x] , identifier[_y] ) keyword[in] identifier[zip] ( identifier[j_id] , identifier[zip] ( identifier[x] , identifier[y] )):
identifier[joint_pos] [ identifier[int] ( identifier[_j_id] )]=[ identifier[float] ( identifier[_x] ), identifier[float] ( identifier[_y] )]
keyword[if] literal[string] keyword[in] identifier[str] ( identifier[annopoint] . identifier[dtype] ):
identifier[vis] =[ identifier[v] [ literal[int] ] keyword[if] identifier[v] . identifier[size] > literal[int] keyword[else] [ literal[int] ] keyword[for] identifier[v] keyword[in] identifier[annopoint] [ literal[string] ][ literal[int] ]]
identifier[vis] = identifier[dict] ([( identifier[k] , identifier[int] ( identifier[v] [ literal[int] ])) keyword[if] identifier[len] ( identifier[v] )> literal[int] keyword[else] identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[zip] ( identifier[j_id] , identifier[vis] )])
keyword[else] :
identifier[vis] = keyword[None]
keyword[if] (( identifier[is_16_pos_only] == keyword[True] ) keyword[and] ( identifier[len] ( identifier[joint_pos] )== literal[int] )) keyword[or] ( identifier[is_16_pos_only] == keyword[False] ):
identifier[data] ={
literal[string] : identifier[img_fn] ,
literal[string] : identifier[train_flag] ,
literal[string] : identifier[head_rect] ,
literal[string] : identifier[vis] ,
literal[string] : identifier[joint_pos]
}
keyword[if] identifier[train_flag] :
identifier[ann_train_list] [- literal[int] ]. identifier[append] ( identifier[data] )
keyword[else] :
identifier[ann_test_list] [- literal[int] ]. identifier[append] ( identifier[data] )
identifier[save_joints] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[img_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[extracted_filename2] )
identifier[_img_list] = identifier[load_file_list] ( identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[extracted_filename2] ), identifier[regx] = literal[string] , identifier[printable] = keyword[False] )
keyword[for] identifier[i] , identifier[im] keyword[in] identifier[enumerate] ( identifier[img_train_list] ):
keyword[if] identifier[im] keyword[not] keyword[in] identifier[_img_list] :
identifier[print] ( literal[string] . identifier[format] ( identifier[im] , identifier[img_dir] ))
keyword[del] identifier[img_train_list] [ identifier[i] ]
keyword[del] identifier[ann_train_list] [ identifier[i] ]
keyword[for] identifier[i] , identifier[im] keyword[in] identifier[enumerate] ( identifier[img_test_list] ):
keyword[if] identifier[im] keyword[not] keyword[in] identifier[_img_list] :
identifier[print] ( literal[string] . identifier[format] ( identifier[im] , identifier[img_dir] ))
keyword[del] identifier[img_train_list] [ identifier[i] ]
keyword[del] identifier[ann_train_list] [ identifier[i] ]
identifier[n_train_images] = identifier[len] ( identifier[img_train_list] )
identifier[n_test_images] = identifier[len] ( identifier[img_test_list] )
identifier[n_images] = identifier[n_train_images] + identifier[n_test_images]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[n_images] , identifier[n_train_images] , identifier[n_test_images] ))
identifier[n_train_ann] = identifier[len] ( identifier[ann_train_list] )
identifier[n_test_ann] = identifier[len] ( identifier[ann_test_list] )
identifier[n_ann] = identifier[n_train_ann] + identifier[n_test_ann]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[n_ann] , identifier[n_train_ann] , identifier[n_test_ann] ))
identifier[n_train_people] = identifier[len] ( identifier[sum] ( identifier[ann_train_list] ,[]))
identifier[n_test_people] = identifier[len] ( identifier[sum] ( identifier[ann_test_list] ,[]))
identifier[n_people] = identifier[n_train_people] + identifier[n_test_people]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[n_people] , identifier[n_train_people] , identifier[n_test_people] ))
keyword[for] identifier[i] , identifier[value] keyword[in] identifier[enumerate] ( identifier[img_train_list] ):
identifier[img_train_list] [ identifier[i] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[img_dir] , identifier[value] )
keyword[for] identifier[i] , identifier[value] keyword[in] identifier[enumerate] ( identifier[img_test_list] ):
identifier[img_test_list] [ identifier[i] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[img_dir] , identifier[value] )
keyword[return] identifier[img_train_list] , identifier[ann_train_list] , identifier[img_test_list] , identifier[ann_test_list] | def load_mpii_pose_dataset(path='data', is_16_pos_only=False):
"""Load MPII Human Pose Dataset.
Parameters
-----------
path : str
The path that the data is downloaded to.
is_16_pos_only : boolean
If True, only return the peoples contain 16 pose keypoints. (Usually be used for single person pose estimation)
Returns
----------
img_train_list : list of str
The image directories of training data.
ann_train_list : list of dict
The annotations of training data.
img_test_list : list of str
The image directories of testing data.
ann_test_list : list of dict
The annotations of testing data.
Examples
--------
>>> import pprint
>>> import tensorlayer as tl
>>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()
>>> image = tl.vis.read_image(img_train_list[0])
>>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')
>>> pprint.pprint(ann_train_list[0])
References
-----------
- `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__
- `MPII Human Pose Models. CVPR 16 <http://pose.mpi-inf.mpg.de>`__
- `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__
- `MPII Keyponts and ID <http://human-pose.mpi-inf.mpg.de/#download>`__
"""
path = os.path.join(path, 'mpii_human_pose')
logging.info('Load or Download MPII Human Pose > {}'.format(path))
# annotation
url = 'http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/'
tar_filename = 'mpii_human_pose_v1_u12_2.zip'
extracted_filename = 'mpii_human_pose_v1_u12_2'
if folder_exists(os.path.join(path, extracted_filename)) is False:
logging.info('[MPII] (annotation) {} is nonexistent in {}'.format(extracted_filename, path))
maybe_download_and_extract(tar_filename, path, url, extract=True)
del_file(os.path.join(path, tar_filename)) # depends on [control=['if'], data=[]]
# images
url = 'http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/'
tar_filename = 'mpii_human_pose_v1.tar.gz'
extracted_filename2 = 'images'
if folder_exists(os.path.join(path, extracted_filename2)) is False:
logging.info('[MPII] (images) {} is nonexistent in {}'.format(extracted_filename, path))
maybe_download_and_extract(tar_filename, path, url, extract=True)
del_file(os.path.join(path, tar_filename)) # depends on [control=['if'], data=[]]
# parse annotation, format see http://human-pose.mpi-inf.mpg.de/#download
import scipy.io as sio
logging.info('reading annotations from mat file ...')
# mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat"))
# def fix_wrong_joints(joint): # https://github.com/mitmul/deeppose/blob/master/datasets/mpii_dataset.py
# if '12' in joint and '13' in joint and '2' in joint and '3' in joint:
# if ((joint['12'][0] < joint['13'][0]) and
# (joint['3'][0] < joint['2'][0])):
# joint['2'], joint['3'] = joint['3'], joint['2']
# if ((joint['12'][0] > joint['13'][0]) and
# (joint['3'][0] > joint['2'][0])):
# joint['2'], joint['3'] = joint['3'], joint['2']
# return joint
ann_train_list = []
ann_test_list = []
img_train_list = []
img_test_list = []
def save_joints():
# joint_data_fn = os.path.join(path, 'data.json')
# fp = open(joint_data_fn, 'w')
mat = sio.loadmat(os.path.join(path, extracted_filename, 'mpii_human_pose_v1_u12_1.mat'))
for (_, (anno, train_flag)) in enumerate(zip(mat['RELEASE']['annolist'][0, 0][0], mat['RELEASE']['img_train'][0, 0][0])): # all images
img_fn = anno['image']['name'][0, 0][0]
train_flag = int(train_flag)
# print(i, img_fn, train_flag) # DEBUG print all images
if train_flag:
img_train_list.append(img_fn)
ann_train_list.append([]) # depends on [control=['if'], data=[]]
else:
img_test_list.append(img_fn)
ann_test_list.append([])
head_rect = []
if 'x1' in str(anno['annorect'].dtype):
head_rect = zip([x1[0, 0] for x1 in anno['annorect']['x1'][0]], [y1[0, 0] for y1 in anno['annorect']['y1'][0]], [x2[0, 0] for x2 in anno['annorect']['x2'][0]], [y2[0, 0] for y2 in anno['annorect']['y2'][0]]) # depends on [control=['if'], data=[]]
else:
head_rect = [] # TODO
if 'annopoints' in str(anno['annorect'].dtype):
annopoints = anno['annorect']['annopoints'][0]
head_x1s = anno['annorect']['x1'][0]
head_y1s = anno['annorect']['y1'][0]
head_x2s = anno['annorect']['x2'][0]
head_y2s = anno['annorect']['y2'][0]
for (annopoint, head_x1, head_y1, head_x2, head_y2) in zip(annopoints, head_x1s, head_y1s, head_x2s, head_y2s):
# if annopoint != []:
# if len(annopoint) != 0:
if annopoint.size:
head_rect = [float(head_x1[0, 0]), float(head_y1[0, 0]), float(head_x2[0, 0]), float(head_y2[0, 0])]
# joint coordinates
annopoint = annopoint['point'][0, 0]
j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]]
x = [x[0, 0] for x in annopoint['x'][0]]
y = [y[0, 0] for y in annopoint['y'][0]]
joint_pos = {}
for (_j_id, (_x, _y)) in zip(j_id, zip(x, y)):
joint_pos[int(_j_id)] = [float(_x), float(_y)] # depends on [control=['for'], data=[]]
# joint_pos = fix_wrong_joints(joint_pos)
# visibility list
if 'is_visible' in str(annopoint.dtype):
vis = [v[0] if v.size > 0 else [0] for v in annopoint['is_visible'][0]]
vis = dict([(k, int(v[0])) if len(v) > 0 else v for (k, v) in zip(j_id, vis)]) # depends on [control=['if'], data=[]]
else:
vis = None
# if len(joint_pos) == 16:
if is_16_pos_only == True and len(joint_pos) == 16 or is_16_pos_only == False:
# only use image with 16 key points / or use all
data = {'filename': img_fn, 'train': train_flag, 'head_rect': head_rect, 'is_visible': vis, 'joint_pos': joint_pos}
# print(json.dumps(data), file=fp) # py3
if train_flag:
ann_train_list[-1].append(data) # depends on [control=['if'], data=[]]
else:
ann_test_list[-1].append(data) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# def write_line(datum, fp):
# joints = sorted([[int(k), v] for k, v in datum['joint_pos'].items()])
# joints = np.array([j for i, j in joints]).flatten()
#
# out = [datum['filename']]
# out.extend(joints)
# out = [str(o) for o in out]
# out = ','.join(out)
#
# print(out, file=fp)
# def split_train_test():
# # fp_test = open('data/mpii/test_joints.csv', 'w')
# fp_test = open(os.path.join(path, 'test_joints.csv'), 'w')
# # fp_train = open('data/mpii/train_joints.csv', 'w')
# fp_train = open(os.path.join(path, 'train_joints.csv'), 'w')
# # all_data = open('data/mpii/data.json').readlines()
# all_data = open(os.path.join(path, 'data.json')).readlines()
# N = len(all_data)
# N_test = int(N * 0.1)
# N_train = N - N_test
#
# print('N:{}'.format(N))
# print('N_train:{}'.format(N_train))
# print('N_test:{}'.format(N_test))
#
# np.random.seed(1701)
# perm = np.random.permutation(N)
# test_indices = perm[:N_test]
# train_indices = perm[N_test:]
#
# print('train_indices:{}'.format(len(train_indices)))
# print('test_indices:{}'.format(len(test_indices)))
#
# for i in train_indices:
# datum = json.loads(all_data[i].strip())
# write_line(datum, fp_train)
#
# for i in test_indices:
# datum = json.loads(all_data[i].strip())
# write_line(datum, fp_test)
save_joints()
# split_train_test() #
## read images dir
logging.info('reading images list ...')
img_dir = os.path.join(path, extracted_filename2)
_img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='\\.jpg', printable=False)
# ann_list = json.load(open(os.path.join(path, 'data.json')))
for (i, im) in enumerate(img_train_list):
if im not in _img_list:
print('missing training image {} in {} (remove from img(ann)_train_list)'.format(im, img_dir))
# img_train_list.remove(im)
del img_train_list[i]
del ann_train_list[i] # depends on [control=['if'], data=['im']] # depends on [control=['for'], data=[]]
for (i, im) in enumerate(img_test_list):
if im not in _img_list:
print('missing testing image {} in {} (remove from img(ann)_test_list)'.format(im, img_dir))
# img_test_list.remove(im)
del img_train_list[i]
del ann_train_list[i] # depends on [control=['if'], data=['im']] # depends on [control=['for'], data=[]]
## check annotation and images
n_train_images = len(img_train_list)
n_test_images = len(img_test_list)
n_images = n_train_images + n_test_images
logging.info('n_images: {} n_train_images: {} n_test_images: {}'.format(n_images, n_train_images, n_test_images))
n_train_ann = len(ann_train_list)
n_test_ann = len(ann_test_list)
n_ann = n_train_ann + n_test_ann
logging.info('n_ann: {} n_train_ann: {} n_test_ann: {}'.format(n_ann, n_train_ann, n_test_ann))
n_train_people = len(sum(ann_train_list, []))
n_test_people = len(sum(ann_test_list, []))
n_people = n_train_people + n_test_people
logging.info('n_people: {} n_train_people: {} n_test_people: {}'.format(n_people, n_train_people, n_test_people))
# add path to all image file name
for (i, value) in enumerate(img_train_list):
img_train_list[i] = os.path.join(img_dir, value) # depends on [control=['for'], data=[]]
for (i, value) in enumerate(img_test_list):
img_test_list[i] = os.path.join(img_dir, value) # depends on [control=['for'], data=[]]
return (img_train_list, ann_train_list, img_test_list, ann_test_list) |
def max_extents(extents, zrange=False):
"""
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
all extents are converted to 6-tuples to compute
x-, y- and z-limits.
"""
if zrange:
num = 6
inds = [(0, 3), (1, 4), (2, 5)]
extents = [e if len(e) == 6 else (e[0], e[1], None,
e[2], e[3], None)
for e in extents]
else:
num = 4
inds = [(0, 2), (1, 3)]
arr = list(zip(*extents)) if extents else []
extents = [np.NaN] * num
if len(arr) == 0:
return extents
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for lidx, uidx in inds:
lower = [v for v in arr[lidx] if v is not None and not is_nan(v)]
upper = [v for v in arr[uidx] if v is not None and not is_nan(v)]
if lower and isinstance(lower[0], datetime_types):
extents[lidx] = np.min(lower)
elif any(isinstance(l, basestring) for l in lower):
extents[lidx] = np.sort(lower)[0]
elif lower:
extents[lidx] = np.nanmin(lower)
if upper and isinstance(upper[0], datetime_types):
extents[uidx] = np.max(upper)
elif any(isinstance(u, basestring) for u in upper):
extents[uidx] = np.sort(upper)[-1]
elif upper:
extents[uidx] = np.nanmax(upper)
return tuple(extents) | def function[max_extents, parameter[extents, zrange]]:
constant[
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
all extents are converted to 6-tuples to compute
x-, y- and z-limits.
]
if name[zrange] begin[:]
variable[num] assign[=] constant[6]
variable[inds] assign[=] list[[<ast.Tuple object at 0x7da18f58f8b0>, <ast.Tuple object at 0x7da18f58f0d0>, <ast.Tuple object at 0x7da18f58df00>]]
variable[extents] assign[=] <ast.ListComp object at 0x7da18f58f700>
variable[arr] assign[=] <ast.IfExp object at 0x7da18f58d960>
variable[extents] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da18f58ca90>]] * name[num]]
if compare[call[name[len], parameter[name[arr]]] equal[==] constant[0]] begin[:]
return[name[extents]]
with call[name[warnings].catch_warnings, parameter[]] begin[:]
call[name[warnings].filterwarnings, parameter[constant[ignore], constant[All-NaN (slice|axis) encountered]]]
for taget[tuple[[<ast.Name object at 0x7da18f58faf0>, <ast.Name object at 0x7da18f58e020>]]] in starred[name[inds]] begin[:]
variable[lower] assign[=] <ast.ListComp object at 0x7da18f58ff70>
variable[upper] assign[=] <ast.ListComp object at 0x7da18f58c5b0>
if <ast.BoolOp object at 0x7da18f58eb90> begin[:]
call[name[extents]][name[lidx]] assign[=] call[name[np].min, parameter[name[lower]]]
if <ast.BoolOp object at 0x7da204565270> begin[:]
call[name[extents]][name[uidx]] assign[=] call[name[np].max, parameter[name[upper]]]
return[call[name[tuple], parameter[name[extents]]]] | keyword[def] identifier[max_extents] ( identifier[extents] , identifier[zrange] = keyword[False] ):
literal[string]
keyword[if] identifier[zrange] :
identifier[num] = literal[int]
identifier[inds] =[( literal[int] , literal[int] ),( literal[int] , literal[int] ),( literal[int] , literal[int] )]
identifier[extents] =[ identifier[e] keyword[if] identifier[len] ( identifier[e] )== literal[int] keyword[else] ( identifier[e] [ literal[int] ], identifier[e] [ literal[int] ], keyword[None] ,
identifier[e] [ literal[int] ], identifier[e] [ literal[int] ], keyword[None] )
keyword[for] identifier[e] keyword[in] identifier[extents] ]
keyword[else] :
identifier[num] = literal[int]
identifier[inds] =[( literal[int] , literal[int] ),( literal[int] , literal[int] )]
identifier[arr] = identifier[list] ( identifier[zip] (* identifier[extents] )) keyword[if] identifier[extents] keyword[else] []
identifier[extents] =[ identifier[np] . identifier[NaN] ]* identifier[num]
keyword[if] identifier[len] ( identifier[arr] )== literal[int] :
keyword[return] identifier[extents]
keyword[with] identifier[warnings] . identifier[catch_warnings] ():
identifier[warnings] . identifier[filterwarnings] ( literal[string] , literal[string] )
keyword[for] identifier[lidx] , identifier[uidx] keyword[in] identifier[inds] :
identifier[lower] =[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[arr] [ identifier[lidx] ] keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[is_nan] ( identifier[v] )]
identifier[upper] =[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[arr] [ identifier[uidx] ] keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[is_nan] ( identifier[v] )]
keyword[if] identifier[lower] keyword[and] identifier[isinstance] ( identifier[lower] [ literal[int] ], identifier[datetime_types] ):
identifier[extents] [ identifier[lidx] ]= identifier[np] . identifier[min] ( identifier[lower] )
keyword[elif] identifier[any] ( identifier[isinstance] ( identifier[l] , identifier[basestring] ) keyword[for] identifier[l] keyword[in] identifier[lower] ):
identifier[extents] [ identifier[lidx] ]= identifier[np] . identifier[sort] ( identifier[lower] )[ literal[int] ]
keyword[elif] identifier[lower] :
identifier[extents] [ identifier[lidx] ]= identifier[np] . identifier[nanmin] ( identifier[lower] )
keyword[if] identifier[upper] keyword[and] identifier[isinstance] ( identifier[upper] [ literal[int] ], identifier[datetime_types] ):
identifier[extents] [ identifier[uidx] ]= identifier[np] . identifier[max] ( identifier[upper] )
keyword[elif] identifier[any] ( identifier[isinstance] ( identifier[u] , identifier[basestring] ) keyword[for] identifier[u] keyword[in] identifier[upper] ):
identifier[extents] [ identifier[uidx] ]= identifier[np] . identifier[sort] ( identifier[upper] )[- literal[int] ]
keyword[elif] identifier[upper] :
identifier[extents] [ identifier[uidx] ]= identifier[np] . identifier[nanmax] ( identifier[upper] )
keyword[return] identifier[tuple] ( identifier[extents] ) | def max_extents(extents, zrange=False):
"""
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
all extents are converted to 6-tuples to compute
x-, y- and z-limits.
"""
if zrange:
num = 6
inds = [(0, 3), (1, 4), (2, 5)]
extents = [e if len(e) == 6 else (e[0], e[1], None, e[2], e[3], None) for e in extents] # depends on [control=['if'], data=[]]
else:
num = 4
inds = [(0, 2), (1, 3)]
arr = list(zip(*extents)) if extents else []
extents = [np.NaN] * num
if len(arr) == 0:
return extents # depends on [control=['if'], data=[]]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN (slice|axis) encountered')
for (lidx, uidx) in inds:
lower = [v for v in arr[lidx] if v is not None and (not is_nan(v))]
upper = [v for v in arr[uidx] if v is not None and (not is_nan(v))]
if lower and isinstance(lower[0], datetime_types):
extents[lidx] = np.min(lower) # depends on [control=['if'], data=[]]
elif any((isinstance(l, basestring) for l in lower)):
extents[lidx] = np.sort(lower)[0] # depends on [control=['if'], data=[]]
elif lower:
extents[lidx] = np.nanmin(lower) # depends on [control=['if'], data=[]]
if upper and isinstance(upper[0], datetime_types):
extents[uidx] = np.max(upper) # depends on [control=['if'], data=[]]
elif any((isinstance(u, basestring) for u in upper)):
extents[uidx] = np.sort(upper)[-1] # depends on [control=['if'], data=[]]
elif upper:
extents[uidx] = np.nanmax(upper) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
return tuple(extents) |
def list_container_object_names(self, container, limit=None, marker=None,
        prefix=None, delimiter=None, full_listing=False):
    """Return the names of the objects stored in `container`.

    The result can be restricted with the standard pagination arguments
    (`limit`, `marker`) and filtered by `prefix`/`delimiter`; pass
    `full_listing=True` to retrieve every name regardless of paging.
    """
    # Pure delegation: the manager owns the actual listing logic.
    return self._manager.list_object_names(
        container,
        marker=marker,
        limit=limit,
        prefix=prefix,
        delimiter=delimiter,
        full_listing=full_listing,
    )
constant[
Returns the names of all the objects in the specified container,
optionally limited by the pagination parameters.
]
return[call[name[self]._manager.list_object_names, parameter[name[container]]]] | keyword[def] identifier[list_container_object_names] ( identifier[self] , identifier[container] , identifier[limit] = keyword[None] , identifier[marker] = keyword[None] ,
identifier[prefix] = keyword[None] , identifier[delimiter] = keyword[None] , identifier[full_listing] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_manager] . identifier[list_object_names] ( identifier[container] , identifier[marker] = identifier[marker] ,
identifier[limit] = identifier[limit] , identifier[prefix] = identifier[prefix] , identifier[delimiter] = identifier[delimiter] ,
identifier[full_listing] = identifier[full_listing] ) | def list_container_object_names(self, container, limit=None, marker=None, prefix=None, delimiter=None, full_listing=False):
"""
Returns the names of all the objects in the specified container,
optionally limited by the pagination parameters.
"""
return self._manager.list_object_names(container, marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, full_listing=full_listing) |
def targeted_dropout(inputs,
                     k,
                     keep_prob,
                     targeting_fn,
                     is_training,
                     do_prune=False):
  """Apply targeted dropout to `inputs`.

  Only the elements selected by `targeting_fn` are subject to dropout
  (at a rate of `1 - keep_prob`); every other element passes through
  unchanged. See "Targeted Dropout for Posthoc Pruning" (Aidan N. Gomez,
  Ivan Zhang, Kevin Swersky, Yarin Gal, Geoffrey E. Hinton) for detail.

  Args:
    inputs: Tensor, inputs to apply targeted dropout to.
    k: Scalar Tensor or python scalar, number of elements to target in
      `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
      the second argument of `targeting_fn`.
    keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob`.
    targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, producing a
      boolean mask the same shape as `inputs` where True marks an element
      as droppable.
    is_training: bool, indicates whether currently training.
    do_prune: bool, whether at inference time to prune the
      `k * (1 - keep_prob)` elements expected to be dropped each forward
      pass.

  Returns:
    Tensor, same shape and dtype as `inputs`.
  """
  if do_prune and not is_training:
    # At inference we prune only the *expected* number of dropped elements.
    k = tf.round(to_float(k) * to_float(1. - keep_prob))
  drop_mask = tf.cast(targeting_fn(inputs, k), inputs.dtype)
  keep_mask = 1 - drop_mask
  if is_training:
    # Targeted elements take the dropped-out activations; the rest pass
    # through untouched.
    return inputs * keep_mask + tf.nn.dropout(inputs, keep_prob) * drop_mask
  if do_prune:
    return inputs * keep_mask
  return inputs
constant[Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
]
if <ast.BoolOp object at 0x7da1b205a2f0> begin[:]
variable[k] assign[=] call[name[tf].round, parameter[binary_operation[call[name[to_float], parameter[name[k]]] * call[name[to_float], parameter[binary_operation[constant[1.0] - name[keep_prob]]]]]]]
variable[mask] assign[=] call[name[targeting_fn], parameter[name[inputs], name[k]]]
variable[mask] assign[=] call[name[tf].cast, parameter[name[mask], name[inputs].dtype]]
if name[is_training] begin[:]
return[binary_operation[binary_operation[name[inputs] * binary_operation[constant[1] - name[mask]]] + binary_operation[call[name[tf].nn.dropout, parameter[name[inputs], name[keep_prob]]] * name[mask]]]] | keyword[def] identifier[targeted_dropout] ( identifier[inputs] ,
identifier[k] ,
identifier[keep_prob] ,
identifier[targeting_fn] ,
identifier[is_training] ,
identifier[do_prune] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[is_training] keyword[and] identifier[do_prune] :
identifier[k] = identifier[tf] . identifier[round] ( identifier[to_float] ( identifier[k] )* identifier[to_float] ( literal[int] - identifier[keep_prob] ))
identifier[mask] = identifier[targeting_fn] ( identifier[inputs] , identifier[k] )
identifier[mask] = identifier[tf] . identifier[cast] ( identifier[mask] , identifier[inputs] . identifier[dtype] )
keyword[if] identifier[is_training] :
keyword[return] identifier[inputs] *( literal[int] - identifier[mask] )+ identifier[tf] . identifier[nn] . identifier[dropout] ( identifier[inputs] , identifier[keep_prob] )* identifier[mask]
keyword[elif] identifier[do_prune] :
keyword[return] identifier[inputs] *( literal[int] - identifier[mask] )
keyword[else] :
keyword[return] identifier[inputs] | def targeted_dropout(inputs, k, keep_prob, targeting_fn, is_training, do_prune=False):
"""Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
"""
if not is_training and do_prune:
k = tf.round(to_float(k) * to_float(1.0 - keep_prob)) # depends on [control=['if'], data=[]]
mask = targeting_fn(inputs, k)
mask = tf.cast(mask, inputs.dtype)
if is_training:
return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask # depends on [control=['if'], data=[]]
elif do_prune:
return inputs * (1 - mask) # depends on [control=['if'], data=[]]
else:
return inputs |
def theme_lookup(self):
    """
    Resolve the active theme for the current site.

    Sets ``THEME.value`` and returns ``None`` when a theme is found
    (either in the cache or on the site's related themes); returns
    ``False`` when the database lookup fails (e.g. the site has no
    themes, so ``themes[0]`` raises, or the query itself errors).
    """
    # Fast path: a previously resolved theme cached per domain.
    cache_key = 'theme:%s' % self.domain_unsplit
    theme = cache.get(cache_key)
    if theme:
        THEME.value = theme
        return None
    # Slow path: look the theme up on the site object.
    if hasattr(self.site, 'themes'):
        try:
            themes = [theme.name for theme in self.site.themes.all()]
            THEME.value = themes[0]
            # Cache for five minutes so subsequent requests skip the DB.
            cache.set(cache_key, THEME.value, 5 * 60)
        except Exception:
            # Fix: previously a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt. ``Exception`` still covers
            # the expected failures (IndexError on an empty theme list,
            # database errors) while letting control-flow exceptions
            # propagate.
            return False
    return None
constant[
Returns theme based on site
Returns None and sets settings.THEME if able to find a theme object by site.
Otherwise, returns False.
]
variable[cache_key] assign[=] binary_operation[constant[theme:%s] <ast.Mod object at 0x7da2590d6920> name[self].domain_unsplit]
variable[theme] assign[=] call[name[cache].get, parameter[name[cache_key]]]
if name[theme] begin[:]
name[THEME].value assign[=] name[theme]
return[constant[None]]
if call[name[hasattr], parameter[name[self].site, constant[themes]]] begin[:]
<ast.Try object at 0x7da18f00d120>
return[constant[None]] | keyword[def] identifier[theme_lookup] ( identifier[self] ):
literal[string]
identifier[cache_key] = literal[string] % identifier[self] . identifier[domain_unsplit]
identifier[theme] = identifier[cache] . identifier[get] ( identifier[cache_key] )
keyword[if] identifier[theme] :
identifier[THEME] . identifier[value] = identifier[theme]
keyword[return] keyword[None]
keyword[if] identifier[hasattr] ( identifier[self] . identifier[site] , literal[string] ):
keyword[try] :
identifier[themes] =[ identifier[theme] . identifier[name] keyword[for] identifier[theme] keyword[in] identifier[self] . identifier[site] . identifier[themes] . identifier[all] ()]
identifier[THEME] . identifier[value] = identifier[themes] [ literal[int] ]
identifier[cache] . identifier[set] ( identifier[cache_key] , identifier[THEME] . identifier[value] , literal[int] * literal[int] )
keyword[except] :
keyword[return] keyword[False]
keyword[return] keyword[None] | def theme_lookup(self):
"""
Returns theme based on site
Returns None and sets settings.THEME if able to find a theme object by site.
Otherwise, returns False.
"""
# check cache
cache_key = 'theme:%s' % self.domain_unsplit
theme = cache.get(cache_key)
if theme:
THEME.value = theme
return None # depends on [control=['if'], data=[]]
# check database
if hasattr(self.site, 'themes'):
try:
themes = [theme.name for theme in self.site.themes.all()]
THEME.value = themes[0]
cache.set(cache_key, THEME.value, 5 * 60) # depends on [control=['try'], data=[]]
except:
return False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return None |
def annotate(self, records, **kwargs):
    """Annotate records with stored fields, yielding them one at a time.

    Args:
        records: A list or iterator (can be a Query object).
        chunk_size: The number of records to annotate at once (max 500),
            supplied via ``kwargs`` and persisted in ``annotator_params``.

    Yields:
        One annotated record at a time.
    """
    # Persist any overrides (e.g. chunk_size) for subsequent calls too.
    self.annotator_params.update(**kwargs)
    batch_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)
    pending = []
    for record in records:
        pending.append(record)
        if len(pending) % batch_size == 0:
            for annotated in self._execute(pending):
                yield annotated
            pending = []
    # Flush the final, partially filled batch.
    if pending:
        for annotated in self._execute(pending):
            yield annotated
constant[Annotate a set of records with stored fields.
Args:
records: A list or iterator (can be a Query object)
chunk_size: The number of records to annotate at once (max 500).
Returns:
A generator that yields one annotated record at a time.
]
call[name[self].annotator_params.update, parameter[]]
variable[chunk_size] assign[=] call[name[self].annotator_params.get, parameter[constant[chunk_size], name[self].CHUNK_SIZE]]
variable[chunk] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2347670>, <ast.Name object at 0x7da1b2347880>]]] in starred[call[name[enumerate], parameter[name[records]]]] begin[:]
call[name[chunk].append, parameter[name[record]]]
if compare[binary_operation[binary_operation[name[i] + constant[1]] <ast.Mod object at 0x7da2590d6920> name[chunk_size]] equal[==] constant[0]] begin[:]
for taget[name[r]] in starred[call[name[self]._execute, parameter[name[chunk]]]] begin[:]
<ast.Yield object at 0x7da1b2345420>
variable[chunk] assign[=] list[[]]
if name[chunk] begin[:]
for taget[name[r]] in starred[call[name[self]._execute, parameter[name[chunk]]]] begin[:]
<ast.Yield object at 0x7da1b2345fc0>
variable[chunk] assign[=] list[[]] | keyword[def] identifier[annotate] ( identifier[self] , identifier[records] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[annotator_params] . identifier[update] (** identifier[kwargs] )
identifier[chunk_size] = identifier[self] . identifier[annotator_params] . identifier[get] ( literal[string] , identifier[self] . identifier[CHUNK_SIZE] )
identifier[chunk] =[]
keyword[for] identifier[i] , identifier[record] keyword[in] identifier[enumerate] ( identifier[records] ):
identifier[chunk] . identifier[append] ( identifier[record] )
keyword[if] ( identifier[i] + literal[int] )% identifier[chunk_size] == literal[int] :
keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_execute] ( identifier[chunk] ):
keyword[yield] identifier[r]
identifier[chunk] =[]
keyword[if] identifier[chunk] :
keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_execute] ( identifier[chunk] ):
keyword[yield] identifier[r]
identifier[chunk] =[] | def annotate(self, records, **kwargs):
"""Annotate a set of records with stored fields.
Args:
records: A list or iterator (can be a Query object)
chunk_size: The number of records to annotate at once (max 500).
Returns:
A generator that yields one annotated record at a time.
"""
# Update annotator_params with any kwargs
self.annotator_params.update(**kwargs)
chunk_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)
chunk = []
for (i, record) in enumerate(records):
chunk.append(record)
if (i + 1) % chunk_size == 0:
for r in self._execute(chunk):
yield r # depends on [control=['for'], data=['r']]
chunk = [] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if chunk:
for r in self._execute(chunk):
yield r # depends on [control=['for'], data=['r']]
chunk = [] # depends on [control=['if'], data=[]] |
def create_release_settings_action(target, source, env):
    """Copy module_settings.json, stamping release and build information.

    Reads the settings JSON from ``source[0]``, marks it as a release
    build with a UTC timestamp, records the parsed version of every
    dependency tile found under ``build/deps``, and writes the result
    to ``target[0]``.
    """
    with open(str(source[0]), "r") as infile:
        settings = json.load(infile)
    settings['release'] = True
    settings['release_date'] = datetime.datetime.utcnow().isoformat()
    # Record the exact version of each dependency used for this build.
    dep_versions = {}
    for dep in env['TILE'].dependencies:
        dep_id = dep['unique_id']
        dep_tile = IOTile(os.path.join('build', 'deps', dep_id))
        dep_versions[dep_id] = str(dep_tile.parsed_version)
    settings['dependency_versions'] = dep_versions
    with open(str(target[0]), "w") as outfile:
        json.dump(settings, outfile, indent=4)
constant[Copy module_settings.json and add release and build information
]
with call[name[open], parameter[call[name[str], parameter[call[name[source]][constant[0]]]], constant[r]]] begin[:]
variable[settings] assign[=] call[name[json].load, parameter[name[fileobj]]]
call[name[settings]][constant[release]] assign[=] constant[True]
call[name[settings]][constant[release_date]] assign[=] call[call[name[datetime].datetime.utcnow, parameter[]].isoformat, parameter[]]
call[name[settings]][constant[dependency_versions]] assign[=] dictionary[[], []]
for taget[name[dep]] in starred[call[name[env]][constant[TILE]].dependencies] begin[:]
variable[tile] assign[=] call[name[IOTile], parameter[call[name[os].path.join, parameter[constant[build], constant[deps], call[name[dep]][constant[unique_id]]]]]]
call[call[name[settings]][constant[dependency_versions]]][call[name[dep]][constant[unique_id]]] assign[=] call[name[str], parameter[name[tile].parsed_version]]
with call[name[open], parameter[call[name[str], parameter[call[name[target]][constant[0]]]], constant[w]]] begin[:]
call[name[json].dump, parameter[name[settings], name[fileobj]]] | keyword[def] identifier[create_release_settings_action] ( identifier[target] , identifier[source] , identifier[env] ):
literal[string]
keyword[with] identifier[open] ( identifier[str] ( identifier[source] [ literal[int] ]), literal[string] ) keyword[as] identifier[fileobj] :
identifier[settings] = identifier[json] . identifier[load] ( identifier[fileobj] )
identifier[settings] [ literal[string] ]= keyword[True]
identifier[settings] [ literal[string] ]= identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[isoformat] ()
identifier[settings] [ literal[string] ]={}
keyword[for] identifier[dep] keyword[in] identifier[env] [ literal[string] ]. identifier[dependencies] :
identifier[tile] = identifier[IOTile] ( identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] , identifier[dep] [ literal[string] ]))
identifier[settings] [ literal[string] ][ identifier[dep] [ literal[string] ]]= identifier[str] ( identifier[tile] . identifier[parsed_version] )
keyword[with] identifier[open] ( identifier[str] ( identifier[target] [ literal[int] ]), literal[string] ) keyword[as] identifier[fileobj] :
identifier[json] . identifier[dump] ( identifier[settings] , identifier[fileobj] , identifier[indent] = literal[int] ) | def create_release_settings_action(target, source, env):
"""Copy module_settings.json and add release and build information
"""
with open(str(source[0]), 'r') as fileobj:
settings = json.load(fileobj) # depends on [control=['with'], data=['fileobj']]
settings['release'] = True
settings['release_date'] = datetime.datetime.utcnow().isoformat()
settings['dependency_versions'] = {}
#Also insert the versions of every dependency that we used to build this component
for dep in env['TILE'].dependencies:
tile = IOTile(os.path.join('build', 'deps', dep['unique_id']))
settings['dependency_versions'][dep['unique_id']] = str(tile.parsed_version) # depends on [control=['for'], data=['dep']]
with open(str(target[0]), 'w') as fileobj:
json.dump(settings, fileobj, indent=4) # depends on [control=['with'], data=['fileobj']] |
def from_moment_relative_to_crystal_axes(cls, moment, lattice):
    """
    Build a Magmom from a moment expressed relative to crystal axes.

    Used for obtaining moments from magCIF files.

    :param moment: list of floats specifying the vector magmom
    :param lattice: Lattice whose (normalized) basis defines the axes
    :return: Magmom
    """
    # Normalize each lattice vector to obtain the unit-basis matrix.
    row_norms = np.linalg.norm(lattice.matrix, axis=1)
    unit_rows = lattice.matrix / row_norms[:, None]
    cart_moment = np.matmul(list(moment), unit_rows)
    # Snap numerically tiny components to exactly zero.
    cart_moment[np.abs(cart_moment) < 1e-8] = 0
    return cls(cart_moment)
constant[
Obtaining a Magmom object from a magnetic moment provided
relative to crystal axes.
Used for obtaining moments from magCIF file.
:param magmom: list of floats specifying vector magmom
:param lattice: Lattice
:return: Magmom
]
variable[unit_m] assign[=] binary_operation[name[lattice].matrix / call[call[name[np].linalg.norm, parameter[name[lattice].matrix]]][tuple[[<ast.Slice object at 0x7da18f813ee0>, <ast.Constant object at 0x7da18f8106d0>]]]]
variable[moment] assign[=] call[name[np].matmul, parameter[call[name[list], parameter[name[moment]]], name[unit_m]]]
call[name[moment]][compare[call[name[np].abs, parameter[name[moment]]] less[<] constant[1e-08]]] assign[=] constant[0]
return[call[name[cls], parameter[name[moment]]]] | keyword[def] identifier[from_moment_relative_to_crystal_axes] ( identifier[cls] , identifier[moment] , identifier[lattice] ):
literal[string]
identifier[unit_m] = identifier[lattice] . identifier[matrix] / identifier[np] . identifier[linalg] . identifier[norm] ( identifier[lattice] . identifier[matrix] , identifier[axis] = literal[int] )[:, keyword[None] ]
identifier[moment] = identifier[np] . identifier[matmul] ( identifier[list] ( identifier[moment] ), identifier[unit_m] )
identifier[moment] [ identifier[np] . identifier[abs] ( identifier[moment] )< literal[int] ]= literal[int]
keyword[return] identifier[cls] ( identifier[moment] ) | def from_moment_relative_to_crystal_axes(cls, moment, lattice):
"""
Obtaining a Magmom object from a magnetic moment provided
relative to crystal axes.
Used for obtaining moments from magCIF file.
:param magmom: list of floats specifying vector magmom
:param lattice: Lattice
:return: Magmom
"""
# get matrix representing unit lattice vectors
unit_m = lattice.matrix / np.linalg.norm(lattice.matrix, axis=1)[:, None]
moment = np.matmul(list(moment), unit_m)
# round small values to zero
moment[np.abs(moment) < 1e-08] = 0
return cls(moment) |
def flatten_params(self, data, base_key=None):
    """Flatten nested sequences and dicts in query params into bracketed keys.

    e.g. ``{"a": {"b": 1}, "c": [2]}`` -> ``{"a[b]": 1, "c[0]": 2}``.

    :param data: mapping or sequence of parameters (``None`` yields ``{}``)
    :param base_key: prefix under which the current level is nested
    :return: flat ``{key: scalar}`` dict
    """
    result = {}
    if data is None:
        return result
    # Fix: ``collections.Mapping`` was removed in Python 3.10 and
    # ``basestring`` does not exist on Python 3; use ``collections.abc``
    # and ``(str, bytes)`` so the function runs on modern interpreters.
    if isinstance(data, collections.abc.Mapping):
        items = list(data.items())
    else:
        # Sequences are flattened with their indices as string keys.
        items = [(str(idx), val) for idx, val in enumerate(data)]
    for key, value in items:
        if base_key is not None:
            key = base_key + "[" + key + "]"
        # Strings/bytes are scalars here even though they are iterable.
        if isinstance(value, (str, bytes)) or not hasattr(value, "__iter__"):
            result[key] = value
        else:
            result.update(self.flatten_params(value, key))
    return result
constant[ Flatten out nested arrays and dicts in query params into correct format ]
variable[result] assign[=] dictionary[[], []]
if compare[name[data] is constant[None]] begin[:]
return[name[result]]
variable[map_data] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da20c795030> begin[:]
variable[map_data] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c795180>, <ast.Name object at 0x7da20c795630>]]] in starred[call[name[enumerate], parameter[name[data]]]] begin[:]
call[name[map_data].append, parameter[list[[<ast.Call object at 0x7da20c795d50>, <ast.Name object at 0x7da20c794be0>]]]]
for taget[tuple[[<ast.Name object at 0x7da20c795d80>, <ast.Name object at 0x7da20c795bd0>]]] in starred[name[map_data]] begin[:]
if <ast.UnaryOp object at 0x7da20c794af0> begin[:]
variable[key] assign[=] binary_operation[binary_operation[binary_operation[name[base_key] + constant[[]] + name[key]] + constant[]]]
if <ast.BoolOp object at 0x7da20c794490> begin[:]
call[name[result]][name[key]] assign[=] name[value]
return[name[result]] | keyword[def] identifier[flatten_params] ( identifier[self] , identifier[data] , identifier[base_key] = keyword[None] ):
literal[string]
identifier[result] ={}
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] identifier[result]
identifier[map_data] = keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[collections] . identifier[Mapping] ):
identifier[map_data] =[]
keyword[for] identifier[idx] , identifier[val] keyword[in] identifier[enumerate] ( identifier[data] ):
identifier[map_data] . identifier[append] ([ identifier[str] ( identifier[idx] ), identifier[val] ])
keyword[else] :
identifier[map_data] = identifier[list] ( identifier[data] . identifier[items] ())
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[map_data] :
keyword[if] keyword[not] identifier[base_key] keyword[is] keyword[None] :
identifier[key] = identifier[base_key] + literal[string] + identifier[key] + literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[basestring] ) keyword[or] keyword[not] identifier[hasattr] ( identifier[value] , literal[string] ):
identifier[result] [ identifier[key] ]= identifier[value]
keyword[else] :
identifier[result] . identifier[update] ( identifier[self] . identifier[flatten_params] ( identifier[value] , identifier[key] ))
keyword[return] identifier[result] | def flatten_params(self, data, base_key=None):
""" Flatten out nested arrays and dicts in query params into correct format """
result = {}
if data is None:
return result # depends on [control=['if'], data=[]]
map_data = None
if not isinstance(data, collections.Mapping):
map_data = []
for (idx, val) in enumerate(data):
map_data.append([str(idx), val]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
map_data = list(data.items())
for (key, value) in map_data:
if not base_key is None:
key = base_key + '[' + key + ']' # depends on [control=['if'], data=[]]
if isinstance(value, basestring) or not hasattr(value, '__iter__'):
result[key] = value # depends on [control=['if'], data=[]]
else:
result.update(self.flatten_params(value, key)) # depends on [control=['for'], data=[]]
return result |
def get_actuators(self):
    """
    Return the system's actuators as a ``{name: status}`` dictionary.
    """
    mapping = {}
    for actuator in self.system.actuators:
        mapping[actuator.name] = actuator.status
    return mapping
constant[
Get actuators as a dictionary of format ``{name: status}``
]
return[<ast.DictComp object at 0x7da1b255c5e0>] | keyword[def] identifier[get_actuators] ( identifier[self] ):
literal[string]
keyword[return] { identifier[i] . identifier[name] : identifier[i] . identifier[status] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[system] . identifier[actuators] } | def get_actuators(self):
"""
Get actuators as a dictionary of format ``{name: status}``
"""
return {i.name: i.status for i in self.system.actuators} |
def metropolis_hastings_step(current_state: State,
                             proposed_state: State,
                             energy_change: FloatTensor,
                             seed=None) -> Tuple[State, tf.Tensor, tf.Tensor]:
  """Metropolis-Hastings step.
  This probabilistically chooses between `current_state` and `proposed_state`
  based on the `energy_change` so as to preserve detailed balance.
  Energy change is the negative of `log_accept_ratio`.
  Args:
    current_state: Current state.
    proposed_state: Proposed state.
    energy_change: E(proposed_state) - E(previous_state).
    seed: For reproducibility.
  Returns:
    new_state: The chosen state.
    is_accepted: Whether the proposed state was accepted.
    log_uniform: The random number that was used to select between the two
      states.
  """
  # Flatten both states to parallel lists; `flatten_up_to` (from the richer
  # `nest` library, which tf.nest lacks) matches proposed_state's structure
  # to current_state's.
  flat_current = tf.nest.flatten(current_state)
  flat_proposed = nest.flatten_up_to(current_state, proposed_state)
  # Impute the None's in the current state.
  # A None entry in current_state means "take the proposed value as-is".
  flat_current = [
      p if c is None else c for p, c in zip(flat_proposed, flat_current)
  ]
  current_state = tf.nest.pack_sequence_as(current_state, flat_current)
  current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state)
  proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state)
  energy_change = tf.convert_to_tensor(value=energy_change)
  log_accept_ratio = -energy_change
  # Draw u ~ Uniform(0, 1) per chain (same shape as log_accept_ratio) and
  # accept when log(u) < log_accept_ratio, i.e. with probability
  # min(1, exp(log_accept_ratio)).
  log_uniform = tf.math.log(
      tf.random.uniform(
          shape=tf.shape(input=log_accept_ratio),
          dtype=log_accept_ratio.dtype.base_dtype,
          seed=seed))
  is_accepted = log_uniform < log_accept_ratio
  # Elementwise select per chain: proposed where accepted, current otherwise.
  next_state = mcmc_util.choose(
      is_accepted, proposed_state, current_state, name='choose_next_state')
  return next_state, is_accepted, log_uniform
constant[Metropolis-Hastings step.
This probabilistically chooses between `current_state` and `proposed_state`
based on the `energy_change` so as to preserve detailed balance.
Energy change is the negative of `log_accept_ratio`.
Args:
current_state: Current state.
proposed_state: Proposed state.
energy_change: E(proposed_state) - E(previous_state).
seed: For reproducibility.
Returns:
new_state: The chosen state.
is_accepted: Whether the proposed state was accepted.
log_uniform: The random number that was used to select between the two
states.
]
variable[flat_current] assign[=] call[name[tf].nest.flatten, parameter[name[current_state]]]
variable[flat_proposed] assign[=] call[name[nest].flatten_up_to, parameter[name[current_state], name[proposed_state]]]
variable[flat_current] assign[=] <ast.ListComp object at 0x7da1b0322c50>
variable[current_state] assign[=] call[name[tf].nest.pack_sequence_as, parameter[name[current_state], name[flat_current]]]
variable[current_state] assign[=] call[name[tf].nest.map_structure, parameter[name[tf].convert_to_tensor, name[current_state]]]
variable[proposed_state] assign[=] call[name[tf].nest.map_structure, parameter[name[tf].convert_to_tensor, name[proposed_state]]]
variable[energy_change] assign[=] call[name[tf].convert_to_tensor, parameter[]]
variable[log_accept_ratio] assign[=] <ast.UnaryOp object at 0x7da1b0320760>
variable[log_uniform] assign[=] call[name[tf].math.log, parameter[call[name[tf].random.uniform, parameter[]]]]
variable[is_accepted] assign[=] compare[name[log_uniform] less[<] name[log_accept_ratio]]
variable[next_state] assign[=] call[name[mcmc_util].choose, parameter[name[is_accepted], name[proposed_state], name[current_state]]]
return[tuple[[<ast.Name object at 0x7da1b0322110>, <ast.Name object at 0x7da1b0320bb0>, <ast.Name object at 0x7da1b0320fd0>]]] | keyword[def] identifier[metropolis_hastings_step] ( identifier[current_state] : identifier[State] ,
identifier[proposed_state] : identifier[State] ,
identifier[energy_change] : identifier[FloatTensor] ,
identifier[seed] = keyword[None] )-> identifier[Tuple] [ identifier[State] , identifier[tf] . identifier[Tensor] , identifier[tf] . identifier[Tensor] ]:
literal[string]
identifier[flat_current] = identifier[tf] . identifier[nest] . identifier[flatten] ( identifier[current_state] )
identifier[flat_proposed] = identifier[nest] . identifier[flatten_up_to] ( identifier[current_state] , identifier[proposed_state] )
identifier[flat_current] =[
identifier[p] keyword[if] identifier[c] keyword[is] keyword[None] keyword[else] identifier[c] keyword[for] identifier[p] , identifier[c] keyword[in] identifier[zip] ( identifier[flat_proposed] , identifier[flat_current] )
]
identifier[current_state] = identifier[tf] . identifier[nest] . identifier[pack_sequence_as] ( identifier[current_state] , identifier[flat_current] )
identifier[current_state] = identifier[tf] . identifier[nest] . identifier[map_structure] ( identifier[tf] . identifier[convert_to_tensor] , identifier[current_state] )
identifier[proposed_state] = identifier[tf] . identifier[nest] . identifier[map_structure] ( identifier[tf] . identifier[convert_to_tensor] , identifier[proposed_state] )
identifier[energy_change] = identifier[tf] . identifier[convert_to_tensor] ( identifier[value] = identifier[energy_change] )
identifier[log_accept_ratio] =- identifier[energy_change]
identifier[log_uniform] = identifier[tf] . identifier[math] . identifier[log] (
identifier[tf] . identifier[random] . identifier[uniform] (
identifier[shape] = identifier[tf] . identifier[shape] ( identifier[input] = identifier[log_accept_ratio] ),
identifier[dtype] = identifier[log_accept_ratio] . identifier[dtype] . identifier[base_dtype] ,
identifier[seed] = identifier[seed] ))
identifier[is_accepted] = identifier[log_uniform] < identifier[log_accept_ratio]
identifier[next_state] = identifier[mcmc_util] . identifier[choose] (
identifier[is_accepted] , identifier[proposed_state] , identifier[current_state] , identifier[name] = literal[string] )
keyword[return] identifier[next_state] , identifier[is_accepted] , identifier[log_uniform] | def metropolis_hastings_step(current_state: State, proposed_state: State, energy_change: FloatTensor, seed=None) -> Tuple[State, tf.Tensor, tf.Tensor]:
"""Metropolis-Hastings step.
This probabilistically chooses between `current_state` and `proposed_state`
based on the `energy_change` so as to preserve detailed balance.
Energy change is the negative of `log_accept_ratio`.
Args:
current_state: Current state.
proposed_state: Proposed state.
energy_change: E(proposed_state) - E(previous_state).
seed: For reproducibility.
Returns:
new_state: The chosen state.
is_accepted: Whether the proposed state was accepted.
log_uniform: The random number that was used to select between the two
states.
"""
flat_current = tf.nest.flatten(current_state)
flat_proposed = nest.flatten_up_to(current_state, proposed_state)
# Impute the None's in the current state.
flat_current = [p if c is None else c for (p, c) in zip(flat_proposed, flat_current)]
current_state = tf.nest.pack_sequence_as(current_state, flat_current)
current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state)
proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state)
energy_change = tf.convert_to_tensor(value=energy_change)
log_accept_ratio = -energy_change
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(input=log_accept_ratio), dtype=log_accept_ratio.dtype.base_dtype, seed=seed))
is_accepted = log_uniform < log_accept_ratio
next_state = mcmc_util.choose(is_accepted, proposed_state, current_state, name='choose_next_state')
return (next_state, is_accepted, log_uniform) |
def next_cron(
    previous_dt: datetime,
    *,
    month: Union[None, set, int] = None,
    day: Union[None, set, int] = None,
    weekday: Union[None, set, int, str] = None,
    hour: Union[None, set, int] = None,
    minute: Union[None, set, int] = None,
    second: Union[None, set, int] = 0,
    microsecond: int = 123_456,
):
    """
    Return the first datetime strictly after ``previous_dt`` whose fields
    match every constraint supplied.

    A named ``weekday`` (e.g. ``'mon'``) is translated to its numeric index
    before matching. Each constraint may be ``None`` (any value), a single
    int, or a set of allowed ints.
    """
    if isinstance(weekday, str):
        weekday = weekdays.index(weekday.lower())
    options = {
        'month': month,
        'day': day,
        'weekday': weekday,
        'hour': hour,
        'minute': minute,
        'second': second,
        'microsecond': microsecond,
    }
    # Start one second past the previous fire time and keep advancing until
    # _get_next_dt reports that no field needs further adjustment.
    candidate = previous_dt + timedelta(seconds=1)
    while True:
        adjusted = _get_next_dt(candidate, options)
        if adjusted is None:
            return candidate
        candidate = adjusted
constant[
Find the next datetime matching the given parameters.
]
variable[dt] assign[=] binary_operation[name[previous_dt] + call[name[timedelta], parameter[]]]
if call[name[isinstance], parameter[name[weekday], name[str]]] begin[:]
variable[weekday] assign[=] call[name[weekdays].index, parameter[call[name[weekday].lower, parameter[]]]]
variable[options] assign[=] call[name[dict], parameter[]]
while constant[True] begin[:]
variable[next_dt] assign[=] call[name[_get_next_dt], parameter[name[dt], name[options]]]
if compare[name[next_dt] is constant[None]] begin[:]
return[name[dt]]
variable[dt] assign[=] name[next_dt] | keyword[def] identifier[next_cron] (
identifier[previous_dt] : identifier[datetime] ,
*,
identifier[month] : identifier[Union] [ keyword[None] , identifier[set] , identifier[int] ]= keyword[None] ,
identifier[day] : identifier[Union] [ keyword[None] , identifier[set] , identifier[int] ]= keyword[None] ,
identifier[weekday] : identifier[Union] [ keyword[None] , identifier[set] , identifier[int] , identifier[str] ]= keyword[None] ,
identifier[hour] : identifier[Union] [ keyword[None] , identifier[set] , identifier[int] ]= keyword[None] ,
identifier[minute] : identifier[Union] [ keyword[None] , identifier[set] , identifier[int] ]= keyword[None] ,
identifier[second] : identifier[Union] [ keyword[None] , identifier[set] , identifier[int] ]= literal[int] ,
identifier[microsecond] : identifier[int] = literal[int] ,
):
literal[string]
identifier[dt] = identifier[previous_dt] + identifier[timedelta] ( identifier[seconds] = literal[int] )
keyword[if] identifier[isinstance] ( identifier[weekday] , identifier[str] ):
identifier[weekday] = identifier[weekdays] . identifier[index] ( identifier[weekday] . identifier[lower] ())
identifier[options] = identifier[dict] (
identifier[month] = identifier[month] , identifier[day] = identifier[day] , identifier[weekday] = identifier[weekday] , identifier[hour] = identifier[hour] , identifier[minute] = identifier[minute] , identifier[second] = identifier[second] , identifier[microsecond] = identifier[microsecond]
)
keyword[while] keyword[True] :
identifier[next_dt] = identifier[_get_next_dt] ( identifier[dt] , identifier[options] )
keyword[if] identifier[next_dt] keyword[is] keyword[None] :
keyword[return] identifier[dt]
identifier[dt] = identifier[next_dt] | def next_cron(previous_dt: datetime, *, month: Union[None, set, int]=None, day: Union[None, set, int]=None, weekday: Union[None, set, int, str]=None, hour: Union[None, set, int]=None, minute: Union[None, set, int]=None, second: Union[None, set, int]=0, microsecond: int=123456):
"""
Find the next datetime matching the given parameters.
"""
dt = previous_dt + timedelta(seconds=1)
if isinstance(weekday, str):
weekday = weekdays.index(weekday.lower()) # depends on [control=['if'], data=[]]
options = dict(month=month, day=day, weekday=weekday, hour=hour, minute=minute, second=second, microsecond=microsecond)
while True:
next_dt = _get_next_dt(dt, options)
# print(dt, next_dt)
if next_dt is None:
return dt # depends on [control=['if'], data=[]]
dt = next_dt # depends on [control=['while'], data=[]] |
def adduser(name, username, **kwargs):
    '''
    Add a user to an existing local group.

    Args:

        name (str):
            The group that should receive the new member.

        username (str):
            The account to add to the group.

    Returns:
        bool: ``True`` on success; ``False`` if the group could not be read,
        the user is already a member, or the add operation failed.

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo username
    '''
    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        log.error('Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5])))
        return False

    # Normalize to the SAM-compatible account name before comparing/adding.
    username = salt.utils.win_functions.get_sam_name(username)
    current_members = [_get_username(member) for member in group_obj.members()]

    if username in current_members:
        log.warning('User %s is already a member of %s', username, name)
        return False

    try:
        group_obj.Add('WinNT://' + username.replace('\\', '/'))
    except pywintypes.com_error as exc:
        log.error('Failed to add {0} to group {1}. {2}'.format(
            username, name, win32api.FormatMessage(exc.excepinfo[5])))
        return False

    log.info('Added user %s', username)
    return True
constant[
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
]
<ast.Try object at 0x7da1b1ca0790>
variable[existing_members] assign[=] <ast.ListComp object at 0x7da1b1ca1480>
variable[username] assign[=] call[name[salt].utils.win_functions.get_sam_name, parameter[name[username]]]
<ast.Try object at 0x7da1b1ca23e0>
return[constant[True]] | keyword[def] identifier[adduser] ( identifier[name] , identifier[username] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[group_obj] = identifier[_get_group_object] ( identifier[name] )
keyword[except] identifier[pywintypes] . identifier[com_error] keyword[as] identifier[exc] :
identifier[msg] = literal[string] . identifier[format] (
identifier[name] , identifier[win32api] . identifier[FormatMessage] ( identifier[exc] . identifier[excepinfo] [ literal[int] ]))
identifier[log] . identifier[error] ( identifier[msg] )
keyword[return] keyword[False]
identifier[existing_members] =[ identifier[_get_username] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[group_obj] . identifier[members] ()]
identifier[username] = identifier[salt] . identifier[utils] . identifier[win_functions] . identifier[get_sam_name] ( identifier[username] )
keyword[try] :
keyword[if] identifier[username] keyword[not] keyword[in] identifier[existing_members] :
identifier[group_obj] . identifier[Add] ( literal[string] + identifier[username] . identifier[replace] ( literal[string] , literal[string] ))
identifier[log] . identifier[info] ( literal[string] , identifier[username] )
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] , identifier[username] , identifier[name] )
keyword[return] keyword[False]
keyword[except] identifier[pywintypes] . identifier[com_error] keyword[as] identifier[exc] :
identifier[msg] = literal[string] . identifier[format] (
identifier[username] , identifier[name] , identifier[win32api] . identifier[FormatMessage] ( identifier[exc] . identifier[excepinfo] [ literal[int] ]))
identifier[log] . identifier[error] ( identifier[msg] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def adduser(name, username, **kwargs):
"""
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
"""
try:
group_obj = _get_group_object(name) # depends on [control=['try'], data=[]]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False # depends on [control=['except'], data=['exc']]
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username) # depends on [control=['if'], data=['username']]
else:
log.warning('User %s is already a member of %s', username, name)
return False # depends on [control=['try'], data=[]]
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False # depends on [control=['except'], data=['exc']]
return True |
def validate_split_runs_file(split_runs_file):
    """Check if structure of file is as expected and return dictionary linking names to run_IDs.

    The file must be tab separated with a mandatory, case-insensitive header
    line ``NAME<TAB>RUN_ID``. Non-empty data lines are returned as a dict
    mapping each RUN_ID to its NAME. On a missing header or a malformed data
    line the error is logged and the program exits via ``sys.exit``.
    """
    try:
        content = [l.strip() for l in split_runs_file.readlines()]
        if content[0].upper().split('\t') == ['NAME', 'RUN_ID']:
            return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c}
        else:
            # Log BEFORE exiting: sys.exit() raises SystemExit, so the
            # original logging call placed after it was unreachable.
            logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
            sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
    except IndexError:
        # Raised for an empty file (content[0]) or a line without a tab field.
        logging.error("ERROR: Format of --split_runs tab separated file not as expected")
        sys.exit("ERROR: Format of --split_runs tab separated file not as expected")
constant[Check if structure of file is as expected and return dictionary linking names to run_IDs.]
<ast.Try object at 0x7da18bc71b70> | keyword[def] identifier[validate_split_runs_file] ( identifier[split_runs_file] ):
literal[string]
keyword[try] :
identifier[content] =[ identifier[l] . identifier[strip] () keyword[for] identifier[l] keyword[in] identifier[split_runs_file] . identifier[readlines] ()]
keyword[if] identifier[content] [ literal[int] ]. identifier[upper] (). identifier[split] ( literal[string] )==[ literal[string] , literal[string] ]:
keyword[return] { identifier[c] . identifier[split] ( literal[string] )[ literal[int] ]: identifier[c] . identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[c] keyword[in] identifier[content] [ literal[int] :] keyword[if] identifier[c] }
keyword[else] :
identifier[sys] . identifier[exit] ( literal[string] )
identifier[logging] . identifier[error] ( literal[string] )
keyword[except] identifier[IndexError] :
identifier[sys] . identifier[exit] ( literal[string] )
identifier[logging] . identifier[error] ( literal[string] ) | def validate_split_runs_file(split_runs_file):
"""Check if structure of file is as expected and return dictionary linking names to run_IDs."""
try:
content = [l.strip() for l in split_runs_file.readlines()]
if content[0].upper().split('\t') == ['NAME', 'RUN_ID']:
return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c} # depends on [control=['if'], data=[]]
else:
sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'") # depends on [control=['try'], data=[]]
except IndexError:
sys.exit('ERROR: Format of --split_runs tab separated file not as expected')
logging.error('ERROR: Format of --split_runs tab separated file not as expected') # depends on [control=['except'], data=[]] |
def create_from_ll(cls, lls:LabelLists, bs:int=64, val_bs:int=None, ds_tfms:Optional[TfmList]=None,
                   num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None,
                   test:Optional[PathOrStr]=None, collate_fn:Callable=data_collate, size:int=None, no_check:bool=False,
                   resize_method:ResizeMethod=None, mult:int=None, padding_mode:str='reflection',
                   mode:str='bilinear', tfm_y:bool=False)->'ImageDataBunch':
    "Create an `ImageDataBunch` from `LabelLists` `lls` with potential `ds_tfms`."
    # Apply the dataset transforms first, optionally attach a test folder,
    # then wrap everything up as a databunch.
    tfm_kwargs = dict(size=size, resize_method=resize_method, mult=mult,
                      padding_mode=padding_mode, mode=mode, tfm_y=tfm_y)
    lls = lls.transform(tfms=ds_tfms, **tfm_kwargs)
    if test is not None:
        lls.add_test_folder(test)
    db_kwargs = dict(bs=bs, val_bs=val_bs, dl_tfms=dl_tfms, num_workers=num_workers,
                     collate_fn=collate_fn, device=device, no_check=no_check)
    return lls.databunch(**db_kwargs)
constant[Create an `ImageDataBunch` from `LabelLists` `lls` with potential `ds_tfms`.]
variable[lls] assign[=] call[name[lls].transform, parameter[]]
if compare[name[test] is_not constant[None]] begin[:]
call[name[lls].add_test_folder, parameter[name[test]]]
return[call[name[lls].databunch, parameter[]]] | keyword[def] identifier[create_from_ll] ( identifier[cls] , identifier[lls] : identifier[LabelLists] , identifier[bs] : identifier[int] = literal[int] , identifier[val_bs] : identifier[int] = keyword[None] , identifier[ds_tfms] : identifier[Optional] [ identifier[TfmList] ]= keyword[None] ,
identifier[num_workers] : identifier[int] = identifier[defaults] . identifier[cpus] , identifier[dl_tfms] : identifier[Optional] [ identifier[Collection] [ identifier[Callable] ]]= keyword[None] , identifier[device] : identifier[torch] . identifier[device] = keyword[None] ,
identifier[test] : identifier[Optional] [ identifier[PathOrStr] ]= keyword[None] , identifier[collate_fn] : identifier[Callable] = identifier[data_collate] , identifier[size] : identifier[int] = keyword[None] , identifier[no_check] : identifier[bool] = keyword[False] ,
identifier[resize_method] : identifier[ResizeMethod] = keyword[None] , identifier[mult] : identifier[int] = keyword[None] , identifier[padding_mode] : identifier[str] = literal[string] ,
identifier[mode] : identifier[str] = literal[string] , identifier[tfm_y] : identifier[bool] = keyword[False] )-> literal[string] :
literal[string]
identifier[lls] = identifier[lls] . identifier[transform] ( identifier[tfms] = identifier[ds_tfms] , identifier[size] = identifier[size] , identifier[resize_method] = identifier[resize_method] , identifier[mult] = identifier[mult] , identifier[padding_mode] = identifier[padding_mode] ,
identifier[mode] = identifier[mode] , identifier[tfm_y] = identifier[tfm_y] )
keyword[if] identifier[test] keyword[is] keyword[not] keyword[None] : identifier[lls] . identifier[add_test_folder] ( identifier[test] )
keyword[return] identifier[lls] . identifier[databunch] ( identifier[bs] = identifier[bs] , identifier[val_bs] = identifier[val_bs] , identifier[dl_tfms] = identifier[dl_tfms] , identifier[num_workers] = identifier[num_workers] , identifier[collate_fn] = identifier[collate_fn] ,
identifier[device] = identifier[device] , identifier[no_check] = identifier[no_check] ) | def create_from_ll(cls, lls: LabelLists, bs: int=64, val_bs: int=None, ds_tfms: Optional[TfmList]=None, num_workers: int=defaults.cpus, dl_tfms: Optional[Collection[Callable]]=None, device: torch.device=None, test: Optional[PathOrStr]=None, collate_fn: Callable=data_collate, size: int=None, no_check: bool=False, resize_method: ResizeMethod=None, mult: int=None, padding_mode: str='reflection', mode: str='bilinear', tfm_y: bool=False) -> 'ImageDataBunch':
"""Create an `ImageDataBunch` from `LabelLists` `lls` with potential `ds_tfms`."""
lls = lls.transform(tfms=ds_tfms, size=size, resize_method=resize_method, mult=mult, padding_mode=padding_mode, mode=mode, tfm_y=tfm_y)
if test is not None:
lls.add_test_folder(test) # depends on [control=['if'], data=['test']]
return lls.databunch(bs=bs, val_bs=val_bs, dl_tfms=dl_tfms, num_workers=num_workers, collate_fn=collate_fn, device=device, no_check=no_check) |
def get_tabs(self, request, **kwargs):
    """Return the tab group for this view, creating it on first access."""
    group = self._tab_group
    if group is None:
        # Lazily instantiate so the group is built at most once per view.
        group = self.tab_group_class(request, **kwargs)
        self._tab_group = group
    return group
constant[Returns the initialized tab group for this view.]
if compare[name[self]._tab_group is constant[None]] begin[:]
name[self]._tab_group assign[=] call[name[self].tab_group_class, parameter[name[request]]]
return[name[self]._tab_group] | keyword[def] identifier[get_tabs] ( identifier[self] , identifier[request] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[_tab_group] keyword[is] keyword[None] :
identifier[self] . identifier[_tab_group] = identifier[self] . identifier[tab_group_class] ( identifier[request] ,** identifier[kwargs] )
keyword[return] identifier[self] . identifier[_tab_group] | def get_tabs(self, request, **kwargs):
"""Returns the initialized tab group for this view."""
if self._tab_group is None:
self._tab_group = self.tab_group_class(request, **kwargs) # depends on [control=['if'], data=[]]
return self._tab_group |
def auth_expired(self):
    """
    Report whether the stored auth token should be considered expired.

    The current time is padded with AUTH_TOKEN_CLOCK_SKEW_MAX so a token
    that is about to lapse counts as already expired. A missing token or
    missing expiry also counts as expired.

    :return: true if the token has expired
    """
    if not (self._auth and self._expires):
        return True
    return time.time() + AUTH_TOKEN_CLOCK_SKEW_MAX > self._expires
constant[
Compare the expiration value of our current token including a CLOCK_SKEW.
:return: true if the token has expired
]
if <ast.BoolOp object at 0x7da18fe90c70> begin[:]
variable[now_with_skew] assign[=] binary_operation[call[name[time].time, parameter[]] + name[AUTH_TOKEN_CLOCK_SKEW_MAX]]
return[compare[name[now_with_skew] greater[>] name[self]._expires]]
return[constant[True]] | keyword[def] identifier[auth_expired] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_auth] keyword[and] identifier[self] . identifier[_expires] :
identifier[now_with_skew] = identifier[time] . identifier[time] ()+ identifier[AUTH_TOKEN_CLOCK_SKEW_MAX]
keyword[return] identifier[now_with_skew] > identifier[self] . identifier[_expires]
keyword[return] keyword[True] | def auth_expired(self):
"""
Compare the expiration value of our current token including a CLOCK_SKEW.
:return: true if the token has expired
"""
if self._auth and self._expires:
now_with_skew = time.time() + AUTH_TOKEN_CLOCK_SKEW_MAX
return now_with_skew > self._expires # depends on [control=['if'], data=[]]
return True |
def create_model(self,
                 base_model_id,
                 forced_glossary=None,
                 parallel_corpus=None,
                 name=None,
                 **kwargs):
    """
    Create model.

    Train a custom translation model by uploading Translation Memory
    eXchange (TMX) files. A model can be customized either with a forced
    glossary (a single file under 10 MB whose entries completely override
    the base translations) or with parallel corpora (several files, 250 MB
    combined, containing at least 5,000 parallel sentences). To combine
    both, customize with a parallel corpus first and then customize the
    resulting model with a glossary. Training time ranges from minutes for
    a glossary to several hours for a large corpus, and at most 10 custom
    models are allowed per language pair.

    :param str base_model_id: The model ID of the model to use as the base
           for customization. To see available models, use the `List models`
           method.
    :param file forced_glossary: A TMX file with your customizations; only
           one glossary may be uploaded per call.
    :param file parallel_corpus: A TMX file with parallel sentences for
           source and target language; multiple files may be supplied in one
           request.
    :param str name: An optional model name. Valid characters are letters,
           numbers, dashes, underscores, spaces and apostrophes; the maximum
           length is 32 characters.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    if base_model_id is None:
        raise ValueError('base_model_id must be provided')

    # Caller-supplied headers first, then the SDK analytics headers on top.
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])
    headers.update(get_sdk_headers('language_translator', 'V3',
                                   'create_model'))

    params = {
        'version': self.version,
        'base_model_id': base_model_id,
        'name': name
    }

    # Only include the multipart fields that were actually provided.
    form_data = {}
    for field, payload in (('forced_glossary', forced_glossary),
                           ('parallel_corpus', parallel_corpus)):
        if payload:
            form_data[field] = (None, payload, 'application/octet-stream')

    return self.request(
        method='POST',
        url='/v3/models',
        headers=headers,
        params=params,
        files=form_data,
        accept_json=True)
constant[
Create model.
Uploads Translation Memory eXchange (TMX) files to customize a translation model.
You can either customize a model with a forced glossary or with a corpus that
contains parallel sentences. To create a model that is customized with a parallel
corpus <b>and</b> a forced glossary, proceed in two steps: customize with a
parallel corpus first and then customize the resulting model with a glossary.
Depending on the type of customization and the size of the uploaded corpora,
training can range from minutes for a glossary to several hours for a large
parallel corpus. You can upload a single forced glossary file and this file must
be less than <b>10 MB</b>. You can upload multiple parallel corpora tmx files. The
cumulative file size of all uploaded files is limited to <b>250 MB</b>. To
successfully train with a parallel corpus you must have at least <b>5,000 parallel
sentences</b> in your corpus.
You can have a <b>maxium of 10 custom models per language pair</b>.
:param str base_model_id: The model ID of the model to use as the base for
customization. To see available models, use the `List models` method. Usually all
IBM provided models are customizable. In addition, all your models that have been
created via parallel corpus customization, can be further customized with a forced
glossary.
:param file forced_glossary: A TMX file with your customizations. The
customizations in the file completely overwrite the domain translaton data,
including high frequency or high confidence phrase translations. You can upload
only one glossary with a file size less than 10 MB per call. A forced glossary
should contain single words or short phrases.
:param file parallel_corpus: A TMX file with parallel sentences for source and
target language. You can upload multiple parallel_corpus files in one request. All
uploaded parallel_corpus files combined, your parallel corpus must contain at
least 5,000 parallel sentences to train successfully.
:param str name: An optional model name that you can use to identify the model.
Valid characters are letters, numbers, dashes, underscores, spaces and
apostrophes. The maximum length is 32 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
]
if compare[name[base_model_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da18bccbf10>
variable[headers] assign[=] dictionary[[], []]
if compare[constant[headers] in name[kwargs]] begin[:]
call[name[headers].update, parameter[call[name[kwargs].get, parameter[constant[headers]]]]]
variable[sdk_headers] assign[=] call[name[get_sdk_headers], parameter[constant[language_translator], constant[V3], constant[create_model]]]
call[name[headers].update, parameter[name[sdk_headers]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2044c3850>, <ast.Constant object at 0x7da2044c11b0>, <ast.Constant object at 0x7da2044c1db0>], [<ast.Attribute object at 0x7da2044c2920>, <ast.Name object at 0x7da2044c3d30>, <ast.Name object at 0x7da2044c2440>]]
variable[form_data] assign[=] dictionary[[], []]
if name[forced_glossary] begin[:]
call[name[form_data]][constant[forced_glossary]] assign[=] tuple[[<ast.Constant object at 0x7da2044c1f60>, <ast.Name object at 0x7da2044c0eb0>, <ast.Constant object at 0x7da2044c38e0>]]
if name[parallel_corpus] begin[:]
call[name[form_data]][constant[parallel_corpus]] assign[=] tuple[[<ast.Constant object at 0x7da2044c3b50>, <ast.Name object at 0x7da2044c0040>, <ast.Constant object at 0x7da2044c2b30>]]
variable[url] assign[=] constant[/v3/models]
variable[response] assign[=] call[name[self].request, parameter[]]
return[name[response]] | keyword[def] identifier[create_model] ( identifier[self] ,
identifier[base_model_id] ,
identifier[forced_glossary] = keyword[None] ,
identifier[parallel_corpus] = keyword[None] ,
identifier[name] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[base_model_id] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[headers] ={}
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[headers] . identifier[update] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[sdk_headers] = identifier[get_sdk_headers] ( literal[string] , literal[string] ,
literal[string] )
identifier[headers] . identifier[update] ( identifier[sdk_headers] )
identifier[params] ={
literal[string] : identifier[self] . identifier[version] ,
literal[string] : identifier[base_model_id] ,
literal[string] : identifier[name]
}
identifier[form_data] ={}
keyword[if] identifier[forced_glossary] :
identifier[form_data] [ literal[string] ]=( keyword[None] , identifier[forced_glossary] ,
literal[string] )
keyword[if] identifier[parallel_corpus] :
identifier[form_data] [ literal[string] ]=( keyword[None] , identifier[parallel_corpus] ,
literal[string] )
identifier[url] = literal[string]
identifier[response] = identifier[self] . identifier[request] (
identifier[method] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[headers] = identifier[headers] ,
identifier[params] = identifier[params] ,
identifier[files] = identifier[form_data] ,
identifier[accept_json] = keyword[True] )
keyword[return] identifier[response] | def create_model(self, base_model_id, forced_glossary=None, parallel_corpus=None, name=None, **kwargs):
"""
Create model.
Uploads Translation Memory eXchange (TMX) files to customize a translation model.
You can either customize a model with a forced glossary or with a corpus that
contains parallel sentences. To create a model that is customized with a parallel
corpus <b>and</b> a forced glossary, proceed in two steps: customize with a
parallel corpus first and then customize the resulting model with a glossary.
Depending on the type of customization and the size of the uploaded corpora,
training can range from minutes for a glossary to several hours for a large
parallel corpus. You can upload a single forced glossary file and this file must
be less than <b>10 MB</b>. You can upload multiple parallel corpora tmx files. The
cumulative file size of all uploaded files is limited to <b>250 MB</b>. To
successfully train with a parallel corpus you must have at least <b>5,000 parallel
sentences</b> in your corpus.
You can have a <b>maxium of 10 custom models per language pair</b>.
:param str base_model_id: The model ID of the model to use as the base for
customization. To see available models, use the `List models` method. Usually all
IBM provided models are customizable. In addition, all your models that have been
created via parallel corpus customization, can be further customized with a forced
glossary.
:param file forced_glossary: A TMX file with your customizations. The
customizations in the file completely overwrite the domain translaton data,
including high frequency or high confidence phrase translations. You can upload
only one glossary with a file size less than 10 MB per call. A forced glossary
should contain single words or short phrases.
:param file parallel_corpus: A TMX file with parallel sentences for source and
target language. You can upload multiple parallel_corpus files in one request. All
uploaded parallel_corpus files combined, your parallel corpus must contain at
least 5,000 parallel sentences to train successfully.
:param str name: An optional model name that you can use to identify the model.
Valid characters are letters, numbers, dashes, underscores, spaces and
apostrophes. The maximum length is 32 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if base_model_id is None:
raise ValueError('base_model_id must be provided') # depends on [control=['if'], data=[]]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers')) # depends on [control=['if'], data=['kwargs']]
sdk_headers = get_sdk_headers('language_translator', 'V3', 'create_model')
headers.update(sdk_headers)
params = {'version': self.version, 'base_model_id': base_model_id, 'name': name}
form_data = {}
if forced_glossary:
form_data['forced_glossary'] = (None, forced_glossary, 'application/octet-stream') # depends on [control=['if'], data=[]]
if parallel_corpus:
form_data['parallel_corpus'] = (None, parallel_corpus, 'application/octet-stream') # depends on [control=['if'], data=[]]
url = '/v3/models'
response = self.request(method='POST', url=url, headers=headers, params=params, files=form_data, accept_json=True)
return response |
def touch(self, mode=0o666, exist_ok=True):
    """
    Create the file if it does not already exist.

    The *mode* argument is ignored by Artifactory. Raises OSError(EEXIST)
    when the file exists and *exist_ok* is false.
    """
    # exists() is always evaluated, matching the original call order.
    already_there = self.exists()
    if already_there and not exist_ok:
        raise OSError(17, "File exists", str(self))
    self._accessor.touch(self)
constant[
Create a file if it doesn't exist.
Mode is ignored by Artifactory.
]
if <ast.BoolOp object at 0x7da1b0926140> begin[:]
<ast.Raise object at 0x7da1b0926dd0>
call[name[self]._accessor.touch, parameter[name[self]]] | keyword[def] identifier[touch] ( identifier[self] , identifier[mode] = literal[int] , identifier[exist_ok] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[exists] () keyword[and] keyword[not] identifier[exist_ok] :
keyword[raise] identifier[OSError] ( literal[int] , literal[string] , identifier[str] ( identifier[self] ))
identifier[self] . identifier[_accessor] . identifier[touch] ( identifier[self] ) | def touch(self, mode=438, exist_ok=True):
"""
Create a file if it doesn't exist.
Mode is ignored by Artifactory.
"""
if self.exists() and (not exist_ok):
raise OSError(17, 'File exists', str(self)) # depends on [control=['if'], data=[]]
self._accessor.touch(self) |
def _getTarball(url, into_directory, cache_key, origin_info=None):
    '''unpack the specified tarball url into the specified directory

    The unpacked result is cached under ``cache_key``; the download is only
    performed on a cache miss (signalled by ``unpackFromCache`` raising
    ``KeyError``).
    '''
    try:
        access_common.unpackFromCache(cache_key, into_directory)
    except KeyError:
        # Cache miss: download the tarball from GitHub. Use the user's auth
        # token, if one is configured, to raise the API rate limit and allow
        # access to private repositories.
        tok = settings.getProperty('github', 'authtoken')
        headers = {}
        if tok is not None:
            headers['Authorization'] = 'token ' + str(tok)
        logger.debug('GET %s', url)
        response = requests.get(url, allow_redirects=True, stream=True, headers=headers)
        logger.debug('getting file: %s', url)
        logger.debug('headers: %s', response.headers)
        # Abort on any HTTP error status before attempting to unpack.
        # (The original called raise_for_status() twice; once is enough.)
        response.raise_for_status()
        # github doesn't expose hashes of the archives being downloaded as far
        # as I can tell :(
        access_common.unpackTarballStream(
            stream = response,
            into_directory = into_directory,
            hash = {},
            cache_key = cache_key,
            origin_info = origin_info
        )
constant[unpack the specified tarball url into the specified directory]
<ast.Try object at 0x7da1b0089a50> | keyword[def] identifier[_getTarball] ( identifier[url] , identifier[into_directory] , identifier[cache_key] , identifier[origin_info] = keyword[None] ):
literal[string]
keyword[try] :
identifier[access_common] . identifier[unpackFromCache] ( identifier[cache_key] , identifier[into_directory] )
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
identifier[tok] = identifier[settings] . identifier[getProperty] ( literal[string] , literal[string] )
identifier[headers] ={}
keyword[if] identifier[tok] keyword[is] keyword[not] keyword[None] :
identifier[headers] [ literal[string] ]= literal[string] + identifier[str] ( identifier[tok] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[url] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[allow_redirects] = keyword[True] , identifier[stream] = keyword[True] , identifier[headers] = identifier[headers] )
identifier[response] . identifier[raise_for_status] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[url] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[response] . identifier[headers] )
identifier[response] . identifier[raise_for_status] ()
identifier[access_common] . identifier[unpackTarballStream] (
identifier[stream] = identifier[response] ,
identifier[into_directory] = identifier[into_directory] ,
identifier[hash] ={},
identifier[cache_key] = identifier[cache_key] ,
identifier[origin_info] = identifier[origin_info]
) | def _getTarball(url, into_directory, cache_key, origin_info=None):
"""unpack the specified tarball url into the specified directory"""
try:
access_common.unpackFromCache(cache_key, into_directory) # depends on [control=['try'], data=[]]
except KeyError as e:
tok = settings.getProperty('github', 'authtoken')
headers = {}
if tok is not None:
headers['Authorization'] = 'token ' + str(tok) # depends on [control=['if'], data=['tok']]
logger.debug('GET %s', url)
response = requests.get(url, allow_redirects=True, stream=True, headers=headers)
response.raise_for_status()
logger.debug('getting file: %s', url)
logger.debug('headers: %s', response.headers)
response.raise_for_status()
# github doesn't exposes hashes of the archives being downloaded as far
# as I can tell :(
access_common.unpackTarballStream(stream=response, into_directory=into_directory, hash={}, cache_key=cache_key, origin_info=origin_info) # depends on [control=['except'], data=[]] |
    def get_children(self):
        """Return an iterator for accessing the children of this cursor.

        The direct children are collected eagerly by walking the cursor with
        libclang's ``clang_visitChildren`` and are then returned as an
        iterator over the resulting list.
        """
        # FIXME: Expose iteration from CIndex, PR6125.
        def visitor(child, parent, children):
            # Invoked once per direct child of this cursor; ``children`` is
            # the list passed as the third argument to clang_visitChildren
            # below.
            # FIXME: Document this assertion in API.
            # FIXME: There should just be an isNull method.
            assert child != conf.lib.clang_getNullCursor()
            # Create reference to TU so it isn't GC'd before Cursor.
            child._tu = self._tu
            children.append(child)
            # Returning 1 (CXChildVisit_Continue) tells libclang to keep
            # visiting the remaining siblings without recursing.
            return 1 # continue
        children = []
        conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
            children)
        return iter(children)
constant[Return an iterator for accessing the children of this cursor.]
def function[visitor, parameter[child, parent, children]]:
assert[compare[name[child] not_equal[!=] call[name[conf].lib.clang_getNullCursor, parameter[]]]]
name[child]._tu assign[=] name[self]._tu
call[name[children].append, parameter[name[child]]]
return[constant[1]]
variable[children] assign[=] list[[]]
call[name[conf].lib.clang_visitChildren, parameter[name[self], call[call[name[callbacks]][constant[cursor_visit]], parameter[name[visitor]]], name[children]]]
return[call[name[iter], parameter[name[children]]]] | keyword[def] identifier[get_children] ( identifier[self] ):
literal[string]
keyword[def] identifier[visitor] ( identifier[child] , identifier[parent] , identifier[children] ):
keyword[assert] identifier[child] != identifier[conf] . identifier[lib] . identifier[clang_getNullCursor] ()
identifier[child] . identifier[_tu] = identifier[self] . identifier[_tu]
identifier[children] . identifier[append] ( identifier[child] )
keyword[return] literal[int]
identifier[children] =[]
identifier[conf] . identifier[lib] . identifier[clang_visitChildren] ( identifier[self] , identifier[callbacks] [ literal[string] ]( identifier[visitor] ),
identifier[children] )
keyword[return] identifier[iter] ( identifier[children] ) | def get_children(self):
"""Return an iterator for accessing the children of this cursor."""
# FIXME: Expose iteration from CIndex, PR6125.
def visitor(child, parent, children):
# FIXME: Document this assertion in API.
# FIXME: There should just be an isNull method.
assert child != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
child._tu = self._tu
children.append(child)
return 1 # continue
children = []
conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor), children)
return iter(children) |
def rmlinenumber(linenumber, infile, dryrun=False):
    """
    Sed-like line deletion function based on given line number..
    Usage: pysed.rmlinenumber(<Unwanted Line Number>, <Text File>)
    Example: pysed.rmlinenumber(10, '/path/to/file.txt')
    Example 'DRYRUN': pysed.rmlinenumber(10, '/path/to/file.txt', dryrun=True)
    #This will dump the output to STDOUT instead of changing the input file.
    """
    linelist = []
    linecounter = 0
    # BUG FIX: the original check was inverted -- it bailed out whenever
    # 'linenumber' *was* an integer, so the function could never succeed.
    if not isinstance(linenumber, int):
        exit("""'linenumber' argument must be an integer.""")
    with open(infile) as reader:
        for item in reader:
            linecounter = linecounter + 1
            # Keep every line except the one at the requested (1-based) number.
            if linecounter != linenumber:
                linelist.append(item)
    if dryrun is False:
        # Rewrite the input file without the removed line. Opening with "w"
        # already truncates, so the explicit truncate() call was redundant.
        with open(infile, "w") as writer:
            for line in linelist:
                writer.writelines(line)
    elif dryrun is True:
        # Dry run: dump the would-be result to STDOUT, leave the file alone.
        for line in linelist:
            print(line, end='')
    else:
        exit("""Unknown option specified to 'dryrun' argument,
              Usage: dryrun=<True|False>.""")
constant[
Sed-like line deletion function based on given line number..
Usage: pysed.rmlinenumber(<Unwanted Line Number>, <Text File>)
Example: pysed.rmlinenumber(10, '/path/to/file.txt')
Example 'DRYRUN': pysed.rmlinenumber(10, '/path/to/file.txt', dryrun=True)
#This will dump the output to STDOUT instead of changing the input file.
]
variable[linelist] assign[=] list[[]]
variable[linecounter] assign[=] constant[0]
if call[name[isinstance], parameter[name[linenumber], name[int]]] begin[:]
call[name[exit], parameter[constant['linenumber' argument must be an integer.]]]
with call[name[open], parameter[name[infile]]] begin[:]
for taget[name[item]] in starred[name[reader]] begin[:]
variable[linecounter] assign[=] binary_operation[name[linecounter] + constant[1]]
if compare[name[linecounter] not_equal[!=] name[linenumber]] begin[:]
call[name[linelist].append, parameter[name[item]]]
if compare[name[dryrun] is constant[False]] begin[:]
with call[name[open], parameter[name[infile], constant[w]]] begin[:]
call[name[writer].truncate, parameter[]]
for taget[name[line]] in starred[name[linelist]] begin[:]
call[name[writer].writelines, parameter[name[line]]] | keyword[def] identifier[rmlinenumber] ( identifier[linenumber] , identifier[infile] , identifier[dryrun] = keyword[False] ):
literal[string]
identifier[linelist] =[]
identifier[linecounter] = literal[int]
keyword[if] identifier[isinstance] ( identifier[linenumber] , identifier[int] ):
identifier[exit] ( literal[string] )
keyword[with] identifier[open] ( identifier[infile] ) keyword[as] identifier[reader] :
keyword[for] identifier[item] keyword[in] identifier[reader] :
identifier[linecounter] = identifier[linecounter] + literal[int]
keyword[if] identifier[linecounter] != identifier[linenumber] :
identifier[linelist] . identifier[append] ( identifier[item] )
keyword[if] identifier[dryrun] keyword[is] keyword[False] :
keyword[with] identifier[open] ( identifier[infile] , literal[string] ) keyword[as] identifier[writer] :
identifier[writer] . identifier[truncate] ()
keyword[for] identifier[line] keyword[in] identifier[linelist] :
identifier[writer] . identifier[writelines] ( identifier[line] )
keyword[elif] identifier[dryrun] keyword[is] keyword[True] :
keyword[for] identifier[line] keyword[in] identifier[linelist] :
identifier[print] ( identifier[line] , identifier[end] = literal[string] )
keyword[else] :
identifier[exit] ( literal[string] ) | def rmlinenumber(linenumber, infile, dryrun=False):
"""
Sed-like line deletion function based on given line number..
Usage: pysed.rmlinenumber(<Unwanted Line Number>, <Text File>)
Example: pysed.rmlinenumber(10, '/path/to/file.txt')
Example 'DRYRUN': pysed.rmlinenumber(10, '/path/to/file.txt', dryrun=True)
#This will dump the output to STDOUT instead of changing the input file.
"""
linelist = []
linecounter = 0
if isinstance(linenumber, int):
exit("'linenumber' argument must be an integer.") # depends on [control=['if'], data=[]]
with open(infile) as reader:
for item in reader:
linecounter = linecounter + 1
if linecounter != linenumber:
linelist.append(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['with'], data=['reader']]
if dryrun is False:
with open(infile, 'w') as writer:
writer.truncate()
for line in linelist:
writer.writelines(line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['writer']] # depends on [control=['if'], data=[]]
elif dryrun is True:
for line in linelist:
print(line, end='') # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
else:
exit("Unknown option specified to 'dryrun' argument,\n Usage: dryrun=<True|False>.") |
def get_label_mapping(train_labels):
    """
    Create the mapping from label to numeric label

    Ids are assigned in sorted-label order, so the mapping is deterministic
    for a given label set: the smallest label maps to 0.

    Args:
        train_labels: array-like of (possibly repeated) training labels.

    Returns:
        dict mapping each distinct label to its integer id.
    """
    # np.unique already returns its result sorted, so the extra np.sort
    # call in the original was redundant.
    sorted_labels = np.unique(train_labels)
    label_mapping = {label: i for i, label in enumerate(sorted_labels)}
    # %s formatting is applied lazily by the logging module; no need to
    # pre-stringify the dict.
    logging.info('Label mapping:%s', label_mapping)
    return label_mapping
constant[
Create the mapping from label to numeric label
]
variable[sorted_labels] assign[=] call[name[np].sort, parameter[call[name[np].unique, parameter[name[train_labels]]]]]
variable[label_mapping] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b26adff0>, <ast.Name object at 0x7da1b26aeec0>]]] in starred[call[name[enumerate], parameter[name[sorted_labels]]]] begin[:]
call[name[label_mapping]][name[label]] assign[=] name[i]
call[name[logging].info, parameter[constant[Label mapping:%s], call[name[format], parameter[name[label_mapping]]]]]
return[name[label_mapping]] | keyword[def] identifier[get_label_mapping] ( identifier[train_labels] ):
literal[string]
identifier[sorted_labels] = identifier[np] . identifier[sort] ( identifier[np] . identifier[unique] ( identifier[train_labels] ))
identifier[label_mapping] ={}
keyword[for] identifier[i] , identifier[label] keyword[in] identifier[enumerate] ( identifier[sorted_labels] ):
identifier[label_mapping] [ identifier[label] ]= identifier[i]
identifier[logging] . identifier[info] ( literal[string] , identifier[format] ( identifier[label_mapping] ))
keyword[return] identifier[label_mapping] | def get_label_mapping(train_labels):
"""
Create the mapping from label to numeric label
"""
sorted_labels = np.sort(np.unique(train_labels))
label_mapping = {}
for (i, label) in enumerate(sorted_labels):
label_mapping[label] = i # depends on [control=['for'], data=[]]
logging.info('Label mapping:%s', format(label_mapping))
return label_mapping |
def call(self, args, devnull=False):
"""Call other processes.
args - list of command args
devnull - whether to pipe stdout to /dev/null (or equivalent)
"""
if self.debug:
click.echo(subprocess.list2cmdline(args))
click.confirm('Continue?', default=True, abort=True)
try:
kwargs = {}
if devnull:
# Pipe to /dev/null (or equivalent).
kwargs['stderr'] = subprocess.STDOUT
kwargs['stdout'] = self.FNULL
ret_code = subprocess.call(args, **kwargs)
except subprocess.CalledProcessError:
return False
return ret_code | def function[call, parameter[self, args, devnull]]:
constant[Call other processes.
args - list of command args
devnull - whether to pipe stdout to /dev/null (or equivalent)
]
if name[self].debug begin[:]
call[name[click].echo, parameter[call[name[subprocess].list2cmdline, parameter[name[args]]]]]
call[name[click].confirm, parameter[constant[Continue?]]]
<ast.Try object at 0x7da20c7cb670>
return[name[ret_code]] | keyword[def] identifier[call] ( identifier[self] , identifier[args] , identifier[devnull] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[debug] :
identifier[click] . identifier[echo] ( identifier[subprocess] . identifier[list2cmdline] ( identifier[args] ))
identifier[click] . identifier[confirm] ( literal[string] , identifier[default] = keyword[True] , identifier[abort] = keyword[True] )
keyword[try] :
identifier[kwargs] ={}
keyword[if] identifier[devnull] :
identifier[kwargs] [ literal[string] ]= identifier[subprocess] . identifier[STDOUT]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[FNULL]
identifier[ret_code] = identifier[subprocess] . identifier[call] ( identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] :
keyword[return] keyword[False]
keyword[return] identifier[ret_code] | def call(self, args, devnull=False):
"""Call other processes.
args - list of command args
devnull - whether to pipe stdout to /dev/null (or equivalent)
"""
if self.debug:
click.echo(subprocess.list2cmdline(args))
click.confirm('Continue?', default=True, abort=True) # depends on [control=['if'], data=[]]
try:
kwargs = {}
if devnull:
# Pipe to /dev/null (or equivalent).
kwargs['stderr'] = subprocess.STDOUT
kwargs['stdout'] = self.FNULL # depends on [control=['if'], data=[]]
ret_code = subprocess.call(args, **kwargs) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError:
return False # depends on [control=['except'], data=[]]
return ret_code |
def compute_number_edges(function):
    """
    Compute the number of edges of the CFG
    Args:
        function (core.declarations.function.Function)
    Returns:
        int
    """
    # Each edge is counted exactly once, at its source node.
    return sum(len(node.sons) for node in function.nodes)
constant[
Compute the number of edges of the CFG
Args:
function (core.declarations.function.Function)
Returns:
int
]
variable[n] assign[=] constant[0]
for taget[name[node]] in starred[name[function].nodes] begin[:]
<ast.AugAssign object at 0x7da20c7c8970>
return[name[n]] | keyword[def] identifier[compute_number_edges] ( identifier[function] ):
literal[string]
identifier[n] = literal[int]
keyword[for] identifier[node] keyword[in] identifier[function] . identifier[nodes] :
identifier[n] += identifier[len] ( identifier[node] . identifier[sons] )
keyword[return] identifier[n] | def compute_number_edges(function):
"""
Compute the number of edges of the CFG
Args:
function (core.declarations.function.Function)
Returns:
int
"""
n = 0
for node in function.nodes:
n += len(node.sons) # depends on [control=['for'], data=['node']]
return n |
def expand_row(table_fields,fields,values):
    "helper for insert. turn (field_names, values) into the full-width, properly-ordered row"
    column_names = [field.name for field in table_fields]
    # Map each table-column index to the slot in 'values' that supplies it.
    slot_by_column = {column_names.index(name): slot
                      for slot, name in enumerate(fields)}
    row = []
    for column in range(len(table_fields)):
        slot = slot_by_column.get(column)
        row.append(Missing if slot is None else values[slot])
    return row
constant[helper for insert. turn (field_names, values) into the full-width, properly-ordered row]
variable[table_fieldnames] assign[=] <ast.ListComp object at 0x7da20c76f5b0>
variable[reverse_indexes] assign[=] <ast.DictComp object at 0x7da20c76ce50>
variable[indexes] assign[=] <ast.ListComp object at 0x7da20c76c8e0>
return[<ast.ListComp object at 0x7da20c76d300>] | keyword[def] identifier[expand_row] ( identifier[table_fields] , identifier[fields] , identifier[values] ):
literal[string]
identifier[table_fieldnames] =[ identifier[f] . identifier[name] keyword[for] identifier[f] keyword[in] identifier[table_fields] ]
identifier[reverse_indexes] ={ identifier[table_fieldnames] . identifier[index] ( identifier[f] ): identifier[i] keyword[for] identifier[i] , identifier[f] keyword[in] identifier[enumerate] ( identifier[fields] )}
identifier[indexes] =[ identifier[reverse_indexes] . identifier[get] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[table_fields] ))]
keyword[return] [( identifier[Missing] keyword[if] identifier[i] keyword[is] keyword[None] keyword[else] identifier[values] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[indexes] ] | def expand_row(table_fields, fields, values):
"""helper for insert. turn (field_names, values) into the full-width, properly-ordered row"""
table_fieldnames = [f.name for f in table_fields]
reverse_indexes = {table_fieldnames.index(f): i for (i, f) in enumerate(fields)}
indexes = [reverse_indexes.get(i) for i in range(len(table_fields))]
return [Missing if i is None else values[i] for i in indexes] |
def export_keys(output_path, stash, passphrase, backend):
    """Export all keys to a file
    """
    target_stash = _get_stash(backend, stash, passphrase)
    try:
        click.echo('Exporting stash to {0}...'.format(output_path))
        target_stash.export(output_path=output_path)
        click.echo('Export complete!')
    except GhostError as error:
        # Abort the CLI with the backend error as the exit message.
        sys.exit(error)
constant[Export all keys to a file
]
variable[stash] assign[=] call[name[_get_stash], parameter[name[backend], name[stash], name[passphrase]]]
<ast.Try object at 0x7da1b00c9240> | keyword[def] identifier[export_keys] ( identifier[output_path] , identifier[stash] , identifier[passphrase] , identifier[backend] ):
literal[string]
identifier[stash] = identifier[_get_stash] ( identifier[backend] , identifier[stash] , identifier[passphrase] )
keyword[try] :
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[output_path] ))
identifier[stash] . identifier[export] ( identifier[output_path] = identifier[output_path] )
identifier[click] . identifier[echo] ( literal[string] )
keyword[except] identifier[GhostError] keyword[as] identifier[ex] :
identifier[sys] . identifier[exit] ( identifier[ex] ) | def export_keys(output_path, stash, passphrase, backend):
"""Export all keys to a file
"""
stash = _get_stash(backend, stash, passphrase)
try:
click.echo('Exporting stash to {0}...'.format(output_path))
stash.export(output_path=output_path)
click.echo('Export complete!') # depends on [control=['try'], data=[]]
except GhostError as ex:
sys.exit(ex) # depends on [control=['except'], data=['ex']] |
def place(vertices_resources, nets, machine, constraints,
          random=default_random):
    """A random placer.

    This algorithm performs uniform-random placement of vertices (completely
    ignoring connectivity) and thus in the general case is likely to produce
    very poor quality placements. It exists primarily as a baseline comparison
    for placement quality and is probably of little value to most users.

    Parameters
    ----------
    random : :py:class:`random.Random`
        Defaults to ``import random`` but can be set to your own instance of
        :py:class:`random.Random` to allow you to control the seed and produce
        deterministic results. For results to be deterministic,
        vertices_resources must be supplied as an
        :py:class:`collections.OrderedDict`.

    Returns
    -------
    dict
        Mapping from each vertex to the chip location it was placed on.
    """
    # Within the algorithm we modify the resource availability values in the
    # machine to account for the effects of the current placement. As a result,
    # an internal copy of the structure must be made.
    machine = machine.copy()
    # {vertex: (x, y), ...} gives the location of all vertices, updated
    # throughout the function.
    placements = {}
    # Handle constraints
    vertices_resources, nets, constraints, substitutions = \
        apply_same_chip_constraints(vertices_resources, nets, constraints)
    for constraint in constraints:
        if isinstance(constraint, LocationConstraint):
            # Location constraints are handled by recording the set of fixed
            # vertex locations and subtracting their resources from the chips
            # they're allocated to.
            location = constraint.location
            if location not in machine:
                # BUG FIX: the original formatted the whole ``machine`` object
                # into this message; the requested location is what matters.
                raise InvalidConstraintError(
                    "Chip requested by {} unavailable".format(location))
            vertex = constraint.vertex
            # Record the constrained vertex's location
            placements[vertex] = location
            # Make sure the vertex fits at the requested location (updating the
            # resource availability after placement)
            resources = vertices_resources[vertex]
            machine[location] = subtract_resources(machine[location],
                                                   resources)
            if overallocated(machine[location]):
                raise InsufficientResourceError(
                    "Cannot meet {}".format(constraint))
        elif isinstance(constraint,  # pragma: no branch
                        ReserveResourceConstraint):
            apply_reserve_resource_constraint(machine, constraint)
    # The set of vertices which have not been constrained.
    movable_vertices = [v for v in vertices_resources
                        if v not in placements]
    locations = set(machine)
    for vertex in movable_vertices:
        # Keep choosing random chips until we find one where the vertex fits.
        while True:
            if len(locations) == 0:
                raise InsufficientResourceError(
                    "Ran out of chips while attempting to place vertex "
                    "{}".format(vertex))
            # COMPAT FIX: random.sample() no longer accepts sets on
            # Python 3.11+, so convert explicitly. Pre-3.11, sample()
            # converted the set internally in the same way, so RNG
            # consumption (and thus seeded determinism) is unchanged.
            location = random.sample(list(locations), 1)[0]
            resources_if_placed = subtract_resources(
                machine[location], vertices_resources[vertex])
            if overallocated(resources_if_placed):
                # The vertex won't fit on this chip, we'll assume it is full
                # and not try it in the future.
                locations.remove(location)
            else:
                # The vertex fits: record the resources consumed and move on to
                # the next vertex.
                placements[vertex] = location
                machine[location] = resources_if_placed
                break
    finalise_same_chip_constraints(substitutions, placements)
    return placements
constant[A random placer.
This algorithm performs uniform-random placement of vertices (completely
ignoring connectivty) and thus in the general case is likely to produce
very poor quality placements. It exists primarily as a baseline comparison
for placement quality and is probably of little value to most users.
Parameters
----------
random : :py:class:`random.Random`
Defaults to ``import random`` but can be set to your own instance of
:py:class:`random.Random` to allow you to control the seed and produce
deterministic results. For results to be deterministic,
vertices_resources must be supplied as an
:py:class:`collections.OrderedDict`.
]
variable[machine] assign[=] call[name[machine].copy, parameter[]]
variable[placements] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da1b1969210> assign[=] call[name[apply_same_chip_constraints], parameter[name[vertices_resources], name[nets], name[constraints]]]
for taget[name[constraint]] in starred[name[constraints]] begin[:]
if call[name[isinstance], parameter[name[constraint], name[LocationConstraint]]] begin[:]
variable[location] assign[=] name[constraint].location
if compare[name[location] <ast.NotIn object at 0x7da2590d7190> name[machine]] begin[:]
<ast.Raise object at 0x7da1b1969de0>
variable[vertex] assign[=] name[constraint].vertex
call[name[placements]][name[vertex]] assign[=] name[location]
variable[resources] assign[=] call[name[vertices_resources]][name[vertex]]
call[name[machine]][name[location]] assign[=] call[name[subtract_resources], parameter[call[name[machine]][name[location]], name[resources]]]
if call[name[overallocated], parameter[call[name[machine]][name[location]]]] begin[:]
<ast.Raise object at 0x7da1b196b9a0>
variable[movable_vertices] assign[=] <ast.ListComp object at 0x7da1b196b910>
variable[locations] assign[=] call[name[set], parameter[name[machine]]]
for taget[name[vertex]] in starred[name[movable_vertices]] begin[:]
while constant[True] begin[:]
if compare[call[name[len], parameter[name[locations]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b195c8e0>
variable[location] assign[=] call[call[name[random].sample, parameter[name[locations], constant[1]]]][constant[0]]
variable[resources_if_placed] assign[=] call[name[subtract_resources], parameter[call[name[machine]][name[location]], call[name[vertices_resources]][name[vertex]]]]
if call[name[overallocated], parameter[name[resources_if_placed]]] begin[:]
call[name[locations].remove, parameter[name[location]]]
call[name[finalise_same_chip_constraints], parameter[name[substitutions], name[placements]]]
return[name[placements]] | keyword[def] identifier[place] ( identifier[vertices_resources] , identifier[nets] , identifier[machine] , identifier[constraints] ,
identifier[random] = identifier[default_random] ):
literal[string]
identifier[machine] = identifier[machine] . identifier[copy] ()
identifier[placements] ={}
identifier[vertices_resources] , identifier[nets] , identifier[constraints] , identifier[substitutions] = identifier[apply_same_chip_constraints] ( identifier[vertices_resources] , identifier[nets] , identifier[constraints] )
keyword[for] identifier[constraint] keyword[in] identifier[constraints] :
keyword[if] identifier[isinstance] ( identifier[constraint] , identifier[LocationConstraint] ):
identifier[location] = identifier[constraint] . identifier[location]
keyword[if] identifier[location] keyword[not] keyword[in] identifier[machine] :
keyword[raise] identifier[InvalidConstraintError] (
literal[string] . identifier[format] ( identifier[machine] ))
identifier[vertex] = identifier[constraint] . identifier[vertex]
identifier[placements] [ identifier[vertex] ]= identifier[location]
identifier[resources] = identifier[vertices_resources] [ identifier[vertex] ]
identifier[machine] [ identifier[location] ]= identifier[subtract_resources] ( identifier[machine] [ identifier[location] ],
identifier[resources] )
keyword[if] identifier[overallocated] ( identifier[machine] [ identifier[location] ]):
keyword[raise] identifier[InsufficientResourceError] (
literal[string] . identifier[format] ( identifier[constraint] ))
keyword[elif] identifier[isinstance] ( identifier[constraint] ,
identifier[ReserveResourceConstraint] ):
identifier[apply_reserve_resource_constraint] ( identifier[machine] , identifier[constraint] )
identifier[movable_vertices] =[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[vertices_resources]
keyword[if] identifier[v] keyword[not] keyword[in] identifier[placements] ]
identifier[locations] = identifier[set] ( identifier[machine] )
keyword[for] identifier[vertex] keyword[in] identifier[movable_vertices] :
keyword[while] keyword[True] :
keyword[if] identifier[len] ( identifier[locations] )== literal[int] :
keyword[raise] identifier[InsufficientResourceError] (
literal[string]
literal[string] . identifier[format] ( identifier[vertex] ))
identifier[location] = identifier[random] . identifier[sample] ( identifier[locations] , literal[int] )[ literal[int] ]
identifier[resources_if_placed] = identifier[subtract_resources] (
identifier[machine] [ identifier[location] ], identifier[vertices_resources] [ identifier[vertex] ])
keyword[if] identifier[overallocated] ( identifier[resources_if_placed] ):
identifier[locations] . identifier[remove] ( identifier[location] )
keyword[else] :
identifier[placements] [ identifier[vertex] ]= identifier[location]
identifier[machine] [ identifier[location] ]= identifier[resources_if_placed]
keyword[break]
identifier[finalise_same_chip_constraints] ( identifier[substitutions] , identifier[placements] )
keyword[return] identifier[placements] | def place(vertices_resources, nets, machine, constraints, random=default_random):
"""A random placer.
This algorithm performs uniform-random placement of vertices (completely
ignoring connectivty) and thus in the general case is likely to produce
very poor quality placements. It exists primarily as a baseline comparison
for placement quality and is probably of little value to most users.
Parameters
----------
random : :py:class:`random.Random`
Defaults to ``import random`` but can be set to your own instance of
:py:class:`random.Random` to allow you to control the seed and produce
deterministic results. For results to be deterministic,
vertices_resources must be supplied as an
:py:class:`collections.OrderedDict`.
"""
# Within the algorithm we modify the resource availability values in the
# machine to account for the effects of the current placement. As a result,
# an internal copy of the structure must be made.
machine = machine.copy()
# {vertex: (x, y), ...} gives the location of all vertices, updated
# throughout the function.
placements = {}
# Handle constraints
(vertices_resources, nets, constraints, substitutions) = apply_same_chip_constraints(vertices_resources, nets, constraints)
for constraint in constraints:
if isinstance(constraint, LocationConstraint):
# Location constraints are handled by recording the set of fixed
# vertex locations and subtracting their resources from the chips
# they're allocated to.
location = constraint.location
if location not in machine:
raise InvalidConstraintError('Chip requested by {} unavailable'.format(machine)) # depends on [control=['if'], data=['machine']]
vertex = constraint.vertex
# Record the constrained vertex's location
placements[vertex] = location
# Make sure the vertex fits at the requested location (updating the
# resource availability after placement)
resources = vertices_resources[vertex]
machine[location] = subtract_resources(machine[location], resources)
if overallocated(machine[location]):
raise InsufficientResourceError('Cannot meet {}'.format(constraint)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(constraint, ReserveResourceConstraint): # pragma: no branch
apply_reserve_resource_constraint(machine, constraint) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['constraint']]
# The set of vertices which have not been constrained.
movable_vertices = [v for v in vertices_resources if v not in placements]
locations = set(machine)
for vertex in movable_vertices:
# Keep choosing random chips until we find one where the vertex fits.
while True:
if len(locations) == 0:
raise InsufficientResourceError('Ran out of chips while attempting to place vertex {}'.format(vertex)) # depends on [control=['if'], data=[]]
location = random.sample(locations, 1)[0]
resources_if_placed = subtract_resources(machine[location], vertices_resources[vertex])
if overallocated(resources_if_placed):
# The vertex won't fit on this chip, we'll assume it is full
# and not try it in the future.
locations.remove(location) # depends on [control=['if'], data=[]]
else:
# The vertex fits: record the resources consumed and move on to
# the next vertex.
placements[vertex] = location
machine[location] = resources_if_placed
break # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['vertex']]
finalise_same_chip_constraints(substitutions, placements)
return placements |
async def json(self, *, encoding: str=None,
loads: JSONDecoder=DEFAULT_JSON_DECODER,
content_type: Optional[str]='application/json') -> Any:
"""Read and decodes JSON response."""
if self._body is None:
await self.read()
if content_type:
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
if not _is_expected_content_type(ctype, content_type):
raise ContentTypeError(
self.request_info,
self.history,
message=('Attempt to decode JSON with '
'unexpected mimetype: %s' % ctype),
headers=self.headers)
if encoding is None:
encoding = self.get_encoding()
return loads(self._body.decode(encoding)) | <ast.AsyncFunctionDef object at 0x7da1b20c9210> | keyword[async] keyword[def] identifier[json] ( identifier[self] ,*, identifier[encoding] : identifier[str] = keyword[None] ,
identifier[loads] : identifier[JSONDecoder] = identifier[DEFAULT_JSON_DECODER] ,
identifier[content_type] : identifier[Optional] [ identifier[str] ]= literal[string] )-> identifier[Any] :
literal[string]
keyword[if] identifier[self] . identifier[_body] keyword[is] keyword[None] :
keyword[await] identifier[self] . identifier[read] ()
keyword[if] identifier[content_type] :
identifier[ctype] = identifier[self] . identifier[headers] . identifier[get] ( identifier[hdrs] . identifier[CONTENT_TYPE] , literal[string] ). identifier[lower] ()
keyword[if] keyword[not] identifier[_is_expected_content_type] ( identifier[ctype] , identifier[content_type] ):
keyword[raise] identifier[ContentTypeError] (
identifier[self] . identifier[request_info] ,
identifier[self] . identifier[history] ,
identifier[message] =( literal[string]
literal[string] % identifier[ctype] ),
identifier[headers] = identifier[self] . identifier[headers] )
keyword[if] identifier[encoding] keyword[is] keyword[None] :
identifier[encoding] = identifier[self] . identifier[get_encoding] ()
keyword[return] identifier[loads] ( identifier[self] . identifier[_body] . identifier[decode] ( identifier[encoding] )) | async def json(self, *, encoding: str=None, loads: JSONDecoder=DEFAULT_JSON_DECODER, content_type: Optional[str]='application/json') -> Any:
"""Read and decodes JSON response."""
if self._body is None:
await self.read() # depends on [control=['if'], data=[]]
if content_type:
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
if not _is_expected_content_type(ctype, content_type):
raise ContentTypeError(self.request_info, self.history, message='Attempt to decode JSON with unexpected mimetype: %s' % ctype, headers=self.headers) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if encoding is None:
encoding = self.get_encoding() # depends on [control=['if'], data=['encoding']]
return loads(self._body.decode(encoding)) |
def has_port_profile_to_delete(self, profile_name, device_id):
"""Returns True if port profile delete table containes PP."""
count = self.session.query(ucsm_model.PortProfileDelete).filter_by(
profile_id=profile_name, device_id=device_id).count()
return count != 0 | def function[has_port_profile_to_delete, parameter[self, profile_name, device_id]]:
constant[Returns True if port profile delete table containes PP.]
variable[count] assign[=] call[call[call[name[self].session.query, parameter[name[ucsm_model].PortProfileDelete]].filter_by, parameter[]].count, parameter[]]
return[compare[name[count] not_equal[!=] constant[0]]] | keyword[def] identifier[has_port_profile_to_delete] ( identifier[self] , identifier[profile_name] , identifier[device_id] ):
literal[string]
identifier[count] = identifier[self] . identifier[session] . identifier[query] ( identifier[ucsm_model] . identifier[PortProfileDelete] ). identifier[filter_by] (
identifier[profile_id] = identifier[profile_name] , identifier[device_id] = identifier[device_id] ). identifier[count] ()
keyword[return] identifier[count] != literal[int] | def has_port_profile_to_delete(self, profile_name, device_id):
"""Returns True if port profile delete table containes PP."""
count = self.session.query(ucsm_model.PortProfileDelete).filter_by(profile_id=profile_name, device_id=device_id).count()
return count != 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.