code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def print_start_command(self, command):
    '''Set print command

    Args:
        command: the type of command you desire (at most 20 characters).

    Returns:
        None

    Raises:
        RuntimeError: Command too long.
    '''
    size = len(command)
    if size > 20:
        raise RuntimeError('Command too long')
    # Encode the command length as two digit bytes (tens, then ones).
    # NOTE: floor division is required here -- under Python 3 `size / 10`
    # produces a float and chr() would raise TypeError.
    n1 = size // 10
    n2 = size % 10
    self.send('^PS' + chr(n1) + chr(n2) + command)
def function[print_start_command, parameter[self, command]]: constant[Set print command Args: command: the type of command you desire. Returns: None Raises: RuntimeError: Command too long. ] variable[size] assign[=] call[name[len], parameter[name[command]]] if compare[name[size] greater[>] constant[20]] begin[:] <ast.Raise object at 0x7da1b0a6c760> variable[n1] assign[=] binary_operation[name[size] / constant[10]] variable[n2] assign[=] binary_operation[name[size] <ast.Mod object at 0x7da2590d6920> constant[10]] call[name[self].send, parameter[binary_operation[binary_operation[binary_operation[constant[^PS] + call[name[chr], parameter[name[n1]]]] + call[name[chr], parameter[name[n2]]]] + name[command]]]]
keyword[def] identifier[print_start_command] ( identifier[self] , identifier[command] ): literal[string] identifier[size] = identifier[len] ( identifier[command] ) keyword[if] identifier[size] > literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[n1] = identifier[size] / literal[int] identifier[n2] = identifier[size] % literal[int] identifier[self] . identifier[send] ( literal[string] + identifier[chr] ( identifier[n1] )+ identifier[chr] ( identifier[n2] )+ identifier[command] )
def print_start_command(self, command): """Set print command Args: command: the type of command you desire. Returns: None Raises: RuntimeError: Command too long. """ size = len(command) if size > 20: raise RuntimeError('Command too long') # depends on [control=['if'], data=[]] n1 = size / 10 n2 = size % 10 self.send('^PS' + chr(n1) + chr(n2) + command)
def _api_call(self, endpoint, model=None, method=None, data=None, filters=None): """ Makes a call to the linode api. Data should only be given if the method is POST or PUT, and should be a dictionary """ if not self.token: raise RuntimeError("You do not have an API token!") if not method: raise ValueError("Method is required for API calls!") if model: endpoint = endpoint.format(**vars(model)) url = '{}{}'.format(self.base_url, endpoint) headers = { 'Authorization': "Bearer {}".format(self.token), 'Content-Type': 'application/json', 'User-Agent': self._user_agent, } if filters: headers['X-Filter'] = json.dumps(filters) body = None if data is not None: body = json.dumps(data) response = method(url, headers=headers, data=body) warning = response.headers.get('Warning', None) if warning: logger.warning('Received warning from server: {}'.format(warning)) if 399 < response.status_code < 600: j = None error_msg = '{}: '.format(response.status_code) try: j = response.json() if 'errors' in j.keys(): for e in j['errors']: error_msg += '{}; '.format(e['reason']) \ if 'reason' in e.keys() else '' except: pass raise ApiError(error_msg, status=response.status_code, json=j) if response.status_code != 204: j = response.json() else: j = None # handle no response body return j
def function[_api_call, parameter[self, endpoint, model, method, data, filters]]: constant[ Makes a call to the linode api. Data should only be given if the method is POST or PUT, and should be a dictionary ] if <ast.UnaryOp object at 0x7da18dc98670> begin[:] <ast.Raise object at 0x7da18dc990c0> if <ast.UnaryOp object at 0x7da18dc9bac0> begin[:] <ast.Raise object at 0x7da18dc99720> if name[model] begin[:] variable[endpoint] assign[=] call[name[endpoint].format, parameter[]] variable[url] assign[=] call[constant[{}{}].format, parameter[name[self].base_url, name[endpoint]]] variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9af50>, <ast.Constant object at 0x7da18dc9b5b0>, <ast.Constant object at 0x7da18dc9bfd0>], [<ast.Call object at 0x7da18dc9ab00>, <ast.Constant object at 0x7da18dc9a080>, <ast.Attribute object at 0x7da18dc98be0>]] if name[filters] begin[:] call[name[headers]][constant[X-Filter]] assign[=] call[name[json].dumps, parameter[name[filters]]] variable[body] assign[=] constant[None] if compare[name[data] is_not constant[None]] begin[:] variable[body] assign[=] call[name[json].dumps, parameter[name[data]]] variable[response] assign[=] call[name[method], parameter[name[url]]] variable[warning] assign[=] call[name[response].headers.get, parameter[constant[Warning], constant[None]]] if name[warning] begin[:] call[name[logger].warning, parameter[call[constant[Received warning from server: {}].format, parameter[name[warning]]]]] if compare[constant[399] less[<] name[response].status_code] begin[:] variable[j] assign[=] constant[None] variable[error_msg] assign[=] call[constant[{}: ].format, parameter[name[response].status_code]] <ast.Try object at 0x7da1b0f057e0> <ast.Raise object at 0x7da1b0f052d0> if compare[name[response].status_code not_equal[!=] constant[204]] begin[:] variable[j] assign[=] call[name[response].json, parameter[]] return[name[j]]
keyword[def] identifier[_api_call] ( identifier[self] , identifier[endpoint] , identifier[model] = keyword[None] , identifier[method] = keyword[None] , identifier[data] = keyword[None] , identifier[filters] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[token] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[if] keyword[not] identifier[method] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[model] : identifier[endpoint] = identifier[endpoint] . identifier[format] (** identifier[vars] ( identifier[model] )) identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[base_url] , identifier[endpoint] ) identifier[headers] ={ literal[string] : literal[string] . identifier[format] ( identifier[self] . identifier[token] ), literal[string] : literal[string] , literal[string] : identifier[self] . identifier[_user_agent] , } keyword[if] identifier[filters] : identifier[headers] [ literal[string] ]= identifier[json] . identifier[dumps] ( identifier[filters] ) identifier[body] = keyword[None] keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] : identifier[body] = identifier[json] . identifier[dumps] ( identifier[data] ) identifier[response] = identifier[method] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[data] = identifier[body] ) identifier[warning] = identifier[response] . identifier[headers] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[warning] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[warning] )) keyword[if] literal[int] < identifier[response] . identifier[status_code] < literal[int] : identifier[j] = keyword[None] identifier[error_msg] = literal[string] . identifier[format] ( identifier[response] . identifier[status_code] ) keyword[try] : identifier[j] = identifier[response] . 
identifier[json] () keyword[if] literal[string] keyword[in] identifier[j] . identifier[keys] (): keyword[for] identifier[e] keyword[in] identifier[j] [ literal[string] ]: identifier[error_msg] += literal[string] . identifier[format] ( identifier[e] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[e] . identifier[keys] () keyword[else] literal[string] keyword[except] : keyword[pass] keyword[raise] identifier[ApiError] ( identifier[error_msg] , identifier[status] = identifier[response] . identifier[status_code] , identifier[json] = identifier[j] ) keyword[if] identifier[response] . identifier[status_code] != literal[int] : identifier[j] = identifier[response] . identifier[json] () keyword[else] : identifier[j] = keyword[None] keyword[return] identifier[j]
def _api_call(self, endpoint, model=None, method=None, data=None, filters=None): """ Makes a call to the linode api. Data should only be given if the method is POST or PUT, and should be a dictionary """ if not self.token: raise RuntimeError('You do not have an API token!') # depends on [control=['if'], data=[]] if not method: raise ValueError('Method is required for API calls!') # depends on [control=['if'], data=[]] if model: endpoint = endpoint.format(**vars(model)) # depends on [control=['if'], data=[]] url = '{}{}'.format(self.base_url, endpoint) headers = {'Authorization': 'Bearer {}'.format(self.token), 'Content-Type': 'application/json', 'User-Agent': self._user_agent} if filters: headers['X-Filter'] = json.dumps(filters) # depends on [control=['if'], data=[]] body = None if data is not None: body = json.dumps(data) # depends on [control=['if'], data=['data']] response = method(url, headers=headers, data=body) warning = response.headers.get('Warning', None) if warning: logger.warning('Received warning from server: {}'.format(warning)) # depends on [control=['if'], data=[]] if 399 < response.status_code < 600: j = None error_msg = '{}: '.format(response.status_code) try: j = response.json() if 'errors' in j.keys(): for e in j['errors']: error_msg += '{}; '.format(e['reason']) if 'reason' in e.keys() else '' # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] raise ApiError(error_msg, status=response.status_code, json=j) # depends on [control=['if'], data=[]] if response.status_code != 204: j = response.json() # depends on [control=['if'], data=[]] else: j = None # handle no response body return j
def after_event(self, event):
    """Ensure all lines on a screen have proper width (:attr:`columns`).

    Extra characters are truncated, missing characters are filled
    with whitespace.

    :param str event: event name, for example ``"linefeed"``.
    """
    if event in ["prev_page", "next_page"]:
        for line in self.buffer.values():
            # Iterate over a snapshot of the keys: calling ``line.pop(x)``
            # while iterating ``line`` directly raises
            # ``RuntimeError: dictionary changed size during iteration``.
            for x in list(line):
                if x > self.columns:
                    line.pop(x)

    # If we're at the bottom of the history buffer and `DECTCEM`
    # mode is set -- show the cursor.
    self.cursor.hidden = not (
        self.history.position == self.history.size and
        mo.DECTCEM in self.mode
    )
def function[after_event, parameter[self, event]]: constant[Ensure all lines on a screen have proper width (:attr:`columns`). Extra characters are truncated, missing characters are filled with whitespace. :param str event: event name, for example ``"linefeed"``. ] if compare[name[event] in list[[<ast.Constant object at 0x7da1b07af760>, <ast.Constant object at 0x7da1b07af580>]]] begin[:] for taget[name[line]] in starred[call[name[self].buffer.values, parameter[]]] begin[:] for taget[name[x]] in starred[name[line]] begin[:] if compare[name[x] greater[>] name[self].columns] begin[:] call[name[line].pop, parameter[name[x]]] name[self].cursor.hidden assign[=] <ast.UnaryOp object at 0x7da1b067a980>
keyword[def] identifier[after_event] ( identifier[self] , identifier[event] ): literal[string] keyword[if] identifier[event] keyword[in] [ literal[string] , literal[string] ]: keyword[for] identifier[line] keyword[in] identifier[self] . identifier[buffer] . identifier[values] (): keyword[for] identifier[x] keyword[in] identifier[line] : keyword[if] identifier[x] > identifier[self] . identifier[columns] : identifier[line] . identifier[pop] ( identifier[x] ) identifier[self] . identifier[cursor] . identifier[hidden] = keyword[not] ( identifier[self] . identifier[history] . identifier[position] == identifier[self] . identifier[history] . identifier[size] keyword[and] identifier[mo] . identifier[DECTCEM] keyword[in] identifier[self] . identifier[mode] )
def after_event(self, event): """Ensure all lines on a screen have proper width (:attr:`columns`). Extra characters are truncated, missing characters are filled with whitespace. :param str event: event name, for example ``"linefeed"``. """ if event in ['prev_page', 'next_page']: for line in self.buffer.values(): for x in line: if x > self.columns: line.pop(x) # depends on [control=['if'], data=['x']] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]] # If we're at the bottom of the history buffer and `DECTCEM` # mode is set -- show the cursor. self.cursor.hidden = not (self.history.position == self.history.size and mo.DECTCEM in self.mode)
def get_bucket(bucket_name, include_created=None, flags=FLAGS.ALL ^ FLAGS.CREATED_DATE, **conn):
    """
    Orchestrates all the calls required to fully build out an S3 bucket.

    The returned dict contains (among others): "Arn", "Name", "Region",
    "Owner", "Grants", "GrantReferences", "LifecycleRules", "Logging",
    "Policy", "Tags", "Versioning", "Website", "Cors", "Notifications",
    "Acceleration", "Replication", "CreationDate",
    "AnalyticsConfigurations", "MetricsConfigurations",
    "InventoryConfigurations", and "_version" (9).

    NOTE: "GrantReferences" is an ephemeral field that is not guaranteed to
    be consistent -- do not base logic off of it.

    :param bucket_name: str bucket name
    :param include_created: legacy boolean param; folded into `flags`.
    :param flags: defaults to ALL fields except FLAGS.CREATED_DATE, since
        obtaining the creation date is slow and expensive.
    :param conn: dict with enough information to connect to the desired
        account; must at least have an 'assume_role' key.
    :return: dict containing a fully built out bucket, or
        {'Error': 'Unauthorized'} when the bucket region cannot be resolved.
    """
    # Fold the legacy include_created flag into the FLAGS bitmask.
    if isinstance(include_created, bool):
        if include_created:
            flags = flags | FLAGS.CREATED_DATE
        else:
            flags = flags & ~FLAGS.CREATED_DATE

    bucket_region = get_bucket_region(Bucket=bucket_name, **conn)
    if not bucket_region:
        # No region means we could not access the bucket at all.
        return {'Error': 'Unauthorized'}

    conn['region'] = bucket_region
    return registry.build_out(flags, bucket_name, **conn)
def function[get_bucket, parameter[bucket_name, include_created, flags]]: constant[ Orchestrates all the calls required to fully build out an S3 bucket in the following format: { "Arn": ..., "Name": ..., "Region": ..., "Owner": ..., "Grants": ..., "GrantReferences": ..., "LifecycleRules": ..., "Logging": ..., "Policy": ..., "Tags": ..., "Versioning": ..., "Website": ..., "Cors": ..., "Notifications": ..., "Acceleration": ..., "Replication": ..., "CreationDate": ..., "AnalyticsConfigurations": ..., "MetricsConfigurations": ..., "InventoryConfigurations": ..., "_version": 9 } NOTE: "GrantReferences" is an ephemeral field that is not guaranteed to be consistent -- do not base logic off of it :param include_created: legacy param moved to FLAGS. :param bucket_name: str bucket name :param flags: By default, set to ALL fields except for FLAGS.CREATED_DATE as obtaining that information is a slow and expensive process. :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out bucket. ] if compare[call[name[type], parameter[name[include_created]]] is name[bool]] begin[:] if name[include_created] begin[:] variable[flags] assign[=] binary_operation[name[flags] <ast.BitOr object at 0x7da2590d6aa0> name[FLAGS].CREATED_DATE] variable[region] assign[=] call[name[get_bucket_region], parameter[]] if <ast.UnaryOp object at 0x7da1b01c2140> begin[:] return[call[name[dict], parameter[]]] call[name[conn]][constant[region]] assign[=] name[region] return[call[name[registry].build_out, parameter[name[flags], name[bucket_name]]]]
keyword[def] identifier[get_bucket] ( identifier[bucket_name] , identifier[include_created] = keyword[None] , identifier[flags] = identifier[FLAGS] . identifier[ALL] ^ identifier[FLAGS] . identifier[CREATED_DATE] ,** identifier[conn] ): literal[string] keyword[if] identifier[type] ( identifier[include_created] ) keyword[is] identifier[bool] : keyword[if] identifier[include_created] : identifier[flags] = identifier[flags] | identifier[FLAGS] . identifier[CREATED_DATE] keyword[else] : identifier[flags] = identifier[flags] &~ identifier[FLAGS] . identifier[CREATED_DATE] identifier[region] = identifier[get_bucket_region] ( identifier[Bucket] = identifier[bucket_name] ,** identifier[conn] ) keyword[if] keyword[not] identifier[region] : keyword[return] identifier[dict] ( identifier[Error] = literal[string] ) identifier[conn] [ literal[string] ]= identifier[region] keyword[return] identifier[registry] . identifier[build_out] ( identifier[flags] , identifier[bucket_name] ,** identifier[conn] )
def get_bucket(bucket_name, include_created=None, flags=FLAGS.ALL ^ FLAGS.CREATED_DATE, **conn): """ Orchestrates all the calls required to fully build out an S3 bucket in the following format: { "Arn": ..., "Name": ..., "Region": ..., "Owner": ..., "Grants": ..., "GrantReferences": ..., "LifecycleRules": ..., "Logging": ..., "Policy": ..., "Tags": ..., "Versioning": ..., "Website": ..., "Cors": ..., "Notifications": ..., "Acceleration": ..., "Replication": ..., "CreationDate": ..., "AnalyticsConfigurations": ..., "MetricsConfigurations": ..., "InventoryConfigurations": ..., "_version": 9 } NOTE: "GrantReferences" is an ephemeral field that is not guaranteed to be consistent -- do not base logic off of it :param include_created: legacy param moved to FLAGS. :param bucket_name: str bucket name :param flags: By default, set to ALL fields except for FLAGS.CREATED_DATE as obtaining that information is a slow and expensive process. :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out bucket. """ if type(include_created) is bool: # coerce the legacy param "include_created" into the flags param. if include_created: flags = flags | FLAGS.CREATED_DATE # depends on [control=['if'], data=[]] else: flags = flags & ~FLAGS.CREATED_DATE # depends on [control=['if'], data=[]] region = get_bucket_region(Bucket=bucket_name, **conn) if not region: return dict(Error='Unauthorized') # depends on [control=['if'], data=[]] conn['region'] = region return registry.build_out(flags, bucket_name, **conn)
def hosts(self, **kwargs):
    """
    Convenience wrapper around listHosts(...) for this channel ID.

    :param **kwargs: keyword arguments to the listHosts RPC.
    :returns: deferred that when fired returns a list of hosts (dicts).
    """
    # Always scope the RPC to this channel, overriding any caller-supplied
    # channelID.
    params = dict(kwargs, channelID=self.id)
    return self.connection.listHosts(**params)
def function[hosts, parameter[self]]: constant[ Convenience wrapper around listHosts(...) for this channel ID. :param **kwargs: keyword arguments to the listHosts RPC. :returns: deferred that when fired returns a list of hosts (dicts). ] call[name[kwargs]][constant[channelID]] assign[=] name[self].id return[call[name[self].connection.listHosts, parameter[]]]
keyword[def] identifier[hosts] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[id] keyword[return] identifier[self] . identifier[connection] . identifier[listHosts] (** identifier[kwargs] )
def hosts(self, **kwargs): """ Convenience wrapper around listHosts(...) for this channel ID. :param **kwargs: keyword arguments to the listHosts RPC. :returns: deferred that when fired returns a list of hosts (dicts). """ kwargs['channelID'] = self.id return self.connection.listHosts(**kwargs)
def normalize_object_slot(self, value=_nothing, prop=None, obj=None):
    """This hook wraps ``normalize_slot``, and performs clean-ups which
    require access to the object the slot is in as well as the value.
    """
    if value is not _nothing and hasattr(prop, "compare_as"):
        # compare_as_info describes how to call compare_as: whether it is a
        # bound-style method (needs the object) and how many value args it
        # takes.
        is_method, value_arity = getattr(prop, "compare_as_info", (False, 1))
        call_args = []
        if is_method:
            call_args.append(obj)
        if value_arity:
            call_args.append(value)
        value = prop.compare_as(*call_args)
    return self.normalize_slot(value, prop)
def function[normalize_object_slot, parameter[self, value, prop, obj]]: constant[This hook wraps ``normalize_slot``, and performs clean-ups which require access to the object the slot is in as well as the value. ] if <ast.BoolOp object at 0x7da20c6c78e0> begin[:] <ast.Tuple object at 0x7da18eb56260> assign[=] call[name[getattr], parameter[name[prop], constant[compare_as_info], tuple[[<ast.Constant object at 0x7da18eb57fa0>, <ast.Constant object at 0x7da18eb55f00>]]]] variable[args] assign[=] list[[]] if name[method] begin[:] call[name[args].append, parameter[name[obj]]] if name[nargs] begin[:] call[name[args].append, parameter[name[value]]] variable[value] assign[=] call[name[prop].compare_as, parameter[<ast.Starred object at 0x7da18eb56b90>]] return[call[name[self].normalize_slot, parameter[name[value], name[prop]]]]
keyword[def] identifier[normalize_object_slot] ( identifier[self] , identifier[value] = identifier[_nothing] , identifier[prop] = keyword[None] , identifier[obj] = keyword[None] ): literal[string] keyword[if] identifier[value] keyword[is] keyword[not] identifier[_nothing] keyword[and] identifier[hasattr] ( identifier[prop] , literal[string] ): identifier[method] , identifier[nargs] = identifier[getattr] ( identifier[prop] , literal[string] ,( keyword[False] , literal[int] )) identifier[args] =[] keyword[if] identifier[method] : identifier[args] . identifier[append] ( identifier[obj] ) keyword[if] identifier[nargs] : identifier[args] . identifier[append] ( identifier[value] ) identifier[value] = identifier[prop] . identifier[compare_as] (* identifier[args] ) keyword[return] identifier[self] . identifier[normalize_slot] ( identifier[value] , identifier[prop] )
def normalize_object_slot(self, value=_nothing, prop=None, obj=None): """This hook wraps ``normalize_slot``, and performs clean-ups which require access to the object the slot is in as well as the value. """ if value is not _nothing and hasattr(prop, 'compare_as'): (method, nargs) = getattr(prop, 'compare_as_info', (False, 1)) args = [] if method: args.append(obj) # depends on [control=['if'], data=[]] if nargs: args.append(value) # depends on [control=['if'], data=[]] value = prop.compare_as(*args) # depends on [control=['if'], data=[]] return self.normalize_slot(value, prop)
def pts_shift(pts=None, shift=None):
    '''Return given points shifted in N dimensions.

    Args:
        pts: non-empty list of equal-length tuples of floats (the points).
        shift: list of floats, one offset per point dimension; defaults to
            a 2-D zero shift.

    Returns:
        List with each point passed through ``pt_shift``.

    Raises:
        AssertionError: if any input fails validation.
    '''
    # Mutable default arguments ([] / [0.0, 0.0]) are shared across calls;
    # use None sentinels and build fresh objects each call instead.
    if pts is None:
        pts = []
    if shift is None:
        shift = [0.0, 0.0]
    assert isinstance(pts, list) and len(pts) > 0
    l_pt_prev = None
    for pt in pts:
        assert isinstance(pt, tuple)
        l_pt = len(pt)
        assert l_pt > 1
        for i in pt:
            assert isinstance(i, float)
        # All points must share the same dimensionality.
        if l_pt_prev is not None:
            assert l_pt == l_pt_prev
        l_pt_prev = l_pt
    assert isinstance(shift, list)
    l_sh = len(shift)
    # The shift vector must match the points' dimensionality.
    assert l_sh == l_pt
    for i in shift:
        assert isinstance(i, float)
    return [pt_shift(pt, shift) for pt in pts]
def function[pts_shift, parameter[pts, shift]]: constant[Return given points shifted in N dimensions. ] assert[<ast.BoolOp object at 0x7da2054a5810>] variable[l_pt_prev] assign[=] constant[None] for taget[name[pt]] in starred[name[pts]] begin[:] assert[call[name[isinstance], parameter[name[pt], name[tuple]]]] variable[l_pt] assign[=] call[name[len], parameter[name[pt]]] assert[compare[name[l_pt] greater[>] constant[1]]] for taget[name[i]] in starred[name[pt]] begin[:] assert[call[name[isinstance], parameter[name[i], name[float]]]] if compare[name[l_pt_prev] is_not constant[None]] begin[:] assert[compare[name[l_pt] equal[==] name[l_pt_prev]]] variable[l_pt_prev] assign[=] name[l_pt] assert[call[name[isinstance], parameter[name[shift], name[list]]]] variable[l_sh] assign[=] call[name[len], parameter[name[shift]]] assert[compare[name[l_sh] equal[==] name[l_pt]]] for taget[name[i]] in starred[name[shift]] begin[:] assert[call[name[isinstance], parameter[name[i], name[float]]]] return[<ast.ListComp object at 0x7da1b23467d0>]
keyword[def] identifier[pts_shift] ( identifier[pts] =[], identifier[shift] =[ literal[int] , literal[int] ]): literal[string] keyword[assert] identifier[isinstance] ( identifier[pts] , identifier[list] ) keyword[and] identifier[len] ( identifier[pts] )> literal[int] identifier[l_pt_prev] = keyword[None] keyword[for] identifier[pt] keyword[in] identifier[pts] : keyword[assert] identifier[isinstance] ( identifier[pt] , identifier[tuple] ) identifier[l_pt] = identifier[len] ( identifier[pt] ) keyword[assert] identifier[l_pt] > literal[int] keyword[for] identifier[i] keyword[in] identifier[pt] : keyword[assert] identifier[isinstance] ( identifier[i] , identifier[float] ) keyword[if] identifier[l_pt_prev] keyword[is] keyword[not] keyword[None] : keyword[assert] identifier[l_pt] == identifier[l_pt_prev] identifier[l_pt_prev] = identifier[l_pt] keyword[assert] identifier[isinstance] ( identifier[shift] , identifier[list] ) identifier[l_sh] = identifier[len] ( identifier[shift] ) keyword[assert] identifier[l_sh] == identifier[l_pt] keyword[for] identifier[i] keyword[in] identifier[shift] : keyword[assert] identifier[isinstance] ( identifier[i] , identifier[float] ) keyword[return] [ identifier[pt_shift] ( identifier[pt] , identifier[shift] ) keyword[for] identifier[pt] keyword[in] identifier[pts] ]
def pts_shift(pts=[], shift=[0.0, 0.0]): """Return given points shifted in N dimensions. """ assert isinstance(pts, list) and len(pts) > 0 l_pt_prev = None for pt in pts: assert isinstance(pt, tuple) l_pt = len(pt) assert l_pt > 1 for i in pt: assert isinstance(i, float) # depends on [control=['for'], data=['i']] if l_pt_prev is not None: assert l_pt == l_pt_prev # depends on [control=['if'], data=['l_pt_prev']] l_pt_prev = l_pt # depends on [control=['for'], data=['pt']] assert isinstance(shift, list) l_sh = len(shift) assert l_sh == l_pt for i in shift: assert isinstance(i, float) # depends on [control=['for'], data=['i']] return [pt_shift(pt, shift) for pt in pts]
def get_groups_of_user(self, user_id, **kwargs):  # noqa: E501
    """Get groups of the user.  # noqa: E501

    An endpoint for retrieving groups of the user.
    **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_groups_of_user(user_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str user_id: The ID of the user whose details are retrieved. (required)
    :param int limit: The number of results to return (2-1000), default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or DESC; by default ASC
    :param str include: Comma separated additional data to return. Currently supported: total_count
    :return: GroupSummaryList
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous call: the caller receives the request thread.
        return self.get_groups_of_user_with_http_info(user_id, **kwargs)  # noqa: E501
    result = self.get_groups_of_user_with_http_info(user_id, **kwargs)  # noqa: E501
    return result
def function[get_groups_of_user, parameter[self, user_id]]: constant[Get groups of the user. # noqa: E501 An endpoint for retrieving groups of the user. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_groups_of_user(user_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str user_id: The ID of the user whose details are retrieved. (required) :param int limit: The number of results to return (2-1000), default is 50. :param str after: The entity ID to fetch after the given one. :param str order: The order of the records based on creation time, ASC or DESC; by default ASC :param str include: Comma separated additional data to return. Currently supported: total_count :return: GroupSummaryList If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:] return[call[name[self].get_groups_of_user_with_http_info, parameter[name[user_id]]]]
keyword[def] identifier[get_groups_of_user] ( identifier[self] , identifier[user_id] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[get_groups_of_user_with_http_info] ( identifier[user_id] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[get_groups_of_user_with_http_info] ( identifier[user_id] ,** identifier[kwargs] ) keyword[return] identifier[data]
def get_groups_of_user(self, user_id, **kwargs): # noqa: E501 "Get groups of the user. # noqa: E501\n\n An endpoint for retrieving groups of the user. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.get_groups_of_user(user_id, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str user_id: The ID of the user whose details are retrieved. (required)\n :param int limit: The number of results to return (2-1000), default is 50.\n :param str after: The entity ID to fetch after the given one.\n :param str order: The order of the records based on creation time, ASC or DESC; by default ASC\n :param str include: Comma separated additional data to return. Currently supported: total_count\n :return: GroupSummaryList\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_groups_of_user_with_http_info(user_id, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.get_groups_of_user_with_http_info(user_id, **kwargs) # noqa: E501 return data
def _decompose_from_posterior_marginals(
    model, posterior_means, posterior_covs, parameter_samples):
  """Utility method to decompose a joint posterior into components.

  Args:
    model: `tfp.sts.Sum` instance defining an additive STS model.
    posterior_means: float `Tensor` of shape `concat(
      [[num_posterior_draws], batch_shape, num_timesteps, latent_size])`
      representing the posterior mean over latents in an
      `AdditiveStateSpaceModel`.
    posterior_covs: float `Tensor` of shape `concat(
      [[num_posterior_draws], batch_shape, num_timesteps, latent_size,
      latent_size])` representing the posterior marginal covariances over
      latents in an `AdditiveStateSpaceModel`.
    parameter_samples: Python `list` of `Tensors` representing posterior
      samples of model parameters, with shapes `[concat([
      [num_posterior_draws], param.prior.batch_shape,
      param.prior.event_shape]) for param in model.parameters]`. This may
      optionally also be a map (Python `dict`) of parameter names to
      `Tensor` values.

  Returns:
    component_dists: A `collections.OrderedDict` instance mapping
      component StructuralTimeSeries instances (elements of
      `model.components`) to `tfd.Distribution` instances representing the
      posterior marginal distributions on the process modeled by each
      component. Each distribution has batch shape matching that of
      `posterior_means`/`posterior_covs`, and event shape of
      `[num_timesteps]`.
  """
  # Duck-type check that `model` is an additive (Sum) model: only those
  # expose a `components` attribute.
  try:
    model.components
  except AttributeError:
    raise ValueError('Model decomposed into components must be an instance of'
                     '`tfp.sts.Sum` (passed model {})'.format(model))

  with tf.compat.v1.name_scope('decompose_from_posterior_marginals'):

    # Extract the component means/covs from the joint latent posterior.
    # The additive model's latent state is the concatenation of the
    # components' latents along the last axis, so split by each
    # component's latent size.
    latent_sizes = [component.latent_size for component in model.components]
    component_means = tf.split(posterior_means, latent_sizes, axis=-1)
    component_covs = _split_covariance_into_marginals(
        posterior_covs, latent_sizes)

    # Instantiate per-component state space models, and use them to push the
    # posterior means/covs through the observation model for each component.
    num_timesteps = dist_util.prefer_static_value(
        tf.shape(input=posterior_means))[-2]
    component_ssms = model.make_component_state_space_models(
        num_timesteps=num_timesteps,
        param_vals=parameter_samples)
    component_predictive_dists = collections.OrderedDict()
    for (component, component_ssm,
         component_mean, component_cov) in zip(model.components,
                                               component_ssms,
                                               component_means,
                                               component_covs):
      component_obs_mean, component_obs_cov = (
          component_ssm.latents_to_observations(
              latent_means=component_mean,
              latent_covs=component_cov))

      # Using the observation means and covs, build a mixture distribution
      # that integrates over the posterior draws.
      # NOTE(review): indexing `[..., 0]` assumes a scalar (size-1)
      # observation dimension -- presumably univariate time series; confirm
      # against the calling code.
      component_predictive_dists[component] = sts_util.mix_over_posterior_draws(
          means=component_obs_mean[..., 0],
          variances=component_obs_cov[..., 0, 0])
    return component_predictive_dists
def function[_decompose_from_posterior_marginals, parameter[model, posterior_means, posterior_covs, parameter_samples]]: constant[Utility method to decompose a joint posterior into components. Args: model: `tfp.sts.Sum` instance defining an additive STS model. posterior_means: float `Tensor` of shape `concat( [[num_posterior_draws], batch_shape, num_timesteps, latent_size])` representing the posterior mean over latents in an `AdditiveStateSpaceModel`. posterior_covs: float `Tensor` of shape `concat( [[num_posterior_draws], batch_shape, num_timesteps, latent_size, latent_size])` representing the posterior marginal covariances over latents in an `AdditiveStateSpaceModel`. parameter_samples: Python `list` of `Tensors` representing posterior samples of model parameters, with shapes `[concat([ [num_posterior_draws], param.prior.batch_shape, param.prior.event_shape]) for param in model.parameters]`. This may optionally also be a map (Python `dict`) of parameter names to `Tensor` values. Returns: component_dists: A `collections.OrderedDict` instance mapping component StructuralTimeSeries instances (elements of `model.components`) to `tfd.Distribution` instances representing the posterior marginal distributions on the process modeled by each component. Each distribution has batch shape matching that of `posterior_means`/`posterior_covs`, and event shape of `[num_timesteps]`. 
] <ast.Try object at 0x7da1b03e2bf0> with call[name[tf].compat.v1.name_scope, parameter[constant[decompose_from_posterior_marginals]]] begin[:] variable[latent_sizes] assign[=] <ast.ListComp object at 0x7da1b03e1150> variable[component_means] assign[=] call[name[tf].split, parameter[name[posterior_means], name[latent_sizes]]] variable[component_covs] assign[=] call[name[_split_covariance_into_marginals], parameter[name[posterior_covs], name[latent_sizes]]] variable[num_timesteps] assign[=] call[call[name[dist_util].prefer_static_value, parameter[call[name[tf].shape, parameter[]]]]][<ast.UnaryOp object at 0x7da1b03e1a80>] variable[component_ssms] assign[=] call[name[model].make_component_state_space_models, parameter[]] variable[component_predictive_dists] assign[=] call[name[collections].OrderedDict, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b03e09d0>, <ast.Name object at 0x7da1b03e2ec0>, <ast.Name object at 0x7da1b03e2e90>, <ast.Name object at 0x7da1b03e2500>]]] in starred[call[name[zip], parameter[name[model].components, name[component_ssms], name[component_means], name[component_covs]]]] begin[:] <ast.Tuple object at 0x7da1b03e34c0> assign[=] call[name[component_ssm].latents_to_observations, parameter[]] call[name[component_predictive_dists]][name[component]] assign[=] call[name[sts_util].mix_over_posterior_draws, parameter[]] return[name[component_predictive_dists]]
keyword[def] identifier[_decompose_from_posterior_marginals] ( identifier[model] , identifier[posterior_means] , identifier[posterior_covs] , identifier[parameter_samples] ): literal[string] keyword[try] : identifier[model] . identifier[components] keyword[except] identifier[AttributeError] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[model] )) keyword[with] identifier[tf] . identifier[compat] . identifier[v1] . identifier[name_scope] ( literal[string] ): identifier[latent_sizes] =[ identifier[component] . identifier[latent_size] keyword[for] identifier[component] keyword[in] identifier[model] . identifier[components] ] identifier[component_means] = identifier[tf] . identifier[split] ( identifier[posterior_means] , identifier[latent_sizes] , identifier[axis] =- literal[int] ) identifier[component_covs] = identifier[_split_covariance_into_marginals] ( identifier[posterior_covs] , identifier[latent_sizes] ) identifier[num_timesteps] = identifier[dist_util] . identifier[prefer_static_value] ( identifier[tf] . identifier[shape] ( identifier[input] = identifier[posterior_means] ))[- literal[int] ] identifier[component_ssms] = identifier[model] . identifier[make_component_state_space_models] ( identifier[num_timesteps] = identifier[num_timesteps] , identifier[param_vals] = identifier[parameter_samples] ) identifier[component_predictive_dists] = identifier[collections] . identifier[OrderedDict] () keyword[for] ( identifier[component] , identifier[component_ssm] , identifier[component_mean] , identifier[component_cov] ) keyword[in] identifier[zip] ( identifier[model] . identifier[components] , identifier[component_ssms] , identifier[component_means] , identifier[component_covs] ): identifier[component_obs_mean] , identifier[component_obs_cov] =( identifier[component_ssm] . 
identifier[latents_to_observations] ( identifier[latent_means] = identifier[component_mean] , identifier[latent_covs] = identifier[component_cov] )) identifier[component_predictive_dists] [ identifier[component] ]= identifier[sts_util] . identifier[mix_over_posterior_draws] ( identifier[means] = identifier[component_obs_mean] [..., literal[int] ], identifier[variances] = identifier[component_obs_cov] [..., literal[int] , literal[int] ]) keyword[return] identifier[component_predictive_dists]
def _decompose_from_posterior_marginals(model, posterior_means, posterior_covs, parameter_samples): """Utility method to decompose a joint posterior into components. Args: model: `tfp.sts.Sum` instance defining an additive STS model. posterior_means: float `Tensor` of shape `concat( [[num_posterior_draws], batch_shape, num_timesteps, latent_size])` representing the posterior mean over latents in an `AdditiveStateSpaceModel`. posterior_covs: float `Tensor` of shape `concat( [[num_posterior_draws], batch_shape, num_timesteps, latent_size, latent_size])` representing the posterior marginal covariances over latents in an `AdditiveStateSpaceModel`. parameter_samples: Python `list` of `Tensors` representing posterior samples of model parameters, with shapes `[concat([ [num_posterior_draws], param.prior.batch_shape, param.prior.event_shape]) for param in model.parameters]`. This may optionally also be a map (Python `dict`) of parameter names to `Tensor` values. Returns: component_dists: A `collections.OrderedDict` instance mapping component StructuralTimeSeries instances (elements of `model.components`) to `tfd.Distribution` instances representing the posterior marginal distributions on the process modeled by each component. Each distribution has batch shape matching that of `posterior_means`/`posterior_covs`, and event shape of `[num_timesteps]`. """ try: model.components # depends on [control=['try'], data=[]] except AttributeError: raise ValueError('Model decomposed into components must be an instance of`tfp.sts.Sum` (passed model {})'.format(model)) # depends on [control=['except'], data=[]] with tf.compat.v1.name_scope('decompose_from_posterior_marginals'): # Extract the component means/covs from the joint latent posterior. 
latent_sizes = [component.latent_size for component in model.components] component_means = tf.split(posterior_means, latent_sizes, axis=-1) component_covs = _split_covariance_into_marginals(posterior_covs, latent_sizes) # Instantiate per-component state space models, and use them to push the # posterior means/covs through the observation model for each component. num_timesteps = dist_util.prefer_static_value(tf.shape(input=posterior_means))[-2] component_ssms = model.make_component_state_space_models(num_timesteps=num_timesteps, param_vals=parameter_samples) component_predictive_dists = collections.OrderedDict() for (component, component_ssm, component_mean, component_cov) in zip(model.components, component_ssms, component_means, component_covs): (component_obs_mean, component_obs_cov) = component_ssm.latents_to_observations(latent_means=component_mean, latent_covs=component_cov) # Using the observation means and covs, build a mixture distribution # that integrates over the posterior draws. component_predictive_dists[component] = sts_util.mix_over_posterior_draws(means=component_obs_mean[..., 0], variances=component_obs_cov[..., 0, 0]) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]] return component_predictive_dists
def make_simple():
    """Build a L{SimpleAuthenticator} from the coilmq configuration.

    The path to the credentials file is read from the
    ``auth.simple.file`` option of the ``coilmq`` section.

    @return: The configured L{SimpleAuthenticator}
    @rtype: L{SimpleAuthenticator}

    @raise ConfigError: If there is a configuration error.
    """
    authfile = config.get('coilmq', 'auth.simple.file')
    if not authfile:
        raise ConfigError('Missing configuration parameter: auth.simple.file')
    authenticator = SimpleAuthenticator()
    authenticator.from_configfile(authfile)
    return authenticator
def function[make_simple, parameter[]]: constant[ Create a L{SimpleAuthenticator} instance using values read from coilmq configuration. @return: The configured L{SimpleAuthenticator} @rtype: L{SimpleAuthenticator} @raise ConfigError: If there is a configuration error. ] variable[authfile] assign[=] call[name[config].get, parameter[constant[coilmq], constant[auth.simple.file]]] if <ast.UnaryOp object at 0x7da1b19395a0> begin[:] <ast.Raise object at 0x7da1b1938490> variable[sa] assign[=] call[name[SimpleAuthenticator], parameter[]] call[name[sa].from_configfile, parameter[name[authfile]]] return[name[sa]]
keyword[def] identifier[make_simple] (): literal[string] identifier[authfile] = identifier[config] . identifier[get] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[authfile] : keyword[raise] identifier[ConfigError] ( literal[string] ) identifier[sa] = identifier[SimpleAuthenticator] () identifier[sa] . identifier[from_configfile] ( identifier[authfile] ) keyword[return] identifier[sa]
def make_simple(): """ Create a L{SimpleAuthenticator} instance using values read from coilmq configuration. @return: The configured L{SimpleAuthenticator} @rtype: L{SimpleAuthenticator} @raise ConfigError: If there is a configuration error. """ authfile = config.get('coilmq', 'auth.simple.file') if not authfile: raise ConfigError('Missing configuration parameter: auth.simple.file') # depends on [control=['if'], data=[]] sa = SimpleAuthenticator() sa.from_configfile(authfile) return sa
def netconf_state_sessions_session_login_time(self, **kwargs):
    """Build the netconf-state/sessions/session/login-time config tree.

    Keyword Args:
        session_id: key for the session list entry.
        login_time: value for the login-time leaf.
        callback: optional dispatcher; defaults to ``self._callback``.
    """
    root = ET.Element("config")
    monitoring = ET.SubElement(
        root, "netconf-state",
        xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
    session_node = ET.SubElement(
        ET.SubElement(monitoring, "sessions"), "session")
    key_leaf = ET.SubElement(session_node, "session-id")
    key_leaf.text = kwargs.pop('session_id')
    time_leaf = ET.SubElement(session_node, "login-time")
    time_leaf.text = kwargs.pop('login_time')
    dispatch = kwargs.pop('callback', self._callback)
    return dispatch(root)
def function[netconf_state_sessions_session_login_time, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[netconf_state] assign[=] call[name[ET].SubElement, parameter[name[config], constant[netconf-state]]] variable[sessions] assign[=] call[name[ET].SubElement, parameter[name[netconf_state], constant[sessions]]] variable[session] assign[=] call[name[ET].SubElement, parameter[name[sessions], constant[session]]] variable[session_id_key] assign[=] call[name[ET].SubElement, parameter[name[session], constant[session-id]]] name[session_id_key].text assign[=] call[name[kwargs].pop, parameter[constant[session_id]]] variable[login_time] assign[=] call[name[ET].SubElement, parameter[name[session], constant[login-time]]] name[login_time].text assign[=] call[name[kwargs].pop, parameter[constant[login_time]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[netconf_state_sessions_session_login_time] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[netconf_state] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[sessions] = identifier[ET] . identifier[SubElement] ( identifier[netconf_state] , literal[string] ) identifier[session] = identifier[ET] . identifier[SubElement] ( identifier[sessions] , literal[string] ) identifier[session_id_key] = identifier[ET] . identifier[SubElement] ( identifier[session] , literal[string] ) identifier[session_id_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[login_time] = identifier[ET] . identifier[SubElement] ( identifier[session] , literal[string] ) identifier[login_time] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def netconf_state_sessions_session_login_time(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') netconf_state = ET.SubElement(config, 'netconf-state', xmlns='urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring') sessions = ET.SubElement(netconf_state, 'sessions') session = ET.SubElement(sessions, 'session') session_id_key = ET.SubElement(session, 'session-id') session_id_key.text = kwargs.pop('session_id') login_time = ET.SubElement(session, 'login-time') login_time.text = kwargs.pop('login_time') callback = kwargs.pop('callback', self._callback) return callback(config)
def NewFromContent(cls,
                   content,
                   urn,
                   chunk_size=1024,
                   token=None,
                   private_key=None,
                   public_key=None):
  """Alternate constructor for GRRSignedBlob.

  Creates a GRRSignedBlob from a content string by chunking it and signing
  each chunk.

  Args:
    content: The data to stored in the GRRSignedBlob.
    urn: The AFF4 URN to create.
    chunk_size: Data will be chunked into this size (each chunk is
      individually signed.
    token: The ACL Token.
    private_key: An rdf_crypto.RSAPrivateKey() instance.
    public_key: An rdf_crypto.RSAPublicKey() instance.

  Returns:
    the URN of the new object written.
  """
  # Remove any pre-existing object at this URN before writing.
  aff4.FACTORY.Delete(urn, token=token)
  with data_store.DB.GetMutationPool() as pool:
    with aff4.FACTORY.Create(
        urn, cls, mode="w", mutation_pool=pool, token=token) as fd:
      offset = 0
      total = len(content)
      while offset < total:
        # Each chunk carries its own signature so it can be verified
        # independently of the others.
        signed_chunk = rdf_crypto.SignedBlob()
        signed_chunk.Sign(content[offset:offset + chunk_size], private_key,
                          public_key)
        fd.Add(signed_chunk, mutation_pool=pool)
        offset += chunk_size

  return urn
def function[NewFromContent, parameter[cls, content, urn, chunk_size, token, private_key, public_key]]: constant[Alternate constructor for GRRSignedBlob. Creates a GRRSignedBlob from a content string by chunking it and signing each chunk. Args: content: The data to stored in the GRRSignedBlob. urn: The AFF4 URN to create. chunk_size: Data will be chunked into this size (each chunk is individually signed. token: The ACL Token. private_key: An rdf_crypto.RSAPrivateKey() instance. public_key: An rdf_crypto.RSAPublicKey() instance. Returns: the URN of the new object written. ] call[name[aff4].FACTORY.Delete, parameter[name[urn]]] with call[name[data_store].DB.GetMutationPool, parameter[]] begin[:] with call[name[aff4].FACTORY.Create, parameter[name[urn], name[cls]]] begin[:] for taget[name[start_of_chunk]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[content]]], name[chunk_size]]]] begin[:] variable[chunk] assign[=] call[name[content]][<ast.Slice object at 0x7da2044c01f0>] variable[blob_rdf] assign[=] call[name[rdf_crypto].SignedBlob, parameter[]] call[name[blob_rdf].Sign, parameter[name[chunk], name[private_key], name[public_key]]] call[name[fd].Add, parameter[name[blob_rdf]]] return[name[urn]]
keyword[def] identifier[NewFromContent] ( identifier[cls] , identifier[content] , identifier[urn] , identifier[chunk_size] = literal[int] , identifier[token] = keyword[None] , identifier[private_key] = keyword[None] , identifier[public_key] = keyword[None] ): literal[string] identifier[aff4] . identifier[FACTORY] . identifier[Delete] ( identifier[urn] , identifier[token] = identifier[token] ) keyword[with] identifier[data_store] . identifier[DB] . identifier[GetMutationPool] () keyword[as] identifier[pool] : keyword[with] identifier[aff4] . identifier[FACTORY] . identifier[Create] ( identifier[urn] , identifier[cls] , identifier[mode] = literal[string] , identifier[mutation_pool] = identifier[pool] , identifier[token] = identifier[token] ) keyword[as] identifier[fd] : keyword[for] identifier[start_of_chunk] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[content] ), identifier[chunk_size] ): identifier[chunk] = identifier[content] [ identifier[start_of_chunk] : identifier[start_of_chunk] + identifier[chunk_size] ] identifier[blob_rdf] = identifier[rdf_crypto] . identifier[SignedBlob] () identifier[blob_rdf] . identifier[Sign] ( identifier[chunk] , identifier[private_key] , identifier[public_key] ) identifier[fd] . identifier[Add] ( identifier[blob_rdf] , identifier[mutation_pool] = identifier[pool] ) keyword[return] identifier[urn]
def NewFromContent(cls, content, urn, chunk_size=1024, token=None, private_key=None, public_key=None): """Alternate constructor for GRRSignedBlob. Creates a GRRSignedBlob from a content string by chunking it and signing each chunk. Args: content: The data to stored in the GRRSignedBlob. urn: The AFF4 URN to create. chunk_size: Data will be chunked into this size (each chunk is individually signed. token: The ACL Token. private_key: An rdf_crypto.RSAPrivateKey() instance. public_key: An rdf_crypto.RSAPublicKey() instance. Returns: the URN of the new object written. """ aff4.FACTORY.Delete(urn, token=token) with data_store.DB.GetMutationPool() as pool: with aff4.FACTORY.Create(urn, cls, mode='w', mutation_pool=pool, token=token) as fd: for start_of_chunk in range(0, len(content), chunk_size): chunk = content[start_of_chunk:start_of_chunk + chunk_size] blob_rdf = rdf_crypto.SignedBlob() blob_rdf.Sign(chunk, private_key, public_key) fd.Add(blob_rdf, mutation_pool=pool) # depends on [control=['for'], data=['start_of_chunk']] # depends on [control=['with'], data=['fd']] # depends on [control=['with'], data=['pool']] return urn
def get_cameras_rules(self):
    """Return the camera rules."""
    event = self.publish_and_get_event('rules')
    # A falsy event (None/empty) means no rules are available.
    return event.get('properties') if event else None
def function[get_cameras_rules, parameter[self]]: constant[Return the camera rules.] variable[resource] assign[=] constant[rules] variable[rules_event] assign[=] call[name[self].publish_and_get_event, parameter[name[resource]]] if name[rules_event] begin[:] return[call[name[rules_event].get, parameter[constant[properties]]]] return[constant[None]]
keyword[def] identifier[get_cameras_rules] ( identifier[self] ): literal[string] identifier[resource] = literal[string] identifier[rules_event] = identifier[self] . identifier[publish_and_get_event] ( identifier[resource] ) keyword[if] identifier[rules_event] : keyword[return] identifier[rules_event] . identifier[get] ( literal[string] ) keyword[return] keyword[None]
def get_cameras_rules(self): """Return the camera rules.""" resource = 'rules' rules_event = self.publish_and_get_event(resource) if rules_event: return rules_event.get('properties') # depends on [control=['if'], data=[]] return None
def get_symm_bands(self, structure, efermi, kpt_line=None, labels_dict=None):
    """
    Function useful to read bands from Boltztrap output and get a
    BandStructureSymmLine object comparable with that one from a DFT
    calculation (if the same kpt_line is provided). Default kpt_line
    and labels_dict is the standard path of high symmetry k-point for
    the specified structure. They could be extracted from the
    BandStructureSymmLine object that you want to compare with. efermi
    variable must be specified to create the BandStructureSymmLine
    object (usually it comes from DFT or Boltztrap calc)
    """
    try:
        if kpt_line is None:
            # Default to the standard high-symmetry path of the structure.
            kpath = HighSymmKpath(structure)
            kpt_line = [Kpoint(k, structure.lattice.reciprocal_lattice)
                        for k
                        in kpath.get_kpoints(coords_are_cartesian=False)[0]]
            labels_dict = {l: k for k, l in zip(
                *kpath.get_kpoints(coords_are_cartesian=False)) if l}
            kpt_line = [kp.frac_coords for kp in kpt_line]
        elif type(kpt_line[0]) == Kpoint:
            # Convert Kpoint objects to bare fractional coordinates.
            kpt_line = [kp.frac_coords for kp in kpt_line]
            labels_dict = {k: labels_dict[k].frac_coords
                           for k in labels_dict}

        # For each requested k-point, find the index of the matching
        # Boltztrap k-point, loosening the tolerance by 10x until a
        # match is found.
        idx_list = []
        for i, kp in enumerate(kpt_line):
            w = []
            prec = 1e-05
            while len(w) == 0:
                w = np.where(np.all(
                    np.abs(kp - self._bz_kpoints) < [prec] * 3, axis=1))[0]
                prec *= 10

            idx_list.append([i, w[0]])

        idx_list = np.array(idx_list)

        # Convert bands from Ry to eV, shift by the Fermi level and
        # select only the columns on the requested k-point path.
        bands_dict = {Spin.up: (self._bz_bands * Energy(1, "Ry").to(
            "eV") + efermi).T[:, idx_list[:, 1]].tolist()}

        sbs = BandStructureSymmLine(kpt_line, bands_dict,
                                    structure.lattice.reciprocal_lattice,
                                    efermi,
                                    labels_dict=labels_dict)

        return sbs
    except Exception:
        # BUGFIX: this was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt; only genuine errors should be
        # translated into a BoltztrapError here.
        raise BoltztrapError(
            "Bands are not in output of BoltzTraP.\nBolztrapRunner must "
            "be run with run_type=BANDS")
def function[get_symm_bands, parameter[self, structure, efermi, kpt_line, labels_dict]]: constant[ Function useful to read bands from Boltztrap output and get a BandStructureSymmLine object comparable with that one from a DFT calculation (if the same kpt_line is provided). Default kpt_line and labels_dict is the standard path of high symmetry k-point for the specified structure. They could be extracted from the BandStructureSymmLine object that you want to compare with. efermi variable must be specified to create the BandStructureSymmLine object (usually it comes from DFT or Boltztrap calc) ] <ast.Try object at 0x7da1b1cd5f30>
keyword[def] identifier[get_symm_bands] ( identifier[self] , identifier[structure] , identifier[efermi] , identifier[kpt_line] = keyword[None] , identifier[labels_dict] = keyword[None] ): literal[string] keyword[try] : keyword[if] identifier[kpt_line] keyword[is] keyword[None] : identifier[kpath] = identifier[HighSymmKpath] ( identifier[structure] ) identifier[kpt_line] =[ identifier[Kpoint] ( identifier[k] , identifier[structure] . identifier[lattice] . identifier[reciprocal_lattice] ) keyword[for] identifier[k] keyword[in] identifier[kpath] . identifier[get_kpoints] ( identifier[coords_are_cartesian] = keyword[False] )[ literal[int] ]] identifier[labels_dict] ={ identifier[l] : identifier[k] keyword[for] identifier[k] , identifier[l] keyword[in] identifier[zip] ( * identifier[kpath] . identifier[get_kpoints] ( identifier[coords_are_cartesian] = keyword[False] )) keyword[if] identifier[l] } identifier[kpt_line] =[ identifier[kp] . identifier[frac_coords] keyword[for] identifier[kp] keyword[in] identifier[kpt_line] ] keyword[elif] identifier[type] ( identifier[kpt_line] [ literal[int] ])== identifier[Kpoint] : identifier[kpt_line] =[ identifier[kp] . identifier[frac_coords] keyword[for] identifier[kp] keyword[in] identifier[kpt_line] ] identifier[labels_dict] ={ identifier[k] : identifier[labels_dict] [ identifier[k] ]. identifier[frac_coords] keyword[for] identifier[k] keyword[in] identifier[labels_dict] } identifier[idx_list] =[] keyword[for] identifier[i] , identifier[kp] keyword[in] identifier[enumerate] ( identifier[kpt_line] ): identifier[w] =[] identifier[prec] = literal[int] keyword[while] identifier[len] ( identifier[w] )== literal[int] : identifier[w] = identifier[np] . identifier[where] ( identifier[np] . identifier[all] ( identifier[np] . identifier[abs] ( identifier[kp] - identifier[self] . 
identifier[_bz_kpoints] )<[ identifier[prec] ]* literal[int] , identifier[axis] = literal[int] ))[ literal[int] ] identifier[prec] *= literal[int] identifier[idx_list] . identifier[append] ([ identifier[i] , identifier[w] [ literal[int] ]]) identifier[idx_list] = identifier[np] . identifier[array] ( identifier[idx_list] ) identifier[bands_dict] ={ identifier[Spin] . identifier[up] :( identifier[self] . identifier[_bz_bands] * identifier[Energy] ( literal[int] , literal[string] ). identifier[to] ( literal[string] )+ identifier[efermi] ). identifier[T] [:, identifier[idx_list] [:, literal[int] ]]. identifier[tolist] ()} identifier[sbs] = identifier[BandStructureSymmLine] ( identifier[kpt_line] , identifier[bands_dict] , identifier[structure] . identifier[lattice] . identifier[reciprocal_lattice] , identifier[efermi] , identifier[labels_dict] = identifier[labels_dict] ) keyword[return] identifier[sbs] keyword[except] : keyword[raise] identifier[BoltztrapError] ( literal[string] literal[string] )
def get_symm_bands(self, structure, efermi, kpt_line=None, labels_dict=None): """ Function useful to read bands from Boltztrap output and get a BandStructureSymmLine object comparable with that one from a DFT calculation (if the same kpt_line is provided). Default kpt_line and labels_dict is the standard path of high symmetry k-point for the specified structure. They could be extracted from the BandStructureSymmLine object that you want to compare with. efermi variable must be specified to create the BandStructureSymmLine object (usually it comes from DFT or Boltztrap calc) """ try: if kpt_line is None: kpath = HighSymmKpath(structure) kpt_line = [Kpoint(k, structure.lattice.reciprocal_lattice) for k in kpath.get_kpoints(coords_are_cartesian=False)[0]] labels_dict = {l: k for (k, l) in zip(*kpath.get_kpoints(coords_are_cartesian=False)) if l} kpt_line = [kp.frac_coords for kp in kpt_line] # depends on [control=['if'], data=['kpt_line']] elif type(kpt_line[0]) == Kpoint: kpt_line = [kp.frac_coords for kp in kpt_line] labels_dict = {k: labels_dict[k].frac_coords for k in labels_dict} # depends on [control=['if'], data=[]] idx_list = [] # kpt_dense=np.array([kp for kp in self._bz_kpoints]) for (i, kp) in enumerate(kpt_line): w = [] prec = 1e-05 while len(w) == 0: w = np.where(np.all(np.abs(kp - self._bz_kpoints) < [prec] * 3, axis=1))[0] prec *= 10 # depends on [control=['while'], data=[]] # print( prec ) idx_list.append([i, w[0]]) # depends on [control=['for'], data=[]] # if len(w)>0: # idx_list.append([i,w[0]]) # else: # w=np.where(np.all(np.abs(kp.frac_coords-self._bz_kpoints) # <[1e-04,1e-04,1e-04],axis=1))[0] # idx_list.append([i,w[0]]) idx_list = np.array(idx_list) # print( idx_list.shape ) bands_dict = {Spin.up: (self._bz_bands * Energy(1, 'Ry').to('eV') + efermi).T[:, idx_list[:, 1]].tolist()} # bz_kpoints = bz_kpoints[idx_list[:,1]].tolist() sbs = BandStructureSymmLine(kpt_line, bands_dict, structure.lattice.reciprocal_lattice, efermi, 
labels_dict=labels_dict) return sbs # depends on [control=['try'], data=[]] except: raise BoltztrapError('Bands are not in output of BoltzTraP.\nBolztrapRunner must be run with run_type=BANDS') # depends on [control=['except'], data=[]]
def s3am_upload(job, fpath, s3_dir, num_cores=1, s3_key_path=None):
    """
    Uploads a file to s3 via S3AM
    S3AM binary must be on the PATH to use this function
    For SSE-C encryption: provide a path to a 32-byte file

    :param toil.job.Job job: Toil job that is calling this function
    :param str fpath: Path to file to upload
    :param str s3_dir: Ouptut S3 path. Format: s3://bucket/[directory]
    :param int num_cores: Number of cores to use for up/download with S3AM
    :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
    """
    require(s3_dir.startswith('s3://'),
            'Format of s3_dir (s3://) is incorrect: %s', s3_dir)
    # The destination key is the source file's basename under s3_dir.
    destination = os.path.join(s3_dir, os.path.basename(fpath))
    _s3am_with_retry(job=job,
                     num_cores=num_cores,
                     file_path=fpath,
                     s3_url=destination,
                     mode='upload',
                     s3_key_path=s3_key_path)
def function[s3am_upload, parameter[job, fpath, s3_dir, num_cores, s3_key_path]]: constant[ Uploads a file to s3 via S3AM S3AM binary must be on the PATH to use this function For SSE-C encryption: provide a path to a 32-byte file :param toil.job.Job job: Toil job that is calling this function :param str fpath: Path to file to upload :param str s3_dir: Ouptut S3 path. Format: s3://bucket/[directory] :param int num_cores: Number of cores to use for up/download with S3AM :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption ] call[name[require], parameter[call[name[s3_dir].startswith, parameter[constant[s3://]]], constant[Format of s3_dir (s3://) is incorrect: %s], name[s3_dir]]] variable[s3_dir] assign[=] call[name[os].path.join, parameter[name[s3_dir], call[name[os].path.basename, parameter[name[fpath]]]]] call[name[_s3am_with_retry], parameter[]]
keyword[def] identifier[s3am_upload] ( identifier[job] , identifier[fpath] , identifier[s3_dir] , identifier[num_cores] = literal[int] , identifier[s3_key_path] = keyword[None] ): literal[string] identifier[require] ( identifier[s3_dir] . identifier[startswith] ( literal[string] ), literal[string] , identifier[s3_dir] ) identifier[s3_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[s3_dir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[fpath] )) identifier[_s3am_with_retry] ( identifier[job] = identifier[job] , identifier[num_cores] = identifier[num_cores] , identifier[file_path] = identifier[fpath] , identifier[s3_url] = identifier[s3_dir] , identifier[mode] = literal[string] , identifier[s3_key_path] = identifier[s3_key_path] )
def s3am_upload(job, fpath, s3_dir, num_cores=1, s3_key_path=None): """ Uploads a file to s3 via S3AM S3AM binary must be on the PATH to use this function For SSE-C encryption: provide a path to a 32-byte file :param toil.job.Job job: Toil job that is calling this function :param str fpath: Path to file to upload :param str s3_dir: Ouptut S3 path. Format: s3://bucket/[directory] :param int num_cores: Number of cores to use for up/download with S3AM :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption """ require(s3_dir.startswith('s3://'), 'Format of s3_dir (s3://) is incorrect: %s', s3_dir) s3_dir = os.path.join(s3_dir, os.path.basename(fpath)) _s3am_with_retry(job=job, num_cores=num_cores, file_path=fpath, s3_url=s3_dir, mode='upload', s3_key_path=s3_key_path)
def tag_and_stem(self, text, cache=None):
    """
    Given some text, return a sequence of (stem, pos, text) triples as
    appropriate for the reader. `pos` can be as general or specific as
    necessary (for example, it might label all parts of speech, or it might
    only distinguish function words from others).

    Twitter-style hashtags and at-mentions have the stem and pos they would
    have without the leading # or @. For instance, if the reader's triple for
    "thing" is ('thing', 'NN', 'things'), then "#things" would come out as
    ('thing', 'NN', '#things').
    """
    triples = []
    for record in self.analyze(text):
        # Note: the root is extracted for every record, even ones whose
        # token is empty or punctuation, matching the reader contract.
        stem = self.get_record_root(record)
        surface = self.get_record_token(record)
        if not surface:
            continue
        if unicode_is_punctuation(surface):
            # Punctuation keeps its surface form and gets the '.' tag.
            triples.append((surface, '.', surface))
        else:
            triples.append((stem, self.get_record_pos(record), surface))
    return triples
def function[tag_and_stem, parameter[self, text, cache]]: constant[ Given some text, return a sequence of (stem, pos, text) triples as appropriate for the reader. `pos` can be as general or specific as necessary (for example, it might label all parts of speech, or it might only distinguish function words from others). Twitter-style hashtags and at-mentions have the stem and pos they would have without the leading # or @. For instance, if the reader's triple for "thing" is ('thing', 'NN', 'things'), then "#things" would come out as ('thing', 'NN', '#things'). ] variable[analysis] assign[=] call[name[self].analyze, parameter[name[text]]] variable[triples] assign[=] list[[]] for taget[name[record]] in starred[name[analysis]] begin[:] variable[root] assign[=] call[name[self].get_record_root, parameter[name[record]]] variable[token] assign[=] call[name[self].get_record_token, parameter[name[record]]] if name[token] begin[:] if call[name[unicode_is_punctuation], parameter[name[token]]] begin[:] call[name[triples].append, parameter[tuple[[<ast.Name object at 0x7da18bcca4d0>, <ast.Constant object at 0x7da18bccb5b0>, <ast.Name object at 0x7da18bccbfa0>]]]] return[name[triples]]
keyword[def] identifier[tag_and_stem] ( identifier[self] , identifier[text] , identifier[cache] = keyword[None] ): literal[string] identifier[analysis] = identifier[self] . identifier[analyze] ( identifier[text] ) identifier[triples] =[] keyword[for] identifier[record] keyword[in] identifier[analysis] : identifier[root] = identifier[self] . identifier[get_record_root] ( identifier[record] ) identifier[token] = identifier[self] . identifier[get_record_token] ( identifier[record] ) keyword[if] identifier[token] : keyword[if] identifier[unicode_is_punctuation] ( identifier[token] ): identifier[triples] . identifier[append] (( identifier[token] , literal[string] , identifier[token] )) keyword[else] : identifier[pos] = identifier[self] . identifier[get_record_pos] ( identifier[record] ) identifier[triples] . identifier[append] (( identifier[root] , identifier[pos] , identifier[token] )) keyword[return] identifier[triples]
def tag_and_stem(self, text, cache=None): """ Given some text, return a sequence of (stem, pos, text) triples as appropriate for the reader. `pos` can be as general or specific as necessary (for example, it might label all parts of speech, or it might only distinguish function words from others). Twitter-style hashtags and at-mentions have the stem and pos they would have without the leading # or @. For instance, if the reader's triple for "thing" is ('thing', 'NN', 'things'), then "#things" would come out as ('thing', 'NN', '#things'). """ analysis = self.analyze(text) triples = [] for record in analysis: root = self.get_record_root(record) token = self.get_record_token(record) if token: if unicode_is_punctuation(token): triples.append((token, '.', token)) # depends on [control=['if'], data=[]] else: pos = self.get_record_pos(record) triples.append((root, pos, token)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['record']] return triples
def get_seqprop_subsequence_from_structchain_property(self, property_key, property_value, condition,
                                                      seqprop=None, structprop=None, chain_id=None,
                                                      use_representatives=False, return_resnums=False):
    """Extract a SeqProp subsequence selected by a property of a structure chain.

    Filters the given StructProp chain's ``letter_annotations`` using
    ``property_key``/``condition``/``property_value``, maps the matching
    structure residue numbers back onto the sequence, and returns the
    corresponding subsequence as a new SeqProp.  This mirrors
    :func:`ssbio.protein.sequence.seqprop.SeqProp.get_subsequence_from_property`
    but filters on the structure's annotation instead of the sequence's.

    Args:
        property_key (str): Key in the chain's ``letter_annotations`` to filter on
        property_value (object): Value to compare against
        condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` comparison to apply
        seqprop (SeqRecord, SeqProp): Sequence to map back onto
        structprop (StructProp): Structure whose chain is filtered
        chain_id (str): Chain identifier within the structure
        use_representatives (bool): If True, use the representative
            sequence/structure/chain instead of the explicit arguments
        return_resnums (bool): If True, also return the mapped sequence residue numbers

    Returns:
        SeqProp: New SeqProp object (or a (SeqProp, resnums) tuple when
        ``return_resnums`` is True); None when nothing matched or extraction failed.
    """
    if use_representatives:
        seqprop = self.representative_sequence
        structprop = self.representative_structure
        chain_id = self.representative_chain
        if not structprop:
            raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
    elif not (seqprop and structprop and chain_id):
        raise ValueError('Please specify sequence, structure, and chain ID')

    chain_prop = structprop.chains.get_by_id(chain_id)

    # First select the matching residues on the structure chain itself.
    chain_subseq, subfeat_resnums = chain_prop.get_subsequence_from_property(
        property_key=property_key,
        property_value=property_value,
        condition=condition,
        return_resnums=True) or (None, [])
    if not chain_subseq:
        return

    # Translate structure residue numbers into sequence residue numbers.
    mapping_dict = self.map_structprop_resnums_to_seqprop_resnums(resnums=subfeat_resnums,
                                                                  structprop=structprop,
                                                                  chain_id=chain_id,
                                                                  seqprop=seqprop,
                                                                  use_representatives=use_representatives)

    sub_id = '{}-{}->{}_{}_{}_{}_extracted'.format(structprop.id, chain_id, seqprop.id,
                                                   property_key, condition, property_value)
    seqprop_resnums = list(mapping_dict.values())

    new_sp = seqprop.get_subsequence(resnums=seqprop_resnums, new_id=sub_id,
                                     copy_letter_annotations=False)
    if not new_sp:
        # XTODO: investigate errors from subsequence extraction..
        return

    try:
        new_sp.letter_annotations = chain_subseq.letter_annotations
    except TypeError:
        # Lengths differ (likely an insertion/deletion within the structure);
        # skip copying annotations rather than fail.
        log.warning('{}: cannot store structure letter annotations in subsequence, lengths do not match. '
                    'Likely a deletion or insertion within the structure!'.format(sub_id))

    if return_resnums:
        return new_sp, seqprop_resnums
    return new_sp
def function[get_seqprop_subsequence_from_structchain_property, parameter[self, property_key, property_value, condition, seqprop, structprop, chain_id, use_representatives, return_resnums]]: constant[Get a subsequence as a new SeqProp object given a certain property you want to find in the given StructProp's chain's letter_annotation This is similar to the :func:`ssbio.protein.sequence.seqprop.SeqProp.get_subsequence_from_property` method but instead of filtering by the SeqProp's letter_annotation we use the StructProp annotation, and map back to the SeqProp. Args: seqprop (SeqRecord, SeqProp): SeqRecord or SeqProp object that has properties stored in its ``letter_annotations`` attribute property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using property_value (object): Property value that you want to filter by condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by Returns: SeqProp: New SeqProp object that you can run computations on or just extract its properties ] if name[use_representatives] begin[:] variable[seqprop] assign[=] name[self].representative_sequence variable[structprop] assign[=] name[self].representative_structure variable[chain_id] assign[=] name[self].representative_chain if <ast.UnaryOp object at 0x7da18eb56230> begin[:] <ast.Raise object at 0x7da18eb552a0> variable[chain_prop] assign[=] call[name[structprop].chains.get_by_id, parameter[name[chain_id]]] <ast.Tuple object at 0x7da18eb55ab0> assign[=] <ast.BoolOp object at 0x7da18eb54ac0> if <ast.UnaryOp object at 0x7da18eb54580> begin[:] return[None] variable[mapping_dict] assign[=] call[name[self].map_structprop_resnums_to_seqprop_resnums, parameter[]] variable[sub_id] assign[=] call[constant[{}-{}->{}_{}_{}_{}_extracted].format, parameter[name[structprop].id, name[chain_id], name[seqprop].id, name[property_key], name[condition], name[property_value]]] variable[seqprop_resnums] assign[=] <ast.ListComp object at 0x7da2046236d0> 
variable[new_sp] assign[=] call[name[seqprop].get_subsequence, parameter[]] if <ast.UnaryOp object at 0x7da204623310> begin[:] return[None] <ast.Try object at 0x7da204621a80> if name[return_resnums] begin[:] return[tuple[[<ast.Name object at 0x7da204620100>, <ast.Name object at 0x7da2046210f0>]]]
keyword[def] identifier[get_seqprop_subsequence_from_structchain_property] ( identifier[self] , identifier[property_key] , identifier[property_value] , identifier[condition] , identifier[seqprop] = keyword[None] , identifier[structprop] = keyword[None] , identifier[chain_id] = keyword[None] , identifier[use_representatives] = keyword[False] , identifier[return_resnums] = keyword[False] ): literal[string] keyword[if] identifier[use_representatives] : identifier[seqprop] = identifier[self] . identifier[representative_sequence] identifier[structprop] = identifier[self] . identifier[representative_structure] identifier[chain_id] = identifier[self] . identifier[representative_chain] keyword[if] keyword[not] identifier[structprop] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : keyword[if] keyword[not] identifier[seqprop] keyword[or] keyword[not] identifier[structprop] keyword[or] keyword[not] identifier[chain_id] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[chain_prop] = identifier[structprop] . identifier[chains] . identifier[get_by_id] ( identifier[chain_id] ) identifier[chain_subseq] , identifier[subfeat_resnums] = identifier[chain_prop] . identifier[get_subsequence_from_property] ( identifier[property_key] = identifier[property_key] , identifier[property_value] = identifier[property_value] , identifier[condition] = identifier[condition] , identifier[return_resnums] = keyword[True] ) keyword[or] ( keyword[None] ,[]) keyword[if] keyword[not] identifier[chain_subseq] : keyword[return] identifier[mapping_dict] = identifier[self] . identifier[map_structprop_resnums_to_seqprop_resnums] ( identifier[resnums] = identifier[subfeat_resnums] , identifier[structprop] = identifier[structprop] , identifier[chain_id] = identifier[chain_id] , identifier[seqprop] = identifier[seqprop] , identifier[use_representatives] = identifier[use_representatives] ) identifier[sub_id] = literal[string] . 
identifier[format] ( identifier[structprop] . identifier[id] , identifier[chain_id] , identifier[seqprop] . identifier[id] , identifier[property_key] , identifier[condition] , identifier[property_value] ) identifier[seqprop_resnums] =[ identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[mapping_dict] . identifier[items] ()] identifier[new_sp] = identifier[seqprop] . identifier[get_subsequence] ( identifier[resnums] = identifier[seqprop_resnums] , identifier[new_id] = identifier[sub_id] , identifier[copy_letter_annotations] = keyword[False] ) keyword[if] keyword[not] identifier[new_sp] : keyword[return] keyword[try] : identifier[new_sp] . identifier[letter_annotations] = identifier[chain_subseq] . identifier[letter_annotations] keyword[except] identifier[TypeError] : identifier[log] . identifier[warning] ( literal[string] literal[string] . identifier[format] ( identifier[sub_id] )) keyword[if] identifier[return_resnums] : keyword[return] identifier[new_sp] , identifier[seqprop_resnums] keyword[else] : keyword[return] identifier[new_sp]
def get_seqprop_subsequence_from_structchain_property(self, property_key, property_value, condition, seqprop=None, structprop=None, chain_id=None, use_representatives=False, return_resnums=False): """Get a subsequence as a new SeqProp object given a certain property you want to find in the given StructProp's chain's letter_annotation This is similar to the :func:`ssbio.protein.sequence.seqprop.SeqProp.get_subsequence_from_property` method but instead of filtering by the SeqProp's letter_annotation we use the StructProp annotation, and map back to the SeqProp. Args: seqprop (SeqRecord, SeqProp): SeqRecord or SeqProp object that has properties stored in its ``letter_annotations`` attribute property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using property_value (object): Property value that you want to filter by condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by Returns: SeqProp: New SeqProp object that you can run computations on or just extract its properties """ if use_representatives: seqprop = self.representative_sequence structprop = self.representative_structure chain_id = self.representative_chain if not structprop: raise ValueError('No representative structure set, please specify sequence, structure, and chain ID') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif not seqprop or not structprop or (not chain_id): raise ValueError('Please specify sequence, structure, and chain ID') # depends on [control=['if'], data=[]] chain_prop = structprop.chains.get_by_id(chain_id) # Get the subsequence from the structure (chain_subseq, subfeat_resnums) = chain_prop.get_subsequence_from_property(property_key=property_key, property_value=property_value, condition=condition, return_resnums=True) or (None, []) if not chain_subseq: return # depends on [control=['if'], data=[]] # Map subsequence feature resnums back to the seqprop mapping_dict = 
self.map_structprop_resnums_to_seqprop_resnums(resnums=subfeat_resnums, structprop=structprop, chain_id=chain_id, seqprop=seqprop, use_representatives=use_representatives) sub_id = '{}-{}->{}_{}_{}_{}_extracted'.format(structprop.id, chain_id, seqprop.id, property_key, condition, property_value) seqprop_resnums = [v for (k, v) in mapping_dict.items()] new_sp = seqprop.get_subsequence(resnums=seqprop_resnums, new_id=sub_id, copy_letter_annotations=False) if not new_sp: # XTODO: investigate errors from subsequence extraction.. return # depends on [control=['if'], data=[]] try: new_sp.letter_annotations = chain_subseq.letter_annotations # depends on [control=['try'], data=[]] except TypeError: # If the length of the mapped sequence does not match, log a warning and don't store letter_annotations log.warning('{}: cannot store structure letter annotations in subsequence, lengths do not match. Likely a deletion or insertion within the structure!'.format(sub_id)) # depends on [control=['except'], data=[]] if return_resnums: return (new_sp, seqprop_resnums) # depends on [control=['if'], data=[]] else: return new_sp
def is_terminal(self):
    """True if this result will stop the test."""
    # Any of the three conditions terminates the test; preserve the
    # short-circuit semantics of the original `or` chain.
    if self.raised_exception:
        return self.raised_exception
    if self.is_timeout:
        return self.is_timeout
    return self.phase_result == openhtf.PhaseResult.STOP
def function[is_terminal, parameter[self]]: constant[True if this result will stop the test.] return[<ast.BoolOp object at 0x7da1b18c28f0>]
keyword[def] identifier[is_terminal] ( identifier[self] ): literal[string] keyword[return] ( identifier[self] . identifier[raised_exception] keyword[or] identifier[self] . identifier[is_timeout] keyword[or] identifier[self] . identifier[phase_result] == identifier[openhtf] . identifier[PhaseResult] . identifier[STOP] )
def is_terminal(self): """True if this result will stop the test.""" return self.raised_exception or self.is_timeout or self.phase_result == openhtf.PhaseResult.STOP
def setParams(self, params):
    """Load a flat parameter vector into the per-term effect matrices.

    Consecutive slices of ``params`` are reshaped (Fortran/column-major
    order) into each ``self.B[i]`` in turn, consuming ``B[i].size``
    entries per term.
    """
    offset = 0
    for term in range(self.n_terms):
        count = self.B[term].size
        chunk = params[offset:offset + count]
        self.B[term] = np.reshape(chunk, self.B[term].shape, order='F')
        offset += count
def function[setParams, parameter[self, params]]: constant[ set params ] variable[start] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[name[self].n_terms]]] begin[:] variable[n_effects] assign[=] call[name[self].B][name[i]].size call[name[self].B][name[i]] assign[=] call[name[np].reshape, parameter[call[name[params]][<ast.Slice object at 0x7da2054a6620>], call[name[self].B][name[i]].shape]] <ast.AugAssign object at 0x7da2054a7a30>
keyword[def] identifier[setParams] ( identifier[self] , identifier[params] ): literal[string] identifier[start] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[n_terms] ): identifier[n_effects] = identifier[self] . identifier[B] [ identifier[i] ]. identifier[size] identifier[self] . identifier[B] [ identifier[i] ]= identifier[np] . identifier[reshape] ( identifier[params] [ identifier[start] : identifier[start] + identifier[n_effects] ], identifier[self] . identifier[B] [ identifier[i] ]. identifier[shape] , identifier[order] = literal[string] ) identifier[start] += identifier[n_effects]
def setParams(self, params): """ set params """ start = 0 for i in range(self.n_terms): n_effects = self.B[i].size self.B[i] = np.reshape(params[start:start + n_effects], self.B[i].shape, order='F') start += n_effects # depends on [control=['for'], data=['i']]
def create_data_item_from_data_and_metadata(self, data_and_metadata: DataAndMetadata.DataAndMetadata, title: str=None) -> DataItem:
    """Create a data item in the library from the data and metadata.

    .. versionadded:: 1.0

    .. deprecated:: 1.1
       Use :py:meth:`~nion.swift.Facade.Library.create_data_item_from_data_and_metadata` instead.

    Scriptable: No
    """
    new_item = DataItemModule.new_data_item(data_and_metadata)
    if title is not None:
        new_item.title = title
    # Register the new item with the library's document model.
    document_model = self.__document_controller.document_model
    document_model.append_data_item(new_item)
    return DataItem(new_item)
def function[create_data_item_from_data_and_metadata, parameter[self, data_and_metadata, title]]: constant[Create a data item in the library from the data and metadata. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.Library.create_data_item_from_data_and_metadata` instead. Scriptable: No ] variable[data_item] assign[=] call[name[DataItemModule].new_data_item, parameter[name[data_and_metadata]]] if compare[name[title] is_not constant[None]] begin[:] name[data_item].title assign[=] name[title] call[name[self].__document_controller.document_model.append_data_item, parameter[name[data_item]]] return[call[name[DataItem], parameter[name[data_item]]]]
keyword[def] identifier[create_data_item_from_data_and_metadata] ( identifier[self] , identifier[data_and_metadata] : identifier[DataAndMetadata] . identifier[DataAndMetadata] , identifier[title] : identifier[str] = keyword[None] )-> identifier[DataItem] : literal[string] identifier[data_item] = identifier[DataItemModule] . identifier[new_data_item] ( identifier[data_and_metadata] ) keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] : identifier[data_item] . identifier[title] = identifier[title] identifier[self] . identifier[__document_controller] . identifier[document_model] . identifier[append_data_item] ( identifier[data_item] ) keyword[return] identifier[DataItem] ( identifier[data_item] )
def create_data_item_from_data_and_metadata(self, data_and_metadata: DataAndMetadata.DataAndMetadata, title: str=None) -> DataItem: """Create a data item in the library from the data and metadata. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.Library.create_data_item_from_data_and_metadata` instead. Scriptable: No """ data_item = DataItemModule.new_data_item(data_and_metadata) if title is not None: data_item.title = title # depends on [control=['if'], data=['title']] self.__document_controller.document_model.append_data_item(data_item) return DataItem(data_item)
async def get(self, key, *, dc=None, watch=None, consistency=None):
    """Return the specified key from the KV store.

    Parameters:
        key (str): Key to fetch
        dc (str): Datacenter to query
        watch (Blocking): Do a blocking query
        consistency (Consistency): Force consistency

    Returns:
        ObjectMeta: where value is the queried kv entry, e.g.::

            {
                "CreateIndex": 100,
                "ModifyIndex": 200,
                "LockIndex": 200,
                "Key": "zip",
                "Flags": 0,
                "Value": b"my data",
                "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
            }

        **CreateIndex** is the index at which the entry was created.
        **ModifyIndex** is the last index that modified this key; it
        corresponds to the X-Consul-Index response header and can be used
        for blocking queries.  **LockIndex** counts successful lock
        acquisitions; while the lock is held, ``Session`` names its owner.
        **Key** is the full path of the entry.  **Flags** is an opaque
        unsigned integer attached to the entry.  **Value** is a
        :class:`~aioconsul.typing.Payload` object decoded according to
        **Flags**.
    """
    raw = await self._read(key, dc=dc, watch=watch, consistency=consistency)
    entry = raw.body[0]
    # Decode the stored payload according to its flags before returning.
    entry["Value"] = decode_value(entry["Value"], entry["Flags"])
    meta = extract_meta(raw.headers)
    return consul(entry, meta=meta)
<ast.AsyncFunctionDef object at 0x7da18c4ce3e0>
keyword[async] keyword[def] identifier[get] ( identifier[self] , identifier[key] ,*, identifier[dc] = keyword[None] , identifier[watch] = keyword[None] , identifier[consistency] = keyword[None] ): literal[string] identifier[response] = keyword[await] identifier[self] . identifier[_read] ( identifier[key] , identifier[dc] = identifier[dc] , identifier[watch] = identifier[watch] , identifier[consistency] = identifier[consistency] ) identifier[result] = identifier[response] . identifier[body] [ literal[int] ] identifier[result] [ literal[string] ]= identifier[decode_value] ( identifier[result] [ literal[string] ], identifier[result] [ literal[string] ]) keyword[return] identifier[consul] ( identifier[result] , identifier[meta] = identifier[extract_meta] ( identifier[response] . identifier[headers] ))
async def get(self, key, *, dc=None, watch=None, consistency=None): """Returns the specified key Parameters: key (str): Key to fetch watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: ObjectMeta: where value is the queried kv value Object will look like:: { "CreateIndex": 100, "ModifyIndex": 200, "LockIndex": 200, "Key": "zip", "Flags": 0, "Value": b"my data", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e" } **CreateIndex** is the internal index value that represents when the entry was created. **ModifyIndex** is the last index that modified this key. This index corresponds to the X-Consul-Index header value that is returned in responses, and it can be used to establish blocking queries. You can even perform blocking queries against entire subtrees of the KV store. **LockIndex** is the number of times this key has successfully been acquired in a lock. If the lock is held, the Session key provides the session that owns the lock. **Key** is simply the full path of the entry. **Flags** is an opaque unsigned integer that can be attached to each entry. Clients can choose to use this however makes sense for their application. **Value** is a :class:`~aioconsul.typing.Payload` object, it depends on **Flags**. """ response = await self._read(key, dc=dc, watch=watch, consistency=consistency) result = response.body[0] result['Value'] = decode_value(result['Value'], result['Flags']) return consul(result, meta=extract_meta(response.headers))
def get(self):
    """
    Constructs a WorkflowRealTimeStatisticsContext

    :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsContext
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsContext
    """
    solution = self._solution
    return WorkflowRealTimeStatisticsContext(
        self._version,
        workspace_sid=solution['workspace_sid'],
        workflow_sid=solution['workflow_sid'],
    )
def function[get, parameter[self]]: constant[ Constructs a WorkflowRealTimeStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsContext ] return[call[name[WorkflowRealTimeStatisticsContext], parameter[name[self]._version]]]
keyword[def] identifier[get] ( identifier[self] ): literal[string] keyword[return] identifier[WorkflowRealTimeStatisticsContext] ( identifier[self] . identifier[_version] , identifier[workspace_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[workflow_sid] = identifier[self] . identifier[_solution] [ literal[string] ], )
def get(self): """ Constructs a WorkflowRealTimeStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsContext """ return WorkflowRealTimeStatisticsContext(self._version, workspace_sid=self._solution['workspace_sid'], workflow_sid=self._solution['workflow_sid'])
def output_for_skipping_run_set(self, runSet, reason=None):
    '''
    Write a short notice to the terminal and the text logfile when a run
    set is skipped.  No skip message is written to the xml-file.
    '''
    # Terminal message, e.g. "Skipping run set 'foo' because ..."
    name_part = " '" + runSet.name + "'" if runSet.name else ""
    reason_part = " " + reason if reason else ""
    util.printOut("\nSkipping run set" + name_part + reason_part)

    # Append the same information to the txt logfile.
    log_text = "\n\n"
    if runSet.name:
        log_text += runSet.name + "\n"
    log_text += "Run set {0} of {1}: skipped {2}\n".format(
        runSet.index, len(self.benchmark.run_sets), reason or "")
    self.txt_file.append(log_text)
def function[output_for_skipping_run_set, parameter[self, runSet, reason]]: constant[ This function writes a simple message to terminal and logfile, when a run set is skipped. There is no message about skipping a run set in the xml-file. ] call[name[util].printOut, parameter[binary_operation[binary_operation[constant[ Skipping run set] + <ast.IfExp object at 0x7da18f58fca0>] + <ast.IfExp object at 0x7da18f58e5f0>]]] variable[runSetInfo] assign[=] constant[ ] if name[runSet].name begin[:] <ast.AugAssign object at 0x7da18f58f490> <ast.AugAssign object at 0x7da18f58dea0> call[name[self].txt_file.append, parameter[name[runSetInfo]]]
keyword[def] identifier[output_for_skipping_run_set] ( identifier[self] , identifier[runSet] , identifier[reason] = keyword[None] ): literal[string] identifier[util] . identifier[printOut] ( literal[string] + ( literal[string] + identifier[runSet] . identifier[name] + literal[string] keyword[if] identifier[runSet] . identifier[name] keyword[else] literal[string] )+ ( literal[string] + identifier[reason] keyword[if] identifier[reason] keyword[else] literal[string] ) ) identifier[runSetInfo] = literal[string] keyword[if] identifier[runSet] . identifier[name] : identifier[runSetInfo] += identifier[runSet] . identifier[name] + literal[string] identifier[runSetInfo] += literal[string] . identifier[format] ( identifier[runSet] . identifier[index] , identifier[len] ( identifier[self] . identifier[benchmark] . identifier[run_sets] ), identifier[reason] keyword[or] literal[string] ) identifier[self] . identifier[txt_file] . identifier[append] ( identifier[runSetInfo] )
def output_for_skipping_run_set(self, runSet, reason=None): """ This function writes a simple message to terminal and logfile, when a run set is skipped. There is no message about skipping a run set in the xml-file. """ # print to terminal util.printOut('\nSkipping run set' + (" '" + runSet.name + "'" if runSet.name else '') + (' ' + reason if reason else '')) # write into txt_file runSetInfo = '\n\n' if runSet.name: runSetInfo += runSet.name + '\n' # depends on [control=['if'], data=[]] runSetInfo += 'Run set {0} of {1}: skipped {2}\n'.format(runSet.index, len(self.benchmark.run_sets), reason or '') self.txt_file.append(runSetInfo)
def mark_rewrite(self, *names):
    """Mark import names as needing to be re-written.

    The named module or package as well as any nested modules will be
    re-written on import.  A warning is emitted for any name that is
    already imported but has not been rewritten yet.
    """
    # set.intersection accepts any iterable, so sys.modules' keys work directly.
    for name in set(names).intersection(sys.modules):
        if name not in self._rewritten_names:
            self._warn_already_imported(name)
    self._must_rewrite.update(names)
def function[mark_rewrite, parameter[self]]: constant[Mark import names as needing to be re-written. The named module or package as well as any nested modules will be re-written on import. ] variable[already_imported] assign[=] call[call[name[set], parameter[name[names]]].intersection, parameter[call[name[set], parameter[name[sys].modules]]]] if name[already_imported] begin[:] for taget[name[name]] in starred[name[already_imported]] begin[:] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._rewritten_names] begin[:] call[name[self]._warn_already_imported, parameter[name[name]]] call[name[self]._must_rewrite.update, parameter[name[names]]]
keyword[def] identifier[mark_rewrite] ( identifier[self] ,* identifier[names] ): literal[string] identifier[already_imported] = identifier[set] ( identifier[names] ). identifier[intersection] ( identifier[set] ( identifier[sys] . identifier[modules] )) keyword[if] identifier[already_imported] : keyword[for] identifier[name] keyword[in] identifier[already_imported] : keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_rewritten_names] : identifier[self] . identifier[_warn_already_imported] ( identifier[name] ) identifier[self] . identifier[_must_rewrite] . identifier[update] ( identifier[names] )
def mark_rewrite(self, *names): """Mark import names as needing to be re-written. The named module or package as well as any nested modules will be re-written on import. """ already_imported = set(names).intersection(set(sys.modules)) if already_imported: for name in already_imported: if name not in self._rewritten_names: self._warn_already_imported(name) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]] self._must_rewrite.update(names)
def known(self, object):
    """Return the type recorded in the object's metadata.

    Looks up ``object.__metadata__.sxtype``.  Returns None (implicitly
    compatible with the previous behavior) when the object has no
    ``__metadata__`` attribute or the metadata lacks ``sxtype``.

    Bug fix: the original used a bare ``except: pass``, which also
    swallowed SystemExit/KeyboardInterrupt and any unrelated error; only
    AttributeError can legitimately arise from these two lookups.

    @param object: any object, possibly carrying ``__metadata__.sxtype``.
    @return: the recorded type, or None if unavailable.
    """
    try:
        md = object.__metadata__
        return md.sxtype
    except AttributeError:
        # No metadata (or no sxtype) on this object - not an error here.
        return None
def function[known, parameter[self, object]]: constant[ get the type specified in the object's metadata ] <ast.Try object at 0x7da2041dafb0>
keyword[def] identifier[known] ( identifier[self] , identifier[object] ): literal[string] keyword[try] : identifier[md] = identifier[object] . identifier[__metadata__] identifier[known] = identifier[md] . identifier[sxtype] keyword[return] identifier[known] keyword[except] : keyword[pass]
def known(self, object): """ get the type specified in the object's metadata """ try: md = object.__metadata__ known = md.sxtype return known # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]]
def _match_show(self, show): """Match a query for a specific show/list of shows""" if self.show: return match_list(self.show, show) else: return True
def function[_match_show, parameter[self, show]]: constant[Match a query for a specific show/list of shows] if name[self].show begin[:] return[call[name[match_list], parameter[name[self].show, name[show]]]]
keyword[def] identifier[_match_show] ( identifier[self] , identifier[show] ): literal[string] keyword[if] identifier[self] . identifier[show] : keyword[return] identifier[match_list] ( identifier[self] . identifier[show] , identifier[show] ) keyword[else] : keyword[return] keyword[True]
def _match_show(self, show): """Match a query for a specific show/list of shows""" if self.show: return match_list(self.show, show) # depends on [control=['if'], data=[]] else: return True
def cmd_oreoled(self, args):
    '''send LED pattern as override, using OreoLED conventions'''
    if len(args) < 4:
        print("Usage: oreoled LEDNUM RED GREEN BLUE <RATE>")
        return
    lednum = int(args[0])
    red, green, blue = int(args[1]), int(args[2]), int(args[3])
    # 24-byte OreoLED pattern: "RGB0" header, a zero byte, then the colour.
    pattern = [0] * 24
    pattern[:4] = [ord(c) for c in 'RGB0']
    pattern[4] = 0
    pattern[5] = red
    pattern[6] = green
    pattern[7] = blue
    self.master.mav.led_control_send(self.settings.target_system,
                                     self.settings.target_component,
                                     lednum, 255, 8, pattern)
def function[cmd_oreoled, parameter[self, args]]: constant[send LED pattern as override, using OreoLED conventions] if compare[call[name[len], parameter[name[args]]] less[<] constant[4]] begin[:] call[name[print], parameter[constant[Usage: oreoled LEDNUM RED GREEN BLUE <RATE>]]] return[None] variable[lednum] assign[=] call[name[int], parameter[call[name[args]][constant[0]]]] variable[pattern] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f58f3a0>]] * constant[24]] call[name[pattern]][constant[0]] assign[=] call[name[ord], parameter[constant[R]]] call[name[pattern]][constant[1]] assign[=] call[name[ord], parameter[constant[G]]] call[name[pattern]][constant[2]] assign[=] call[name[ord], parameter[constant[B]]] call[name[pattern]][constant[3]] assign[=] call[name[ord], parameter[constant[0]]] call[name[pattern]][constant[4]] assign[=] constant[0] call[name[pattern]][constant[5]] assign[=] call[name[int], parameter[call[name[args]][constant[1]]]] call[name[pattern]][constant[6]] assign[=] call[name[int], parameter[call[name[args]][constant[2]]]] call[name[pattern]][constant[7]] assign[=] call[name[int], parameter[call[name[args]][constant[3]]]] call[name[self].master.mav.led_control_send, parameter[name[self].settings.target_system, name[self].settings.target_component, name[lednum], constant[255], constant[8], name[pattern]]]
keyword[def] identifier[cmd_oreoled] ( identifier[self] , identifier[args] ): literal[string] keyword[if] identifier[len] ( identifier[args] )< literal[int] : identifier[print] ( literal[string] ) keyword[return] identifier[lednum] = identifier[int] ( identifier[args] [ literal[int] ]) identifier[pattern] =[ literal[int] ]* literal[int] identifier[pattern] [ literal[int] ]= identifier[ord] ( literal[string] ) identifier[pattern] [ literal[int] ]= identifier[ord] ( literal[string] ) identifier[pattern] [ literal[int] ]= identifier[ord] ( literal[string] ) identifier[pattern] [ literal[int] ]= identifier[ord] ( literal[string] ) identifier[pattern] [ literal[int] ]= literal[int] identifier[pattern] [ literal[int] ]= identifier[int] ( identifier[args] [ literal[int] ]) identifier[pattern] [ literal[int] ]= identifier[int] ( identifier[args] [ literal[int] ]) identifier[pattern] [ literal[int] ]= identifier[int] ( identifier[args] [ literal[int] ]) identifier[self] . identifier[master] . identifier[mav] . identifier[led_control_send] ( identifier[self] . identifier[settings] . identifier[target_system] , identifier[self] . identifier[settings] . identifier[target_component] , identifier[lednum] , literal[int] , literal[int] , identifier[pattern] )
def cmd_oreoled(self, args): """send LED pattern as override, using OreoLED conventions""" if len(args) < 4: print('Usage: oreoled LEDNUM RED GREEN BLUE <RATE>') return # depends on [control=['if'], data=[]] lednum = int(args[0]) pattern = [0] * 24 pattern[0] = ord('R') pattern[1] = ord('G') pattern[2] = ord('B') pattern[3] = ord('0') pattern[4] = 0 pattern[5] = int(args[1]) pattern[6] = int(args[2]) pattern[7] = int(args[3]) self.master.mav.led_control_send(self.settings.target_system, self.settings.target_component, lednum, 255, 8, pattern)
def require_int(self, key: str) -> int: """ Returns a configuration value, as an int, by its given key. If it doesn't exist, or the configuration value is not a legal int, an error is thrown. :param str key: The requested configuration key. :return: The configuration key's value. :rtype: int :raises ConfigMissingError: The configuration value did not exist. :raises ConfigTypeError: The configuration value existed but couldn't be coerced to int. """ v = self.get_int(key) if v is None: raise ConfigMissingError(self.full_key(key)) return v
def function[require_int, parameter[self, key]]: constant[ Returns a configuration value, as an int, by its given key. If it doesn't exist, or the configuration value is not a legal int, an error is thrown. :param str key: The requested configuration key. :return: The configuration key's value. :rtype: int :raises ConfigMissingError: The configuration value did not exist. :raises ConfigTypeError: The configuration value existed but couldn't be coerced to int. ] variable[v] assign[=] call[name[self].get_int, parameter[name[key]]] if compare[name[v] is constant[None]] begin[:] <ast.Raise object at 0x7da20c6e49a0> return[name[v]]
keyword[def] identifier[require_int] ( identifier[self] , identifier[key] : identifier[str] )-> identifier[int] : literal[string] identifier[v] = identifier[self] . identifier[get_int] ( identifier[key] ) keyword[if] identifier[v] keyword[is] keyword[None] : keyword[raise] identifier[ConfigMissingError] ( identifier[self] . identifier[full_key] ( identifier[key] )) keyword[return] identifier[v]
def require_int(self, key: str) -> int: """ Returns a configuration value, as an int, by its given key. If it doesn't exist, or the configuration value is not a legal int, an error is thrown. :param str key: The requested configuration key. :return: The configuration key's value. :rtype: int :raises ConfigMissingError: The configuration value did not exist. :raises ConfigTypeError: The configuration value existed but couldn't be coerced to int. """ v = self.get_int(key) if v is None: raise ConfigMissingError(self.full_key(key)) # depends on [control=['if'], data=[]] return v
def inject(self, filename, content): """ add the injection content to the dictionary """ # ensure content always has one trailing newline content = _unicode(content).rstrip() + "\n" if filename not in self.inject_dict: self.inject_dict[filename] = "" self.inject_dict[filename] += content
def function[inject, parameter[self, filename, content]]: constant[ add the injection content to the dictionary ] variable[content] assign[=] binary_operation[call[call[name[_unicode], parameter[name[content]]].rstrip, parameter[]] + constant[ ]] if compare[name[filename] <ast.NotIn object at 0x7da2590d7190> name[self].inject_dict] begin[:] call[name[self].inject_dict][name[filename]] assign[=] constant[] <ast.AugAssign object at 0x7da2047e8730>
keyword[def] identifier[inject] ( identifier[self] , identifier[filename] , identifier[content] ): literal[string] identifier[content] = identifier[_unicode] ( identifier[content] ). identifier[rstrip] ()+ literal[string] keyword[if] identifier[filename] keyword[not] keyword[in] identifier[self] . identifier[inject_dict] : identifier[self] . identifier[inject_dict] [ identifier[filename] ]= literal[string] identifier[self] . identifier[inject_dict] [ identifier[filename] ]+= identifier[content]
def inject(self, filename, content): """ add the injection content to the dictionary """ # ensure content always has one trailing newline content = _unicode(content).rstrip() + '\n' if filename not in self.inject_dict: self.inject_dict[filename] = '' # depends on [control=['if'], data=['filename']] self.inject_dict[filename] += content
def get_payload(self): """Return Payload.""" payload = bytes([self.node_id]) payload += string_to_bytes(self.name, 64) payload += bytes([self.order >> 8 & 255, self.order & 255]) payload += bytes([self.placement]) payload += bytes([self.node_variation.value]) return payload
def function[get_payload, parameter[self]]: constant[Return Payload.] variable[payload] assign[=] call[name[bytes], parameter[list[[<ast.Attribute object at 0x7da1b26ac400>]]]] <ast.AugAssign object at 0x7da1b26adb70> <ast.AugAssign object at 0x7da1b26ad8d0> <ast.AugAssign object at 0x7da1b26aea10> <ast.AugAssign object at 0x7da1b26ac520> return[name[payload]]
keyword[def] identifier[get_payload] ( identifier[self] ): literal[string] identifier[payload] = identifier[bytes] ([ identifier[self] . identifier[node_id] ]) identifier[payload] += identifier[string_to_bytes] ( identifier[self] . identifier[name] , literal[int] ) identifier[payload] += identifier[bytes] ([ identifier[self] . identifier[order] >> literal[int] & literal[int] , identifier[self] . identifier[order] & literal[int] ]) identifier[payload] += identifier[bytes] ([ identifier[self] . identifier[placement] ]) identifier[payload] += identifier[bytes] ([ identifier[self] . identifier[node_variation] . identifier[value] ]) keyword[return] identifier[payload]
def get_payload(self): """Return Payload.""" payload = bytes([self.node_id]) payload += string_to_bytes(self.name, 64) payload += bytes([self.order >> 8 & 255, self.order & 255]) payload += bytes([self.placement]) payload += bytes([self.node_variation.value]) return payload
def transform_y(self, tfms:TfmList=None, **kwargs): "Set `tfms` to be applied to the targets only." _check_kwargs(self.y, tfms, **kwargs) self.tfm_y=True if tfms is None: self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms))) self.tfmargs_y = {**self.tfmargs, **kwargs} else: tfms = list(filter(lambda t: t.use_on_y, tfms)) self.tfms_y,self.tfmargs_y = tfms,kwargs return self
def function[transform_y, parameter[self, tfms]]: constant[Set `tfms` to be applied to the targets only.] call[name[_check_kwargs], parameter[name[self].y, name[tfms]]] name[self].tfm_y assign[=] constant[True] if compare[name[tfms] is constant[None]] begin[:] name[self].tfms_y assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da18f00ed40>, call[name[listify], parameter[name[self].tfms]]]]]] name[self].tfmargs_y assign[=] dictionary[[None, None], [<ast.Attribute object at 0x7da20e9b1ff0>, <ast.Name object at 0x7da20e9b0a00>]] return[name[self]]
keyword[def] identifier[transform_y] ( identifier[self] , identifier[tfms] : identifier[TfmList] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[_check_kwargs] ( identifier[self] . identifier[y] , identifier[tfms] ,** identifier[kwargs] ) identifier[self] . identifier[tfm_y] = keyword[True] keyword[if] identifier[tfms] keyword[is] keyword[None] : identifier[self] . identifier[tfms_y] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[t] : identifier[t] . identifier[use_on_y] , identifier[listify] ( identifier[self] . identifier[tfms] ))) identifier[self] . identifier[tfmargs_y] ={** identifier[self] . identifier[tfmargs] ,** identifier[kwargs] } keyword[else] : identifier[tfms] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[t] : identifier[t] . identifier[use_on_y] , identifier[tfms] )) identifier[self] . identifier[tfms_y] , identifier[self] . identifier[tfmargs_y] = identifier[tfms] , identifier[kwargs] keyword[return] identifier[self]
def transform_y(self, tfms: TfmList=None, **kwargs): """Set `tfms` to be applied to the targets only.""" _check_kwargs(self.y, tfms, **kwargs) self.tfm_y = True if tfms is None: self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms))) self.tfmargs_y = {**self.tfmargs, **kwargs} # depends on [control=['if'], data=[]] else: tfms = list(filter(lambda t: t.use_on_y, tfms)) (self.tfms_y, self.tfmargs_y) = (tfms, kwargs) return self
def guess_depth(packages): """ Guess the optimal depth to use for the given list of arguments. Args: packages (list of str): list of packages. Returns: int: guessed depth to use. """ if len(packages) == 1: return packages[0].count('.') + 2 return min(p.count('.') for p in packages) + 1
def function[guess_depth, parameter[packages]]: constant[ Guess the optimal depth to use for the given list of arguments. Args: packages (list of str): list of packages. Returns: int: guessed depth to use. ] if compare[call[name[len], parameter[name[packages]]] equal[==] constant[1]] begin[:] return[binary_operation[call[call[name[packages]][constant[0]].count, parameter[constant[.]]] + constant[2]]] return[binary_operation[call[name[min], parameter[<ast.GeneratorExp object at 0x7da1b236aaa0>]] + constant[1]]]
keyword[def] identifier[guess_depth] ( identifier[packages] ): literal[string] keyword[if] identifier[len] ( identifier[packages] )== literal[int] : keyword[return] identifier[packages] [ literal[int] ]. identifier[count] ( literal[string] )+ literal[int] keyword[return] identifier[min] ( identifier[p] . identifier[count] ( literal[string] ) keyword[for] identifier[p] keyword[in] identifier[packages] )+ literal[int]
def guess_depth(packages): """ Guess the optimal depth to use for the given list of arguments. Args: packages (list of str): list of packages. Returns: int: guessed depth to use. """ if len(packages) == 1: return packages[0].count('.') + 2 # depends on [control=['if'], data=[]] return min((p.count('.') for p in packages)) + 1
def _modu16(ins): ''' Reminder of div. 2 16bit unsigned integers. The result is pushed onto the stack. Optimizations: * If 2nd operand is 1 => Return 0 * If 2nd operand = 2^n => do AND (2^n - 1) ''' op1, op2 = tuple(ins.quad[2:]) if is_int(op2): op2 = int16(op2) output = _16bit_oper(op1) if op2 == 1: if op2[0] in ('_', '$'): output = [] # Optimization: Discard previous op if not from the stack output.append('ld hl, 0') output.append('push hl') return output if is_2n(op2): k = op2 - 1 if op2 > 255: # only affects H output.append('ld a, h') output.append('and %i' % (k >> 8)) output.append('ld h, a') else: output.append('ld h, 0') # High part goes 0 output.append('ld a, l') output.append('and %i' % (k % 0xFF)) output.append('ld l, a') output.append('push hl') return output output.append('ld de, %i' % op2) else: output = _16bit_oper(op1, op2) output.append('call __MODU16') output.append('push hl') REQUIRES.add('div16.asm') return output
def function[_modu16, parameter[ins]]: constant[ Reminder of div. 2 16bit unsigned integers. The result is pushed onto the stack. Optimizations: * If 2nd operand is 1 => Return 0 * If 2nd operand = 2^n => do AND (2^n - 1) ] <ast.Tuple object at 0x7da1b06165f0> assign[=] call[name[tuple], parameter[call[name[ins].quad][<ast.Slice object at 0x7da1b06143d0>]]] if call[name[is_int], parameter[name[op2]]] begin[:] variable[op2] assign[=] call[name[int16], parameter[name[op2]]] variable[output] assign[=] call[name[_16bit_oper], parameter[name[op1]]] if compare[name[op2] equal[==] constant[1]] begin[:] if compare[call[name[op2]][constant[0]] in tuple[[<ast.Constant object at 0x7da1b0617e50>, <ast.Constant object at 0x7da1b0614c40>]]] begin[:] variable[output] assign[=] list[[]] call[name[output].append, parameter[constant[ld hl, 0]]] call[name[output].append, parameter[constant[push hl]]] return[name[output]] if call[name[is_2n], parameter[name[op2]]] begin[:] variable[k] assign[=] binary_operation[name[op2] - constant[1]] if compare[name[op2] greater[>] constant[255]] begin[:] call[name[output].append, parameter[constant[ld a, h]]] call[name[output].append, parameter[binary_operation[constant[and %i] <ast.Mod object at 0x7da2590d6920> binary_operation[name[k] <ast.RShift object at 0x7da2590d6a40> constant[8]]]]] call[name[output].append, parameter[constant[ld h, a]]] call[name[output].append, parameter[constant[push hl]]] return[name[output]] call[name[output].append, parameter[binary_operation[constant[ld de, %i] <ast.Mod object at 0x7da2590d6920> name[op2]]]] call[name[output].append, parameter[constant[call __MODU16]]] call[name[output].append, parameter[constant[push hl]]] call[name[REQUIRES].add, parameter[constant[div16.asm]]] return[name[output]]
keyword[def] identifier[_modu16] ( identifier[ins] ): literal[string] identifier[op1] , identifier[op2] = identifier[tuple] ( identifier[ins] . identifier[quad] [ literal[int] :]) keyword[if] identifier[is_int] ( identifier[op2] ): identifier[op2] = identifier[int16] ( identifier[op2] ) identifier[output] = identifier[_16bit_oper] ( identifier[op1] ) keyword[if] identifier[op2] == literal[int] : keyword[if] identifier[op2] [ literal[int] ] keyword[in] ( literal[string] , literal[string] ): identifier[output] =[] identifier[output] . identifier[append] ( literal[string] ) identifier[output] . identifier[append] ( literal[string] ) keyword[return] identifier[output] keyword[if] identifier[is_2n] ( identifier[op2] ): identifier[k] = identifier[op2] - literal[int] keyword[if] identifier[op2] > literal[int] : identifier[output] . identifier[append] ( literal[string] ) identifier[output] . identifier[append] ( literal[string] %( identifier[k] >> literal[int] )) identifier[output] . identifier[append] ( literal[string] ) keyword[else] : identifier[output] . identifier[append] ( literal[string] ) identifier[output] . identifier[append] ( literal[string] ) identifier[output] . identifier[append] ( literal[string] %( identifier[k] % literal[int] )) identifier[output] . identifier[append] ( literal[string] ) identifier[output] . identifier[append] ( literal[string] ) keyword[return] identifier[output] identifier[output] . identifier[append] ( literal[string] % identifier[op2] ) keyword[else] : identifier[output] = identifier[_16bit_oper] ( identifier[op1] , identifier[op2] ) identifier[output] . identifier[append] ( literal[string] ) identifier[output] . identifier[append] ( literal[string] ) identifier[REQUIRES] . identifier[add] ( literal[string] ) keyword[return] identifier[output]
def _modu16(ins): """ Reminder of div. 2 16bit unsigned integers. The result is pushed onto the stack. Optimizations: * If 2nd operand is 1 => Return 0 * If 2nd operand = 2^n => do AND (2^n - 1) """ (op1, op2) = tuple(ins.quad[2:]) if is_int(op2): op2 = int16(op2) output = _16bit_oper(op1) if op2 == 1: if op2[0] in ('_', '$'): output = [] # Optimization: Discard previous op if not from the stack # depends on [control=['if'], data=[]] output.append('ld hl, 0') output.append('push hl') return output # depends on [control=['if'], data=['op2']] if is_2n(op2): k = op2 - 1 if op2 > 255: # only affects H output.append('ld a, h') output.append('and %i' % (k >> 8)) output.append('ld h, a') # depends on [control=['if'], data=[]] else: output.append('ld h, 0') # High part goes 0 output.append('ld a, l') output.append('and %i' % (k % 255)) output.append('ld l, a') output.append('push hl') return output # depends on [control=['if'], data=[]] output.append('ld de, %i' % op2) # depends on [control=['if'], data=[]] else: output = _16bit_oper(op1, op2) output.append('call __MODU16') output.append('push hl') REQUIRES.add('div16.asm') return output
def getHostDetailsByIndex(self, index, lanInterfaceId=1, timeout=1): """Execute GetGenericHostEntry action to get detailed information's of a connected host. :param index: the index of the host :param int lanInterfaceId: the id of the LAN interface :param float timeout: the timeout to wait for the action to be executed :return: the detailed information's of a connected host. :rtype: HostDetails .. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected` """ namespace = Lan.getServiceType("getHostDetailsByIndex") + str(lanInterfaceId) uri = self.getControlURL(namespace) results = self.execute(uri, namespace, "GetGenericHostEntry", timeout=timeout, NewIndex=index) return HostDetails(results)
def function[getHostDetailsByIndex, parameter[self, index, lanInterfaceId, timeout]]: constant[Execute GetGenericHostEntry action to get detailed information's of a connected host. :param index: the index of the host :param int lanInterfaceId: the id of the LAN interface :param float timeout: the timeout to wait for the action to be executed :return: the detailed information's of a connected host. :rtype: HostDetails .. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected` ] variable[namespace] assign[=] binary_operation[call[name[Lan].getServiceType, parameter[constant[getHostDetailsByIndex]]] + call[name[str], parameter[name[lanInterfaceId]]]] variable[uri] assign[=] call[name[self].getControlURL, parameter[name[namespace]]] variable[results] assign[=] call[name[self].execute, parameter[name[uri], name[namespace], constant[GetGenericHostEntry]]] return[call[name[HostDetails], parameter[name[results]]]]
keyword[def] identifier[getHostDetailsByIndex] ( identifier[self] , identifier[index] , identifier[lanInterfaceId] = literal[int] , identifier[timeout] = literal[int] ): literal[string] identifier[namespace] = identifier[Lan] . identifier[getServiceType] ( literal[string] )+ identifier[str] ( identifier[lanInterfaceId] ) identifier[uri] = identifier[self] . identifier[getControlURL] ( identifier[namespace] ) identifier[results] = identifier[self] . identifier[execute] ( identifier[uri] , identifier[namespace] , literal[string] , identifier[timeout] = identifier[timeout] , identifier[NewIndex] = identifier[index] ) keyword[return] identifier[HostDetails] ( identifier[results] )
def getHostDetailsByIndex(self, index, lanInterfaceId=1, timeout=1): """Execute GetGenericHostEntry action to get detailed information's of a connected host. :param index: the index of the host :param int lanInterfaceId: the id of the LAN interface :param float timeout: the timeout to wait for the action to be executed :return: the detailed information's of a connected host. :rtype: HostDetails .. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected` """ namespace = Lan.getServiceType('getHostDetailsByIndex') + str(lanInterfaceId) uri = self.getControlURL(namespace) results = self.execute(uri, namespace, 'GetGenericHostEntry', timeout=timeout, NewIndex=index) return HostDetails(results)
def K(self, X, X2, target): """Compute the covariance matrix between X and X2.""" if X2 is None: X2 = X # i1 = X[:,1] # i2 = X2[:,1] # X = X[:,0].reshape(-1,1) # X2 = X2[:,0].reshape(-1,1) dist = np.abs(X - X2.T) ly=1/self.lengthscaleY lu=np.sqrt(3)/self.lengthscaleU #ly=self.lengthscaleY #lu=self.lengthscaleU k1 = np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2 k2 = (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2 k3 = np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 ) np.add(self.varianceU*self.varianceY*(k1+k2+k3), target, target)
def function[K, parameter[self, X, X2, target]]: constant[Compute the covariance matrix between X and X2.] if compare[name[X2] is constant[None]] begin[:] variable[X2] assign[=] name[X] variable[dist] assign[=] call[name[np].abs, parameter[binary_operation[name[X] - name[X2].T]]] variable[ly] assign[=] binary_operation[constant[1] / name[self].lengthscaleY] variable[lu] assign[=] binary_operation[call[name[np].sqrt, parameter[constant[3]]] / name[self].lengthscaleU] variable[k1] assign[=] binary_operation[binary_operation[call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1c0e2c0> * name[dist]]]] * binary_operation[binary_operation[constant[2] * name[lu]] + name[ly]]] / binary_operation[binary_operation[name[lu] + name[ly]] ** constant[2]]] variable[k2] assign[=] binary_operation[binary_operation[binary_operation[call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1c0d4b0> * name[dist]]]] * binary_operation[binary_operation[binary_operation[name[ly] - binary_operation[constant[2] * name[lu]]] + binary_operation[binary_operation[name[lu] * name[ly]] * name[dist]]] - binary_operation[binary_operation[name[lu] ** constant[2]] * name[dist]]]] + binary_operation[call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1c0d4e0> * name[dist]]]] * binary_operation[binary_operation[constant[2] * name[lu]] - name[ly]]]] / binary_operation[binary_operation[name[ly] - name[lu]] ** constant[2]]] variable[k3] assign[=] binary_operation[call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1c0db70> * name[dist]]]] * binary_operation[binary_operation[binary_operation[constant[1] + binary_operation[name[lu] * name[dist]]] / binary_operation[name[lu] + name[ly]]] + binary_operation[name[lu] / binary_operation[binary_operation[name[lu] + name[ly]] ** constant[2]]]]] call[name[np].add, parameter[binary_operation[binary_operation[name[self].varianceU * name[self].varianceY] * 
binary_operation[binary_operation[name[k1] + name[k2]] + name[k3]]], name[target], name[target]]]
keyword[def] identifier[K] ( identifier[self] , identifier[X] , identifier[X2] , identifier[target] ): literal[string] keyword[if] identifier[X2] keyword[is] keyword[None] : identifier[X2] = identifier[X] identifier[dist] = identifier[np] . identifier[abs] ( identifier[X] - identifier[X2] . identifier[T] ) identifier[ly] = literal[int] / identifier[self] . identifier[lengthscaleY] identifier[lu] = identifier[np] . identifier[sqrt] ( literal[int] )/ identifier[self] . identifier[lengthscaleU] identifier[k1] = identifier[np] . identifier[exp] (- identifier[ly] * identifier[dist] )*( literal[int] * identifier[lu] + identifier[ly] )/( identifier[lu] + identifier[ly] )** literal[int] identifier[k2] =( identifier[np] . identifier[exp] (- identifier[lu] * identifier[dist] )*( identifier[ly] - literal[int] * identifier[lu] + identifier[lu] * identifier[ly] * identifier[dist] - identifier[lu] ** literal[int] * identifier[dist] )+ identifier[np] . identifier[exp] (- identifier[ly] * identifier[dist] )*( literal[int] * identifier[lu] - identifier[ly] ))/( identifier[ly] - identifier[lu] )** literal[int] identifier[k3] = identifier[np] . identifier[exp] (- identifier[lu] * identifier[dist] )*(( literal[int] + identifier[lu] * identifier[dist] )/( identifier[lu] + identifier[ly] )+( identifier[lu] )/( identifier[lu] + identifier[ly] )** literal[int] ) identifier[np] . identifier[add] ( identifier[self] . identifier[varianceU] * identifier[self] . identifier[varianceY] *( identifier[k1] + identifier[k2] + identifier[k3] ), identifier[target] , identifier[target] )
def K(self, X, X2, target): """Compute the covariance matrix between X and X2.""" if X2 is None: X2 = X # depends on [control=['if'], data=['X2']] # i1 = X[:,1] # i2 = X2[:,1] # X = X[:,0].reshape(-1,1) # X2 = X2[:,0].reshape(-1,1) dist = np.abs(X - X2.T) ly = 1 / self.lengthscaleY lu = np.sqrt(3) / self.lengthscaleU #ly=self.lengthscaleY #lu=self.lengthscaleU k1 = np.exp(-ly * dist) * (2 * lu + ly) / (lu + ly) ** 2 k2 = (np.exp(-lu * dist) * (ly - 2 * lu + lu * ly * dist - lu ** 2 * dist) + np.exp(-ly * dist) * (2 * lu - ly)) / (ly - lu) ** 2 k3 = np.exp(-lu * dist) * ((1 + lu * dist) / (lu + ly) + lu / (lu + ly) ** 2) np.add(self.varianceU * self.varianceY * (k1 + k2 + k3), target, target)
def addJunctionPos(shape, fromPos, toPos): """Extends shape with the given positions in case they differ from the existing endpoints. assumes that shape and positions have the same dimensionality""" result = list(shape) if fromPos != shape[0]: result = [fromPos] + result if toPos != shape[-1]: result.append(toPos) return result
def function[addJunctionPos, parameter[shape, fromPos, toPos]]: constant[Extends shape with the given positions in case they differ from the existing endpoints. assumes that shape and positions have the same dimensionality] variable[result] assign[=] call[name[list], parameter[name[shape]]] if compare[name[fromPos] not_equal[!=] call[name[shape]][constant[0]]] begin[:] variable[result] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b0944670>]] + name[result]] if compare[name[toPos] not_equal[!=] call[name[shape]][<ast.UnaryOp object at 0x7da1b09465c0>]] begin[:] call[name[result].append, parameter[name[toPos]]] return[name[result]]
keyword[def] identifier[addJunctionPos] ( identifier[shape] , identifier[fromPos] , identifier[toPos] ): literal[string] identifier[result] = identifier[list] ( identifier[shape] ) keyword[if] identifier[fromPos] != identifier[shape] [ literal[int] ]: identifier[result] =[ identifier[fromPos] ]+ identifier[result] keyword[if] identifier[toPos] != identifier[shape] [- literal[int] ]: identifier[result] . identifier[append] ( identifier[toPos] ) keyword[return] identifier[result]
def addJunctionPos(shape, fromPos, toPos): """Extends shape with the given positions in case they differ from the existing endpoints. assumes that shape and positions have the same dimensionality""" result = list(shape) if fromPos != shape[0]: result = [fromPos] + result # depends on [control=['if'], data=['fromPos']] if toPos != shape[-1]: result.append(toPos) # depends on [control=['if'], data=['toPos']] return result
def template_stack(name=None, profile=None): ''' Return template a specific stack (heat stack-template) name Name of the stack profile Profile to use CLI Example: .. code-block:: bash salt '*' heat.template_stack name=mystack profile=openstack1 ''' h_client = _auth(profile) if not name: return { 'result': False, 'comment': 'Parameter name missing or None' } try: get_template = h_client.stacks.template(name) except heatclient.exc.HTTPNotFound: return { 'result': False, 'comment': 'No stack with {0}'.format(name) } except heatclient.exc.BadRequest: return { 'result': False, 'comment': 'Bad request fot stack {0}'.format(name) } if 'heat_template_version' in get_template: template = salt.utils.yaml.safe_dump(get_template) else: template = jsonutils.dumps(get_template, indent=2, ensure_ascii=False) checksum = __salt__['hashutil.digest'](template) ret = { 'template': template, 'result': True, 'checksum': checksum } return ret
def function[template_stack, parameter[name, profile]]: constant[ Return template a specific stack (heat stack-template) name Name of the stack profile Profile to use CLI Example: .. code-block:: bash salt '*' heat.template_stack name=mystack profile=openstack1 ] variable[h_client] assign[=] call[name[_auth], parameter[name[profile]]] if <ast.UnaryOp object at 0x7da18f58c5e0> begin[:] return[dictionary[[<ast.Constant object at 0x7da18f58f6d0>, <ast.Constant object at 0x7da18f58c520>], [<ast.Constant object at 0x7da18f58d720>, <ast.Constant object at 0x7da18f58e830>]]] <ast.Try object at 0x7da18f58f670> if compare[constant[heat_template_version] in name[get_template]] begin[:] variable[template] assign[=] call[name[salt].utils.yaml.safe_dump, parameter[name[get_template]]] variable[checksum] assign[=] call[call[name[__salt__]][constant[hashutil.digest]], parameter[name[template]]] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18f58df90>, <ast.Constant object at 0x7da18f58db40>, <ast.Constant object at 0x7da18f58dd20>], [<ast.Name object at 0x7da18f58c6a0>, <ast.Constant object at 0x7da18f58f250>, <ast.Name object at 0x7da18f58eec0>]] return[name[ret]]
keyword[def] identifier[template_stack] ( identifier[name] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] identifier[h_client] = identifier[_auth] ( identifier[profile] ) keyword[if] keyword[not] identifier[name] : keyword[return] { literal[string] : keyword[False] , literal[string] : literal[string] } keyword[try] : identifier[get_template] = identifier[h_client] . identifier[stacks] . identifier[template] ( identifier[name] ) keyword[except] identifier[heatclient] . identifier[exc] . identifier[HTTPNotFound] : keyword[return] { literal[string] : keyword[False] , literal[string] : literal[string] . identifier[format] ( identifier[name] ) } keyword[except] identifier[heatclient] . identifier[exc] . identifier[BadRequest] : keyword[return] { literal[string] : keyword[False] , literal[string] : literal[string] . identifier[format] ( identifier[name] ) } keyword[if] literal[string] keyword[in] identifier[get_template] : identifier[template] = identifier[salt] . identifier[utils] . identifier[yaml] . identifier[safe_dump] ( identifier[get_template] ) keyword[else] : identifier[template] = identifier[jsonutils] . identifier[dumps] ( identifier[get_template] , identifier[indent] = literal[int] , identifier[ensure_ascii] = keyword[False] ) identifier[checksum] = identifier[__salt__] [ literal[string] ]( identifier[template] ) identifier[ret] ={ literal[string] : identifier[template] , literal[string] : keyword[True] , literal[string] : identifier[checksum] } keyword[return] identifier[ret]
def template_stack(name=None, profile=None): """ Return template a specific stack (heat stack-template) name Name of the stack profile Profile to use CLI Example: .. code-block:: bash salt '*' heat.template_stack name=mystack profile=openstack1 """ h_client = _auth(profile) if not name: return {'result': False, 'comment': 'Parameter name missing or None'} # depends on [control=['if'], data=[]] try: get_template = h_client.stacks.template(name) # depends on [control=['try'], data=[]] except heatclient.exc.HTTPNotFound: return {'result': False, 'comment': 'No stack with {0}'.format(name)} # depends on [control=['except'], data=[]] except heatclient.exc.BadRequest: return {'result': False, 'comment': 'Bad request fot stack {0}'.format(name)} # depends on [control=['except'], data=[]] if 'heat_template_version' in get_template: template = salt.utils.yaml.safe_dump(get_template) # depends on [control=['if'], data=['get_template']] else: template = jsonutils.dumps(get_template, indent=2, ensure_ascii=False) checksum = __salt__['hashutil.digest'](template) ret = {'template': template, 'result': True, 'checksum': checksum} return ret
def dfa_projection(dfa: dict, symbols_to_remove: set) -> dict: """ Returns a NFA that reads the language recognized by the input DFA where all the symbols in **symbols_to_project** are projected out of the alphabet. Projection in a DFA is the operation that existentially removes from a word all occurrence of symbols in a set X. Given a dfa :math:`A = (Σ, S, s_0 , ρ, F )`, we can define an NFA :math:`A_{πX}` that recognizes the language :math:`πX(L(A))` as :math:`A_{πX}= ( Σ−X, S, S_0 , ρ_X , F )` where • :math:`S_0 = \{s | (s_0 , s) ∈ ε_X \}` • :math:`(s,a,s_y ) ∈ ρ_X` iff there exist :math:`(t, t_y)` s.t. :math:`(s,t) ∈ ε_X , t_y = ρ(t,a)` and :math:`(t_y , s_y ) ∈ ε_X` :param dict dfa: input DFA; :param set symbols_to_remove: set containing symbols ∈ dfa[ 'alphabet'] to be projected out from DFA. :return: *(dict)* representing a NFA. """ nfa = { 'alphabet': dfa['alphabet'].difference(symbols_to_remove), 'states': dfa['states'].copy(), 'initial_states': {dfa['initial_state']}, 'accepting_states': dfa['accepting_states'].copy(), 'transitions': dict() } current_nfa_transitions = None current_e_x = None e_x = dict() # equivalence relation dictionary # while no more changes are possible while current_nfa_transitions != nfa['transitions'] or current_e_x != e_x: current_nfa_transitions = nfa['transitions'].copy() current_e_x = deepcopy(e_x) for (state, a) in dfa['transitions']: next_state = dfa['transitions'][state, a] if a in symbols_to_remove: # mark next_state as equivalent to state e_x.setdefault(state, set()).add(next_state) app_set = set() for equivalent in e_x[state]: # mark states equivalent to next_states also to state if equivalent in e_x: app_set.update(e_x[equivalent]) # add all transitions of equivalent states to state for act in nfa['alphabet']: if (equivalent, act) in dfa['transitions']: equivalent_next = dfa['transitions'][ equivalent, act] nfa['transitions'].setdefault( (state, act), set()).add(equivalent_next) # if equivalent_next has equivalent states 
if equivalent_next in e_x: # the transition leads also to these states nfa['transitions'][state, act].update( e_x[equivalent_next]) e_x[state].update(app_set) else: # add the transition to the NFA nfa['transitions'].setdefault((state, a), set()).add( next_state) # if next_state has equivalent states if next_state in e_x: # the same transition arrive also to all these other states nfa['transitions'][state, a].update(e_x[next_state]) # Add all state equivalent to the initial one to NFA initial states set if dfa['initial_state'] in e_x: nfa['initial_states'].update(e_x[dfa['initial_state']]) return nfa
def function[dfa_projection, parameter[dfa, symbols_to_remove]]: constant[ Returns a NFA that reads the language recognized by the input DFA where all the symbols in **symbols_to_project** are projected out of the alphabet. Projection in a DFA is the operation that existentially removes from a word all occurrence of symbols in a set X. Given a dfa :math:`A = (Σ, S, s_0 , ρ, F )`, we can define an NFA :math:`A_{πX}` that recognizes the language :math:`πX(L(A))` as :math:`A_{πX}= ( Σ−X, S, S_0 , ρ_X , F )` where • :math:`S_0 = \{s | (s_0 , s) ∈ ε_X \}` • :math:`(s,a,s_y ) ∈ ρ_X` iff there exist :math:`(t, t_y)` s.t. :math:`(s,t) ∈ ε_X , t_y = ρ(t,a)` and :math:`(t_y , s_y ) ∈ ε_X` :param dict dfa: input DFA; :param set symbols_to_remove: set containing symbols ∈ dfa[ 'alphabet'] to be projected out from DFA. :return: *(dict)* representing a NFA. ] variable[nfa] assign[=] dictionary[[<ast.Constant object at 0x7da1b2604b50>, <ast.Constant object at 0x7da1b26061d0>, <ast.Constant object at 0x7da1b2607e50>, <ast.Constant object at 0x7da1b2604820>, <ast.Constant object at 0x7da1b2604670>], [<ast.Call object at 0x7da1b2605000>, <ast.Call object at 0x7da1b2607160>, <ast.Set object at 0x7da1b2605270>, <ast.Call object at 0x7da1b2605f30>, <ast.Call object at 0x7da1b2604910>]] variable[current_nfa_transitions] assign[=] constant[None] variable[current_e_x] assign[=] constant[None] variable[e_x] assign[=] call[name[dict], parameter[]] while <ast.BoolOp object at 0x7da1b2607760> begin[:] variable[current_nfa_transitions] assign[=] call[call[name[nfa]][constant[transitions]].copy, parameter[]] variable[current_e_x] assign[=] call[name[deepcopy], parameter[name[e_x]]] for taget[tuple[[<ast.Name object at 0x7da1b26043a0>, <ast.Name object at 0x7da1b2604d00>]]] in starred[call[name[dfa]][constant[transitions]]] begin[:] variable[next_state] assign[=] call[call[name[dfa]][constant[transitions]]][tuple[[<ast.Name object at 0x7da1b2605240>, <ast.Name object at 0x7da1b2604fd0>]]] if 
compare[name[a] in name[symbols_to_remove]] begin[:] call[call[name[e_x].setdefault, parameter[name[state], call[name[set], parameter[]]]].add, parameter[name[next_state]]] variable[app_set] assign[=] call[name[set], parameter[]] for taget[name[equivalent]] in starred[call[name[e_x]][name[state]]] begin[:] if compare[name[equivalent] in name[e_x]] begin[:] call[name[app_set].update, parameter[call[name[e_x]][name[equivalent]]]] for taget[name[act]] in starred[call[name[nfa]][constant[alphabet]]] begin[:] if compare[tuple[[<ast.Name object at 0x7da1b26a06a0>, <ast.Name object at 0x7da1b26a3c40>]] in call[name[dfa]][constant[transitions]]] begin[:] variable[equivalent_next] assign[=] call[call[name[dfa]][constant[transitions]]][tuple[[<ast.Name object at 0x7da1b26a0f40>, <ast.Name object at 0x7da1b26a19f0>]]] call[call[call[name[nfa]][constant[transitions]].setdefault, parameter[tuple[[<ast.Name object at 0x7da1b26a3d30>, <ast.Name object at 0x7da1b26a0550>]], call[name[set], parameter[]]]].add, parameter[name[equivalent_next]]] if compare[name[equivalent_next] in name[e_x]] begin[:] call[call[call[name[nfa]][constant[transitions]]][tuple[[<ast.Name object at 0x7da1b26a1b10>, <ast.Name object at 0x7da1b26a05b0>]]].update, parameter[call[name[e_x]][name[equivalent_next]]]] call[call[name[e_x]][name[state]].update, parameter[name[app_set]]] if compare[call[name[dfa]][constant[initial_state]] in name[e_x]] begin[:] call[call[name[nfa]][constant[initial_states]].update, parameter[call[name[e_x]][call[name[dfa]][constant[initial_state]]]]] return[name[nfa]]
keyword[def] identifier[dfa_projection] ( identifier[dfa] : identifier[dict] , identifier[symbols_to_remove] : identifier[set] )-> identifier[dict] : literal[string] identifier[nfa] ={ literal[string] : identifier[dfa] [ literal[string] ]. identifier[difference] ( identifier[symbols_to_remove] ), literal[string] : identifier[dfa] [ literal[string] ]. identifier[copy] (), literal[string] :{ identifier[dfa] [ literal[string] ]}, literal[string] : identifier[dfa] [ literal[string] ]. identifier[copy] (), literal[string] : identifier[dict] () } identifier[current_nfa_transitions] = keyword[None] identifier[current_e_x] = keyword[None] identifier[e_x] = identifier[dict] () keyword[while] identifier[current_nfa_transitions] != identifier[nfa] [ literal[string] ] keyword[or] identifier[current_e_x] != identifier[e_x] : identifier[current_nfa_transitions] = identifier[nfa] [ literal[string] ]. identifier[copy] () identifier[current_e_x] = identifier[deepcopy] ( identifier[e_x] ) keyword[for] ( identifier[state] , identifier[a] ) keyword[in] identifier[dfa] [ literal[string] ]: identifier[next_state] = identifier[dfa] [ literal[string] ][ identifier[state] , identifier[a] ] keyword[if] identifier[a] keyword[in] identifier[symbols_to_remove] : identifier[e_x] . identifier[setdefault] ( identifier[state] , identifier[set] ()). identifier[add] ( identifier[next_state] ) identifier[app_set] = identifier[set] () keyword[for] identifier[equivalent] keyword[in] identifier[e_x] [ identifier[state] ]: keyword[if] identifier[equivalent] keyword[in] identifier[e_x] : identifier[app_set] . identifier[update] ( identifier[e_x] [ identifier[equivalent] ]) keyword[for] identifier[act] keyword[in] identifier[nfa] [ literal[string] ]: keyword[if] ( identifier[equivalent] , identifier[act] ) keyword[in] identifier[dfa] [ literal[string] ]: identifier[equivalent_next] = identifier[dfa] [ literal[string] ][ identifier[equivalent] , identifier[act] ] identifier[nfa] [ literal[string] ]. 
identifier[setdefault] ( ( identifier[state] , identifier[act] ), identifier[set] ()). identifier[add] ( identifier[equivalent_next] ) keyword[if] identifier[equivalent_next] keyword[in] identifier[e_x] : identifier[nfa] [ literal[string] ][ identifier[state] , identifier[act] ]. identifier[update] ( identifier[e_x] [ identifier[equivalent_next] ]) identifier[e_x] [ identifier[state] ]. identifier[update] ( identifier[app_set] ) keyword[else] : identifier[nfa] [ literal[string] ]. identifier[setdefault] (( identifier[state] , identifier[a] ), identifier[set] ()). identifier[add] ( identifier[next_state] ) keyword[if] identifier[next_state] keyword[in] identifier[e_x] : identifier[nfa] [ literal[string] ][ identifier[state] , identifier[a] ]. identifier[update] ( identifier[e_x] [ identifier[next_state] ]) keyword[if] identifier[dfa] [ literal[string] ] keyword[in] identifier[e_x] : identifier[nfa] [ literal[string] ]. identifier[update] ( identifier[e_x] [ identifier[dfa] [ literal[string] ]]) keyword[return] identifier[nfa]
def dfa_projection(dfa: dict, symbols_to_remove: set) -> dict: """ Returns a NFA that reads the language recognized by the input DFA where all the symbols in **symbols_to_project** are projected out of the alphabet. Projection in a DFA is the operation that existentially removes from a word all occurrence of symbols in a set X. Given a dfa :math:`A = (Σ, S, s_0 , ρ, F )`, we can define an NFA :math:`A_{πX}` that recognizes the language :math:`πX(L(A))` as :math:`A_{πX}= ( Σ−X, S, S_0 , ρ_X , F )` where • :math:`S_0 = \\{s | (s_0 , s) ∈ ε_X \\}` • :math:`(s,a,s_y ) ∈ ρ_X` iff there exist :math:`(t, t_y)` s.t. :math:`(s,t) ∈ ε_X , t_y = ρ(t,a)` and :math:`(t_y , s_y ) ∈ ε_X` :param dict dfa: input DFA; :param set symbols_to_remove: set containing symbols ∈ dfa[ 'alphabet'] to be projected out from DFA. :return: *(dict)* representing a NFA. """ nfa = {'alphabet': dfa['alphabet'].difference(symbols_to_remove), 'states': dfa['states'].copy(), 'initial_states': {dfa['initial_state']}, 'accepting_states': dfa['accepting_states'].copy(), 'transitions': dict()} current_nfa_transitions = None current_e_x = None e_x = dict() # equivalence relation dictionary # while no more changes are possible while current_nfa_transitions != nfa['transitions'] or current_e_x != e_x: current_nfa_transitions = nfa['transitions'].copy() current_e_x = deepcopy(e_x) for (state, a) in dfa['transitions']: next_state = dfa['transitions'][state, a] if a in symbols_to_remove: # mark next_state as equivalent to state e_x.setdefault(state, set()).add(next_state) app_set = set() for equivalent in e_x[state]: # mark states equivalent to next_states also to state if equivalent in e_x: app_set.update(e_x[equivalent]) # depends on [control=['if'], data=['equivalent', 'e_x']] # add all transitions of equivalent states to state for act in nfa['alphabet']: if (equivalent, act) in dfa['transitions']: equivalent_next = dfa['transitions'][equivalent, act] nfa['transitions'].setdefault((state, act), 
set()).add(equivalent_next) # if equivalent_next has equivalent states if equivalent_next in e_x: # the transition leads also to these states nfa['transitions'][state, act].update(e_x[equivalent_next]) # depends on [control=['if'], data=['equivalent_next', 'e_x']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['act']] # depends on [control=['for'], data=['equivalent']] e_x[state].update(app_set) # depends on [control=['if'], data=[]] else: # add the transition to the NFA nfa['transitions'].setdefault((state, a), set()).add(next_state) # if next_state has equivalent states if next_state in e_x: # the same transition arrive also to all these other states nfa['transitions'][state, a].update(e_x[next_state]) # depends on [control=['if'], data=['next_state', 'e_x']] # depends on [control=['for'], data=[]] # depends on [control=['while'], data=[]] # Add all state equivalent to the initial one to NFA initial states set if dfa['initial_state'] in e_x: nfa['initial_states'].update(e_x[dfa['initial_state']]) # depends on [control=['if'], data=['e_x']] return nfa
def spread_stats(stats, spreader=False): """Iterates all descendant statistics under the given root statistics. When ``spreader=True``, each iteration yields a descendant statistics and `spread()` function together. You should call `spread()` if you want to spread the yielded statistics also. """ spread = spread_t() if spreader else True descendants = deque(stats) while descendants: _stats = descendants.popleft() if spreader: spread.clear() yield _stats, spread else: yield _stats if spread: descendants.extend(_stats)
def function[spread_stats, parameter[stats, spreader]]: constant[Iterates all descendant statistics under the given root statistics. When ``spreader=True``, each iteration yields a descendant statistics and `spread()` function together. You should call `spread()` if you want to spread the yielded statistics also. ] variable[spread] assign[=] <ast.IfExp object at 0x7da1b11a8820> variable[descendants] assign[=] call[name[deque], parameter[name[stats]]] while name[descendants] begin[:] variable[_stats] assign[=] call[name[descendants].popleft, parameter[]] if name[spreader] begin[:] call[name[spread].clear, parameter[]] <ast.Yield object at 0x7da1b11a8ca0> if name[spread] begin[:] call[name[descendants].extend, parameter[name[_stats]]]
keyword[def] identifier[spread_stats] ( identifier[stats] , identifier[spreader] = keyword[False] ): literal[string] identifier[spread] = identifier[spread_t] () keyword[if] identifier[spreader] keyword[else] keyword[True] identifier[descendants] = identifier[deque] ( identifier[stats] ) keyword[while] identifier[descendants] : identifier[_stats] = identifier[descendants] . identifier[popleft] () keyword[if] identifier[spreader] : identifier[spread] . identifier[clear] () keyword[yield] identifier[_stats] , identifier[spread] keyword[else] : keyword[yield] identifier[_stats] keyword[if] identifier[spread] : identifier[descendants] . identifier[extend] ( identifier[_stats] )
def spread_stats(stats, spreader=False): """Iterates all descendant statistics under the given root statistics. When ``spreader=True``, each iteration yields a descendant statistics and `spread()` function together. You should call `spread()` if you want to spread the yielded statistics also. """ spread = spread_t() if spreader else True descendants = deque(stats) while descendants: _stats = descendants.popleft() if spreader: spread.clear() yield (_stats, spread) # depends on [control=['if'], data=[]] else: yield _stats if spread: descendants.extend(_stats) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
def send_confirmation(self, message, success=True): """Send success or error message to configured confirmation :param str message: Body message to send :param bool success: Device executed successfully to personalize message :return: None """ message = message.strip() if not self.confirmation: return try: self.confirmation.send(message, success) except Exception as e: logger.warning('Error sending confirmation on device {}: {}'.format(self.name, e))
def function[send_confirmation, parameter[self, message, success]]: constant[Send success or error message to configured confirmation :param str message: Body message to send :param bool success: Device executed successfully to personalize message :return: None ] variable[message] assign[=] call[name[message].strip, parameter[]] if <ast.UnaryOp object at 0x7da18dc07c10> begin[:] return[None] <ast.Try object at 0x7da18dc07880>
keyword[def] identifier[send_confirmation] ( identifier[self] , identifier[message] , identifier[success] = keyword[True] ): literal[string] identifier[message] = identifier[message] . identifier[strip] () keyword[if] keyword[not] identifier[self] . identifier[confirmation] : keyword[return] keyword[try] : identifier[self] . identifier[confirmation] . identifier[send] ( identifier[message] , identifier[success] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[e] ))
def send_confirmation(self, message, success=True): """Send success or error message to configured confirmation :param str message: Body message to send :param bool success: Device executed successfully to personalize message :return: None """ message = message.strip() if not self.confirmation: return # depends on [control=['if'], data=[]] try: self.confirmation.send(message, success) # depends on [control=['try'], data=[]] except Exception as e: logger.warning('Error sending confirmation on device {}: {}'.format(self.name, e)) # depends on [control=['except'], data=['e']]
def get_config(): """Read the configfile and return config dict. Returns ------- dict Dictionary with the content of the configpath file. """ configpath = get_configpath() if not configpath.exists(): raise IOError("Config file {} not found.".format(str(configpath))) else: config = configparser.ConfigParser() config.read(str(configpath)) return config
def function[get_config, parameter[]]: constant[Read the configfile and return config dict. Returns ------- dict Dictionary with the content of the configpath file. ] variable[configpath] assign[=] call[name[get_configpath], parameter[]] if <ast.UnaryOp object at 0x7da1b11751b0> begin[:] <ast.Raise object at 0x7da1b1174730>
keyword[def] identifier[get_config] (): literal[string] identifier[configpath] = identifier[get_configpath] () keyword[if] keyword[not] identifier[configpath] . identifier[exists] (): keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[str] ( identifier[configpath] ))) keyword[else] : identifier[config] = identifier[configparser] . identifier[ConfigParser] () identifier[config] . identifier[read] ( identifier[str] ( identifier[configpath] )) keyword[return] identifier[config]
def get_config(): """Read the configfile and return config dict. Returns ------- dict Dictionary with the content of the configpath file. """ configpath = get_configpath() if not configpath.exists(): raise IOError('Config file {} not found.'.format(str(configpath))) # depends on [control=['if'], data=[]] else: config = configparser.ConfigParser() config.read(str(configpath)) return config
def save(self, filename='saved.ol.p'): """ Save model to pickle file """ import dill as pickle sv = { # 'feature_function': self.feature_function, 'cl': self.cl } pickle.dump(sv, open(filename, "wb"))
def function[save, parameter[self, filename]]: constant[ Save model to pickle file ] import module[dill] as alias[pickle] variable[sv] assign[=] dictionary[[<ast.Constant object at 0x7da1b23710f0>], [<ast.Attribute object at 0x7da1b2373430>]] call[name[pickle].dump, parameter[name[sv], call[name[open], parameter[name[filename], constant[wb]]]]]
keyword[def] identifier[save] ( identifier[self] , identifier[filename] = literal[string] ): literal[string] keyword[import] identifier[dill] keyword[as] identifier[pickle] identifier[sv] ={ literal[string] : identifier[self] . identifier[cl] } identifier[pickle] . identifier[dump] ( identifier[sv] , identifier[open] ( identifier[filename] , literal[string] ))
def save(self, filename='saved.ol.p'): """ Save model to pickle file """ import dill as pickle # 'feature_function': self.feature_function, sv = {'cl': self.cl} pickle.dump(sv, open(filename, 'wb'))
def enable_support_autoupload(self, **kwargs): """Set Spanning Tree state. Args: enabled (bool): Is Autoupload enabled? (True, False). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... enabled = True ... output = dev.ras.enable_support_autoupload( ... enabled=enabled) ... enabled = False ... output = dev.ras.enable_support_autoupload( ... enabled=enabled) """ enabled = kwargs.pop('enabled') callback = kwargs.pop('callback', self._callback) if not isinstance(enabled, bool): raise ValueError('%s must be `True` or `False`.' % repr(enabled)) state_args = dict() autoupload_state = getattr(self._ras, 'support_autoupload_enable') config = autoupload_state(**state_args) if not enabled: shutdown = config.find('.//*enable') shutdown.set('operation', 'delete') return callback(config)
def function[enable_support_autoupload, parameter[self]]: constant[Set Spanning Tree state. Args: enabled (bool): Is Autoupload enabled? (True, False). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... enabled = True ... output = dev.ras.enable_support_autoupload( ... enabled=enabled) ... enabled = False ... output = dev.ras.enable_support_autoupload( ... enabled=enabled) ] variable[enabled] assign[=] call[name[kwargs].pop, parameter[constant[enabled]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] if <ast.UnaryOp object at 0x7da18f721e10> begin[:] <ast.Raise object at 0x7da18f722860> variable[state_args] assign[=] call[name[dict], parameter[]] variable[autoupload_state] assign[=] call[name[getattr], parameter[name[self]._ras, constant[support_autoupload_enable]]] variable[config] assign[=] call[name[autoupload_state], parameter[]] if <ast.UnaryOp object at 0x7da20c76ee60> begin[:] variable[shutdown] assign[=] call[name[config].find, parameter[constant[.//*enable]]] call[name[shutdown].set, parameter[constant[operation], constant[delete]]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[enable_support_autoupload] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[enabled] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[enabled] , identifier[bool] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[repr] ( identifier[enabled] )) identifier[state_args] = identifier[dict] () identifier[autoupload_state] = identifier[getattr] ( identifier[self] . identifier[_ras] , literal[string] ) identifier[config] = identifier[autoupload_state] (** identifier[state_args] ) keyword[if] keyword[not] identifier[enabled] : identifier[shutdown] = identifier[config] . identifier[find] ( literal[string] ) identifier[shutdown] . identifier[set] ( literal[string] , literal[string] ) keyword[return] identifier[callback] ( identifier[config] )
def enable_support_autoupload(self, **kwargs): """Set Spanning Tree state. Args: enabled (bool): Is Autoupload enabled? (True, False). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... enabled = True ... output = dev.ras.enable_support_autoupload( ... enabled=enabled) ... enabled = False ... output = dev.ras.enable_support_autoupload( ... enabled=enabled) """ enabled = kwargs.pop('enabled') callback = kwargs.pop('callback', self._callback) if not isinstance(enabled, bool): raise ValueError('%s must be `True` or `False`.' % repr(enabled)) # depends on [control=['if'], data=[]] state_args = dict() autoupload_state = getattr(self._ras, 'support_autoupload_enable') config = autoupload_state(**state_args) if not enabled: shutdown = config.find('.//*enable') shutdown.set('operation', 'delete') # depends on [control=['if'], data=[]] return callback(config)
def set_nest_transactions_with_savepoints(self, nest_transactions_with_savepoints): """Sets if nested transactions should use savepoints. :param nest_transactions_with_savepoints: `True` or `False` """ if self._transaction_nesting_level > 0: raise DBALConnectionError.may_not_alter_nested_transaction_with_savepoints_in_transaction() if not self._platform.is_savepoints_supported(): raise DBALConnectionError.savepoints_not_supported() self._nest_transactions_with_savepoints = bool(nest_transactions_with_savepoints)
def function[set_nest_transactions_with_savepoints, parameter[self, nest_transactions_with_savepoints]]: constant[Sets if nested transactions should use savepoints. :param nest_transactions_with_savepoints: `True` or `False` ] if compare[name[self]._transaction_nesting_level greater[>] constant[0]] begin[:] <ast.Raise object at 0x7da1b23479d0> if <ast.UnaryOp object at 0x7da1b2347670> begin[:] <ast.Raise object at 0x7da1b23468f0> name[self]._nest_transactions_with_savepoints assign[=] call[name[bool], parameter[name[nest_transactions_with_savepoints]]]
keyword[def] identifier[set_nest_transactions_with_savepoints] ( identifier[self] , identifier[nest_transactions_with_savepoints] ): literal[string] keyword[if] identifier[self] . identifier[_transaction_nesting_level] > literal[int] : keyword[raise] identifier[DBALConnectionError] . identifier[may_not_alter_nested_transaction_with_savepoints_in_transaction] () keyword[if] keyword[not] identifier[self] . identifier[_platform] . identifier[is_savepoints_supported] (): keyword[raise] identifier[DBALConnectionError] . identifier[savepoints_not_supported] () identifier[self] . identifier[_nest_transactions_with_savepoints] = identifier[bool] ( identifier[nest_transactions_with_savepoints] )
def set_nest_transactions_with_savepoints(self, nest_transactions_with_savepoints): """Sets if nested transactions should use savepoints. :param nest_transactions_with_savepoints: `True` or `False` """ if self._transaction_nesting_level > 0: raise DBALConnectionError.may_not_alter_nested_transaction_with_savepoints_in_transaction() # depends on [control=['if'], data=[]] if not self._platform.is_savepoints_supported(): raise DBALConnectionError.savepoints_not_supported() # depends on [control=['if'], data=[]] self._nest_transactions_with_savepoints = bool(nest_transactions_with_savepoints)
def transpose(self, *axes): """ Transpose just the keys of a BoltArraySpark, returning a new BoltArraySpark. Parameters ---------- axes : tuple New proposed axes. """ new = argpack(axes) old = range(self.ndim) istransposeable(new, old) if new == old: return self._barray def f(k): return tuple(k[i] for i in new) newrdd = self._barray._rdd.map(lambda kv: (f(kv[0]), kv[1])) newshape = tuple(self.shape[i] for i in new) + self._barray.values.shape return BoltArraySpark(newrdd, shape=newshape, ordered=False).__finalize__(self._barray)
def function[transpose, parameter[self]]: constant[ Transpose just the keys of a BoltArraySpark, returning a new BoltArraySpark. Parameters ---------- axes : tuple New proposed axes. ] variable[new] assign[=] call[name[argpack], parameter[name[axes]]] variable[old] assign[=] call[name[range], parameter[name[self].ndim]] call[name[istransposeable], parameter[name[new], name[old]]] if compare[name[new] equal[==] name[old]] begin[:] return[name[self]._barray] def function[f, parameter[k]]: return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da2054a41c0>]]] variable[newrdd] assign[=] call[name[self]._barray._rdd.map, parameter[<ast.Lambda object at 0x7da20e954f70>]] variable[newshape] assign[=] binary_operation[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20e9540a0>]] + name[self]._barray.values.shape] return[call[call[name[BoltArraySpark], parameter[name[newrdd]]].__finalize__, parameter[name[self]._barray]]]
keyword[def] identifier[transpose] ( identifier[self] ,* identifier[axes] ): literal[string] identifier[new] = identifier[argpack] ( identifier[axes] ) identifier[old] = identifier[range] ( identifier[self] . identifier[ndim] ) identifier[istransposeable] ( identifier[new] , identifier[old] ) keyword[if] identifier[new] == identifier[old] : keyword[return] identifier[self] . identifier[_barray] keyword[def] identifier[f] ( identifier[k] ): keyword[return] identifier[tuple] ( identifier[k] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[new] ) identifier[newrdd] = identifier[self] . identifier[_barray] . identifier[_rdd] . identifier[map] ( keyword[lambda] identifier[kv] :( identifier[f] ( identifier[kv] [ literal[int] ]), identifier[kv] [ literal[int] ])) identifier[newshape] = identifier[tuple] ( identifier[self] . identifier[shape] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[new] )+ identifier[self] . identifier[_barray] . identifier[values] . identifier[shape] keyword[return] identifier[BoltArraySpark] ( identifier[newrdd] , identifier[shape] = identifier[newshape] , identifier[ordered] = keyword[False] ). identifier[__finalize__] ( identifier[self] . identifier[_barray] )
def transpose(self, *axes): """ Transpose just the keys of a BoltArraySpark, returning a new BoltArraySpark. Parameters ---------- axes : tuple New proposed axes. """ new = argpack(axes) old = range(self.ndim) istransposeable(new, old) if new == old: return self._barray # depends on [control=['if'], data=[]] def f(k): return tuple((k[i] for i in new)) newrdd = self._barray._rdd.map(lambda kv: (f(kv[0]), kv[1])) newshape = tuple((self.shape[i] for i in new)) + self._barray.values.shape return BoltArraySpark(newrdd, shape=newshape, ordered=False).__finalize__(self._barray)
def _set_rstp(self, v, load=False): """ Setter method for rstp, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/rstp (container) If this variable is read-only (config: false) in the source YANG file, then _set_rstp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rstp() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=rstp.rstp, is_container='container', presence=False, yang_name="rstp", rest_name="rstp", parent=self, choice=(u'spanning-tree-mode', u'rstp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """rstp must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=rstp.rstp, is_container='container', presence=False, yang_name="rstp", rest_name="rstp", parent=self, choice=(u'spanning-tree-mode', u'rstp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='container', is_config=True)""", }) self.__rstp = t if hasattr(self, '_set'): self._set()
def function[_set_rstp, parameter[self, v, load]]: constant[ Setter method for rstp, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/rstp (container) If this variable is read-only (config: false) in the source YANG file, then _set_rstp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rstp() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da2041da620> name[self].__rstp assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_rstp] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[rstp] . identifier[rstp] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[choice] =( literal[string] , literal[string] ), identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[False] , identifier[extensions] = keyword[None] , identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__rstp] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_rstp(self, v, load=False): """ Setter method for rstp, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/rstp (container) If this variable is read-only (config: false) in the source YANG file, then _set_rstp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rstp() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=rstp.rstp, is_container='container', presence=False, yang_name='rstp', rest_name='rstp', parent=self, choice=(u'spanning-tree-mode', u'rstp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='container', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'rstp must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=rstp.rstp, is_container=\'container\', presence=False, yang_name="rstp", rest_name="rstp", parent=self, choice=(u\'spanning-tree-mode\', u\'rstp\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace=\'urn:brocade.com:mgmt:brocade-xstp-ext\', defining_module=\'brocade-xstp-ext\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__rstp = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def find_all_hid_devices(): "Finds all HID devices connected to the system" # # From DDK documentation (finding and Opening HID collection): # After a user-mode application is loaded, it does the following sequence # of operations: # # * Calls HidD_GetHidGuid to obtain the system-defined GUID for HIDClass # devices. # # * Calls SetupDiGetClassDevs to obtain a handle to an opaque device # information set that describes the device interfaces supported by all # the HID collections currently installed in the system. The # application should specify DIGCF_PRESENT and DIGCF_INTERFACEDEVICE # in the Flags parameter passed to SetupDiGetClassDevs. # # * Calls SetupDiEnumDeviceInterfaces repeatedly to retrieve all the # available interface information. # # * Calls SetupDiGetDeviceInterfaceDetail to format interface information # for each collection as a SP_INTERFACE_DEVICE_DETAIL_DATA structure. # The device_path member of this structure contains the user-mode name # that the application uses with the Win32 function CreateFile to # obtain a file handle to a HID collection. # # get HID device class guid guid = winapi.GetHidGuid() # retrieve all the available interface information. 
results = [] required_size = DWORD() info_data = winapi.SP_DEVINFO_DATA() info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA) with winapi.DeviceInterfaceSetInfo(guid) as h_info: for interface_data in winapi.enum_device_interfaces(h_info, guid): device_path = winapi.get_device_path(h_info, interface_data, byref(info_data)) parent_device = c_ulong() #get parent instance id (so we can discriminate on port) if setup_api.CM_Get_Parent(byref(parent_device), info_data.dev_inst, 0) != 0: #CR_SUCCESS = 0 parent_device.value = 0 #null #get unique instance id string required_size.value = 0 winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), None, 0, byref(required_size) ) device_instance_id = create_unicode_buffer(required_size.value) if required_size.value > 0: winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), device_instance_id, required_size, byref(required_size) ) hid_device = HidDevice(device_path, parent_device.value, device_instance_id.value ) else: hid_device = HidDevice(device_path, parent_device.value ) # add device to results, if not protected if hid_device.vendor_id: results.append(hid_device) return results
def function[find_all_hid_devices, parameter[]]: constant[Finds all HID devices connected to the system] variable[guid] assign[=] call[name[winapi].GetHidGuid, parameter[]] variable[results] assign[=] list[[]] variable[required_size] assign[=] call[name[DWORD], parameter[]] variable[info_data] assign[=] call[name[winapi].SP_DEVINFO_DATA, parameter[]] name[info_data].cb_size assign[=] call[name[sizeof], parameter[name[winapi].SP_DEVINFO_DATA]] with call[name[winapi].DeviceInterfaceSetInfo, parameter[name[guid]]] begin[:] for taget[name[interface_data]] in starred[call[name[winapi].enum_device_interfaces, parameter[name[h_info], name[guid]]]] begin[:] variable[device_path] assign[=] call[name[winapi].get_device_path, parameter[name[h_info], name[interface_data], call[name[byref], parameter[name[info_data]]]]] variable[parent_device] assign[=] call[name[c_ulong], parameter[]] if compare[call[name[setup_api].CM_Get_Parent, parameter[call[name[byref], parameter[name[parent_device]]], name[info_data].dev_inst, constant[0]]] not_equal[!=] constant[0]] begin[:] name[parent_device].value assign[=] constant[0] name[required_size].value assign[=] constant[0] call[name[winapi].SetupDiGetDeviceInstanceId, parameter[name[h_info], call[name[byref], parameter[name[info_data]]], constant[None], constant[0], call[name[byref], parameter[name[required_size]]]]] variable[device_instance_id] assign[=] call[name[create_unicode_buffer], parameter[name[required_size].value]] if compare[name[required_size].value greater[>] constant[0]] begin[:] call[name[winapi].SetupDiGetDeviceInstanceId, parameter[name[h_info], call[name[byref], parameter[name[info_data]]], name[device_instance_id], name[required_size], call[name[byref], parameter[name[required_size]]]]] variable[hid_device] assign[=] call[name[HidDevice], parameter[name[device_path], name[parent_device].value, name[device_instance_id].value]] if name[hid_device].vendor_id begin[:] call[name[results].append, parameter[name[hid_device]]] 
return[name[results]]
keyword[def] identifier[find_all_hid_devices] (): literal[string] identifier[guid] = identifier[winapi] . identifier[GetHidGuid] () identifier[results] =[] identifier[required_size] = identifier[DWORD] () identifier[info_data] = identifier[winapi] . identifier[SP_DEVINFO_DATA] () identifier[info_data] . identifier[cb_size] = identifier[sizeof] ( identifier[winapi] . identifier[SP_DEVINFO_DATA] ) keyword[with] identifier[winapi] . identifier[DeviceInterfaceSetInfo] ( identifier[guid] ) keyword[as] identifier[h_info] : keyword[for] identifier[interface_data] keyword[in] identifier[winapi] . identifier[enum_device_interfaces] ( identifier[h_info] , identifier[guid] ): identifier[device_path] = identifier[winapi] . identifier[get_device_path] ( identifier[h_info] , identifier[interface_data] , identifier[byref] ( identifier[info_data] )) identifier[parent_device] = identifier[c_ulong] () keyword[if] identifier[setup_api] . identifier[CM_Get_Parent] ( identifier[byref] ( identifier[parent_device] ), identifier[info_data] . identifier[dev_inst] , literal[int] )!= literal[int] : identifier[parent_device] . identifier[value] = literal[int] identifier[required_size] . identifier[value] = literal[int] identifier[winapi] . identifier[SetupDiGetDeviceInstanceId] ( identifier[h_info] , identifier[byref] ( identifier[info_data] ), keyword[None] , literal[int] , identifier[byref] ( identifier[required_size] )) identifier[device_instance_id] = identifier[create_unicode_buffer] ( identifier[required_size] . identifier[value] ) keyword[if] identifier[required_size] . identifier[value] > literal[int] : identifier[winapi] . identifier[SetupDiGetDeviceInstanceId] ( identifier[h_info] , identifier[byref] ( identifier[info_data] ), identifier[device_instance_id] , identifier[required_size] , identifier[byref] ( identifier[required_size] )) identifier[hid_device] = identifier[HidDevice] ( identifier[device_path] , identifier[parent_device] . 
identifier[value] , identifier[device_instance_id] . identifier[value] ) keyword[else] : identifier[hid_device] = identifier[HidDevice] ( identifier[device_path] , identifier[parent_device] . identifier[value] ) keyword[if] identifier[hid_device] . identifier[vendor_id] : identifier[results] . identifier[append] ( identifier[hid_device] ) keyword[return] identifier[results]
def find_all_hid_devices(): """Finds all HID devices connected to the system""" # # From DDK documentation (finding and Opening HID collection): # After a user-mode application is loaded, it does the following sequence # of operations: # # * Calls HidD_GetHidGuid to obtain the system-defined GUID for HIDClass # devices. # # * Calls SetupDiGetClassDevs to obtain a handle to an opaque device # information set that describes the device interfaces supported by all # the HID collections currently installed in the system. The # application should specify DIGCF_PRESENT and DIGCF_INTERFACEDEVICE # in the Flags parameter passed to SetupDiGetClassDevs. # # * Calls SetupDiEnumDeviceInterfaces repeatedly to retrieve all the # available interface information. # # * Calls SetupDiGetDeviceInterfaceDetail to format interface information # for each collection as a SP_INTERFACE_DEVICE_DETAIL_DATA structure. # The device_path member of this structure contains the user-mode name # that the application uses with the Win32 function CreateFile to # obtain a file handle to a HID collection. # # get HID device class guid guid = winapi.GetHidGuid() # retrieve all the available interface information. 
results = [] required_size = DWORD() info_data = winapi.SP_DEVINFO_DATA() info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA) with winapi.DeviceInterfaceSetInfo(guid) as h_info: for interface_data in winapi.enum_device_interfaces(h_info, guid): device_path = winapi.get_device_path(h_info, interface_data, byref(info_data)) parent_device = c_ulong() #get parent instance id (so we can discriminate on port) if setup_api.CM_Get_Parent(byref(parent_device), info_data.dev_inst, 0) != 0: #CR_SUCCESS = 0 parent_device.value = 0 #null # depends on [control=['if'], data=[]] #get unique instance id string required_size.value = 0 winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), None, 0, byref(required_size)) device_instance_id = create_unicode_buffer(required_size.value) if required_size.value > 0: winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), device_instance_id, required_size, byref(required_size)) hid_device = HidDevice(device_path, parent_device.value, device_instance_id.value) # depends on [control=['if'], data=[]] else: hid_device = HidDevice(device_path, parent_device.value) # add device to results, if not protected if hid_device.vendor_id: results.append(hid_device) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['interface_data']] # depends on [control=['with'], data=['h_info']] return results
def unmarshall_key(self, key): """ Unmarshalls a Crash key read from the database. @type key: str or buffer @param key: Key to convert. @rtype: L{Crash} key. @return: Converted key. """ key = str(key) if self.escapeKeys: key = key.decode('hex') if self.compressKeys: key = zlib.decompress(key) key = pickle.loads(key) return key
def function[unmarshall_key, parameter[self, key]]: constant[ Unmarshalls a Crash key read from the database. @type key: str or buffer @param key: Key to convert. @rtype: L{Crash} key. @return: Converted key. ] variable[key] assign[=] call[name[str], parameter[name[key]]] if name[self].escapeKeys begin[:] variable[key] assign[=] call[name[key].decode, parameter[constant[hex]]] if name[self].compressKeys begin[:] variable[key] assign[=] call[name[zlib].decompress, parameter[name[key]]] variable[key] assign[=] call[name[pickle].loads, parameter[name[key]]] return[name[key]]
keyword[def] identifier[unmarshall_key] ( identifier[self] , identifier[key] ): literal[string] identifier[key] = identifier[str] ( identifier[key] ) keyword[if] identifier[self] . identifier[escapeKeys] : identifier[key] = identifier[key] . identifier[decode] ( literal[string] ) keyword[if] identifier[self] . identifier[compressKeys] : identifier[key] = identifier[zlib] . identifier[decompress] ( identifier[key] ) identifier[key] = identifier[pickle] . identifier[loads] ( identifier[key] ) keyword[return] identifier[key]
def unmarshall_key(self, key): """ Unmarshalls a Crash key read from the database. @type key: str or buffer @param key: Key to convert. @rtype: L{Crash} key. @return: Converted key. """ key = str(key) if self.escapeKeys: key = key.decode('hex') # depends on [control=['if'], data=[]] if self.compressKeys: key = zlib.decompress(key) # depends on [control=['if'], data=[]] key = pickle.loads(key) return key
def update_key_bundle(key_bundle, diff): """ Apply a diff specification to a KeyBundle. The keys that are to be added are added. The keys that should be deleted are marked as inactive. :param key_bundle: The original KeyBundle :param diff: The difference specification :return: An updated key_bundle """ try: _add = diff['add'] except KeyError: pass else: key_bundle.extend(_add) try: _del = diff['del'] except KeyError: pass else: _now = time.time() for k in _del: k.inactive_since = _now
def function[update_key_bundle, parameter[key_bundle, diff]]: constant[ Apply a diff specification to a KeyBundle. The keys that are to be added are added. The keys that should be deleted are marked as inactive. :param key_bundle: The original KeyBundle :param diff: The difference specification :return: An updated key_bundle ] <ast.Try object at 0x7da20c6ab370> <ast.Try object at 0x7da20c6aa020>
keyword[def] identifier[update_key_bundle] ( identifier[key_bundle] , identifier[diff] ): literal[string] keyword[try] : identifier[_add] = identifier[diff] [ literal[string] ] keyword[except] identifier[KeyError] : keyword[pass] keyword[else] : identifier[key_bundle] . identifier[extend] ( identifier[_add] ) keyword[try] : identifier[_del] = identifier[diff] [ literal[string] ] keyword[except] identifier[KeyError] : keyword[pass] keyword[else] : identifier[_now] = identifier[time] . identifier[time] () keyword[for] identifier[k] keyword[in] identifier[_del] : identifier[k] . identifier[inactive_since] = identifier[_now]
def update_key_bundle(key_bundle, diff): """ Apply a diff specification to a KeyBundle. The keys that are to be added are added. The keys that should be deleted are marked as inactive. :param key_bundle: The original KeyBundle :param diff: The difference specification :return: An updated key_bundle """ try: _add = diff['add'] # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] else: key_bundle.extend(_add) try: _del = diff['del'] # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] else: _now = time.time() for k in _del: k.inactive_since = _now # depends on [control=['for'], data=['k']]
def resolve_relation_type_config(value): """Resolve the relation type to config object. Resolve relation type from string (e.g.: serialization) or int (db value) to the full config object. """ relation_types = current_app.config['PIDRELATIONS_RELATION_TYPES'] if isinstance(value, six.string_types): try: obj = next(rt for rt in relation_types if rt.name == value) except StopIteration: raise ValueError("Relation name '{0}' is not configured.".format( value)) elif isinstance(value, int): try: obj = next(rt for rt in relation_types if rt.id == value) except StopIteration: raise ValueError("Relation ID {0} is not configured.".format( value)) else: raise ValueError("Type of value '{0}' is not supported for resolving.". format(value)) api_class = obj_or_import_string(obj.api) schema_class = obj_or_import_string(obj.schema) return obj.__class__(obj.id, obj.name, obj.label, api_class, schema_class)
def function[resolve_relation_type_config, parameter[value]]: constant[Resolve the relation type to config object. Resolve relation type from string (e.g.: serialization) or int (db value) to the full config object. ] variable[relation_types] assign[=] call[name[current_app].config][constant[PIDRELATIONS_RELATION_TYPES]] if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:] <ast.Try object at 0x7da1b24d7910> variable[api_class] assign[=] call[name[obj_or_import_string], parameter[name[obj].api]] variable[schema_class] assign[=] call[name[obj_or_import_string], parameter[name[obj].schema]] return[call[name[obj].__class__, parameter[name[obj].id, name[obj].name, name[obj].label, name[api_class], name[schema_class]]]]
keyword[def] identifier[resolve_relation_type_config] ( identifier[value] ): literal[string] identifier[relation_types] = identifier[current_app] . identifier[config] [ literal[string] ] keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ): keyword[try] : identifier[obj] = identifier[next] ( identifier[rt] keyword[for] identifier[rt] keyword[in] identifier[relation_types] keyword[if] identifier[rt] . identifier[name] == identifier[value] ) keyword[except] identifier[StopIteration] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[value] )) keyword[elif] identifier[isinstance] ( identifier[value] , identifier[int] ): keyword[try] : identifier[obj] = identifier[next] ( identifier[rt] keyword[for] identifier[rt] keyword[in] identifier[relation_types] keyword[if] identifier[rt] . identifier[id] == identifier[value] ) keyword[except] identifier[StopIteration] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[value] )) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[value] )) identifier[api_class] = identifier[obj_or_import_string] ( identifier[obj] . identifier[api] ) identifier[schema_class] = identifier[obj_or_import_string] ( identifier[obj] . identifier[schema] ) keyword[return] identifier[obj] . identifier[__class__] ( identifier[obj] . identifier[id] , identifier[obj] . identifier[name] , identifier[obj] . identifier[label] , identifier[api_class] , identifier[schema_class] )
def resolve_relation_type_config(value): """Resolve the relation type to config object. Resolve relation type from string (e.g.: serialization) or int (db value) to the full config object. """ relation_types = current_app.config['PIDRELATIONS_RELATION_TYPES'] if isinstance(value, six.string_types): try: obj = next((rt for rt in relation_types if rt.name == value)) # depends on [control=['try'], data=[]] except StopIteration: raise ValueError("Relation name '{0}' is not configured.".format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif isinstance(value, int): try: obj = next((rt for rt in relation_types if rt.id == value)) # depends on [control=['try'], data=[]] except StopIteration: raise ValueError('Relation ID {0} is not configured.'.format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: raise ValueError("Type of value '{0}' is not supported for resolving.".format(value)) api_class = obj_or_import_string(obj.api) schema_class = obj_or_import_string(obj.schema) return obj.__class__(obj.id, obj.name, obj.label, api_class, schema_class)
def main( lang='deu', n=900, epochs=50, batch_size=64, num_neurons=256, encoder_input_data=None, decoder_input_data=None, decoder_target_data=None, checkpoint_dir=os.path.join(BIGDATA_PATH, 'checkpoints'), ): """ Train an LSTM encoder-decoder squence-to-sequence model on Anki flashcards for international translation >>> model = main('spa', n=400, epochs=3, batch_size=128, num_neurons=32) Train on 360 samples, validate on 40 samples Epoch 1/3 ... >>> len(model.get_weights()) 8 # 64 common characters in German, 56 in English >>> model.get_weights()[-1].shape[0] >=50 True >>> model.get_weights()[-2].shape[0] 32 """ mkdir_p(checkpoint_dir) encoder_input_path = os.path.join( checkpoint_dir, 'nlpia-ch10-translate-input-{}.npy'.format(lang)) decoder_input_path = os.path.join( checkpoint_dir, 'nlpia-ch10-translate-decoder-input-{}.npy'.format(lang)) decoder_target_path = os.path.join( checkpoint_dir, 'nlpia-ch10-translate-target-{}.npy'.format('eng')) data_paths = (encoder_input_path, decoder_input_path, decoder_target_path) encoder_input_data = [] if all([os.path.isfile(p) for p in data_paths]): encoder_input_data = np.load(encoder_input_path) decoder_input_data = np.load(decoder_input_path) decoder_target_data = np.load(decoder_target_path) if len(encoder_input_data) < n: encoder_input_data, decoder_input_data, decoder_target_data = onehot_char_training_data( lang=lang, n=n, data_paths=data_paths) encoder_input_data = encoder_input_data[:n] decoder_input_data = decoder_input_data[:n] decoder_target_data = decoder_target_data[:n] model = fit(data_paths=data_paths, epochs=epochs, batch_size=batch_size, num_neurons=num_neurons) return model
def function[main, parameter[lang, n, epochs, batch_size, num_neurons, encoder_input_data, decoder_input_data, decoder_target_data, checkpoint_dir]]: constant[ Train an LSTM encoder-decoder squence-to-sequence model on Anki flashcards for international translation >>> model = main('spa', n=400, epochs=3, batch_size=128, num_neurons=32) Train on 360 samples, validate on 40 samples Epoch 1/3 ... >>> len(model.get_weights()) 8 # 64 common characters in German, 56 in English >>> model.get_weights()[-1].shape[0] >=50 True >>> model.get_weights()[-2].shape[0] 32 ] call[name[mkdir_p], parameter[name[checkpoint_dir]]] variable[encoder_input_path] assign[=] call[name[os].path.join, parameter[name[checkpoint_dir], call[constant[nlpia-ch10-translate-input-{}.npy].format, parameter[name[lang]]]]] variable[decoder_input_path] assign[=] call[name[os].path.join, parameter[name[checkpoint_dir], call[constant[nlpia-ch10-translate-decoder-input-{}.npy].format, parameter[name[lang]]]]] variable[decoder_target_path] assign[=] call[name[os].path.join, parameter[name[checkpoint_dir], call[constant[nlpia-ch10-translate-target-{}.npy].format, parameter[constant[eng]]]]] variable[data_paths] assign[=] tuple[[<ast.Name object at 0x7da20c991480>, <ast.Name object at 0x7da20c993370>, <ast.Name object at 0x7da20c990550>]] variable[encoder_input_data] assign[=] list[[]] if call[name[all], parameter[<ast.ListComp object at 0x7da20c9927d0>]] begin[:] variable[encoder_input_data] assign[=] call[name[np].load, parameter[name[encoder_input_path]]] variable[decoder_input_data] assign[=] call[name[np].load, parameter[name[decoder_input_path]]] variable[decoder_target_data] assign[=] call[name[np].load, parameter[name[decoder_target_path]]] if compare[call[name[len], parameter[name[encoder_input_data]]] less[<] name[n]] begin[:] <ast.Tuple object at 0x7da20c990160> assign[=] call[name[onehot_char_training_data], parameter[]] variable[encoder_input_data] assign[=] 
call[name[encoder_input_data]][<ast.Slice object at 0x7da18c4cfa60>] variable[decoder_input_data] assign[=] call[name[decoder_input_data]][<ast.Slice object at 0x7da18c4cfee0>] variable[decoder_target_data] assign[=] call[name[decoder_target_data]][<ast.Slice object at 0x7da18c4cee30>] variable[model] assign[=] call[name[fit], parameter[]] return[name[model]]
keyword[def] identifier[main] ( identifier[lang] = literal[string] , identifier[n] = literal[int] , identifier[epochs] = literal[int] , identifier[batch_size] = literal[int] , identifier[num_neurons] = literal[int] , identifier[encoder_input_data] = keyword[None] , identifier[decoder_input_data] = keyword[None] , identifier[decoder_target_data] = keyword[None] , identifier[checkpoint_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[BIGDATA_PATH] , literal[string] ), ): literal[string] identifier[mkdir_p] ( identifier[checkpoint_dir] ) identifier[encoder_input_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[checkpoint_dir] , literal[string] . identifier[format] ( identifier[lang] )) identifier[decoder_input_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[checkpoint_dir] , literal[string] . identifier[format] ( identifier[lang] )) identifier[decoder_target_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[checkpoint_dir] , literal[string] . identifier[format] ( literal[string] )) identifier[data_paths] =( identifier[encoder_input_path] , identifier[decoder_input_path] , identifier[decoder_target_path] ) identifier[encoder_input_data] =[] keyword[if] identifier[all] ([ identifier[os] . identifier[path] . identifier[isfile] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[data_paths] ]): identifier[encoder_input_data] = identifier[np] . identifier[load] ( identifier[encoder_input_path] ) identifier[decoder_input_data] = identifier[np] . identifier[load] ( identifier[decoder_input_path] ) identifier[decoder_target_data] = identifier[np] . 
identifier[load] ( identifier[decoder_target_path] ) keyword[if] identifier[len] ( identifier[encoder_input_data] )< identifier[n] : identifier[encoder_input_data] , identifier[decoder_input_data] , identifier[decoder_target_data] = identifier[onehot_char_training_data] ( identifier[lang] = identifier[lang] , identifier[n] = identifier[n] , identifier[data_paths] = identifier[data_paths] ) identifier[encoder_input_data] = identifier[encoder_input_data] [: identifier[n] ] identifier[decoder_input_data] = identifier[decoder_input_data] [: identifier[n] ] identifier[decoder_target_data] = identifier[decoder_target_data] [: identifier[n] ] identifier[model] = identifier[fit] ( identifier[data_paths] = identifier[data_paths] , identifier[epochs] = identifier[epochs] , identifier[batch_size] = identifier[batch_size] , identifier[num_neurons] = identifier[num_neurons] ) keyword[return] identifier[model]
def main(lang='deu', n=900, epochs=50, batch_size=64, num_neurons=256, encoder_input_data=None, decoder_input_data=None, decoder_target_data=None, checkpoint_dir=os.path.join(BIGDATA_PATH, 'checkpoints')): """ Train an LSTM encoder-decoder squence-to-sequence model on Anki flashcards for international translation >>> model = main('spa', n=400, epochs=3, batch_size=128, num_neurons=32) Train on 360 samples, validate on 40 samples Epoch 1/3 ... >>> len(model.get_weights()) 8 # 64 common characters in German, 56 in English >>> model.get_weights()[-1].shape[0] >=50 True >>> model.get_weights()[-2].shape[0] 32 """ mkdir_p(checkpoint_dir) encoder_input_path = os.path.join(checkpoint_dir, 'nlpia-ch10-translate-input-{}.npy'.format(lang)) decoder_input_path = os.path.join(checkpoint_dir, 'nlpia-ch10-translate-decoder-input-{}.npy'.format(lang)) decoder_target_path = os.path.join(checkpoint_dir, 'nlpia-ch10-translate-target-{}.npy'.format('eng')) data_paths = (encoder_input_path, decoder_input_path, decoder_target_path) encoder_input_data = [] if all([os.path.isfile(p) for p in data_paths]): encoder_input_data = np.load(encoder_input_path) decoder_input_data = np.load(decoder_input_path) decoder_target_data = np.load(decoder_target_path) # depends on [control=['if'], data=[]] if len(encoder_input_data) < n: (encoder_input_data, decoder_input_data, decoder_target_data) = onehot_char_training_data(lang=lang, n=n, data_paths=data_paths) # depends on [control=['if'], data=['n']] encoder_input_data = encoder_input_data[:n] decoder_input_data = decoder_input_data[:n] decoder_target_data = decoder_target_data[:n] model = fit(data_paths=data_paths, epochs=epochs, batch_size=batch_size, num_neurons=num_neurons) return model
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes): """ Return the entries for this commit, the entries of the parent commits, and the difference between the two (current_files - parent_files) """ if prefix and prefixes and prefix not in prefixes: return empty, empty parent_files = set() for oid in parent_oids: parent_files.update(self.entries_in_tree_oid(prefix, oid)) current_files = self.entries_in_tree_oid(prefix, current_oid) return (current_files, parent_files), (current_files - parent_files)
def function[tree_structures_for, parameter[self, prefix, current_oid, parent_oids, prefixes]]: constant[ Return the entries for this commit, the entries of the parent commits, and the difference between the two (current_files - parent_files) ] if <ast.BoolOp object at 0x7da1b15f4be0> begin[:] return[tuple[[<ast.Name object at 0x7da18bc729b0>, <ast.Name object at 0x7da18bc72b60>]]] variable[parent_files] assign[=] call[name[set], parameter[]] for taget[name[oid]] in starred[name[parent_oids]] begin[:] call[name[parent_files].update, parameter[call[name[self].entries_in_tree_oid, parameter[name[prefix], name[oid]]]]] variable[current_files] assign[=] call[name[self].entries_in_tree_oid, parameter[name[prefix], name[current_oid]]] return[tuple[[<ast.Tuple object at 0x7da18bc71870>, <ast.BinOp object at 0x7da18bc724d0>]]]
keyword[def] identifier[tree_structures_for] ( identifier[self] , identifier[prefix] , identifier[current_oid] , identifier[parent_oids] , identifier[prefixes] ): literal[string] keyword[if] identifier[prefix] keyword[and] identifier[prefixes] keyword[and] identifier[prefix] keyword[not] keyword[in] identifier[prefixes] : keyword[return] identifier[empty] , identifier[empty] identifier[parent_files] = identifier[set] () keyword[for] identifier[oid] keyword[in] identifier[parent_oids] : identifier[parent_files] . identifier[update] ( identifier[self] . identifier[entries_in_tree_oid] ( identifier[prefix] , identifier[oid] )) identifier[current_files] = identifier[self] . identifier[entries_in_tree_oid] ( identifier[prefix] , identifier[current_oid] ) keyword[return] ( identifier[current_files] , identifier[parent_files] ),( identifier[current_files] - identifier[parent_files] )
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes): """ Return the entries for this commit, the entries of the parent commits, and the difference between the two (current_files - parent_files) """ if prefix and prefixes and (prefix not in prefixes): return (empty, empty) # depends on [control=['if'], data=[]] parent_files = set() for oid in parent_oids: parent_files.update(self.entries_in_tree_oid(prefix, oid)) # depends on [control=['for'], data=['oid']] current_files = self.entries_in_tree_oid(prefix, current_oid) return ((current_files, parent_files), current_files - parent_files)
def ordering(self, type):
    """
    Get the attribute ordering defined in the specified XSD type
    information.

    @param type: An XSD type object.
    @type type: SchemaObject
    @return: An ordered list of attribute names.
    @rtype: list
    """
    # Attributes are distinguished from elements by an underscore prefix;
    # unnamed children are skipped entirely.
    return [
        '_%s' % child.name if child.isattr() else child.name
        for child, _ancestry in type.resolve()
        if child.name is not None
    ]
def function[ordering, parameter[self, type]]: constant[ Get the attribute ordering defined in the specified XSD type information. @param type: An XSD type object. @type type: SchemaObject @return: An ordered list of attribute names. @rtype: list ] variable[result] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b088c760>, <ast.Name object at 0x7da1b088d420>]]] in starred[call[name[type].resolve, parameter[]]] begin[:] variable[name] assign[=] name[child].name if compare[name[child].name is constant[None]] begin[:] continue if call[name[child].isattr, parameter[]] begin[:] variable[name] assign[=] binary_operation[constant[_%s] <ast.Mod object at 0x7da2590d6920> name[child].name] call[name[result].append, parameter[name[name]]] return[name[result]]
keyword[def] identifier[ordering] ( identifier[self] , identifier[type] ): literal[string] identifier[result] =[] keyword[for] identifier[child] , identifier[ancestry] keyword[in] identifier[type] . identifier[resolve] (): identifier[name] = identifier[child] . identifier[name] keyword[if] identifier[child] . identifier[name] keyword[is] keyword[None] : keyword[continue] keyword[if] identifier[child] . identifier[isattr] (): identifier[name] = literal[string] % identifier[child] . identifier[name] identifier[result] . identifier[append] ( identifier[name] ) keyword[return] identifier[result]
def ordering(self, type): """ Get the attribute ordering defined in the specified XSD type information. @param type: An XSD type object. @type type: SchemaObject @return: An ordered list of attribute names. @rtype: list """ result = [] for (child, ancestry) in type.resolve(): name = child.name if child.name is None: continue # depends on [control=['if'], data=[]] if child.isattr(): name = '_%s' % child.name # depends on [control=['if'], data=[]] result.append(name) # depends on [control=['for'], data=[]] return result
def collection(name, child_names=None, child_source=None, child_view=None,
               child_model=None, child_label=None, child_desc=None,
               child_meta=None, label=None, desc=None, meta=None,
               model_meta=None):
    """
    Annotate a dynamic collection of sub-models.

    @param name: name of the collection model containing the sub-models
    @type name: str or unicode
    @param child_names: an effect that retrieve all sub-models names
                        or None if sub-models are not iterable.
    @type child_names: callable
    @param child_source: an effect that retrieve a sub-model source.
    @type child_source: callable
    @param child_view: an effect that retrieve a sub-model view.
    @type child_view: callable
    @param child_model: the model identity, model factory or effect
                        to get it, or None to use IModel adapter.
    @type child_model: str or unicode or callable or IModelFactory or None
    @param child_label: the model items label or None.
    @type child_label: str or unicode or None
    @param child_desc: the model items description or None.
    @type child_desc: str or unicode or None
    @param child_meta: collection model's items metadata.
    @type child_meta: list of tuple
    @param label: the collection label or None.
    @type label: str or unicode or None
    @param desc: the collection description or None.
    @type desc: str or unicode or None
    @param meta: item metadata.
    @type meta: list of tuple
    @param model_meta: collection model metadata.
    @type model_meta: list of tuple
    """
    # Forward everything to the generic annotation helper; the keyword
    # names must match what _annotate("collection", ...) expects.
    options = {
        'child_names': child_names,
        'child_source': child_source,
        'child_view': child_view,
        'child_model': child_model,
        'child_label': child_label,
        'child_desc': child_desc,
        'child_meta': child_meta,
        'label': label,
        'desc': desc,
        'meta': meta,
        'model_meta': model_meta,
    }
    _annotate("collection", name, **options)
def function[collection, parameter[name, child_names, child_source, child_view, child_model, child_label, child_desc, child_meta, label, desc, meta, model_meta]]: constant[ Annotate a dynamic collection of sub-models. @param name: name of the collection model containing the sub-models @type name: str or unicode @param child_names: an effect that retrieve all sub-models names or None if sub-models are not iterable. @type child_names: callable @param child_source: an effect that retrieve a sub-model source. @type child_source: callable @param child_view: an effect that retrieve a sub-model view. @type child_view: callable @param child_model: the model identity, model factory or effect to get it, or None to use IModel adapter. @type child_model: str or unicode or callable or IModelFactory or None @param child_label: the model items label or None. @type child_label: str or unicode or None @param child_desc: the model items description or None. @type child_desc: str or unicode or None @param child_meta: collection model's items metadata. @type child_meta: list of tuple @param label: the collection label or None. @type label: str or unicode or None @param desc: the collection description or None. @type desc: str or unicode or None @param meta: item metadata. @type meta: list of tuple @param model_meta: collection model metadata. @type model_meta: list of tuple ] call[name[_annotate], parameter[constant[collection], name[name]]]
keyword[def] identifier[collection] ( identifier[name] , identifier[child_names] = keyword[None] , identifier[child_source] = keyword[None] , identifier[child_view] = keyword[None] , identifier[child_model] = keyword[None] , identifier[child_label] = keyword[None] , identifier[child_desc] = keyword[None] , identifier[child_meta] = keyword[None] , identifier[label] = keyword[None] , identifier[desc] = keyword[None] , identifier[meta] = keyword[None] , identifier[model_meta] = keyword[None] ): literal[string] identifier[_annotate] ( literal[string] , identifier[name] , identifier[child_names] = identifier[child_names] , identifier[child_source] = identifier[child_source] , identifier[child_view] = identifier[child_view] , identifier[child_model] = identifier[child_model] , identifier[child_label] = identifier[child_label] , identifier[child_desc] = identifier[child_desc] , identifier[child_meta] = identifier[child_meta] , identifier[label] = identifier[label] , identifier[desc] = identifier[desc] , identifier[meta] = identifier[meta] , identifier[model_meta] = identifier[model_meta] )
def collection(name, child_names=None, child_source=None, child_view=None, child_model=None, child_label=None, child_desc=None, child_meta=None, label=None, desc=None, meta=None, model_meta=None): """ Annotate a dynamic collection of sub-models. @param name: name of the collection model containing the sub-models @type name: str or unicode @param child_names: an effect that retrieve all sub-models names or None if sub-models are not iterable. @type child_names: callable @param child_source: an effect that retrieve a sub-model source. @type child_source: callable @param child_view: an effect that retrieve a sub-model view. @type child_view: callable @param child_model: the model identity, model factory or effect to get it, or None to use IModel adapter. @type child_model: str or unicode or callable or IModelFactory or None @param child_label: the model items label or None. @type child_label: str or unicode or None @param child_desc: the model items description or None. @type child_desc: str or unicode or None @param child_meta: collection model's items metadata. @type child_meta: list of tuple @param label: the collection label or None. @type label: str or unicode or None @param desc: the collection description or None. @type desc: str or unicode or None @param meta: item metadata. @type meta: list of tuple @param model_meta: collection model metadata. @type model_meta: list of tuple """ _annotate('collection', name, child_names=child_names, child_source=child_source, child_view=child_view, child_model=child_model, child_label=child_label, child_desc=child_desc, child_meta=child_meta, label=label, desc=desc, meta=meta, model_meta=model_meta)
def calculate_dependencies():
    """Calculate test dependencies.

    First do a topological sorting based on the dependencies, then sort
    each resulting dependency group by (priority, name).
    """
    graph = merge_dicts(dependencies, soft_dependencies)
    # Within a topological layer, ordering is deterministic: priority
    # first, then the name itself as a tie-breaker.
    return [
        test
        for group in toposort(graph)
        for test in sorted(group, key=lambda t: (priorities[t], t))
    ]
def function[calculate_dependencies, parameter[]]: constant[Calculate test dependencies First do a topological sorting based on the dependencies. Then sort the different dependency groups based on priorities. ] variable[order] assign[=] list[[]] for taget[name[g]] in starred[call[name[toposort], parameter[call[name[merge_dicts], parameter[name[dependencies], name[soft_dependencies]]]]]] begin[:] for taget[name[t]] in starred[call[name[sorted], parameter[name[g]]]] begin[:] call[name[order].append, parameter[name[t]]] return[name[order]]
keyword[def] identifier[calculate_dependencies] (): literal[string] identifier[order] =[] keyword[for] identifier[g] keyword[in] identifier[toposort] ( identifier[merge_dicts] ( identifier[dependencies] , identifier[soft_dependencies] )): keyword[for] identifier[t] keyword[in] identifier[sorted] ( identifier[g] , identifier[key] = keyword[lambda] identifier[x] :( identifier[priorities] [ identifier[x] ], identifier[x] )): identifier[order] . identifier[append] ( identifier[t] ) keyword[return] identifier[order]
def calculate_dependencies(): """Calculate test dependencies First do a topological sorting based on the dependencies. Then sort the different dependency groups based on priorities. """ order = [] for g in toposort(merge_dicts(dependencies, soft_dependencies)): for t in sorted(g, key=lambda x: (priorities[x], x)): order.append(t) # depends on [control=['for'], data=['t']] # depends on [control=['for'], data=['g']] return order
def get_block_height(block_hash, coin_symbol='btc', api_key=None):
    """ Takes a block_hash and returns the block_height """
    # Only the 'height' field of the overview is needed, so limit the
    # transaction payload to a single entry.
    overview = get_block_overview(
        block_representation=block_hash,
        coin_symbol=coin_symbol,
        txn_limit=1,
        api_key=api_key,
    )
    return overview['height']
def function[get_block_height, parameter[block_hash, coin_symbol, api_key]]: constant[ Takes a block_hash and returns the block_height ] return[call[call[name[get_block_overview], parameter[]]][constant[height]]]
keyword[def] identifier[get_block_height] ( identifier[block_hash] , identifier[coin_symbol] = literal[string] , identifier[api_key] = keyword[None] ): literal[string] keyword[return] identifier[get_block_overview] ( identifier[block_representation] = identifier[block_hash] , identifier[coin_symbol] = identifier[coin_symbol] , identifier[txn_limit] = literal[int] , identifier[api_key] = identifier[api_key] )[ literal[string] ]
def get_block_height(block_hash, coin_symbol='btc', api_key=None): """ Takes a block_hash and returns the block_height """ return get_block_overview(block_representation=block_hash, coin_symbol=coin_symbol, txn_limit=1, api_key=api_key)['height']
def set(self, value):
    """Sets value"""
    # When a setter hook is installed it may transform the incoming
    # value; it receives the node, the new value and the previous value.
    if self._setter:
        value = self._setter(self._node, value, self._value)
    self._value = value
def function[set, parameter[self, value]]: constant[Sets value] name[self]._value assign[=] <ast.IfExp object at 0x7da18f722200>
keyword[def] identifier[set] ( identifier[self] , identifier[value] ): literal[string] identifier[self] . identifier[_value] = identifier[self] . identifier[_setter] ( identifier[self] . identifier[_node] , identifier[value] , identifier[self] . identifier[_value] ) keyword[if] identifier[self] . identifier[_setter] keyword[else] identifier[value]
def set(self, value): """Sets value""" self._value = self._setter(self._node, value, self._value) if self._setter else value
def find_uuid(self, obj, column_name):
    """
    Find uuid.

    Parameters
    ----------
    obj : ???
        the object returned by the MDAL Query
    column_name : str
        input point returned from MDAL Query

    Returns
    -------
    str or None
        the uuid that correlates with the data, or None when no entry
        in ``obj.context`` matches ``column_name``
    """
    # Iterate the context keys directly (dicts preserve insertion order)
    # and return the first uuid whose '?point' contains the column name.
    # The original code assigned an unused local before returning; the
    # dead assignment has been removed.
    for uuid in obj.context:
        if column_name in obj.context[uuid]['?point']:
            return uuid
    # No match: make the implicit None return explicit for callers.
    return None
def function[find_uuid, parameter[self, obj, column_name]]: constant[ Find uuid. Parameters ---------- obj : ??? the object returned by the MDAL Query column_name : str input point returned from MDAL Query Returns ------- str the uuid that correlates with the data ] variable[keys] assign[=] call[name[obj].context.keys, parameter[]] for taget[name[i]] in starred[name[keys]] begin[:] if compare[name[column_name] in call[call[name[obj].context][name[i]]][constant[?point]]] begin[:] variable[uuid] assign[=] name[i] return[name[i]]
keyword[def] identifier[find_uuid] ( identifier[self] , identifier[obj] , identifier[column_name] ): literal[string] identifier[keys] = identifier[obj] . identifier[context] . identifier[keys] () keyword[for] identifier[i] keyword[in] identifier[keys] : keyword[if] identifier[column_name] keyword[in] identifier[obj] . identifier[context] [ identifier[i] ][ literal[string] ]: identifier[uuid] = identifier[i] keyword[return] identifier[i]
def find_uuid(self, obj, column_name): """ Find uuid. Parameters ---------- obj : ??? the object returned by the MDAL Query column_name : str input point returned from MDAL Query Returns ------- str the uuid that correlates with the data """ keys = obj.context.keys() for i in keys: if column_name in obj.context[i]['?point']: uuid = i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return i
def get_thumbnail_name(self, thumbnail_options,
                       transparent=False, high_resolution=False):
    """
    A version of ``Thumbnailer.get_thumbnail_name`` that produces a
    reproducible thumbnail name that can be converted back to the
    original filename.

    The name has the shape ``<source>__<opts>.<ext>``; the double
    underscore is the delimiter separating the source filename from the
    serialized options.
    """
    # self.name is assumed to be the storage-relative path of the
    # source file -- TODO confirm against the storage backend.
    path, source_filename = os.path.split(self.name)
    source_extension = os.path.splitext(source_filename)[1][1:]
    # thumbnail_preserve_extensions may be True (preserve every
    # extension) or a collection of extensions to preserve.
    if self.thumbnail_preserve_extensions is True or \
            (self.thumbnail_preserve_extensions and
                source_extension.lower() in self.thumbnail_preserve_extensions):
        extension = source_extension
    elif transparent:
        extension = self.thumbnail_transparency_extension
    else:
        extension = self.thumbnail_extension
    extension = extension or 'jpg'

    # Copy before popping so the caller's options dict is not mutated.
    thumbnail_options = thumbnail_options.copy()
    size = tuple(thumbnail_options.pop('size'))
    quality = thumbnail_options.pop('quality', self.thumbnail_quality)
    initial_opts = ['%sx%s' % size, 'q%s' % quality]

    opts = list(thumbnail_options.items())
    opts.sort()  # Sort the options so the file name is consistent.
    # Options whose value is literally True render as a bare key
    # ('crop'); any other truthy value renders as 'key-value'.
    opts = ['%s' % (v is not True and '%s-%s' % (k, v) or k)
            for k, v in opts if v]

    all_opts = '_'.join(initial_opts + opts)

    basedir = self.thumbnail_basedir
    subdir = self.thumbnail_subdir

    # make sure our magic delimiter is not used in all_opts
    all_opts = all_opts.replace('__', '_')

    if high_resolution:
        # thumbnail_highres_infix is optional configuration; fall back
        # to the conventional '@2x' suffix when it is absent.
        try:
            all_opts += self.thumbnail_highres_infix
        except AttributeError:
            all_opts += '@2x'

    filename = '%s__%s.%s' % (source_filename, all_opts, extension)
    return os.path.join(basedir, path, subdir, filename)
def function[get_thumbnail_name, parameter[self, thumbnail_options, transparent, high_resolution]]: constant[ A version of ``Thumbnailer.get_thumbnail_name`` that produces a reproducible thumbnail name that can be converted back to the original filename. ] <ast.Tuple object at 0x7da20c9930a0> assign[=] call[name[os].path.split, parameter[name[self].name]] variable[source_extension] assign[=] call[call[call[name[os].path.splitext, parameter[name[source_filename]]]][constant[1]]][<ast.Slice object at 0x7da20c992e30>] if <ast.BoolOp object at 0x7da20c993100> begin[:] variable[extension] assign[=] name[source_extension] variable[extension] assign[=] <ast.BoolOp object at 0x7da20c990fa0> variable[thumbnail_options] assign[=] call[name[thumbnail_options].copy, parameter[]] variable[size] assign[=] call[name[tuple], parameter[call[name[thumbnail_options].pop, parameter[constant[size]]]]] variable[quality] assign[=] call[name[thumbnail_options].pop, parameter[constant[quality], name[self].thumbnail_quality]] variable[initial_opts] assign[=] list[[<ast.BinOp object at 0x7da20c991d20>, <ast.BinOp object at 0x7da20c991960>]] variable[opts] assign[=] call[name[list], parameter[call[name[thumbnail_options].items, parameter[]]]] call[name[opts].sort, parameter[]] variable[opts] assign[=] <ast.ListComp object at 0x7da20c991180> variable[all_opts] assign[=] call[constant[_].join, parameter[binary_operation[name[initial_opts] + name[opts]]]] variable[basedir] assign[=] name[self].thumbnail_basedir variable[subdir] assign[=] name[self].thumbnail_subdir variable[all_opts] assign[=] call[name[all_opts].replace, parameter[constant[__], constant[_]]] if name[high_resolution] begin[:] <ast.Try object at 0x7da20c991cf0> variable[filename] assign[=] binary_operation[constant[%s__%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9932b0>, <ast.Name object at 0x7da20c993130>, <ast.Name object at 0x7da20c992c50>]]] return[call[name[os].path.join, 
parameter[name[basedir], name[path], name[subdir], name[filename]]]]
keyword[def] identifier[get_thumbnail_name] ( identifier[self] , identifier[thumbnail_options] , identifier[transparent] = keyword[False] , identifier[high_resolution] = keyword[False] ): literal[string] identifier[path] , identifier[source_filename] = identifier[os] . identifier[path] . identifier[split] ( identifier[self] . identifier[name] ) identifier[source_extension] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[source_filename] )[ literal[int] ][ literal[int] :] keyword[if] identifier[self] . identifier[thumbnail_preserve_extensions] keyword[is] keyword[True] keyword[or] ( identifier[self] . identifier[thumbnail_preserve_extensions] keyword[and] identifier[source_extension] . identifier[lower] () keyword[in] identifier[self] . identifier[thumbnail_preserve_extensions] ): identifier[extension] = identifier[source_extension] keyword[elif] identifier[transparent] : identifier[extension] = identifier[self] . identifier[thumbnail_transparency_extension] keyword[else] : identifier[extension] = identifier[self] . identifier[thumbnail_extension] identifier[extension] = identifier[extension] keyword[or] literal[string] identifier[thumbnail_options] = identifier[thumbnail_options] . identifier[copy] () identifier[size] = identifier[tuple] ( identifier[thumbnail_options] . identifier[pop] ( literal[string] )) identifier[quality] = identifier[thumbnail_options] . identifier[pop] ( literal[string] , identifier[self] . identifier[thumbnail_quality] ) identifier[initial_opts] =[ literal[string] % identifier[size] , literal[string] % identifier[quality] ] identifier[opts] = identifier[list] ( identifier[thumbnail_options] . identifier[items] ()) identifier[opts] . 
identifier[sort] () identifier[opts] =[ literal[string] %( identifier[v] keyword[is] keyword[not] keyword[True] keyword[and] literal[string] %( identifier[k] , identifier[v] ) keyword[or] identifier[k] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[opts] keyword[if] identifier[v] ] identifier[all_opts] = literal[string] . identifier[join] ( identifier[initial_opts] + identifier[opts] ) identifier[basedir] = identifier[self] . identifier[thumbnail_basedir] identifier[subdir] = identifier[self] . identifier[thumbnail_subdir] identifier[all_opts] = identifier[all_opts] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[high_resolution] : keyword[try] : identifier[all_opts] += identifier[self] . identifier[thumbnail_highres_infix] keyword[except] identifier[AttributeError] : identifier[all_opts] += literal[string] identifier[filename] = literal[string] %( identifier[source_filename] , identifier[all_opts] , identifier[extension] ) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[basedir] , identifier[path] , identifier[subdir] , identifier[filename] )
def get_thumbnail_name(self, thumbnail_options, transparent=False, high_resolution=False): """ A version of ``Thumbnailer.get_thumbnail_name`` that produces a reproducible thumbnail name that can be converted back to the original filename. """ (path, source_filename) = os.path.split(self.name) source_extension = os.path.splitext(source_filename)[1][1:] if self.thumbnail_preserve_extensions is True or (self.thumbnail_preserve_extensions and source_extension.lower() in self.thumbnail_preserve_extensions): extension = source_extension # depends on [control=['if'], data=[]] elif transparent: extension = self.thumbnail_transparency_extension # depends on [control=['if'], data=[]] else: extension = self.thumbnail_extension extension = extension or 'jpg' thumbnail_options = thumbnail_options.copy() size = tuple(thumbnail_options.pop('size')) quality = thumbnail_options.pop('quality', self.thumbnail_quality) initial_opts = ['%sx%s' % size, 'q%s' % quality] opts = list(thumbnail_options.items()) opts.sort() # Sort the options so the file name is consistent. opts = ['%s' % (v is not True and '%s-%s' % (k, v) or k) for (k, v) in opts if v] all_opts = '_'.join(initial_opts + opts) basedir = self.thumbnail_basedir subdir = self.thumbnail_subdir # make sure our magic delimiter is not used in all_opts all_opts = all_opts.replace('__', '_') if high_resolution: try: all_opts += self.thumbnail_highres_infix # depends on [control=['try'], data=[]] except AttributeError: all_opts += '@2x' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] filename = '%s__%s.%s' % (source_filename, all_opts, extension) return os.path.join(basedir, path, subdir, filename)
def bfd(self, **kwargs):
    """Configure BFD for BGP globally.

    Args:
        rbridge_id (str): Rbridge to configure.  (1, 225, etc)
        tx (str): BFD transmit interval in milliseconds (300, 500, etc)
        rx (str): BFD receive interval in milliseconds (300, 500, etc)
        multiplier (str): BFD multiplier.  (3, 7, 5, etc)
        delete (bool): True if BFD configuration should be deleted.
            Default value will be False if not specified.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `tx`, `rx`, or `multiplier` is not passed.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.230']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.bgp.bfd(rx='300', tx='300', multiplier='3',
        ...         rbridge_id='230')
        ...         output = dev.bgp.bfd(rx='300', tx='300', multiplier='3',
        ...         rbridge_id='230', get=True)
        ...         output = dev.bgp.bfd(rx='300', tx='300', multiplier='3',
        ...         rbridge_id='230', delete=True)
    """
    # Rename the public tx/rx arguments to the internal min_tx/min_rx
    # keys expected by the builder helpers (raises KeyError if missing).
    kwargs['min_tx'] = kwargs.pop('tx')
    kwargs['min_rx'] = kwargs.pop('rx')
    kwargs['delete'] = kwargs.pop('delete', False)
    callback = kwargs.pop('callback', self._callback)
    tx_xml = self._bfd_tx(**kwargs)
    rx_xml = self._bfd_rx(**kwargs)
    multiplier_xml = self._bfd_multiplier(**kwargs)
    # NOTE: 'get' is only popped after the builders run, so they see it
    # in kwargs exactly as before.
    if kwargs.pop('get', False):
        return self._get_bfd(tx_xml, rx_xml, multiplier_xml)
    merged = pynos.utilities.merge_xml(tx_xml, rx_xml)
    merged = pynos.utilities.merge_xml(merged, multiplier_xml)
    return callback(merged)
def function[bfd, parameter[self]]: constant[Configure BFD for BGP globally. Args: rbridge_id (str): Rbridge to configure. (1, 225, etc) tx (str): BFD transmit interval in milliseconds (300, 500, etc) rx (str): BFD receive interval in milliseconds (300, 500, etc) multiplier (str): BFD multiplier. (3, 7, 5, etc) delete (bool): True if BFD configuration should be deleted. Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `tx`, `rx`, or `multiplier` is not passed. Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3', ... rbridge_id='230') ... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3', ... rbridge_id='230', get=True) ... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3', ... 
rbridge_id='230', delete=True) ] call[name[kwargs]][constant[min_tx]] assign[=] call[name[kwargs].pop, parameter[constant[tx]]] call[name[kwargs]][constant[min_rx]] assign[=] call[name[kwargs].pop, parameter[constant[rx]]] call[name[kwargs]][constant[delete]] assign[=] call[name[kwargs].pop, parameter[constant[delete], constant[False]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] variable[bfd_tx] assign[=] call[name[self]._bfd_tx, parameter[]] variable[bfd_rx] assign[=] call[name[self]._bfd_rx, parameter[]] variable[bfd_multiplier] assign[=] call[name[self]._bfd_multiplier, parameter[]] if call[name[kwargs].pop, parameter[constant[get], constant[False]]] begin[:] return[call[name[self]._get_bfd, parameter[name[bfd_tx], name[bfd_rx], name[bfd_multiplier]]]] variable[config] assign[=] call[name[pynos].utilities.merge_xml, parameter[name[bfd_tx], name[bfd_rx]]] variable[config] assign[=] call[name[pynos].utilities.merge_xml, parameter[name[config], name[bfd_multiplier]]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[bfd] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[kwargs] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[kwargs] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) identifier[bfd_tx] = identifier[self] . identifier[_bfd_tx] (** identifier[kwargs] ) identifier[bfd_rx] = identifier[self] . identifier[_bfd_rx] (** identifier[kwargs] ) identifier[bfd_multiplier] = identifier[self] . identifier[_bfd_multiplier] (** identifier[kwargs] ) keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ): keyword[return] identifier[self] . identifier[_get_bfd] ( identifier[bfd_tx] , identifier[bfd_rx] , identifier[bfd_multiplier] ) identifier[config] = identifier[pynos] . identifier[utilities] . identifier[merge_xml] ( identifier[bfd_tx] , identifier[bfd_rx] ) identifier[config] = identifier[pynos] . identifier[utilities] . identifier[merge_xml] ( identifier[config] , identifier[bfd_multiplier] ) keyword[return] identifier[callback] ( identifier[config] )
def bfd(self, **kwargs): """Configure BFD for BGP globally. Args: rbridge_id (str): Rbridge to configure. (1, 225, etc) tx (str): BFD transmit interval in milliseconds (300, 500, etc) rx (str): BFD receive interval in milliseconds (300, 500, etc) multiplier (str): BFD multiplier. (3, 7, 5, etc) delete (bool): True if BFD configuration should be deleted. Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `tx`, `rx`, or `multiplier` is not passed. Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3', ... rbridge_id='230') ... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3', ... rbridge_id='230', get=True) ... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3', ... rbridge_id='230', delete=True) """ kwargs['min_tx'] = kwargs.pop('tx') kwargs['min_rx'] = kwargs.pop('rx') kwargs['delete'] = kwargs.pop('delete', False) callback = kwargs.pop('callback', self._callback) bfd_tx = self._bfd_tx(**kwargs) bfd_rx = self._bfd_rx(**kwargs) bfd_multiplier = self._bfd_multiplier(**kwargs) if kwargs.pop('get', False): return self._get_bfd(bfd_tx, bfd_rx, bfd_multiplier) # depends on [control=['if'], data=[]] config = pynos.utilities.merge_xml(bfd_tx, bfd_rx) config = pynos.utilities.merge_xml(config, bfd_multiplier) return callback(config)
def do_check(self, params):
    """
\x1b[1mNAME\x1b[0m
        check - Checks that a path is at a given version (only works within a transaction)

\x1b[1mSYNOPSIS\x1b[0m
        check <path> <version>

\x1b[1mEXAMPLES\x1b[0m
        > txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1'

    """
    # check() is only meaningful inside a transaction; outside of one
    # this command is a no-op.
    if self.in_transaction:
        self.client_context.check(params.path, params.version)
def function[do_check, parameter[self, params]]: constant[ NAME check - Checks that a path is at a given version (only works within a transaction) SYNOPSIS check <path> <version> EXAMPLES > txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1' ] if <ast.UnaryOp object at 0x7da1b26ae230> begin[:] return[None] call[name[self].client_context.check, parameter[name[params].path, name[params].version]]
keyword[def] identifier[do_check] ( identifier[self] , identifier[params] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[in_transaction] : keyword[return] identifier[self] . identifier[client_context] . identifier[check] ( identifier[params] . identifier[path] , identifier[params] . identifier[version] )
def do_check(self, params): """ \x1b[1mNAME\x1b[0m check - Checks that a path is at a given version (only works within a transaction) \x1b[1mSYNOPSIS\x1b[0m check <path> <version> \x1b[1mEXAMPLES\x1b[0m > txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1' """ if not self.in_transaction: return # depends on [control=['if'], data=[]] self.client_context.check(params.path, params.version)
def process(self):
    """
    Calls the process endpoint for all locales of the asset.

    API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets/asset-processing
    """
    # One PUT per locale present in the asset's fields; headers are
    # recomputed for every request, matching the original call pattern.
    for locale in self._fields:
        endpoint = "{0}/files/{1}/process".format(
            self.__class__.base_url(
                self.space.id,
                self.id,
                environment_id=self._environment_id
            ),
            locale
        )
        self._client._put(endpoint, {}, headers=self._update_headers())
    return self.reload()
def function[process, parameter[self]]: constant[ Calls the process endpoint for all locales of the asset. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets/asset-processing ] for taget[name[locale]] in starred[call[name[self]._fields.keys, parameter[]]] begin[:] call[name[self]._client._put, parameter[call[constant[{0}/files/{1}/process].format, parameter[call[name[self].__class__.base_url, parameter[name[self].space.id, name[self].id]], name[locale]]], dictionary[[], []]]] return[call[name[self].reload, parameter[]]]
keyword[def] identifier[process] ( identifier[self] ): literal[string] keyword[for] identifier[locale] keyword[in] identifier[self] . identifier[_fields] . identifier[keys] (): identifier[self] . identifier[_client] . identifier[_put] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[base_url] ( identifier[self] . identifier[space] . identifier[id] , identifier[self] . identifier[id] , identifier[environment_id] = identifier[self] . identifier[_environment_id] ), identifier[locale] ), {}, identifier[headers] = identifier[self] . identifier[_update_headers] () ) keyword[return] identifier[self] . identifier[reload] ()
def process(self): """ Calls the process endpoint for all locales of the asset. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets/asset-processing """ for locale in self._fields.keys(): self._client._put('{0}/files/{1}/process'.format(self.__class__.base_url(self.space.id, self.id, environment_id=self._environment_id), locale), {}, headers=self._update_headers()) # depends on [control=['for'], data=['locale']] return self.reload()
def aligner_from_header(in_bam):
    """Identify aligner from the BAM header; handling pre-aligned inputs.

    Scans the @PG (program) records of the BAM header and returns the
    first known aligner (a key of ``TOOLS``) whose name appears in a
    program name, or None when no known aligner is recorded.

    :param in_bam: path to a BAM file
    :return: aligner key string, or None
    """
    from bcbio.pipeline.alignment import TOOLS
    with pysam.Samfile(in_bam, "rb") as bamfile:
        for pg in bamfile.header.get("PG", []):
            # Hoist the per-record normalization out of the inner loop;
            # substring match because PN often carries versions/paths.
            pg_name = pg.get("PN", "").lower()
            for known_aligner in TOOLS:
                if known_aligner in pg_name:
                    return known_aligner
def function[aligner_from_header, parameter[in_bam]]: constant[Identify aligner from the BAM header; handling pre-aligned inputs. ] from relative_module[bcbio.pipeline.alignment] import module[TOOLS] with call[name[pysam].Samfile, parameter[name[in_bam], constant[rb]]] begin[:] for taget[name[pg]] in starred[call[name[bamfile].header.get, parameter[constant[PG], list[[]]]]] begin[:] for taget[name[ka]] in starred[call[name[TOOLS].keys, parameter[]]] begin[:] if compare[call[call[call[name[pg].get, parameter[constant[PN], constant[]]].lower, parameter[]].find, parameter[name[ka]]] greater_or_equal[>=] constant[0]] begin[:] return[name[ka]]
keyword[def] identifier[aligner_from_header] ( identifier[in_bam] ): literal[string] keyword[from] identifier[bcbio] . identifier[pipeline] . identifier[alignment] keyword[import] identifier[TOOLS] keyword[with] identifier[pysam] . identifier[Samfile] ( identifier[in_bam] , literal[string] ) keyword[as] identifier[bamfile] : keyword[for] identifier[pg] keyword[in] identifier[bamfile] . identifier[header] . identifier[get] ( literal[string] ,[]): keyword[for] identifier[ka] keyword[in] identifier[TOOLS] . identifier[keys] (): keyword[if] identifier[pg] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] (). identifier[find] ( identifier[ka] )>= literal[int] : keyword[return] identifier[ka]
def aligner_from_header(in_bam): """Identify aligner from the BAM header; handling pre-aligned inputs. """ from bcbio.pipeline.alignment import TOOLS with pysam.Samfile(in_bam, 'rb') as bamfile: for pg in bamfile.header.get('PG', []): for ka in TOOLS.keys(): if pg.get('PN', '').lower().find(ka) >= 0: return ka # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ka']] # depends on [control=['for'], data=['pg']] # depends on [control=['with'], data=['bamfile']]
def list_directors(self, service_id, version_number):
    """List the directors for a particular service and version.

    :param service_id: id of the service to query
    :param version_number: version number of the service configuration
    :return: list of FastlyDirector objects. A real list (rather than a
        lazy, one-shot ``map`` iterator) so callers can iterate it more
        than once and take its length.
    """
    content = self._fetch("/service/%s/version/%d/director"
                          % (service_id, version_number))
    return [FastlyDirector(self, item) for item in content]
def function[list_directors, parameter[self, service_id, version_number]]: constant[List the directors for a particular service and version.] variable[content] assign[=] call[name[self]._fetch, parameter[binary_operation[constant[/service/%s/version/%d/director] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f10340>, <ast.Name object at 0x7da1b0f117b0>]]]]] return[call[name[map], parameter[<ast.Lambda object at 0x7da1b0f10700>, name[content]]]]
keyword[def] identifier[list_directors] ( identifier[self] , identifier[service_id] , identifier[version_number] ): literal[string] identifier[content] = identifier[self] . identifier[_fetch] ( literal[string] %( identifier[service_id] , identifier[version_number] )) keyword[return] identifier[map] ( keyword[lambda] identifier[x] : identifier[FastlyDirector] ( identifier[self] , identifier[x] ), identifier[content] )
def list_directors(self, service_id, version_number): """List the directors for a particular service and version.""" content = self._fetch('/service/%s/version/%d/director' % (service_id, version_number)) return map(lambda x: FastlyDirector(self, x), content)
def _sentence_to_interstitial_spacing(self): """Fix common spacing errors caused by LaTeX's habit of using an inter-sentence space after any full stop.""" not_sentence_end_chars = [' '] abbreviations = ['i.e.', 'e.g.', ' v.', ' w.', ' wh.'] titles = ['Prof.', 'Mr.', 'Mrs.', 'Messrs.', 'Mmes.', 'Msgr.', 'Ms.', 'Fr.', 'Rev.', 'St.', 'Dr.', 'Lieut.', 'Lt.', 'Capt.', 'Cptn.', 'Sgt.', 'Sjt.', 'Gen.', 'Hon.', 'Cpl.', 'L-Cpl.', 'Pvt.', 'Dvr.', 'Gnr.', 'Spr.', 'Col.', 'Lt-Col', 'Lt-Gen.', 'Mx.'] for abbrev in abbreviations: for x in not_sentence_end_chars: self._str_replacement(abbrev + x, abbrev + '\ ') for title in titles: for x in not_sentence_end_chars: self._str_replacement(title + x, title + '~')
def function[_sentence_to_interstitial_spacing, parameter[self]]: constant[Fix common spacing errors caused by LaTeX's habit of using an inter-sentence space after any full stop.] variable[not_sentence_end_chars] assign[=] list[[<ast.Constant object at 0x7da2054a42e0>]] variable[abbreviations] assign[=] list[[<ast.Constant object at 0x7da2054a7730>, <ast.Constant object at 0x7da2054a52d0>, <ast.Constant object at 0x7da2054a5750>, <ast.Constant object at 0x7da2054a62f0>, <ast.Constant object at 0x7da2054a7b20>]] variable[titles] assign[=] list[[<ast.Constant object at 0x7da20c6abc10>, <ast.Constant object at 0x7da20c6aa620>, <ast.Constant object at 0x7da20c6abb50>, <ast.Constant object at 0x7da20c6a8910>, <ast.Constant object at 0x7da20c6ab310>, <ast.Constant object at 0x7da20c6a8eb0>, <ast.Constant object at 0x7da20c6aab30>, <ast.Constant object at 0x7da20c6a8a60>, <ast.Constant object at 0x7da20c6aa950>, <ast.Constant object at 0x7da20c6ab9d0>, <ast.Constant object at 0x7da20c6a9d20>, <ast.Constant object at 0x7da20c6a8b50>, <ast.Constant object at 0x7da20c6a85e0>, <ast.Constant object at 0x7da20c6aa020>, <ast.Constant object at 0x7da20c6a9ae0>, <ast.Constant object at 0x7da20c6a8250>, <ast.Constant object at 0x7da20c6abaf0>, <ast.Constant object at 0x7da20c6a9e70>, <ast.Constant object at 0x7da20c6a9cc0>, <ast.Constant object at 0x7da20c6aaa40>, <ast.Constant object at 0x7da20c6abf40>, <ast.Constant object at 0x7da20c6ab3a0>, <ast.Constant object at 0x7da20c6a8e50>, <ast.Constant object at 0x7da20c6a94e0>, <ast.Constant object at 0x7da20c6ab5b0>, <ast.Constant object at 0x7da20c6a8700>, <ast.Constant object at 0x7da20c6a8af0>, <ast.Constant object at 0x7da20c6ab8b0>, <ast.Constant object at 0x7da20c6ab940>]] for taget[name[abbrev]] in starred[name[abbreviations]] begin[:] for taget[name[x]] in starred[name[not_sentence_end_chars]] begin[:] call[name[self]._str_replacement, parameter[binary_operation[name[abbrev] + name[x]], binary_operation[name[abbrev] + 
constant[\ ]]]] for taget[name[title]] in starred[name[titles]] begin[:] for taget[name[x]] in starred[name[not_sentence_end_chars]] begin[:] call[name[self]._str_replacement, parameter[binary_operation[name[title] + name[x]], binary_operation[name[title] + constant[~]]]]
keyword[def] identifier[_sentence_to_interstitial_spacing] ( identifier[self] ): literal[string] identifier[not_sentence_end_chars] =[ literal[string] ] identifier[abbreviations] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[titles] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[for] identifier[abbrev] keyword[in] identifier[abbreviations] : keyword[for] identifier[x] keyword[in] identifier[not_sentence_end_chars] : identifier[self] . identifier[_str_replacement] ( identifier[abbrev] + identifier[x] , identifier[abbrev] + literal[string] ) keyword[for] identifier[title] keyword[in] identifier[titles] : keyword[for] identifier[x] keyword[in] identifier[not_sentence_end_chars] : identifier[self] . identifier[_str_replacement] ( identifier[title] + identifier[x] , identifier[title] + literal[string] )
def _sentence_to_interstitial_spacing(self): """Fix common spacing errors caused by LaTeX's habit of using an inter-sentence space after any full stop.""" not_sentence_end_chars = [' '] abbreviations = ['i.e.', 'e.g.', ' v.', ' w.', ' wh.'] titles = ['Prof.', 'Mr.', 'Mrs.', 'Messrs.', 'Mmes.', 'Msgr.', 'Ms.', 'Fr.', 'Rev.', 'St.', 'Dr.', 'Lieut.', 'Lt.', 'Capt.', 'Cptn.', 'Sgt.', 'Sjt.', 'Gen.', 'Hon.', 'Cpl.', 'L-Cpl.', 'Pvt.', 'Dvr.', 'Gnr.', 'Spr.', 'Col.', 'Lt-Col', 'Lt-Gen.', 'Mx.'] for abbrev in abbreviations: for x in not_sentence_end_chars: self._str_replacement(abbrev + x, abbrev + '\\ ') # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['abbrev']] for title in titles: for x in not_sentence_end_chars: self._str_replacement(title + x, title + '~') # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['title']]
def bivnorm(sx, sy, cxy):
    """Compute the normalization constant of a Gaussian bivariate
    distribution, i.e. ``1 / (2 pi sqrt(sx**2 sy**2 - cxy**2))``, with
    a lot of sanity checking of the parameters.

    :param sx: standard deviation (not variance) of the x variable
    :param sy: standard deviation (not variance) of the y variable
    :param cxy: covariance (not correlation coefficient) of x and y
    :return: the scalar normalization
    :raises ValueError: if the covariance is (numerically) out of bounds
    """
    _bivcheck(sx, sy, cxy)
    from numpy import pi, sqrt

    determinant = (sx * sy)**2 - cxy**2
    if determinant <= 0:
        raise ValueError('covariance just barely out of bounds '
                         '(sx=%.10e, sy=%.10e, cxy=%.10e, cxy/sxsy=%.16f)'
                         % (sx, sy, cxy, cxy / (sx * sy)))
    return (2 * pi * sqrt(determinant))**-1
def function[bivnorm, parameter[sx, sy, cxy]]: constant[Given the parameters of a Gaussian bivariate distribution, compute the correct normalization for the equivalent 2D Gaussian. It's 1 / (2 pi sqrt (sx**2 sy**2 - cxy**2). This function adds a lot of sanity checking. Inputs: * sx: standard deviation (not variance) of x var * sy: standard deviation (not variance) of y var * cxy: covariance (not correlation coefficient) of x and y Returns: the scalar normalization ] call[name[_bivcheck], parameter[name[sx], name[sy], name[cxy]]] from relative_module[numpy] import module[pi], module[sqrt] variable[t] assign[=] binary_operation[binary_operation[binary_operation[name[sx] * name[sy]] ** constant[2]] - binary_operation[name[cxy] ** constant[2]]] if compare[name[t] less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da1b2647940> return[binary_operation[binary_operation[binary_operation[constant[2] * name[pi]] * call[name[sqrt], parameter[name[t]]]] ** <ast.UnaryOp object at 0x7da1b26474f0>]]
keyword[def] identifier[bivnorm] ( identifier[sx] , identifier[sy] , identifier[cxy] ): literal[string] identifier[_bivcheck] ( identifier[sx] , identifier[sy] , identifier[cxy] ) keyword[from] identifier[numpy] keyword[import] identifier[pi] , identifier[sqrt] identifier[t] =( identifier[sx] * identifier[sy] )** literal[int] - identifier[cxy] ** literal[int] keyword[if] identifier[t] <= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] % ( identifier[sx] , identifier[sy] , identifier[cxy] , identifier[cxy] /( identifier[sx] * identifier[sy] ))) keyword[return] ( literal[int] * identifier[pi] * identifier[sqrt] ( identifier[t] ))**- literal[int]
def bivnorm(sx, sy, cxy): """Given the parameters of a Gaussian bivariate distribution, compute the correct normalization for the equivalent 2D Gaussian. It's 1 / (2 pi sqrt (sx**2 sy**2 - cxy**2). This function adds a lot of sanity checking. Inputs: * sx: standard deviation (not variance) of x var * sy: standard deviation (not variance) of y var * cxy: covariance (not correlation coefficient) of x and y Returns: the scalar normalization """ _bivcheck(sx, sy, cxy) from numpy import pi, sqrt t = (sx * sy) ** 2 - cxy ** 2 if t <= 0: raise ValueError('covariance just barely out of bounds (sx=%.10e, sy=%.10e, cxy=%.10e, cxy/sxsy=%.16f)' % (sx, sy, cxy, cxy / (sx * sy))) # depends on [control=['if'], data=[]] return (2 * pi * sqrt(t)) ** (-1)
def do_rmpostfix(self, arg):
    """Removes a postfix function from a variable. See 'postfix'.

    :param arg: name of the variable whose postfix function should be
        removed, or "*" to remove every postfix function.
    """
    functions = self.curargs["functions"]
    altered = False
    if arg in functions:
        del functions[arg]
        altered = True
    elif arg == "*" and functions:
        # Clearing in one shot replaces the per-key delete loop; the
        # truthiness guard preserves the original behavior of not
        # re-listing when there was nothing to remove.
        functions.clear()
        altered = True
    if altered:
        # Show the updated postfix list so the user sees the effect.
        self.do_postfix("list")
def function[do_rmpostfix, parameter[self, arg]]: constant[Removes a postfix function from a variable. See 'postfix'.] variable[altered] assign[=] constant[False] if compare[name[arg] in call[name[self].curargs][constant[functions]]] begin[:] <ast.Delete object at 0x7da1b2547910> variable[altered] assign[=] constant[True] if name[altered] begin[:] call[name[self].do_postfix, parameter[constant[list]]]
keyword[def] identifier[do_rmpostfix] ( identifier[self] , identifier[arg] ): literal[string] identifier[altered] = keyword[False] keyword[if] identifier[arg] keyword[in] identifier[self] . identifier[curargs] [ literal[string] ]: keyword[del] identifier[self] . identifier[curargs] [ literal[string] ][ identifier[arg] ] identifier[altered] = keyword[True] keyword[elif] identifier[arg] == literal[string] : keyword[for] identifier[varname] keyword[in] identifier[list] ( identifier[self] . identifier[curargs] [ literal[string] ]. identifier[keys] ()): keyword[del] identifier[self] . identifier[curargs] [ literal[string] ][ identifier[varname] ] identifier[altered] = keyword[True] keyword[if] identifier[altered] : identifier[self] . identifier[do_postfix] ( literal[string] )
def do_rmpostfix(self, arg): """Removes a postfix function from a variable. See 'postfix'.""" altered = False if arg in self.curargs['functions']: del self.curargs['functions'][arg] altered = True # depends on [control=['if'], data=['arg']] elif arg == '*': for varname in list(self.curargs['functions'].keys()): del self.curargs['functions'][varname] # depends on [control=['for'], data=['varname']] altered = True # depends on [control=['if'], data=[]] if altered: self.do_postfix('list') # depends on [control=['if'], data=[]]
def _make_dict_elems(build_instr, builders):
    """
    Return a list of keys and a list of values for the dictionary literal
    generated by ``build_instr``.
    """
    pairs = []
    for _ in range(build_instr.arg):
        top = builders.pop()
        if not isinstance(top, instrs.STORE_MAP):
            raise DecompilationError(
                "Expected a STORE_MAP but got %s" % top
            )
        # The key expression is consumed before the value expression.
        pairs.append((make_expr(builders), make_expr(builders)))
    # Entries come off the stack in reverse of how they appear in the AST.
    pairs.reverse()
    keys = [key for key, _ in pairs]
    values = [value for _, value in pairs]
    return keys, values
def function[_make_dict_elems, parameter[build_instr, builders]]: constant[ Return a list of keys and a list of values for the dictionary literal generated by ``build_instr``. ] variable[keys] assign[=] list[[]] variable[values] assign[=] list[[]] for taget[name[_]] in starred[call[name[range], parameter[name[build_instr].arg]]] begin[:] variable[popped] assign[=] call[name[builders].pop, parameter[]] if <ast.UnaryOp object at 0x7da1b05b74f0> begin[:] <ast.Raise object at 0x7da1b05b52a0> call[name[keys].append, parameter[call[name[make_expr], parameter[name[builders]]]]] call[name[values].append, parameter[call[name[make_expr], parameter[name[builders]]]]] call[name[keys].reverse, parameter[]] call[name[values].reverse, parameter[]] return[tuple[[<ast.Name object at 0x7da1b05b6680>, <ast.Name object at 0x7da1b05b6710>]]]
keyword[def] identifier[_make_dict_elems] ( identifier[build_instr] , identifier[builders] ): literal[string] identifier[keys] =[] identifier[values] =[] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[build_instr] . identifier[arg] ): identifier[popped] = identifier[builders] . identifier[pop] () keyword[if] keyword[not] identifier[isinstance] ( identifier[popped] , identifier[instrs] . identifier[STORE_MAP] ): keyword[raise] identifier[DecompilationError] ( literal[string] % identifier[popped] ) identifier[keys] . identifier[append] ( identifier[make_expr] ( identifier[builders] )) identifier[values] . identifier[append] ( identifier[make_expr] ( identifier[builders] )) identifier[keys] . identifier[reverse] () identifier[values] . identifier[reverse] () keyword[return] identifier[keys] , identifier[values]
def _make_dict_elems(build_instr, builders): """ Return a list of keys and a list of values for the dictionary literal generated by ``build_instr``. """ keys = [] values = [] for _ in range(build_instr.arg): popped = builders.pop() if not isinstance(popped, instrs.STORE_MAP): raise DecompilationError('Expected a STORE_MAP but got %s' % popped) # depends on [control=['if'], data=[]] keys.append(make_expr(builders)) values.append(make_expr(builders)) # depends on [control=['for'], data=[]] # Keys and values are emitted in reverse order of how they appear in the # AST. keys.reverse() values.reverse() return (keys, values)
def pack_multiple(messages):
    """
    Converts a list of SMB2CreateEABuffer structures and packs them as a
    bytes object used when setting to the SMB2CreateContextRequest
    buffer_data field. This should be used as it would calculate the
    correct next_entry_offset field value for each buffer entry.

    :param messages: List of SMB2CreateEABuffer structures
    :return: bytes object that is set on the SMB2CreateContextRequest
        buffer_data field.
    """
    data = b""
    msg_count = len(messages)
    for i, msg in enumerate(messages):
        if i == msg_count - 1:
            # The final entry must keep next_entry_offset == 0 so the
            # reader knows the chain ends here. (Previously this was
            # overwritten with len(msg) below -- a bug.)
            msg['next_entry_offset'] = 0
        else:
            # The end padding value is only populated when the entry
            # offset is non-zero, so temporarily set it to 1 to make
            # len(msg) include the padding, then store the real offset.
            msg['next_entry_offset'] = 1
            msg['next_entry_offset'] = len(msg)
        data += msg.pack()
    return data
def function[pack_multiple, parameter[messages]]: constant[ Converts a list of SMB2CreateEABuffer structures and packs them as a bytes object used when setting to the SMB2CreateContextRequest buffer_data field. This should be used as it would calculate the correct next_entry_offset field value for each buffer entry. :param messages: List of SMB2CreateEABuffer structures :return: bytes object that is set on the SMB2CreateContextRequest buffer_data field. ] variable[data] assign[=] constant[b''] variable[msg_count] assign[=] call[name[len], parameter[name[messages]]] for taget[tuple[[<ast.Name object at 0x7da1b06cdb40>, <ast.Name object at 0x7da1b06ce4d0>]]] in starred[call[name[enumerate], parameter[name[messages]]]] begin[:] if compare[name[i] equal[==] binary_operation[name[msg_count] - constant[1]]] begin[:] call[name[msg]][constant[next_entry_offset]] assign[=] constant[0] <ast.AugAssign object at 0x7da18c4cc640> return[name[data]]
keyword[def] identifier[pack_multiple] ( identifier[messages] ): literal[string] identifier[data] = literal[string] identifier[msg_count] = identifier[len] ( identifier[messages] ) keyword[for] identifier[i] , identifier[msg] keyword[in] identifier[enumerate] ( identifier[messages] ): keyword[if] identifier[i] == identifier[msg_count] - literal[int] : identifier[msg] [ literal[string] ]= literal[int] keyword[else] : identifier[msg] [ literal[string] ]= literal[int] identifier[msg] [ literal[string] ]= identifier[len] ( identifier[msg] ) identifier[data] += identifier[msg] . identifier[pack] () keyword[return] identifier[data]
def pack_multiple(messages): """ Converts a list of SMB2CreateEABuffer structures and packs them as a bytes object used when setting to the SMB2CreateContextRequest buffer_data field. This should be used as it would calculate the correct next_entry_offset field value for each buffer entry. :param messages: List of SMB2CreateEABuffer structures :return: bytes object that is set on the SMB2CreateContextRequest buffer_data field. """ data = b'' msg_count = len(messages) for (i, msg) in enumerate(messages): if i == msg_count - 1: msg['next_entry_offset'] = 0 # depends on [control=['if'], data=[]] else: # because the end padding val won't be populated if the entry # offset is 0, we set to 1 so the len calc is correct msg['next_entry_offset'] = 1 msg['next_entry_offset'] = len(msg) data += msg.pack() # depends on [control=['for'], data=[]] return data
def add_val(self, val):
    """Merge *val* into the stored configuration and persist it.

    The configuration is re-read first so the update applies on top of
    the latest on-disk state.

    :param val: dict of configuration keys/values to add or overwrite
    :raises ValueError: if *val* is not a dict
    """
    # isinstance(val, type({})) was an obfuscated dict check, and the
    # old ValueError(type({})) message always printed "dict" regardless
    # of the offending value.
    if not isinstance(val, dict):
        raise ValueError("expected a dict, got %s" % type(val))
    self.read()
    self.config.update(val)
    self.save()
def function[add_val, parameter[self, val]]: constant[add value in form of dict] if <ast.UnaryOp object at 0x7da1b0eb8400> begin[:] <ast.Raise object at 0x7da1b0eb8f70> call[name[self].read, parameter[]] call[name[self].config.update, parameter[name[val]]] call[name[self].save, parameter[]]
keyword[def] identifier[add_val] ( identifier[self] , identifier[val] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[val] , identifier[type] ({})): keyword[raise] identifier[ValueError] ( identifier[type] ({})) identifier[self] . identifier[read] () identifier[self] . identifier[config] . identifier[update] ( identifier[val] ) identifier[self] . identifier[save] ()
def add_val(self, val): """add value in form of dict""" if not isinstance(val, type({})): raise ValueError(type({})) # depends on [control=['if'], data=[]] self.read() self.config.update(val) self.save()
def request_password_reset(email):
    """Ask Parse to start its password-reset flow for *email*.

    :param email: the account email address to reset
    :return: True when the request succeeded, False when Parse
        reported an error.
    """
    endpoint = '/'.join([API_ROOT, 'requestPasswordReset'])
    try:
        User.POST(endpoint, email=email)
    except ParseError:
        return False
    return True
def function[request_password_reset, parameter[email]]: constant[Trigger Parse's Password Process. Return True/False indicate success/failure on the request] variable[url] assign[=] call[constant[/].join, parameter[list[[<ast.Name object at 0x7da1b0577700>, <ast.Constant object at 0x7da1b05771f0>]]]] <ast.Try object at 0x7da1b0576890>
keyword[def] identifier[request_password_reset] ( identifier[email] ): literal[string] identifier[url] = literal[string] . identifier[join] ([ identifier[API_ROOT] , literal[string] ]) keyword[try] : identifier[User] . identifier[POST] ( identifier[url] , identifier[email] = identifier[email] ) keyword[return] keyword[True] keyword[except] identifier[ParseError] : keyword[return] keyword[False]
def request_password_reset(email): """Trigger Parse's Password Process. Return True/False indicate success/failure on the request""" url = '/'.join([API_ROOT, 'requestPasswordReset']) try: User.POST(url, email=email) return True # depends on [control=['try'], data=[]] except ParseError: return False # depends on [control=['except'], data=[]]
def is_possible_type(
    self, abstract_type: GraphQLAbstractType, possible_type: GraphQLObjectType
) -> bool:
    """Check whether a concrete type is possible for an abstract type."""
    cache = self._possible_type_map
    if abstract_type.name not in cache:
        # Lazily populate the per-abstract-type name cache on first use.
        cache[abstract_type.name] = {
            type_.name for type_ in self.get_possible_types(abstract_type)
        }
    return possible_type.name in cache[abstract_type.name]
def function[is_possible_type, parameter[self, abstract_type, possible_type]]: constant[Check whether a concrete type is possible for an abstract type.] variable[possible_type_map] assign[=] name[self]._possible_type_map <ast.Try object at 0x7da1b1d80130> return[compare[name[possible_type].name in name[possible_type_names]]]
keyword[def] identifier[is_possible_type] ( identifier[self] , identifier[abstract_type] : identifier[GraphQLAbstractType] , identifier[possible_type] : identifier[GraphQLObjectType] )-> identifier[bool] : literal[string] identifier[possible_type_map] = identifier[self] . identifier[_possible_type_map] keyword[try] : identifier[possible_type_names] = identifier[possible_type_map] [ identifier[abstract_type] . identifier[name] ] keyword[except] identifier[KeyError] : identifier[possible_types] = identifier[self] . identifier[get_possible_types] ( identifier[abstract_type] ) identifier[possible_type_names] ={ identifier[type_] . identifier[name] keyword[for] identifier[type_] keyword[in] identifier[possible_types] } identifier[possible_type_map] [ identifier[abstract_type] . identifier[name] ]= identifier[possible_type_names] keyword[return] identifier[possible_type] . identifier[name] keyword[in] identifier[possible_type_names]
def is_possible_type(self, abstract_type: GraphQLAbstractType, possible_type: GraphQLObjectType) -> bool: """Check whether a concrete type is possible for an abstract type.""" possible_type_map = self._possible_type_map try: possible_type_names = possible_type_map[abstract_type.name] # depends on [control=['try'], data=[]] except KeyError: possible_types = self.get_possible_types(abstract_type) possible_type_names = {type_.name for type_ in possible_types} possible_type_map[abstract_type.name] = possible_type_names # depends on [control=['except'], data=[]] return possible_type.name in possible_type_names
def layer_depth(lat, lon, layerID="LID-BOTTOM"):
    """Return the depth of the named layer at lat / lon (degrees), where
    lat/lon may be arrays of equal size. Depths are returned in metres.
    """
    # Longitude must be wrapped into [0, 360) to match the dataset grid.
    wrapped_lon = np.array(lon) % 360.0
    lat_arr = np.array(lat)

    depths, _err = _interpolator.interpolate(
        np.radians(wrapped_lon),
        np.radians(lat_arr),
        _litho_data[l1_layer_decode[layerID], l1_data_decode["DEPTH"]],
        order=1)

    return depths
def function[layer_depth, parameter[lat, lon, layerID]]: constant[Returns layer depth at lat / lon (degrees) where lat/lon may be arrays (of equal size). Depths are returned in metres. ] variable[lon1] assign[=] binary_operation[call[name[np].array, parameter[name[lon]]] <ast.Mod object at 0x7da2590d6920> constant[360.0]] variable[lat1] assign[=] call[name[np].array, parameter[name[lat]]] <ast.Tuple object at 0x7da1b1c3b430> assign[=] call[name[_interpolator].interpolate, parameter[call[name[np].radians, parameter[name[lon1]]], call[name[np].radians, parameter[name[lat1]]], call[name[_litho_data]][tuple[[<ast.Subscript object at 0x7da1b2309db0>, <ast.Subscript object at 0x7da1b246dcc0>]]]]] return[name[data]]
keyword[def] identifier[layer_depth] ( identifier[lat] , identifier[lon] , identifier[layerID] = literal[string] ): literal[string] identifier[lon1] = identifier[np] . identifier[array] ( identifier[lon] )% literal[int] identifier[lat1] = identifier[np] . identifier[array] ( identifier[lat] ) identifier[data] , identifier[err] = identifier[_interpolator] . identifier[interpolate] ( identifier[np] . identifier[radians] ( identifier[lon1] ), identifier[np] . identifier[radians] ( identifier[lat1] ), identifier[_litho_data] [ identifier[l1_layer_decode] [ identifier[layerID] ], identifier[l1_data_decode] [ literal[string] ]], identifier[order] = literal[int] ) keyword[return] identifier[data]
def layer_depth(lat, lon, layerID='LID-BOTTOM'): """Returns layer depth at lat / lon (degrees) where lat/lon may be arrays (of equal size). Depths are returned in metres. """ ## Must wrap longitude from 0 to 360 ... lon1 = np.array(lon) % 360.0 lat1 = np.array(lat) # ## Must wrap longitude from -180 to 180 ... # # lon1[np.where(lon1 > 180.0)] = 360.0 - lon1[np.where(lon1 > 180.0)] # (data, err) = _interpolator.interpolate(np.radians(lon1), np.radians(lat1), _litho_data[l1_layer_decode[layerID], l1_data_decode['DEPTH']], order=1) return data
def find_suitable_encoding(self, char):
    """Pick a code page capable of representing *char*.

    Candidates are tried in a deliberate order:

    1. Code pages already used before -- they have a good chance of
       working again, shrinking the search space, and re-using them
       keeps the number of codepage-change instructions low (any
       performance gain is presumably minor).
    2. Code pages in lower ESCPOS slots -- presumably more likely to be
       supported, improving our odds when a printer profile is missing
       or incomplete.

    Returns the chosen encoding name, or None when nothing matches.
    """
    ranked = sorted(self.codepages.items(), key=self.__encoding_sort_func)
    for candidate, _slot in ranked:
        if not self.can_encode(candidate, char):
            continue
        # Remember the winner so future lookups prefer it.
        self.used_encodings.add(candidate)
        return candidate
def function[find_suitable_encoding, parameter[self, char]]: constant[The order of our search is a specific one: 1. code pages that we already tried before; there is a good chance they might work again, reducing the search space, and by re-using already used encodings we might also reduce the number of codepage change instructiosn we have to send. Still, any performance gains will presumably be fairly minor. 2. code pages in lower ESCPOS slots first. Presumably, they are more likely to be supported, so if a printer profile is missing or incomplete, we might increase our change that the code page we pick for this character is actually supported. ] variable[sorted_encodings] assign[=] call[name[sorted], parameter[call[name[self].codepages.items, parameter[]]]] for taget[tuple[[<ast.Name object at 0x7da1b1d3bc40>, <ast.Name object at 0x7da1b1d383d0>]]] in starred[name[sorted_encodings]] begin[:] if call[name[self].can_encode, parameter[name[encoding], name[char]]] begin[:] call[name[self].used_encodings.add, parameter[name[encoding]]] return[name[encoding]]
keyword[def] identifier[find_suitable_encoding] ( identifier[self] , identifier[char] ): literal[string] identifier[sorted_encodings] = identifier[sorted] ( identifier[self] . identifier[codepages] . identifier[items] (), identifier[key] = identifier[self] . identifier[__encoding_sort_func] ) keyword[for] identifier[encoding] , identifier[_] keyword[in] identifier[sorted_encodings] : keyword[if] identifier[self] . identifier[can_encode] ( identifier[encoding] , identifier[char] ): identifier[self] . identifier[used_encodings] . identifier[add] ( identifier[encoding] ) keyword[return] identifier[encoding]
def find_suitable_encoding(self, char): """The order of our search is a specific one: 1. code pages that we already tried before; there is a good chance they might work again, reducing the search space, and by re-using already used encodings we might also reduce the number of codepage change instructiosn we have to send. Still, any performance gains will presumably be fairly minor. 2. code pages in lower ESCPOS slots first. Presumably, they are more likely to be supported, so if a printer profile is missing or incomplete, we might increase our change that the code page we pick for this character is actually supported. """ sorted_encodings = sorted(self.codepages.items(), key=self.__encoding_sort_func) for (encoding, _) in sorted_encodings: if self.can_encode(encoding, char): # This encoding worked; at it to the set of used ones. self.used_encodings.add(encoding) return encoding # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def train(self, category, text):
    """ Trains a category with a sample of text :param category: the name of the category we want to train :type category: str :param text: the text we want to train the category with :type text: str """
    try:
        target = self.categories.get_category(category)
    except KeyError:
        # First sample for this category -- register it on the fly.
        target = self.categories.add_category(category)
    token_counts = self.count_token_occurrences(self.tokenizer(str(text)))
    for token, occurrences in token_counts.items():
        target.train_token(token, occurrences)
    # Token totals changed, so refresh the per-category probabilities.
    self.calculate_category_probability()
def function[train, parameter[self, category, text]]: constant[ Trains a category with a sample of text :param category: the name of the category we want to train :type category: str :param text: the text we want to train the category with :type text: str ] <ast.Try object at 0x7da1b0aac820> variable[tokens] assign[=] call[name[self].tokenizer, parameter[call[name[str], parameter[name[text]]]]] variable[occurrence_counts] assign[=] call[name[self].count_token_occurrences, parameter[name[tokens]]] for taget[tuple[[<ast.Name object at 0x7da1b0a2c790>, <ast.Name object at 0x7da1b0a2cdf0>]]] in starred[call[name[occurrence_counts].items, parameter[]]] begin[:] call[name[bayes_category].train_token, parameter[name[word], name[count]]] call[name[self].calculate_category_probability, parameter[]]
keyword[def] identifier[train] ( identifier[self] , identifier[category] , identifier[text] ): literal[string] keyword[try] : identifier[bayes_category] = identifier[self] . identifier[categories] . identifier[get_category] ( identifier[category] ) keyword[except] identifier[KeyError] : identifier[bayes_category] = identifier[self] . identifier[categories] . identifier[add_category] ( identifier[category] ) identifier[tokens] = identifier[self] . identifier[tokenizer] ( identifier[str] ( identifier[text] )) identifier[occurrence_counts] = identifier[self] . identifier[count_token_occurrences] ( identifier[tokens] ) keyword[for] identifier[word] , identifier[count] keyword[in] identifier[occurrence_counts] . identifier[items] (): identifier[bayes_category] . identifier[train_token] ( identifier[word] , identifier[count] ) identifier[self] . identifier[calculate_category_probability] ()
def train(self, category, text): """ Trains a category with a sample of text :param category: the name of the category we want to train :type category: str :param text: the text we want to train the category with :type text: str """ try: bayes_category = self.categories.get_category(category) # depends on [control=['try'], data=[]] except KeyError: bayes_category = self.categories.add_category(category) # depends on [control=['except'], data=[]] tokens = self.tokenizer(str(text)) occurrence_counts = self.count_token_occurrences(tokens) for (word, count) in occurrence_counts.items(): bayes_category.train_token(word, count) # depends on [control=['for'], data=[]] # Updating our per-category overall probabilities self.calculate_category_probability()
def load(filename): ''' Open and return the supplied json file ''' global _DICTIONARY try: json_file = filename + '.json' with open(os.path.join(_DEFAULT_DIR, json_file), 'rb') as f: _DICTIONARY = json.load(f) except IOError: raise IOError('Language file not found. Make sure that your ', 'translation file is in the languages directory, ')
def function[load, parameter[filename]]: constant[ Open and return the supplied json file ] <ast.Global object at 0x7da18eb57880> <ast.Try object at 0x7da18eb56320>
keyword[def] identifier[load] ( identifier[filename] ): literal[string] keyword[global] identifier[_DICTIONARY] keyword[try] : identifier[json_file] = identifier[filename] + literal[string] keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[_DEFAULT_DIR] , identifier[json_file] ), literal[string] ) keyword[as] identifier[f] : identifier[_DICTIONARY] = identifier[json] . identifier[load] ( identifier[f] ) keyword[except] identifier[IOError] : keyword[raise] identifier[IOError] ( literal[string] , literal[string] )
def load(filename): """ Open and return the supplied json file """ global _DICTIONARY try: json_file = filename + '.json' with open(os.path.join(_DEFAULT_DIR, json_file), 'rb') as f: _DICTIONARY = json.load(f) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]] except IOError: raise IOError('Language file not found. Make sure that your ', 'translation file is in the languages directory, ') # depends on [control=['except'], data=[]]
def keep_form_with_max_profit(self, forms=None): """ This converts the dataframe, which shows all profitable forms, to the form with the greatest profit, so that more profitable forms outcompete less profitable forms. Parameters ---------- forms: list of strings List of forms which compete which other. Can leave some out. Returns ------- Nothing. Goes from a multi-index to a single index with only the most profitable form. """ f = self.feasibility if forms is not None: f = f[forms] if len(f) > 0: mu = self._max_form(f, "max_profit") indexes = [tuple(x) for x in mu.reset_index().values] else: indexes = [] df = f.stack(level=0).loc[indexes] df.index.names = ["parcel_id", "form"] df = df.reset_index(level=1) return df
def function[keep_form_with_max_profit, parameter[self, forms]]: constant[ This converts the dataframe, which shows all profitable forms, to the form with the greatest profit, so that more profitable forms outcompete less profitable forms. Parameters ---------- forms: list of strings List of forms which compete which other. Can leave some out. Returns ------- Nothing. Goes from a multi-index to a single index with only the most profitable form. ] variable[f] assign[=] name[self].feasibility if compare[name[forms] is_not constant[None]] begin[:] variable[f] assign[=] call[name[f]][name[forms]] if compare[call[name[len], parameter[name[f]]] greater[>] constant[0]] begin[:] variable[mu] assign[=] call[name[self]._max_form, parameter[name[f], constant[max_profit]]] variable[indexes] assign[=] <ast.ListComp object at 0x7da20c76d120> variable[df] assign[=] call[call[name[f].stack, parameter[]].loc][name[indexes]] name[df].index.names assign[=] list[[<ast.Constant object at 0x7da2054a4220>, <ast.Constant object at 0x7da2054a4e80>]] variable[df] assign[=] call[name[df].reset_index, parameter[]] return[name[df]]
keyword[def] identifier[keep_form_with_max_profit] ( identifier[self] , identifier[forms] = keyword[None] ): literal[string] identifier[f] = identifier[self] . identifier[feasibility] keyword[if] identifier[forms] keyword[is] keyword[not] keyword[None] : identifier[f] = identifier[f] [ identifier[forms] ] keyword[if] identifier[len] ( identifier[f] )> literal[int] : identifier[mu] = identifier[self] . identifier[_max_form] ( identifier[f] , literal[string] ) identifier[indexes] =[ identifier[tuple] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[mu] . identifier[reset_index] (). identifier[values] ] keyword[else] : identifier[indexes] =[] identifier[df] = identifier[f] . identifier[stack] ( identifier[level] = literal[int] ). identifier[loc] [ identifier[indexes] ] identifier[df] . identifier[index] . identifier[names] =[ literal[string] , literal[string] ] identifier[df] = identifier[df] . identifier[reset_index] ( identifier[level] = literal[int] ) keyword[return] identifier[df]
def keep_form_with_max_profit(self, forms=None): """ This converts the dataframe, which shows all profitable forms, to the form with the greatest profit, so that more profitable forms outcompete less profitable forms. Parameters ---------- forms: list of strings List of forms which compete which other. Can leave some out. Returns ------- Nothing. Goes from a multi-index to a single index with only the most profitable form. """ f = self.feasibility if forms is not None: f = f[forms] # depends on [control=['if'], data=['forms']] if len(f) > 0: mu = self._max_form(f, 'max_profit') indexes = [tuple(x) for x in mu.reset_index().values] # depends on [control=['if'], data=[]] else: indexes = [] df = f.stack(level=0).loc[indexes] df.index.names = ['parcel_id', 'form'] df = df.reset_index(level=1) return df
def p_exprs(p): """ exprs : expr | exprs COMMA expr """ if len(p) == 2: p[0] = node.expr_list([p[1]]) elif len(p) == 4: p[0] = p[1] p[0].append(p[3]) else: assert (0) assert isinstance(p[0], node.expr_list)
def function[p_exprs, parameter[p]]: constant[ exprs : expr | exprs COMMA expr ] if compare[call[name[len], parameter[name[p]]] equal[==] constant[2]] begin[:] call[name[p]][constant[0]] assign[=] call[name[node].expr_list, parameter[list[[<ast.Subscript object at 0x7da1b1c7db10>]]]] assert[call[name[isinstance], parameter[call[name[p]][constant[0]], name[node].expr_list]]]
keyword[def] identifier[p_exprs] ( identifier[p] ): literal[string] keyword[if] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]= identifier[node] . identifier[expr_list] ([ identifier[p] [ literal[int] ]]) keyword[elif] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ] identifier[p] [ literal[int] ]. identifier[append] ( identifier[p] [ literal[int] ]) keyword[else] : keyword[assert] ( literal[int] ) keyword[assert] identifier[isinstance] ( identifier[p] [ literal[int] ], identifier[node] . identifier[expr_list] )
def p_exprs(p): """ exprs : expr | exprs COMMA expr """ if len(p) == 2: p[0] = node.expr_list([p[1]]) # depends on [control=['if'], data=[]] elif len(p) == 4: p[0] = p[1] p[0].append(p[3]) # depends on [control=['if'], data=[]] else: assert 0 assert isinstance(p[0], node.expr_list)
def iterator(self): """ If search has occurred and no ordering has occurred, decorate each result with the number of search terms so that it can be sorted by the number of occurrence of terms. In the case of search fields that span model relationships, we cannot accurately match occurrences without some very complicated traversal code, which we won't attempt. So in this case, namely when there are no matches for a result (count=0), and search fields contain relationships (double underscores), we assume one match for one of the fields, and use the average weight of all search fields with relationships. """ results = super(SearchableQuerySet, self).iterator() if self._search_terms and not self._search_ordered: results = list(results) for i, result in enumerate(results): count = 0 related_weights = [] for (field, weight) in self._search_fields.items(): if "__" in field: related_weights.append(weight) for term in self._search_terms: field_value = getattr(result, field, None) if field_value: count += field_value.lower().count(term) * weight if not count and related_weights: count = int(sum(related_weights) / len(related_weights)) results[i].result_count = count return iter(results) return results
def function[iterator, parameter[self]]: constant[ If search has occurred and no ordering has occurred, decorate each result with the number of search terms so that it can be sorted by the number of occurrence of terms. In the case of search fields that span model relationships, we cannot accurately match occurrences without some very complicated traversal code, which we won't attempt. So in this case, namely when there are no matches for a result (count=0), and search fields contain relationships (double underscores), we assume one match for one of the fields, and use the average weight of all search fields with relationships. ] variable[results] assign[=] call[call[name[super], parameter[name[SearchableQuerySet], name[self]]].iterator, parameter[]] if <ast.BoolOp object at 0x7da204344430> begin[:] variable[results] assign[=] call[name[list], parameter[name[results]]] for taget[tuple[[<ast.Name object at 0x7da2043449d0>, <ast.Name object at 0x7da2043471c0>]]] in starred[call[name[enumerate], parameter[name[results]]]] begin[:] variable[count] assign[=] constant[0] variable[related_weights] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2043469e0>, <ast.Name object at 0x7da2043460e0>]]] in starred[call[name[self]._search_fields.items, parameter[]]] begin[:] if compare[constant[__] in name[field]] begin[:] call[name[related_weights].append, parameter[name[weight]]] for taget[name[term]] in starred[name[self]._search_terms] begin[:] variable[field_value] assign[=] call[name[getattr], parameter[name[result], name[field], constant[None]]] if name[field_value] begin[:] <ast.AugAssign object at 0x7da2044c0730> if <ast.BoolOp object at 0x7da2044c33a0> begin[:] variable[count] assign[=] call[name[int], parameter[binary_operation[call[name[sum], parameter[name[related_weights]]] / call[name[len], parameter[name[related_weights]]]]]] call[name[results]][name[i]].result_count assign[=] name[count] return[call[name[iter], parameter[name[results]]]] 
return[name[results]]
keyword[def] identifier[iterator] ( identifier[self] ): literal[string] identifier[results] = identifier[super] ( identifier[SearchableQuerySet] , identifier[self] ). identifier[iterator] () keyword[if] identifier[self] . identifier[_search_terms] keyword[and] keyword[not] identifier[self] . identifier[_search_ordered] : identifier[results] = identifier[list] ( identifier[results] ) keyword[for] identifier[i] , identifier[result] keyword[in] identifier[enumerate] ( identifier[results] ): identifier[count] = literal[int] identifier[related_weights] =[] keyword[for] ( identifier[field] , identifier[weight] ) keyword[in] identifier[self] . identifier[_search_fields] . identifier[items] (): keyword[if] literal[string] keyword[in] identifier[field] : identifier[related_weights] . identifier[append] ( identifier[weight] ) keyword[for] identifier[term] keyword[in] identifier[self] . identifier[_search_terms] : identifier[field_value] = identifier[getattr] ( identifier[result] , identifier[field] , keyword[None] ) keyword[if] identifier[field_value] : identifier[count] += identifier[field_value] . identifier[lower] (). identifier[count] ( identifier[term] )* identifier[weight] keyword[if] keyword[not] identifier[count] keyword[and] identifier[related_weights] : identifier[count] = identifier[int] ( identifier[sum] ( identifier[related_weights] )/ identifier[len] ( identifier[related_weights] )) identifier[results] [ identifier[i] ]. identifier[result_count] = identifier[count] keyword[return] identifier[iter] ( identifier[results] ) keyword[return] identifier[results]
def iterator(self): """ If search has occurred and no ordering has occurred, decorate each result with the number of search terms so that it can be sorted by the number of occurrence of terms. In the case of search fields that span model relationships, we cannot accurately match occurrences without some very complicated traversal code, which we won't attempt. So in this case, namely when there are no matches for a result (count=0), and search fields contain relationships (double underscores), we assume one match for one of the fields, and use the average weight of all search fields with relationships. """ results = super(SearchableQuerySet, self).iterator() if self._search_terms and (not self._search_ordered): results = list(results) for (i, result) in enumerate(results): count = 0 related_weights = [] for (field, weight) in self._search_fields.items(): if '__' in field: related_weights.append(weight) # depends on [control=['if'], data=[]] for term in self._search_terms: field_value = getattr(result, field, None) if field_value: count += field_value.lower().count(term) * weight # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['term']] # depends on [control=['for'], data=[]] if not count and related_weights: count = int(sum(related_weights) / len(related_weights)) # depends on [control=['if'], data=[]] results[i].result_count = count # depends on [control=['for'], data=[]] return iter(results) # depends on [control=['if'], data=[]] return results
def is_clicked(self, MouseStateType): """ Did the user depress and release the button to signify a click? MouseStateType is the button to query. Values found under StateTypes.py """ return self.previous_mouse_state.query_state(MouseStateType) and ( not self.current_mouse_state.query_state(MouseStateType))
def function[is_clicked, parameter[self, MouseStateType]]: constant[ Did the user depress and release the button to signify a click? MouseStateType is the button to query. Values found under StateTypes.py ] return[<ast.BoolOp object at 0x7da18f722b00>]
keyword[def] identifier[is_clicked] ( identifier[self] , identifier[MouseStateType] ): literal[string] keyword[return] identifier[self] . identifier[previous_mouse_state] . identifier[query_state] ( identifier[MouseStateType] ) keyword[and] ( keyword[not] identifier[self] . identifier[current_mouse_state] . identifier[query_state] ( identifier[MouseStateType] ))
def is_clicked(self, MouseStateType): """ Did the user depress and release the button to signify a click? MouseStateType is the button to query. Values found under StateTypes.py """ return self.previous_mouse_state.query_state(MouseStateType) and (not self.current_mouse_state.query_state(MouseStateType))
def get_file_mode_for_writing(context): """Get file mode for writing from tar['format']. This should return w:, w:gz, w:bz2 or w:xz. If user specified something wacky in tar.Format, that's their business. """ format = context['tar'].get('format', None) # slightly weird double-check because falsy format could mean either format # doesn't exist in input, OR that it exists and is empty. Exists-but-empty # has special meaning - default to no compression. if format or format == '': mode = f"w:{context.get_formatted_string(format)}" else: mode = 'w:xz' return mode
def function[get_file_mode_for_writing, parameter[context]]: constant[Get file mode for writing from tar['format']. This should return w:, w:gz, w:bz2 or w:xz. If user specified something wacky in tar.Format, that's their business. ] variable[format] assign[=] call[call[name[context]][constant[tar]].get, parameter[constant[format], constant[None]]] if <ast.BoolOp object at 0x7da20c9934f0> begin[:] variable[mode] assign[=] <ast.JoinedStr object at 0x7da20c990a30> return[name[mode]]
keyword[def] identifier[get_file_mode_for_writing] ( identifier[context] ): literal[string] identifier[format] = identifier[context] [ literal[string] ]. identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[format] keyword[or] identifier[format] == literal[string] : identifier[mode] = literal[string] keyword[else] : identifier[mode] = literal[string] keyword[return] identifier[mode]
def get_file_mode_for_writing(context): """Get file mode for writing from tar['format']. This should return w:, w:gz, w:bz2 or w:xz. If user specified something wacky in tar.Format, that's their business. """ format = context['tar'].get('format', None) # slightly weird double-check because falsy format could mean either format # doesn't exist in input, OR that it exists and is empty. Exists-but-empty # has special meaning - default to no compression. if format or format == '': mode = f'w:{context.get_formatted_string(format)}' # depends on [control=['if'], data=[]] else: mode = 'w:xz' return mode
def _verify_cert(self, sock: ssl.SSLSocket): '''Check if certificate matches hostname.''' # Based on tornado.iostream.SSLIOStream # Needed for older OpenSSL (<0.9.8f) versions verify_mode = self._ssl_context.verify_mode assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL), \ 'Unknown verify mode {}'.format(verify_mode) if verify_mode == ssl.CERT_NONE: return cert = sock.getpeercert() if not cert and verify_mode == ssl.CERT_OPTIONAL: return if not cert: raise SSLVerificationError('No SSL certificate given') try: ssl.match_hostname(cert, self._hostname) except ssl.CertificateError as error: raise SSLVerificationError('Invalid SSL certificate') from error
def function[_verify_cert, parameter[self, sock]]: constant[Check if certificate matches hostname.] variable[verify_mode] assign[=] name[self]._ssl_context.verify_mode assert[compare[name[verify_mode] in tuple[[<ast.Attribute object at 0x7da20eb294b0>, <ast.Attribute object at 0x7da2041d9840>, <ast.Attribute object at 0x7da2041d95d0>]]]] if compare[name[verify_mode] equal[==] name[ssl].CERT_NONE] begin[:] return[None] variable[cert] assign[=] call[name[sock].getpeercert, parameter[]] if <ast.BoolOp object at 0x7da2041d9cc0> begin[:] return[None] if <ast.UnaryOp object at 0x7da2041d8460> begin[:] <ast.Raise object at 0x7da2041d8a90> <ast.Try object at 0x7da2041dbfa0>
keyword[def] identifier[_verify_cert] ( identifier[self] , identifier[sock] : identifier[ssl] . identifier[SSLSocket] ): literal[string] identifier[verify_mode] = identifier[self] . identifier[_ssl_context] . identifier[verify_mode] keyword[assert] identifier[verify_mode] keyword[in] ( identifier[ssl] . identifier[CERT_NONE] , identifier[ssl] . identifier[CERT_REQUIRED] , identifier[ssl] . identifier[CERT_OPTIONAL] ), literal[string] . identifier[format] ( identifier[verify_mode] ) keyword[if] identifier[verify_mode] == identifier[ssl] . identifier[CERT_NONE] : keyword[return] identifier[cert] = identifier[sock] . identifier[getpeercert] () keyword[if] keyword[not] identifier[cert] keyword[and] identifier[verify_mode] == identifier[ssl] . identifier[CERT_OPTIONAL] : keyword[return] keyword[if] keyword[not] identifier[cert] : keyword[raise] identifier[SSLVerificationError] ( literal[string] ) keyword[try] : identifier[ssl] . identifier[match_hostname] ( identifier[cert] , identifier[self] . identifier[_hostname] ) keyword[except] identifier[ssl] . identifier[CertificateError] keyword[as] identifier[error] : keyword[raise] identifier[SSLVerificationError] ( literal[string] ) keyword[from] identifier[error]
def _verify_cert(self, sock: ssl.SSLSocket): """Check if certificate matches hostname.""" # Based on tornado.iostream.SSLIOStream # Needed for older OpenSSL (<0.9.8f) versions verify_mode = self._ssl_context.verify_mode assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL), 'Unknown verify mode {}'.format(verify_mode) if verify_mode == ssl.CERT_NONE: return # depends on [control=['if'], data=[]] cert = sock.getpeercert() if not cert and verify_mode == ssl.CERT_OPTIONAL: return # depends on [control=['if'], data=[]] if not cert: raise SSLVerificationError('No SSL certificate given') # depends on [control=['if'], data=[]] try: ssl.match_hostname(cert, self._hostname) # depends on [control=['try'], data=[]] except ssl.CertificateError as error: raise SSLVerificationError('Invalid SSL certificate') from error # depends on [control=['except'], data=['error']]
def deny(cls, action, **kwargs): """Deny the given action need. :param action: The action to deny. :returns: A :class:`invenio_access.models.ActionNeedMixin` instance. """ return cls.create(action, exclude=True, **kwargs)
def function[deny, parameter[cls, action]]: constant[Deny the given action need. :param action: The action to deny. :returns: A :class:`invenio_access.models.ActionNeedMixin` instance. ] return[call[name[cls].create, parameter[name[action]]]]
keyword[def] identifier[deny] ( identifier[cls] , identifier[action] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[cls] . identifier[create] ( identifier[action] , identifier[exclude] = keyword[True] ,** identifier[kwargs] )
def deny(cls, action, **kwargs): """Deny the given action need. :param action: The action to deny. :returns: A :class:`invenio_access.models.ActionNeedMixin` instance. """ return cls.create(action, exclude=True, **kwargs)
def get_kvlayer_stream_item_by_doc_id(client, doc_id): '''Retrieve :class:`streamcorpus.StreamItem`s from :mod:`kvlayer`. Namely, it returns an iterator over all documents with the given docid. The docid should be an md5 hash of the document's abs_url. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str doc_id: doc id of documents to retrieve :return: generator of :class:`streamcorpus.StreamItem` ''' if client is None: client = kvlayer.client() client.setup_namespace(STREAM_ITEM_TABLE_DEFS, STREAM_ITEM_VALUE_DEFS) doc_id_range = make_doc_id_range(doc_id) for k, v in client.scan(STREAM_ITEMS_TABLE, doc_id_range): if v is not None: errors, bytestr = streamcorpus.decrypt_and_uncompress(v) yield streamcorpus.deserialize(bytestr)
def function[get_kvlayer_stream_item_by_doc_id, parameter[client, doc_id]]: constant[Retrieve :class:`streamcorpus.StreamItem`s from :mod:`kvlayer`. Namely, it returns an iterator over all documents with the given docid. The docid should be an md5 hash of the document's abs_url. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str doc_id: doc id of documents to retrieve :return: generator of :class:`streamcorpus.StreamItem` ] if compare[name[client] is constant[None]] begin[:] variable[client] assign[=] call[name[kvlayer].client, parameter[]] call[name[client].setup_namespace, parameter[name[STREAM_ITEM_TABLE_DEFS], name[STREAM_ITEM_VALUE_DEFS]]] variable[doc_id_range] assign[=] call[name[make_doc_id_range], parameter[name[doc_id]]] for taget[tuple[[<ast.Name object at 0x7da20cabece0>, <ast.Name object at 0x7da20cabcfd0>]]] in starred[call[name[client].scan, parameter[name[STREAM_ITEMS_TABLE], name[doc_id_range]]]] begin[:] if compare[name[v] is_not constant[None]] begin[:] <ast.Tuple object at 0x7da20cabfcd0> assign[=] call[name[streamcorpus].decrypt_and_uncompress, parameter[name[v]]] <ast.Yield object at 0x7da20cabf460>
keyword[def] identifier[get_kvlayer_stream_item_by_doc_id] ( identifier[client] , identifier[doc_id] ): literal[string] keyword[if] identifier[client] keyword[is] keyword[None] : identifier[client] = identifier[kvlayer] . identifier[client] () identifier[client] . identifier[setup_namespace] ( identifier[STREAM_ITEM_TABLE_DEFS] , identifier[STREAM_ITEM_VALUE_DEFS] ) identifier[doc_id_range] = identifier[make_doc_id_range] ( identifier[doc_id] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[client] . identifier[scan] ( identifier[STREAM_ITEMS_TABLE] , identifier[doc_id_range] ): keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] : identifier[errors] , identifier[bytestr] = identifier[streamcorpus] . identifier[decrypt_and_uncompress] ( identifier[v] ) keyword[yield] identifier[streamcorpus] . identifier[deserialize] ( identifier[bytestr] )
def get_kvlayer_stream_item_by_doc_id(client, doc_id): """Retrieve :class:`streamcorpus.StreamItem`s from :mod:`kvlayer`. Namely, it returns an iterator over all documents with the given docid. The docid should be an md5 hash of the document's abs_url. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str doc_id: doc id of documents to retrieve :return: generator of :class:`streamcorpus.StreamItem` """ if client is None: client = kvlayer.client() client.setup_namespace(STREAM_ITEM_TABLE_DEFS, STREAM_ITEM_VALUE_DEFS) # depends on [control=['if'], data=['client']] doc_id_range = make_doc_id_range(doc_id) for (k, v) in client.scan(STREAM_ITEMS_TABLE, doc_id_range): if v is not None: (errors, bytestr) = streamcorpus.decrypt_and_uncompress(v) yield streamcorpus.deserialize(bytestr) # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=[]]
def attach_binary(self, content, filename): """ Attaches given binary data. :param bytes content: Binary data to be attached. :param str filename: :return: None. """ content_type = guess_content_type(filename) payload = {"Name": filename, "Content": b64encode(content).decode("utf-8"), "ContentType": content_type} self.attach(payload)
def function[attach_binary, parameter[self, content, filename]]: constant[ Attaches given binary data. :param bytes content: Binary data to be attached. :param str filename: :return: None. ] variable[content_type] assign[=] call[name[guess_content_type], parameter[name[filename]]] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f43ca0>, <ast.Constant object at 0x7da1b0f43250>, <ast.Constant object at 0x7da1b0f42140>], [<ast.Name object at 0x7da1b0f432b0>, <ast.Call object at 0x7da1b0f43130>, <ast.Name object at 0x7da1b0f437f0>]] call[name[self].attach, parameter[name[payload]]]
keyword[def] identifier[attach_binary] ( identifier[self] , identifier[content] , identifier[filename] ): literal[string] identifier[content_type] = identifier[guess_content_type] ( identifier[filename] ) identifier[payload] ={ literal[string] : identifier[filename] , literal[string] : identifier[b64encode] ( identifier[content] ). identifier[decode] ( literal[string] ), literal[string] : identifier[content_type] } identifier[self] . identifier[attach] ( identifier[payload] )
def attach_binary(self, content, filename): """ Attaches given binary data. :param bytes content: Binary data to be attached. :param str filename: :return: None. """ content_type = guess_content_type(filename) payload = {'Name': filename, 'Content': b64encode(content).decode('utf-8'), 'ContentType': content_type} self.attach(payload)
def new_table_graphicFrame(cls, id_, name, rows, cols, x, y, cx, cy): """ Return a ``<p:graphicFrame>`` element tree populated with a table element. """ graphicFrame = cls.new_graphicFrame(id_, name, x, y, cx, cy) graphicFrame.graphic.graphicData.uri = GRAPHIC_DATA_URI_TABLE graphicFrame.graphic.graphicData.append( CT_Table.new_tbl(rows, cols, cx, cy) ) return graphicFrame
def function[new_table_graphicFrame, parameter[cls, id_, name, rows, cols, x, y, cx, cy]]: constant[ Return a ``<p:graphicFrame>`` element tree populated with a table element. ] variable[graphicFrame] assign[=] call[name[cls].new_graphicFrame, parameter[name[id_], name[name], name[x], name[y], name[cx], name[cy]]] name[graphicFrame].graphic.graphicData.uri assign[=] name[GRAPHIC_DATA_URI_TABLE] call[name[graphicFrame].graphic.graphicData.append, parameter[call[name[CT_Table].new_tbl, parameter[name[rows], name[cols], name[cx], name[cy]]]]] return[name[graphicFrame]]
keyword[def] identifier[new_table_graphicFrame] ( identifier[cls] , identifier[id_] , identifier[name] , identifier[rows] , identifier[cols] , identifier[x] , identifier[y] , identifier[cx] , identifier[cy] ): literal[string] identifier[graphicFrame] = identifier[cls] . identifier[new_graphicFrame] ( identifier[id_] , identifier[name] , identifier[x] , identifier[y] , identifier[cx] , identifier[cy] ) identifier[graphicFrame] . identifier[graphic] . identifier[graphicData] . identifier[uri] = identifier[GRAPHIC_DATA_URI_TABLE] identifier[graphicFrame] . identifier[graphic] . identifier[graphicData] . identifier[append] ( identifier[CT_Table] . identifier[new_tbl] ( identifier[rows] , identifier[cols] , identifier[cx] , identifier[cy] ) ) keyword[return] identifier[graphicFrame]
def new_table_graphicFrame(cls, id_, name, rows, cols, x, y, cx, cy): """ Return a ``<p:graphicFrame>`` element tree populated with a table element. """ graphicFrame = cls.new_graphicFrame(id_, name, x, y, cx, cy) graphicFrame.graphic.graphicData.uri = GRAPHIC_DATA_URI_TABLE graphicFrame.graphic.graphicData.append(CT_Table.new_tbl(rows, cols, cx, cy)) return graphicFrame
def parse_device(lines): """Parse all the lines of a device block. A device block is composed of a header line with the name of the device and at least one extra line describing the device and its status. The extra lines have a varying format depending on the status and personality of the device (e.g. RAID1 vs RAID5, healthy vs recovery/resync). """ name, status_line, device = parse_device_header(lines.pop(0)) # There are edge cases when the device list is empty and the status line is # merged with the header line, in those cases, the status line is returned # from parse_device_header(), the rest of the time, it's the next line. if not status_line: status_line = lines.pop(0) status = parse_device_status(status_line, device["personality"]) bitmap = None resync = None for line in lines: if line.startswith(" bitmap:"): bitmap = parse_device_bitmap(line) elif line.startswith(" ["): resync = parse_device_resync_progress(line) elif line.startswith(" \tresync="): resync = parse_device_resync_standby(line) else: raise NotImplementedError("unknown device line: {0}".format(line)) device.update({ "status": status, "bitmap": bitmap, "resync": resync, }) return (name, device)
def function[parse_device, parameter[lines]]: constant[Parse all the lines of a device block. A device block is composed of a header line with the name of the device and at least one extra line describing the device and its status. The extra lines have a varying format depending on the status and personality of the device (e.g. RAID1 vs RAID5, healthy vs recovery/resync). ] <ast.Tuple object at 0x7da18f00dde0> assign[=] call[name[parse_device_header], parameter[call[name[lines].pop, parameter[constant[0]]]]] if <ast.UnaryOp object at 0x7da18f00e050> begin[:] variable[status_line] assign[=] call[name[lines].pop, parameter[constant[0]]] variable[status] assign[=] call[name[parse_device_status], parameter[name[status_line], call[name[device]][constant[personality]]]] variable[bitmap] assign[=] constant[None] variable[resync] assign[=] constant[None] for taget[name[line]] in starred[name[lines]] begin[:] if call[name[line].startswith, parameter[constant[ bitmap:]]] begin[:] variable[bitmap] assign[=] call[name[parse_device_bitmap], parameter[name[line]]] call[name[device].update, parameter[dictionary[[<ast.Constant object at 0x7da18f00f160>, <ast.Constant object at 0x7da18f00d6c0>, <ast.Constant object at 0x7da18f00d3f0>], [<ast.Name object at 0x7da18f00cca0>, <ast.Name object at 0x7da18f00d750>, <ast.Name object at 0x7da18f00db70>]]]] return[tuple[[<ast.Name object at 0x7da18f00e9e0>, <ast.Name object at 0x7da18f00fc10>]]]
keyword[def] identifier[parse_device] ( identifier[lines] ): literal[string] identifier[name] , identifier[status_line] , identifier[device] = identifier[parse_device_header] ( identifier[lines] . identifier[pop] ( literal[int] )) keyword[if] keyword[not] identifier[status_line] : identifier[status_line] = identifier[lines] . identifier[pop] ( literal[int] ) identifier[status] = identifier[parse_device_status] ( identifier[status_line] , identifier[device] [ literal[string] ]) identifier[bitmap] = keyword[None] identifier[resync] = keyword[None] keyword[for] identifier[line] keyword[in] identifier[lines] : keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[bitmap] = identifier[parse_device_bitmap] ( identifier[line] ) keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ): identifier[resync] = identifier[parse_device_resync_progress] ( identifier[line] ) keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ): identifier[resync] = identifier[parse_device_resync_standby] ( identifier[line] ) keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[line] )) identifier[device] . identifier[update] ({ literal[string] : identifier[status] , literal[string] : identifier[bitmap] , literal[string] : identifier[resync] , }) keyword[return] ( identifier[name] , identifier[device] )
def parse_device(lines): """Parse all the lines of a device block. A device block is composed of a header line with the name of the device and at least one extra line describing the device and its status. The extra lines have a varying format depending on the status and personality of the device (e.g. RAID1 vs RAID5, healthy vs recovery/resync). """ (name, status_line, device) = parse_device_header(lines.pop(0)) # There are edge cases when the device list is empty and the status line is # merged with the header line, in those cases, the status line is returned # from parse_device_header(), the rest of the time, it's the next line. if not status_line: status_line = lines.pop(0) # depends on [control=['if'], data=[]] status = parse_device_status(status_line, device['personality']) bitmap = None resync = None for line in lines: if line.startswith(' bitmap:'): bitmap = parse_device_bitmap(line) # depends on [control=['if'], data=[]] elif line.startswith(' ['): resync = parse_device_resync_progress(line) # depends on [control=['if'], data=[]] elif line.startswith(' \tresync='): resync = parse_device_resync_standby(line) # depends on [control=['if'], data=[]] else: raise NotImplementedError('unknown device line: {0}'.format(line)) # depends on [control=['for'], data=['line']] device.update({'status': status, 'bitmap': bitmap, 'resync': resync}) return (name, device)
def catch_result(task_func): """Catch printed result from Celery Task and return it in task response """ @functools.wraps(task_func, assigned=available_attrs(task_func)) def dec(*args, **kwargs): # inicialize orig_stdout = sys.stdout sys.stdout = content = StringIO() task_response = task_func(*args, **kwargs) # catch sys.stdout = orig_stdout content.seek(0) # propagate to the response task_response['stdout'] = content.read() return task_response return dec
def function[catch_result, parameter[task_func]]: constant[Catch printed result from Celery Task and return it in task response ] def function[dec, parameter[]]: variable[orig_stdout] assign[=] name[sys].stdout name[sys].stdout assign[=] call[name[StringIO], parameter[]] variable[task_response] assign[=] call[name[task_func], parameter[<ast.Starred object at 0x7da1b0f58370>]] name[sys].stdout assign[=] name[orig_stdout] call[name[content].seek, parameter[constant[0]]] call[name[task_response]][constant[stdout]] assign[=] call[name[content].read, parameter[]] return[name[task_response]] return[name[dec]]
keyword[def] identifier[catch_result] ( identifier[task_func] ): literal[string] @ identifier[functools] . identifier[wraps] ( identifier[task_func] , identifier[assigned] = identifier[available_attrs] ( identifier[task_func] )) keyword[def] identifier[dec] (* identifier[args] ,** identifier[kwargs] ): identifier[orig_stdout] = identifier[sys] . identifier[stdout] identifier[sys] . identifier[stdout] = identifier[content] = identifier[StringIO] () identifier[task_response] = identifier[task_func] (* identifier[args] ,** identifier[kwargs] ) identifier[sys] . identifier[stdout] = identifier[orig_stdout] identifier[content] . identifier[seek] ( literal[int] ) identifier[task_response] [ literal[string] ]= identifier[content] . identifier[read] () keyword[return] identifier[task_response] keyword[return] identifier[dec]
def catch_result(task_func): """Catch printed result from Celery Task and return it in task response """ @functools.wraps(task_func, assigned=available_attrs(task_func)) def dec(*args, **kwargs): # inicialize orig_stdout = sys.stdout sys.stdout = content = StringIO() task_response = task_func(*args, **kwargs) # catch sys.stdout = orig_stdout content.seek(0) # propagate to the response task_response['stdout'] = content.read() return task_response return dec
def _direct_render(self, name, attrs): """Render the widget the old way - using field_template or output_format.""" context = { 'image': self.image_url(), 'name': name, 'key': self._key, 'id': u'%s_%s' % (self.id_prefix, attrs.get('id')) if self.id_prefix else attrs.get('id'), 'audio': self.audio_url(), } self.image_and_audio = render_to_string(settings.CAPTCHA_IMAGE_TEMPLATE, context) self.hidden_field = render_to_string(settings.CAPTCHA_HIDDEN_FIELD_TEMPLATE, context) self.text_field = render_to_string(settings.CAPTCHA_TEXT_FIELD_TEMPLATE, context) return self.format_output(None)
def function[_direct_render, parameter[self, name, attrs]]: constant[Render the widget the old way - using field_template or output_format.] variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da2041db0a0>, <ast.Constant object at 0x7da2041da620>, <ast.Constant object at 0x7da2041dba60>, <ast.Constant object at 0x7da2041d9000>, <ast.Constant object at 0x7da2041dbdf0>], [<ast.Call object at 0x7da2041d9270>, <ast.Name object at 0x7da2041d9ae0>, <ast.Attribute object at 0x7da2041d8460>, <ast.IfExp object at 0x7da2041d9180>, <ast.Call object at 0x7da18bc709d0>]] name[self].image_and_audio assign[=] call[name[render_to_string], parameter[name[settings].CAPTCHA_IMAGE_TEMPLATE, name[context]]] name[self].hidden_field assign[=] call[name[render_to_string], parameter[name[settings].CAPTCHA_HIDDEN_FIELD_TEMPLATE, name[context]]] name[self].text_field assign[=] call[name[render_to_string], parameter[name[settings].CAPTCHA_TEXT_FIELD_TEMPLATE, name[context]]] return[call[name[self].format_output, parameter[constant[None]]]]
keyword[def] identifier[_direct_render] ( identifier[self] , identifier[name] , identifier[attrs] ): literal[string] identifier[context] ={ literal[string] : identifier[self] . identifier[image_url] (), literal[string] : identifier[name] , literal[string] : identifier[self] . identifier[_key] , literal[string] : literal[string] %( identifier[self] . identifier[id_prefix] , identifier[attrs] . identifier[get] ( literal[string] )) keyword[if] identifier[self] . identifier[id_prefix] keyword[else] identifier[attrs] . identifier[get] ( literal[string] ), literal[string] : identifier[self] . identifier[audio_url] (), } identifier[self] . identifier[image_and_audio] = identifier[render_to_string] ( identifier[settings] . identifier[CAPTCHA_IMAGE_TEMPLATE] , identifier[context] ) identifier[self] . identifier[hidden_field] = identifier[render_to_string] ( identifier[settings] . identifier[CAPTCHA_HIDDEN_FIELD_TEMPLATE] , identifier[context] ) identifier[self] . identifier[text_field] = identifier[render_to_string] ( identifier[settings] . identifier[CAPTCHA_TEXT_FIELD_TEMPLATE] , identifier[context] ) keyword[return] identifier[self] . identifier[format_output] ( keyword[None] )
def _direct_render(self, name, attrs): """Render the widget the old way - using field_template or output_format.""" context = {'image': self.image_url(), 'name': name, 'key': self._key, 'id': u'%s_%s' % (self.id_prefix, attrs.get('id')) if self.id_prefix else attrs.get('id'), 'audio': self.audio_url()} self.image_and_audio = render_to_string(settings.CAPTCHA_IMAGE_TEMPLATE, context) self.hidden_field = render_to_string(settings.CAPTCHA_HIDDEN_FIELD_TEMPLATE, context) self.text_field = render_to_string(settings.CAPTCHA_TEXT_FIELD_TEMPLATE, context) return self.format_output(None)
def post(self, request, *args, **kwargs): """ Handles POST requests. """ self.init_attachment_cache() # Stores a boolean indicating if we are considering a preview self.preview = 'preview' in self.request.POST # Initializes the forms post_form_class = self.get_post_form_class() post_form = self.get_post_form(post_form_class) attachment_formset_class = self.get_attachment_formset_class() attachment_formset = self.get_attachment_formset(attachment_formset_class) poll_option_formset_class = self.get_poll_option_formset_class() poll_option_formset = self.get_poll_option_formset(poll_option_formset_class) post_form_valid = post_form.is_valid() attachment_formset_valid = ( attachment_formset.is_valid() if attachment_formset else None ) poll_option_formset_valid = ( poll_option_formset.is_valid() if poll_option_formset and len(post_form.cleaned_data['poll_question']) else None ) self.attachment_preview = self.preview if attachment_formset_valid else None self.poll_preview = self.preview if poll_option_formset_valid else None poll_options_validated = poll_option_formset_valid is not None if ( post_form_valid and attachment_formset_valid is not False and poll_option_formset_valid is not False ): return self.form_valid( post_form, attachment_formset, poll_option_formset, poll_options_validated=poll_options_validated, ) else: return self.form_invalid( post_form, attachment_formset, poll_option_formset, poll_options_validated=poll_options_validated, )
def function[post, parameter[self, request]]: constant[ Handles POST requests. ] call[name[self].init_attachment_cache, parameter[]] name[self].preview assign[=] compare[constant[preview] in name[self].request.POST] variable[post_form_class] assign[=] call[name[self].get_post_form_class, parameter[]] variable[post_form] assign[=] call[name[self].get_post_form, parameter[name[post_form_class]]] variable[attachment_formset_class] assign[=] call[name[self].get_attachment_formset_class, parameter[]] variable[attachment_formset] assign[=] call[name[self].get_attachment_formset, parameter[name[attachment_formset_class]]] variable[poll_option_formset_class] assign[=] call[name[self].get_poll_option_formset_class, parameter[]] variable[poll_option_formset] assign[=] call[name[self].get_poll_option_formset, parameter[name[poll_option_formset_class]]] variable[post_form_valid] assign[=] call[name[post_form].is_valid, parameter[]] variable[attachment_formset_valid] assign[=] <ast.IfExp object at 0x7da18f810e80> variable[poll_option_formset_valid] assign[=] <ast.IfExp object at 0x7da18f8133d0> name[self].attachment_preview assign[=] <ast.IfExp object at 0x7da20c7c9450> name[self].poll_preview assign[=] <ast.IfExp object at 0x7da20c7caa70> variable[poll_options_validated] assign[=] compare[name[poll_option_formset_valid] is_not constant[None]] if <ast.BoolOp object at 0x7da20c7ca680> begin[:] return[call[name[self].form_valid, parameter[name[post_form], name[attachment_formset], name[poll_option_formset]]]]
keyword[def] identifier[post] ( identifier[self] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[init_attachment_cache] () identifier[self] . identifier[preview] = literal[string] keyword[in] identifier[self] . identifier[request] . identifier[POST] identifier[post_form_class] = identifier[self] . identifier[get_post_form_class] () identifier[post_form] = identifier[self] . identifier[get_post_form] ( identifier[post_form_class] ) identifier[attachment_formset_class] = identifier[self] . identifier[get_attachment_formset_class] () identifier[attachment_formset] = identifier[self] . identifier[get_attachment_formset] ( identifier[attachment_formset_class] ) identifier[poll_option_formset_class] = identifier[self] . identifier[get_poll_option_formset_class] () identifier[poll_option_formset] = identifier[self] . identifier[get_poll_option_formset] ( identifier[poll_option_formset_class] ) identifier[post_form_valid] = identifier[post_form] . identifier[is_valid] () identifier[attachment_formset_valid] =( identifier[attachment_formset] . identifier[is_valid] () keyword[if] identifier[attachment_formset] keyword[else] keyword[None] ) identifier[poll_option_formset_valid] =( identifier[poll_option_formset] . identifier[is_valid] () keyword[if] identifier[poll_option_formset] keyword[and] identifier[len] ( identifier[post_form] . identifier[cleaned_data] [ literal[string] ]) keyword[else] keyword[None] ) identifier[self] . identifier[attachment_preview] = identifier[self] . identifier[preview] keyword[if] identifier[attachment_formset_valid] keyword[else] keyword[None] identifier[self] . identifier[poll_preview] = identifier[self] . 
identifier[preview] keyword[if] identifier[poll_option_formset_valid] keyword[else] keyword[None] identifier[poll_options_validated] = identifier[poll_option_formset_valid] keyword[is] keyword[not] keyword[None] keyword[if] ( identifier[post_form_valid] keyword[and] identifier[attachment_formset_valid] keyword[is] keyword[not] keyword[False] keyword[and] identifier[poll_option_formset_valid] keyword[is] keyword[not] keyword[False] ): keyword[return] identifier[self] . identifier[form_valid] ( identifier[post_form] , identifier[attachment_formset] , identifier[poll_option_formset] , identifier[poll_options_validated] = identifier[poll_options_validated] , ) keyword[else] : keyword[return] identifier[self] . identifier[form_invalid] ( identifier[post_form] , identifier[attachment_formset] , identifier[poll_option_formset] , identifier[poll_options_validated] = identifier[poll_options_validated] , )
def post(self, request, *args, **kwargs): """ Handles POST requests. """ self.init_attachment_cache() # Stores a boolean indicating if we are considering a preview self.preview = 'preview' in self.request.POST # Initializes the forms post_form_class = self.get_post_form_class() post_form = self.get_post_form(post_form_class) attachment_formset_class = self.get_attachment_formset_class() attachment_formset = self.get_attachment_formset(attachment_formset_class) poll_option_formset_class = self.get_poll_option_formset_class() poll_option_formset = self.get_poll_option_formset(poll_option_formset_class) post_form_valid = post_form.is_valid() attachment_formset_valid = attachment_formset.is_valid() if attachment_formset else None poll_option_formset_valid = poll_option_formset.is_valid() if poll_option_formset and len(post_form.cleaned_data['poll_question']) else None self.attachment_preview = self.preview if attachment_formset_valid else None self.poll_preview = self.preview if poll_option_formset_valid else None poll_options_validated = poll_option_formset_valid is not None if post_form_valid and attachment_formset_valid is not False and (poll_option_formset_valid is not False): return self.form_valid(post_form, attachment_formset, poll_option_formset, poll_options_validated=poll_options_validated) # depends on [control=['if'], data=[]] else: return self.form_invalid(post_form, attachment_formset, poll_option_formset, poll_options_validated=poll_options_validated)
def get_complexes(self): """Extract INDRA Complex Statements from the BioPAX model. This method searches for org.biopax.paxtools.model.level3.Complex objects which represent molecular complexes. It doesn't reuse BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith query since that retrieves pairs of complex members rather than the full complex. """ for obj in self.model.getObjects().toArray(): bpe = _cast_biopax_element(obj) if not _is_complex(bpe): continue ev = self._get_evidence(bpe) members = self._get_complex_members(bpe) if members is not None: if len(members) > 10: logger.debug('Skipping complex with more than 10 members.') continue complexes = _get_combinations(members) for c in complexes: self.statements.append(decode_obj(Complex(c, ev), encoding='utf-8'))
def function[get_complexes, parameter[self]]: constant[Extract INDRA Complex Statements from the BioPAX model. This method searches for org.biopax.paxtools.model.level3.Complex objects which represent molecular complexes. It doesn't reuse BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith query since that retrieves pairs of complex members rather than the full complex. ] for taget[name[obj]] in starred[call[call[name[self].model.getObjects, parameter[]].toArray, parameter[]]] begin[:] variable[bpe] assign[=] call[name[_cast_biopax_element], parameter[name[obj]]] if <ast.UnaryOp object at 0x7da18fe92170> begin[:] continue variable[ev] assign[=] call[name[self]._get_evidence, parameter[name[bpe]]] variable[members] assign[=] call[name[self]._get_complex_members, parameter[name[bpe]]] if compare[name[members] is_not constant[None]] begin[:] if compare[call[name[len], parameter[name[members]]] greater[>] constant[10]] begin[:] call[name[logger].debug, parameter[constant[Skipping complex with more than 10 members.]]] continue variable[complexes] assign[=] call[name[_get_combinations], parameter[name[members]]] for taget[name[c]] in starred[name[complexes]] begin[:] call[name[self].statements.append, parameter[call[name[decode_obj], parameter[call[name[Complex], parameter[name[c], name[ev]]]]]]]
keyword[def] identifier[get_complexes] ( identifier[self] ): literal[string] keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[model] . identifier[getObjects] (). identifier[toArray] (): identifier[bpe] = identifier[_cast_biopax_element] ( identifier[obj] ) keyword[if] keyword[not] identifier[_is_complex] ( identifier[bpe] ): keyword[continue] identifier[ev] = identifier[self] . identifier[_get_evidence] ( identifier[bpe] ) identifier[members] = identifier[self] . identifier[_get_complex_members] ( identifier[bpe] ) keyword[if] identifier[members] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[len] ( identifier[members] )> literal[int] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[continue] identifier[complexes] = identifier[_get_combinations] ( identifier[members] ) keyword[for] identifier[c] keyword[in] identifier[complexes] : identifier[self] . identifier[statements] . identifier[append] ( identifier[decode_obj] ( identifier[Complex] ( identifier[c] , identifier[ev] ), identifier[encoding] = literal[string] ))
def get_complexes(self): """Extract INDRA Complex Statements from the BioPAX model. This method searches for org.biopax.paxtools.model.level3.Complex objects which represent molecular complexes. It doesn't reuse BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith query since that retrieves pairs of complex members rather than the full complex. """ for obj in self.model.getObjects().toArray(): bpe = _cast_biopax_element(obj) if not _is_complex(bpe): continue # depends on [control=['if'], data=[]] ev = self._get_evidence(bpe) members = self._get_complex_members(bpe) if members is not None: if len(members) > 10: logger.debug('Skipping complex with more than 10 members.') continue # depends on [control=['if'], data=[]] complexes = _get_combinations(members) for c in complexes: self.statements.append(decode_obj(Complex(c, ev), encoding='utf-8')) # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=['members']] # depends on [control=['for'], data=['obj']]
def as_partition(self, **kwargs): """Return a PartitionName based on this name.""" return PartitionName(**dict(list(self.dict.items()) + list(kwargs.items())))
def function[as_partition, parameter[self]]: constant[Return a PartitionName based on this name.] return[call[name[PartitionName], parameter[]]]
keyword[def] identifier[as_partition] ( identifier[self] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[PartitionName] (** identifier[dict] ( identifier[list] ( identifier[self] . identifier[dict] . identifier[items] ())+ identifier[list] ( identifier[kwargs] . identifier[items] ())))
def as_partition(self, **kwargs): """Return a PartitionName based on this name.""" return PartitionName(**dict(list(self.dict.items()) + list(kwargs.items())))
def _get_slot(self): "Returns the next coordinates for a preview" x = y = 10 for k, p in self.previews.items(): y += p.height() + self.padding return x, y
def function[_get_slot, parameter[self]]: constant[Returns the next coordinates for a preview] variable[x] assign[=] constant[10] for taget[tuple[[<ast.Name object at 0x7da1b17ce4d0>, <ast.Name object at 0x7da1b17cddb0>]]] in starred[call[name[self].previews.items, parameter[]]] begin[:] <ast.AugAssign object at 0x7da1b17ccd60> return[tuple[[<ast.Name object at 0x7da204565030>, <ast.Name object at 0x7da204566c80>]]]
keyword[def] identifier[_get_slot] ( identifier[self] ): literal[string] identifier[x] = identifier[y] = literal[int] keyword[for] identifier[k] , identifier[p] keyword[in] identifier[self] . identifier[previews] . identifier[items] (): identifier[y] += identifier[p] . identifier[height] ()+ identifier[self] . identifier[padding] keyword[return] identifier[x] , identifier[y]
def _get_slot(self): """Returns the next coordinates for a preview""" x = y = 10 for (k, p) in self.previews.items(): y += p.height() + self.padding # depends on [control=['for'], data=[]] return (x, y)
def render(self, *args, **kwargs): """Renders the element and all his childrens.""" # args kwargs API provided for last minute content injection # self._reverse_mro_func('pre_render') pretty = kwargs.pop("pretty", False) if pretty and self._stable != "pretty": self._stable = False for arg in args: self._stable = False if isinstance(arg, dict): self.inject(arg) if kwargs: self._stable = False self.inject(kwargs) # If the tag or his contents are not changed and we already have rendered it # with the same attrs we skip all the work if self._stable and self._render: return self._render pretty_pre = pretty_inner = "" if pretty: pretty_pre = "\n" + ("\t" * self._depth) if pretty else "" pretty_inner = "\n" + ("\t" * self._depth) if len(self.childs) > 1 else "" inner = self.render_childs(pretty) if not self._void else "" # We declare the tag is stable and have an official render: tag_data = ( pretty_pre, self._get__tag(), self.render_attrs(), inner, pretty_inner, self._get__tag() )[: 6 - [0, 3][self._void]] self._render = self._template % tag_data self._stable = "pretty" if pretty else True return self._render
def function[render, parameter[self]]: constant[Renders the element and all his childrens.] variable[pretty] assign[=] call[name[kwargs].pop, parameter[constant[pretty], constant[False]]] if <ast.BoolOp object at 0x7da1b0f3a4d0> begin[:] name[self]._stable assign[=] constant[False] for taget[name[arg]] in starred[name[args]] begin[:] name[self]._stable assign[=] constant[False] if call[name[isinstance], parameter[name[arg], name[dict]]] begin[:] call[name[self].inject, parameter[name[arg]]] if name[kwargs] begin[:] name[self]._stable assign[=] constant[False] call[name[self].inject, parameter[name[kwargs]]] if <ast.BoolOp object at 0x7da1b0f3ac20> begin[:] return[name[self]._render] variable[pretty_pre] assign[=] constant[] if name[pretty] begin[:] variable[pretty_pre] assign[=] <ast.IfExp object at 0x7da1b0f3a980> variable[pretty_inner] assign[=] <ast.IfExp object at 0x7da1b0e72da0> variable[inner] assign[=] <ast.IfExp object at 0x7da1b0e73e20> variable[tag_data] assign[=] call[tuple[[<ast.Name object at 0x7da1b0e734f0>, <ast.Call object at 0x7da1b0e72f50>, <ast.Call object at 0x7da1b0e73d90>, <ast.Name object at 0x7da1b0e728f0>, <ast.Name object at 0x7da1b0e72c20>, <ast.Call object at 0x7da1b0e736d0>]]][<ast.Slice object at 0x7da1b0e73790>] name[self]._render assign[=] binary_operation[name[self]._template <ast.Mod object at 0x7da2590d6920> name[tag_data]] name[self]._stable assign[=] <ast.IfExp object at 0x7da1b0e72b30> return[name[self]._render]
keyword[def] identifier[render] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[pretty] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) keyword[if] identifier[pretty] keyword[and] identifier[self] . identifier[_stable] != literal[string] : identifier[self] . identifier[_stable] = keyword[False] keyword[for] identifier[arg] keyword[in] identifier[args] : identifier[self] . identifier[_stable] = keyword[False] keyword[if] identifier[isinstance] ( identifier[arg] , identifier[dict] ): identifier[self] . identifier[inject] ( identifier[arg] ) keyword[if] identifier[kwargs] : identifier[self] . identifier[_stable] = keyword[False] identifier[self] . identifier[inject] ( identifier[kwargs] ) keyword[if] identifier[self] . identifier[_stable] keyword[and] identifier[self] . identifier[_render] : keyword[return] identifier[self] . identifier[_render] identifier[pretty_pre] = identifier[pretty_inner] = literal[string] keyword[if] identifier[pretty] : identifier[pretty_pre] = literal[string] +( literal[string] * identifier[self] . identifier[_depth] ) keyword[if] identifier[pretty] keyword[else] literal[string] identifier[pretty_inner] = literal[string] +( literal[string] * identifier[self] . identifier[_depth] ) keyword[if] identifier[len] ( identifier[self] . identifier[childs] )> literal[int] keyword[else] literal[string] identifier[inner] = identifier[self] . identifier[render_childs] ( identifier[pretty] ) keyword[if] keyword[not] identifier[self] . identifier[_void] keyword[else] literal[string] identifier[tag_data] =( identifier[pretty_pre] , identifier[self] . identifier[_get__tag] (), identifier[self] . identifier[render_attrs] (), identifier[inner] , identifier[pretty_inner] , identifier[self] . identifier[_get__tag] () )[: literal[int] -[ literal[int] , literal[int] ][ identifier[self] . identifier[_void] ]] identifier[self] . identifier[_render] = identifier[self] . 
identifier[_template] % identifier[tag_data] identifier[self] . identifier[_stable] = literal[string] keyword[if] identifier[pretty] keyword[else] keyword[True] keyword[return] identifier[self] . identifier[_render]
def render(self, *args, **kwargs): """Renders the element and all his childrens.""" # args kwargs API provided for last minute content injection # self._reverse_mro_func('pre_render') pretty = kwargs.pop('pretty', False) if pretty and self._stable != 'pretty': self._stable = False # depends on [control=['if'], data=[]] for arg in args: self._stable = False if isinstance(arg, dict): self.inject(arg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arg']] if kwargs: self._stable = False self.inject(kwargs) # depends on [control=['if'], data=[]] # If the tag or his contents are not changed and we already have rendered it # with the same attrs we skip all the work if self._stable and self._render: return self._render # depends on [control=['if'], data=[]] pretty_pre = pretty_inner = '' if pretty: pretty_pre = '\n' + '\t' * self._depth if pretty else '' pretty_inner = '\n' + '\t' * self._depth if len(self.childs) > 1 else '' # depends on [control=['if'], data=[]] inner = self.render_childs(pretty) if not self._void else '' # We declare the tag is stable and have an official render: tag_data = (pretty_pre, self._get__tag(), self.render_attrs(), inner, pretty_inner, self._get__tag())[:6 - [0, 3][self._void]] self._render = self._template % tag_data self._stable = 'pretty' if pretty else True return self._render
def _compute_mean(self, C, rup, rjb): """ Compute mean value according to equation 30, page 1021. """ mean = (C['c1'] + self._compute_magnitude_term(C, rup) + self._compute_distance_term(C, rup, rjb)) return mean
def function[_compute_mean, parameter[self, C, rup, rjb]]: constant[ Compute mean value according to equation 30, page 1021. ] variable[mean] assign[=] binary_operation[binary_operation[call[name[C]][constant[c1]] + call[name[self]._compute_magnitude_term, parameter[name[C], name[rup]]]] + call[name[self]._compute_distance_term, parameter[name[C], name[rup], name[rjb]]]] return[name[mean]]
keyword[def] identifier[_compute_mean] ( identifier[self] , identifier[C] , identifier[rup] , identifier[rjb] ): literal[string] identifier[mean] =( identifier[C] [ literal[string] ]+ identifier[self] . identifier[_compute_magnitude_term] ( identifier[C] , identifier[rup] )+ identifier[self] . identifier[_compute_distance_term] ( identifier[C] , identifier[rup] , identifier[rjb] )) keyword[return] identifier[mean]
def _compute_mean(self, C, rup, rjb): """ Compute mean value according to equation 30, page 1021. """ mean = C['c1'] + self._compute_magnitude_term(C, rup) + self._compute_distance_term(C, rup, rjb) return mean
def import_locations(self, osm_file): """Import OSM data files. ``import_locations()`` returns a list of ``Node`` and ``Way`` objects. It expects data files conforming to the `OpenStreetMap 0.5 DTD`_, which is XML such as:: <?xml version="1.0" encoding="UTF-8"?> <osm version="0.5" generator="upoints/0.9.0"> <node id="0" lat="52.015749" lon="-0.221765" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:11+00:00" /> <node id="1" lat="52.015761" lon="-0.221767" visible="true" timestamp="2008-01-25T12:53:00+00:00"> <tag k="created_by" v="hand" /> <tag k="highway" v="crossing" /> </node> <node id="2" lat="52.015754" lon="-0.221766" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:30+00:00"> <tag k="amenity" v="pub" /> </node> <way id="0" visible="true" timestamp="2008-01-25T13:00:00+0000"> <nd ref="0" /> <nd ref="1" /> <nd ref="2" /> <tag k="ref" v="My Way" /> <tag k="highway" v="primary" /> </way> </osm> The reader uses the :mod:`ElementTree` module, so should be very fast when importing data. The above file processed by ``import_locations()`` will return the following `Osm` object:: Osm([ Node(0, 52.015749, -0.221765, True, "jnrowe", utils.Timestamp(2008, 1, 25, 12, 52, 11), None), Node(1, 52.015761, -0.221767, True, utils.Timestamp(2008, 1, 25, 12, 53), None, {"created_by": "hand", "highway": "crossing"}), Node(2, 52.015754, -0.221766, True, "jnrowe", utils.Timestamp(2008, 1, 25, 12, 52, 30), {"amenity": "pub"}), Way(0, [0, 1, 2], True, None, utils.Timestamp(2008, 1, 25, 13, 00), {"ref": "My Way", "highway": "primary"})], generator="upoints/0.9.0") Args: osm_file (iter): OpenStreetMap data to read Returns: Osm: Nodes and ways from the data .. 
_OpenStreetMap 0.5 DTD: http://wiki.openstreetmap.org/wiki/OSM_Protocol_Version_0.5/DTD """ self._osm_file = osm_file data = utils.prepare_xml_read(osm_file, objectify=True) # This would be a lot simpler if OSM exports defined a namespace if not data.tag == 'osm': raise ValueError("Root element %r is not `osm'" % data.tag) self.version = data.get('version') if not self.version: raise ValueError('No specified OSM version') elif not self.version == '0.5': raise ValueError('Unsupported OSM version %r' % data) self.generator = data.get('generator') for elem in data.getchildren(): if elem.tag == 'node': self.append(Node.parse_elem(elem)) elif elem.tag == 'way': self.append(Way.parse_elem(elem))
def function[import_locations, parameter[self, osm_file]]: constant[Import OSM data files. ``import_locations()`` returns a list of ``Node`` and ``Way`` objects. It expects data files conforming to the `OpenStreetMap 0.5 DTD`_, which is XML such as:: <?xml version="1.0" encoding="UTF-8"?> <osm version="0.5" generator="upoints/0.9.0"> <node id="0" lat="52.015749" lon="-0.221765" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:11+00:00" /> <node id="1" lat="52.015761" lon="-0.221767" visible="true" timestamp="2008-01-25T12:53:00+00:00"> <tag k="created_by" v="hand" /> <tag k="highway" v="crossing" /> </node> <node id="2" lat="52.015754" lon="-0.221766" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:30+00:00"> <tag k="amenity" v="pub" /> </node> <way id="0" visible="true" timestamp="2008-01-25T13:00:00+0000"> <nd ref="0" /> <nd ref="1" /> <nd ref="2" /> <tag k="ref" v="My Way" /> <tag k="highway" v="primary" /> </way> </osm> The reader uses the :mod:`ElementTree` module, so should be very fast when importing data. The above file processed by ``import_locations()`` will return the following `Osm` object:: Osm([ Node(0, 52.015749, -0.221765, True, "jnrowe", utils.Timestamp(2008, 1, 25, 12, 52, 11), None), Node(1, 52.015761, -0.221767, True, utils.Timestamp(2008, 1, 25, 12, 53), None, {"created_by": "hand", "highway": "crossing"}), Node(2, 52.015754, -0.221766, True, "jnrowe", utils.Timestamp(2008, 1, 25, 12, 52, 30), {"amenity": "pub"}), Way(0, [0, 1, 2], True, None, utils.Timestamp(2008, 1, 25, 13, 00), {"ref": "My Way", "highway": "primary"})], generator="upoints/0.9.0") Args: osm_file (iter): OpenStreetMap data to read Returns: Osm: Nodes and ways from the data .. 
_OpenStreetMap 0.5 DTD: http://wiki.openstreetmap.org/wiki/OSM_Protocol_Version_0.5/DTD ] name[self]._osm_file assign[=] name[osm_file] variable[data] assign[=] call[name[utils].prepare_xml_read, parameter[name[osm_file]]] if <ast.UnaryOp object at 0x7da18f00e620> begin[:] <ast.Raise object at 0x7da18f00ebf0> name[self].version assign[=] call[name[data].get, parameter[constant[version]]] if <ast.UnaryOp object at 0x7da18f00ed10> begin[:] <ast.Raise object at 0x7da18f00f850> name[self].generator assign[=] call[name[data].get, parameter[constant[generator]]] for taget[name[elem]] in starred[call[name[data].getchildren, parameter[]]] begin[:] if compare[name[elem].tag equal[==] constant[node]] begin[:] call[name[self].append, parameter[call[name[Node].parse_elem, parameter[name[elem]]]]]
keyword[def] identifier[import_locations] ( identifier[self] , identifier[osm_file] ): literal[string] identifier[self] . identifier[_osm_file] = identifier[osm_file] identifier[data] = identifier[utils] . identifier[prepare_xml_read] ( identifier[osm_file] , identifier[objectify] = keyword[True] ) keyword[if] keyword[not] identifier[data] . identifier[tag] == literal[string] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[data] . identifier[tag] ) identifier[self] . identifier[version] = identifier[data] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[version] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[elif] keyword[not] identifier[self] . identifier[version] == literal[string] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[data] ) identifier[self] . identifier[generator] = identifier[data] . identifier[get] ( literal[string] ) keyword[for] identifier[elem] keyword[in] identifier[data] . identifier[getchildren] (): keyword[if] identifier[elem] . identifier[tag] == literal[string] : identifier[self] . identifier[append] ( identifier[Node] . identifier[parse_elem] ( identifier[elem] )) keyword[elif] identifier[elem] . identifier[tag] == literal[string] : identifier[self] . identifier[append] ( identifier[Way] . identifier[parse_elem] ( identifier[elem] ))
def import_locations(self, osm_file): """Import OSM data files. ``import_locations()`` returns a list of ``Node`` and ``Way`` objects. It expects data files conforming to the `OpenStreetMap 0.5 DTD`_, which is XML such as:: <?xml version="1.0" encoding="UTF-8"?> <osm version="0.5" generator="upoints/0.9.0"> <node id="0" lat="52.015749" lon="-0.221765" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:11+00:00" /> <node id="1" lat="52.015761" lon="-0.221767" visible="true" timestamp="2008-01-25T12:53:00+00:00"> <tag k="created_by" v="hand" /> <tag k="highway" v="crossing" /> </node> <node id="2" lat="52.015754" lon="-0.221766" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:30+00:00"> <tag k="amenity" v="pub" /> </node> <way id="0" visible="true" timestamp="2008-01-25T13:00:00+0000"> <nd ref="0" /> <nd ref="1" /> <nd ref="2" /> <tag k="ref" v="My Way" /> <tag k="highway" v="primary" /> </way> </osm> The reader uses the :mod:`ElementTree` module, so should be very fast when importing data. The above file processed by ``import_locations()`` will return the following `Osm` object:: Osm([ Node(0, 52.015749, -0.221765, True, "jnrowe", utils.Timestamp(2008, 1, 25, 12, 52, 11), None), Node(1, 52.015761, -0.221767, True, utils.Timestamp(2008, 1, 25, 12, 53), None, {"created_by": "hand", "highway": "crossing"}), Node(2, 52.015754, -0.221766, True, "jnrowe", utils.Timestamp(2008, 1, 25, 12, 52, 30), {"amenity": "pub"}), Way(0, [0, 1, 2], True, None, utils.Timestamp(2008, 1, 25, 13, 00), {"ref": "My Way", "highway": "primary"})], generator="upoints/0.9.0") Args: osm_file (iter): OpenStreetMap data to read Returns: Osm: Nodes and ways from the data .. 
_OpenStreetMap 0.5 DTD: http://wiki.openstreetmap.org/wiki/OSM_Protocol_Version_0.5/DTD """ self._osm_file = osm_file data = utils.prepare_xml_read(osm_file, objectify=True) # This would be a lot simpler if OSM exports defined a namespace if not data.tag == 'osm': raise ValueError("Root element %r is not `osm'" % data.tag) # depends on [control=['if'], data=[]] self.version = data.get('version') if not self.version: raise ValueError('No specified OSM version') # depends on [control=['if'], data=[]] elif not self.version == '0.5': raise ValueError('Unsupported OSM version %r' % data) # depends on [control=['if'], data=[]] self.generator = data.get('generator') for elem in data.getchildren(): if elem.tag == 'node': self.append(Node.parse_elem(elem)) # depends on [control=['if'], data=[]] elif elem.tag == 'way': self.append(Way.parse_elem(elem)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['elem']]
def get_HDX_code_from_location(location, locations=None, configuration=None): # type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str] """Get HDX code for location Args: location (str): Location for which to get HDX code locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[str]: HDX code or None """ if locations is None: locations = Locations.validlocations(configuration) locationupper = location.upper() for locdict in locations: locationcode = locdict['name'].upper() if locationupper == locationcode: return locationcode for locdict in locations: if locationupper == locdict['title'].upper(): return locdict['name'].upper() return None
def function[get_HDX_code_from_location, parameter[location, locations, configuration]]: constant[Get HDX code for location Args: location (str): Location for which to get HDX code locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[str]: HDX code or None ] if compare[name[locations] is constant[None]] begin[:] variable[locations] assign[=] call[name[Locations].validlocations, parameter[name[configuration]]] variable[locationupper] assign[=] call[name[location].upper, parameter[]] for taget[name[locdict]] in starred[name[locations]] begin[:] variable[locationcode] assign[=] call[call[name[locdict]][constant[name]].upper, parameter[]] if compare[name[locationupper] equal[==] name[locationcode]] begin[:] return[name[locationcode]] for taget[name[locdict]] in starred[name[locations]] begin[:] if compare[name[locationupper] equal[==] call[call[name[locdict]][constant[title]].upper, parameter[]]] begin[:] return[call[call[name[locdict]][constant[name]].upper, parameter[]]] return[constant[None]]
keyword[def] identifier[get_HDX_code_from_location] ( identifier[location] , identifier[locations] = keyword[None] , identifier[configuration] = keyword[None] ): literal[string] keyword[if] identifier[locations] keyword[is] keyword[None] : identifier[locations] = identifier[Locations] . identifier[validlocations] ( identifier[configuration] ) identifier[locationupper] = identifier[location] . identifier[upper] () keyword[for] identifier[locdict] keyword[in] identifier[locations] : identifier[locationcode] = identifier[locdict] [ literal[string] ]. identifier[upper] () keyword[if] identifier[locationupper] == identifier[locationcode] : keyword[return] identifier[locationcode] keyword[for] identifier[locdict] keyword[in] identifier[locations] : keyword[if] identifier[locationupper] == identifier[locdict] [ literal[string] ]. identifier[upper] (): keyword[return] identifier[locdict] [ literal[string] ]. identifier[upper] () keyword[return] keyword[None]
def get_HDX_code_from_location(location, locations=None, configuration=None): # type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str] 'Get HDX code for location\n\n Args:\n location (str): Location for which to get HDX code\n locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.\n configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\n Returns:\n Optional[str]: HDX code or None\n ' if locations is None: locations = Locations.validlocations(configuration) # depends on [control=['if'], data=['locations']] locationupper = location.upper() for locdict in locations: locationcode = locdict['name'].upper() if locationupper == locationcode: return locationcode # depends on [control=['if'], data=['locationcode']] # depends on [control=['for'], data=['locdict']] for locdict in locations: if locationupper == locdict['title'].upper(): return locdict['name'].upper() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['locdict']] return None
def download_image(url: str = '', save_path: str = '', unverified_ctx: bool = False) -> Union[None, str]: """Download image and save in current directory on local machine. :param url: URL to image. :param save_path: Saving path. :param unverified_ctx: Create unverified context. :return: Path to downloaded image. :rtype: str or None """ if unverified_ctx: ssl._create_default_https_context = ssl._create_unverified_context if url: image_name = url.rsplit('/')[-1] splitted_name = image_name.rsplit('.') if len(splitted_name) < 2: image_name = '{}.jpg'.format(uuid4()) else: image_name = '{}.{}'.format(uuid4(), splitted_name[-1]) full_image_path = path.join(save_path, image_name) request.urlretrieve(url, full_image_path) return full_image_path return None
def function[download_image, parameter[url, save_path, unverified_ctx]]: constant[Download image and save in current directory on local machine. :param url: URL to image. :param save_path: Saving path. :param unverified_ctx: Create unverified context. :return: Path to downloaded image. :rtype: str or None ] if name[unverified_ctx] begin[:] name[ssl]._create_default_https_context assign[=] name[ssl]._create_unverified_context if name[url] begin[:] variable[image_name] assign[=] call[call[name[url].rsplit, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da20e957b20>] variable[splitted_name] assign[=] call[name[image_name].rsplit, parameter[constant[.]]] if compare[call[name[len], parameter[name[splitted_name]]] less[<] constant[2]] begin[:] variable[image_name] assign[=] call[constant[{}.jpg].format, parameter[call[name[uuid4], parameter[]]]] variable[full_image_path] assign[=] call[name[path].join, parameter[name[save_path], name[image_name]]] call[name[request].urlretrieve, parameter[name[url], name[full_image_path]]] return[name[full_image_path]] return[constant[None]]
keyword[def] identifier[download_image] ( identifier[url] : identifier[str] = literal[string] , identifier[save_path] : identifier[str] = literal[string] , identifier[unverified_ctx] : identifier[bool] = keyword[False] )-> identifier[Union] [ keyword[None] , identifier[str] ]: literal[string] keyword[if] identifier[unverified_ctx] : identifier[ssl] . identifier[_create_default_https_context] = identifier[ssl] . identifier[_create_unverified_context] keyword[if] identifier[url] : identifier[image_name] = identifier[url] . identifier[rsplit] ( literal[string] )[- literal[int] ] identifier[splitted_name] = identifier[image_name] . identifier[rsplit] ( literal[string] ) keyword[if] identifier[len] ( identifier[splitted_name] )< literal[int] : identifier[image_name] = literal[string] . identifier[format] ( identifier[uuid4] ()) keyword[else] : identifier[image_name] = literal[string] . identifier[format] ( identifier[uuid4] (), identifier[splitted_name] [- literal[int] ]) identifier[full_image_path] = identifier[path] . identifier[join] ( identifier[save_path] , identifier[image_name] ) identifier[request] . identifier[urlretrieve] ( identifier[url] , identifier[full_image_path] ) keyword[return] identifier[full_image_path] keyword[return] keyword[None]
def download_image(url: str='', save_path: str='', unverified_ctx: bool=False) -> Union[None, str]: """Download image and save in current directory on local machine. :param url: URL to image. :param save_path: Saving path. :param unverified_ctx: Create unverified context. :return: Path to downloaded image. :rtype: str or None """ if unverified_ctx: ssl._create_default_https_context = ssl._create_unverified_context # depends on [control=['if'], data=[]] if url: image_name = url.rsplit('/')[-1] splitted_name = image_name.rsplit('.') if len(splitted_name) < 2: image_name = '{}.jpg'.format(uuid4()) # depends on [control=['if'], data=[]] else: image_name = '{}.{}'.format(uuid4(), splitted_name[-1]) full_image_path = path.join(save_path, image_name) request.urlretrieve(url, full_image_path) return full_image_path # depends on [control=['if'], data=[]] return None
def signature_split(signatures: bytes, pos: int) -> Tuple[int, int, int]: """ :param signatures: signatures in form of {bytes32 r}{bytes32 s}{uint8 v} :param pos: position of the signature :return: Tuple with v, r, s """ signature_pos = 65 * pos v = signatures[64 + signature_pos] r = int.from_bytes(signatures[signature_pos:32 + signature_pos], 'big') s = int.from_bytes(signatures[32 + signature_pos:64 + signature_pos], 'big') return v, r, s
def function[signature_split, parameter[signatures, pos]]: constant[ :param signatures: signatures in form of {bytes32 r}{bytes32 s}{uint8 v} :param pos: position of the signature :return: Tuple with v, r, s ] variable[signature_pos] assign[=] binary_operation[constant[65] * name[pos]] variable[v] assign[=] call[name[signatures]][binary_operation[constant[64] + name[signature_pos]]] variable[r] assign[=] call[name[int].from_bytes, parameter[call[name[signatures]][<ast.Slice object at 0x7da18dc070a0>], constant[big]]] variable[s] assign[=] call[name[int].from_bytes, parameter[call[name[signatures]][<ast.Slice object at 0x7da18dc06740>], constant[big]]] return[tuple[[<ast.Name object at 0x7da18dc074f0>, <ast.Name object at 0x7da18dc06560>, <ast.Name object at 0x7da18dc07af0>]]]
keyword[def] identifier[signature_split] ( identifier[signatures] : identifier[bytes] , identifier[pos] : identifier[int] )-> identifier[Tuple] [ identifier[int] , identifier[int] , identifier[int] ]: literal[string] identifier[signature_pos] = literal[int] * identifier[pos] identifier[v] = identifier[signatures] [ literal[int] + identifier[signature_pos] ] identifier[r] = identifier[int] . identifier[from_bytes] ( identifier[signatures] [ identifier[signature_pos] : literal[int] + identifier[signature_pos] ], literal[string] ) identifier[s] = identifier[int] . identifier[from_bytes] ( identifier[signatures] [ literal[int] + identifier[signature_pos] : literal[int] + identifier[signature_pos] ], literal[string] ) keyword[return] identifier[v] , identifier[r] , identifier[s]
def signature_split(signatures: bytes, pos: int) -> Tuple[int, int, int]: """ :param signatures: signatures in form of {bytes32 r}{bytes32 s}{uint8 v} :param pos: position of the signature :return: Tuple with v, r, s """ signature_pos = 65 * pos v = signatures[64 + signature_pos] r = int.from_bytes(signatures[signature_pos:32 + signature_pos], 'big') s = int.from_bytes(signatures[32 + signature_pos:64 + signature_pos], 'big') return (v, r, s)
def roc_auc(logits, labels, weights_fn=None): """Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights """ del weights_fn with tf.variable_scope("roc_auc", values=[logits, labels]): predictions = tf.argmax(logits, axis=-1) _, auc = tf.metrics.auc(labels, predictions, curve="ROC") return auc, tf.constant(1.0)
def function[roc_auc, parameter[logits, labels, weights_fn]]: constant[Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights ] <ast.Delete object at 0x7da20c6e70d0> with call[name[tf].variable_scope, parameter[constant[roc_auc]]] begin[:] variable[predictions] assign[=] call[name[tf].argmax, parameter[name[logits]]] <ast.Tuple object at 0x7da20c6e45e0> assign[=] call[name[tf].metrics.auc, parameter[name[labels], name[predictions]]] return[tuple[[<ast.Name object at 0x7da20c6e7640>, <ast.Call object at 0x7da20c6e55d0>]]]
keyword[def] identifier[roc_auc] ( identifier[logits] , identifier[labels] , identifier[weights_fn] = keyword[None] ): literal[string] keyword[del] identifier[weights_fn] keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] , identifier[values] =[ identifier[logits] , identifier[labels] ]): identifier[predictions] = identifier[tf] . identifier[argmax] ( identifier[logits] , identifier[axis] =- literal[int] ) identifier[_] , identifier[auc] = identifier[tf] . identifier[metrics] . identifier[auc] ( identifier[labels] , identifier[predictions] , identifier[curve] = literal[string] ) keyword[return] identifier[auc] , identifier[tf] . identifier[constant] ( literal[int] )
def roc_auc(logits, labels, weights_fn=None): """Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights """ del weights_fn with tf.variable_scope('roc_auc', values=[logits, labels]): predictions = tf.argmax(logits, axis=-1) (_, auc) = tf.metrics.auc(labels, predictions, curve='ROC') return (auc, tf.constant(1.0)) # depends on [control=['with'], data=[]]