code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def _read_n(self, l, ret): """ Reads 'N' mask from 1 bit file to convert bases to 'N'. In the 2 bit file, 'N' or any other invalid base is written as 'A'. Therefore the 'N' mask file is required to correctly identify where invalid bases are. Parameters ---------- l : tuple location ret : list List of bases which will be modified in place. """ file = os.path.join(self.dir, l.chr + ".n.1bit") if not os.path.exists(file): return if file != self.__n_file: print('Caching {}...'.format(file)) f = open(file, 'rb') self.__n_data = f.read() f.close() self.__n_file = file d = DNA2Bit._read1bit(self.__n_data, l, offset=True) for i in range(0, len(ret)): if d[i] == 1: ret[i] = DNA_N_UC
def function[_read_n, parameter[self, l, ret]]: constant[ Reads 'N' mask from 1 bit file to convert bases to 'N'. In the 2 bit file, 'N' or any other invalid base is written as 'A'. Therefore the 'N' mask file is required to correctly identify where invalid bases are. Parameters ---------- l : tuple location ret : list List of bases which will be modified in place. ] variable[file] assign[=] call[name[os].path.join, parameter[name[self].dir, binary_operation[name[l].chr + constant[.n.1bit]]]] if <ast.UnaryOp object at 0x7da18f00da20> begin[:] return[None] if compare[name[file] not_equal[!=] name[self].__n_file] begin[:] call[name[print], parameter[call[constant[Caching {}...].format, parameter[name[file]]]]] variable[f] assign[=] call[name[open], parameter[name[file], constant[rb]]] name[self].__n_data assign[=] call[name[f].read, parameter[]] call[name[f].close, parameter[]] name[self].__n_file assign[=] name[file] variable[d] assign[=] call[name[DNA2Bit]._read1bit, parameter[name[self].__n_data, name[l]]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[ret]]]]]] begin[:] if compare[call[name[d]][name[i]] equal[==] constant[1]] begin[:] call[name[ret]][name[i]] assign[=] name[DNA_N_UC]
keyword[def] identifier[_read_n] ( identifier[self] , identifier[l] , identifier[ret] ): literal[string] identifier[file] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[dir] , identifier[l] . identifier[chr] + literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[file] ): keyword[return] keyword[if] identifier[file] != identifier[self] . identifier[__n_file] : identifier[print] ( literal[string] . identifier[format] ( identifier[file] )) identifier[f] = identifier[open] ( identifier[file] , literal[string] ) identifier[self] . identifier[__n_data] = identifier[f] . identifier[read] () identifier[f] . identifier[close] () identifier[self] . identifier[__n_file] = identifier[file] identifier[d] = identifier[DNA2Bit] . identifier[_read1bit] ( identifier[self] . identifier[__n_data] , identifier[l] , identifier[offset] = keyword[True] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[ret] )): keyword[if] identifier[d] [ identifier[i] ]== literal[int] : identifier[ret] [ identifier[i] ]= identifier[DNA_N_UC]
def _read_n(self, l, ret): """ Reads 'N' mask from 1 bit file to convert bases to 'N'. In the 2 bit file, 'N' or any other invalid base is written as 'A'. Therefore the 'N' mask file is required to correctly identify where invalid bases are. Parameters ---------- l : tuple location ret : list List of bases which will be modified in place. """ file = os.path.join(self.dir, l.chr + '.n.1bit') if not os.path.exists(file): return # depends on [control=['if'], data=[]] if file != self.__n_file: print('Caching {}...'.format(file)) f = open(file, 'rb') self.__n_data = f.read() f.close() self.__n_file = file # depends on [control=['if'], data=['file']] d = DNA2Bit._read1bit(self.__n_data, l, offset=True) for i in range(0, len(ret)): if d[i] == 1: ret[i] = DNA_N_UC # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
def get_comment_lesson_info(self): # 获取课程序列 """ 获取教学评估内所有需要课程 :return: 返回所以有需要进行教学评估的课程 :rtype: list """ echo = self._echo response = self._post('http://bkjws.sdu.edu.cn/b/pg/xs/list', data=self._aodata(echo, ['kch', 'kcm', 'jsm', 'function', 'function']), ) if self._check_response(response, echo=echo): return response['object']['aaData'] else: self._unexpected(response)
def function[get_comment_lesson_info, parameter[self]]: constant[ 获取教学评估内所有需要课程 :return: 返回所以有需要进行教学评估的课程 :rtype: list ] variable[echo] assign[=] name[self]._echo variable[response] assign[=] call[name[self]._post, parameter[constant[http://bkjws.sdu.edu.cn/b/pg/xs/list]]] if call[name[self]._check_response, parameter[name[response]]] begin[:] return[call[call[name[response]][constant[object]]][constant[aaData]]]
keyword[def] identifier[get_comment_lesson_info] ( identifier[self] ): literal[string] identifier[echo] = identifier[self] . identifier[_echo] identifier[response] = identifier[self] . identifier[_post] ( literal[string] , identifier[data] = identifier[self] . identifier[_aodata] ( identifier[echo] ,[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]),) keyword[if] identifier[self] . identifier[_check_response] ( identifier[response] , identifier[echo] = identifier[echo] ): keyword[return] identifier[response] [ literal[string] ][ literal[string] ] keyword[else] : identifier[self] . identifier[_unexpected] ( identifier[response] )
def get_comment_lesson_info(self): # 获取课程序列 '\n 获取教学评估内所有需要课程\n\n :return: 返回所以有需要进行教学评估的课程\n :rtype: list\n ' echo = self._echo response = self._post('http://bkjws.sdu.edu.cn/b/pg/xs/list', data=self._aodata(echo, ['kch', 'kcm', 'jsm', 'function', 'function'])) if self._check_response(response, echo=echo): return response['object']['aaData'] # depends on [control=['if'], data=[]] else: self._unexpected(response)
def address_reencode(address, blockchain='bitcoin', **blockchain_opts): """ Reencode an address """ if blockchain == 'bitcoin': return btc_address_reencode(address, **blockchain_opts) else: raise ValueError("Unknown blockchain '{}'".format(blockchain))
def function[address_reencode, parameter[address, blockchain]]: constant[ Reencode an address ] if compare[name[blockchain] equal[==] constant[bitcoin]] begin[:] return[call[name[btc_address_reencode], parameter[name[address]]]]
keyword[def] identifier[address_reencode] ( identifier[address] , identifier[blockchain] = literal[string] ,** identifier[blockchain_opts] ): literal[string] keyword[if] identifier[blockchain] == literal[string] : keyword[return] identifier[btc_address_reencode] ( identifier[address] ,** identifier[blockchain_opts] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[blockchain] ))
def address_reencode(address, blockchain='bitcoin', **blockchain_opts): """ Reencode an address """ if blockchain == 'bitcoin': return btc_address_reencode(address, **blockchain_opts) # depends on [control=['if'], data=[]] else: raise ValueError("Unknown blockchain '{}'".format(blockchain))
def _pop_letters(char_list): """Pop consecutive letters from the front of a list and return them Pops any and all consecutive letters from the start of the provided character list and returns them as a list of characters. Operates on (and possibly alters) the passed list :param list char_list: a list of characters :return: a list of characters :rtype: list """ logger.debug('_pop_letters(%s)', char_list) letters = [] while len(char_list) != 0 and char_list[0].isalpha(): letters.append(char_list.pop(0)) logger.debug('got letters: %s', letters) logger.debug('updated char list: %s', char_list) return letters
def function[_pop_letters, parameter[char_list]]: constant[Pop consecutive letters from the front of a list and return them Pops any and all consecutive letters from the start of the provided character list and returns them as a list of characters. Operates on (and possibly alters) the passed list :param list char_list: a list of characters :return: a list of characters :rtype: list ] call[name[logger].debug, parameter[constant[_pop_letters(%s)], name[char_list]]] variable[letters] assign[=] list[[]] while <ast.BoolOp object at 0x7da1b23805e0> begin[:] call[name[letters].append, parameter[call[name[char_list].pop, parameter[constant[0]]]]] call[name[logger].debug, parameter[constant[got letters: %s], name[letters]]] call[name[logger].debug, parameter[constant[updated char list: %s], name[char_list]]] return[name[letters]]
keyword[def] identifier[_pop_letters] ( identifier[char_list] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] , identifier[char_list] ) identifier[letters] =[] keyword[while] identifier[len] ( identifier[char_list] )!= literal[int] keyword[and] identifier[char_list] [ literal[int] ]. identifier[isalpha] (): identifier[letters] . identifier[append] ( identifier[char_list] . identifier[pop] ( literal[int] )) identifier[logger] . identifier[debug] ( literal[string] , identifier[letters] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[char_list] ) keyword[return] identifier[letters]
def _pop_letters(char_list): """Pop consecutive letters from the front of a list and return them Pops any and all consecutive letters from the start of the provided character list and returns them as a list of characters. Operates on (and possibly alters) the passed list :param list char_list: a list of characters :return: a list of characters :rtype: list """ logger.debug('_pop_letters(%s)', char_list) letters = [] while len(char_list) != 0 and char_list[0].isalpha(): letters.append(char_list.pop(0)) # depends on [control=['while'], data=[]] logger.debug('got letters: %s', letters) logger.debug('updated char list: %s', char_list) return letters
async def parse_prod_staff_results(soup): """ Parse a page of producer or staff results :param soup: The BS4 class object :return: A list of dictionaries containing a name and nationality. """ soup = soup.find_all('li') producers = [] for item in soup: producers.append({'nationality': item.abbr.get('title'), 'name': item.a.string}) return producers
<ast.AsyncFunctionDef object at 0x7da18bc70b80>
keyword[async] keyword[def] identifier[parse_prod_staff_results] ( identifier[soup] ): literal[string] identifier[soup] = identifier[soup] . identifier[find_all] ( literal[string] ) identifier[producers] =[] keyword[for] identifier[item] keyword[in] identifier[soup] : identifier[producers] . identifier[append] ({ literal[string] : identifier[item] . identifier[abbr] . identifier[get] ( literal[string] ), literal[string] : identifier[item] . identifier[a] . identifier[string] }) keyword[return] identifier[producers]
async def parse_prod_staff_results(soup): """ Parse a page of producer or staff results :param soup: The BS4 class object :return: A list of dictionaries containing a name and nationality. """ soup = soup.find_all('li') producers = [] for item in soup: producers.append({'nationality': item.abbr.get('title'), 'name': item.a.string}) # depends on [control=['for'], data=['item']] return producers
def from_dict(cls, d): """ Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object created using the as_dict method. :param d: dict representation of the SimplestChemenvStrategy object :return: StructureEnvironments object """ return cls(distance_cutoff=d["distance_cutoff"], angle_cutoff=d["angle_cutoff"], additional_condition=d["additional_condition"], continuous_symmetry_measure_cutoff=d["continuous_symmetry_measure_cutoff"], symmetry_measure_type=d["symmetry_measure_type"])
def function[from_dict, parameter[cls, d]]: constant[ Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object created using the as_dict method. :param d: dict representation of the SimplestChemenvStrategy object :return: StructureEnvironments object ] return[call[name[cls], parameter[]]]
keyword[def] identifier[from_dict] ( identifier[cls] , identifier[d] ): literal[string] keyword[return] identifier[cls] ( identifier[distance_cutoff] = identifier[d] [ literal[string] ], identifier[angle_cutoff] = identifier[d] [ literal[string] ], identifier[additional_condition] = identifier[d] [ literal[string] ], identifier[continuous_symmetry_measure_cutoff] = identifier[d] [ literal[string] ], identifier[symmetry_measure_type] = identifier[d] [ literal[string] ])
def from_dict(cls, d): """ Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object created using the as_dict method. :param d: dict representation of the SimplestChemenvStrategy object :return: StructureEnvironments object """ return cls(distance_cutoff=d['distance_cutoff'], angle_cutoff=d['angle_cutoff'], additional_condition=d['additional_condition'], continuous_symmetry_measure_cutoff=d['continuous_symmetry_measure_cutoff'], symmetry_measure_type=d['symmetry_measure_type'])
def unique(self, sort=False): """ Return unique set of values in image """ unique_vals = np.unique(self.numpy()) if sort: unique_vals = np.sort(unique_vals) return unique_vals
def function[unique, parameter[self, sort]]: constant[ Return unique set of values in image ] variable[unique_vals] assign[=] call[name[np].unique, parameter[call[name[self].numpy, parameter[]]]] if name[sort] begin[:] variable[unique_vals] assign[=] call[name[np].sort, parameter[name[unique_vals]]] return[name[unique_vals]]
keyword[def] identifier[unique] ( identifier[self] , identifier[sort] = keyword[False] ): literal[string] identifier[unique_vals] = identifier[np] . identifier[unique] ( identifier[self] . identifier[numpy] ()) keyword[if] identifier[sort] : identifier[unique_vals] = identifier[np] . identifier[sort] ( identifier[unique_vals] ) keyword[return] identifier[unique_vals]
def unique(self, sort=False): """ Return unique set of values in image """ unique_vals = np.unique(self.numpy()) if sort: unique_vals = np.sort(unique_vals) # depends on [control=['if'], data=[]] return unique_vals
def annotation_path(cls, project, incident, annotation): """Return a fully-qualified annotation string.""" return google.api_core.path_template.expand( "projects/{project}/incidents/{incident}/annotations/{annotation}", project=project, incident=incident, annotation=annotation, )
def function[annotation_path, parameter[cls, project, incident, annotation]]: constant[Return a fully-qualified annotation string.] return[call[name[google].api_core.path_template.expand, parameter[constant[projects/{project}/incidents/{incident}/annotations/{annotation}]]]]
keyword[def] identifier[annotation_path] ( identifier[cls] , identifier[project] , identifier[incident] , identifier[annotation] ): literal[string] keyword[return] identifier[google] . identifier[api_core] . identifier[path_template] . identifier[expand] ( literal[string] , identifier[project] = identifier[project] , identifier[incident] = identifier[incident] , identifier[annotation] = identifier[annotation] , )
def annotation_path(cls, project, incident, annotation): """Return a fully-qualified annotation string.""" return google.api_core.path_template.expand('projects/{project}/incidents/{incident}/annotations/{annotation}', project=project, incident=incident, annotation=annotation)
def reset_offsets_if_needed(self, partitions): """Lookup and set offsets for any partitions which are awaiting an explicit reset. Arguments: partitions (set of TopicPartitions): the partitions to reset """ for tp in partitions: # TODO: If there are several offsets to reset, we could submit offset requests in parallel if self._subscriptions.is_assigned(tp) and self._subscriptions.is_offset_reset_needed(tp): self._reset_offset(tp)
def function[reset_offsets_if_needed, parameter[self, partitions]]: constant[Lookup and set offsets for any partitions which are awaiting an explicit reset. Arguments: partitions (set of TopicPartitions): the partitions to reset ] for taget[name[tp]] in starred[name[partitions]] begin[:] if <ast.BoolOp object at 0x7da1b1c4add0> begin[:] call[name[self]._reset_offset, parameter[name[tp]]]
keyword[def] identifier[reset_offsets_if_needed] ( identifier[self] , identifier[partitions] ): literal[string] keyword[for] identifier[tp] keyword[in] identifier[partitions] : keyword[if] identifier[self] . identifier[_subscriptions] . identifier[is_assigned] ( identifier[tp] ) keyword[and] identifier[self] . identifier[_subscriptions] . identifier[is_offset_reset_needed] ( identifier[tp] ): identifier[self] . identifier[_reset_offset] ( identifier[tp] )
def reset_offsets_if_needed(self, partitions): """Lookup and set offsets for any partitions which are awaiting an explicit reset. Arguments: partitions (set of TopicPartitions): the partitions to reset """ for tp in partitions: # TODO: If there are several offsets to reset, we could submit offset requests in parallel if self._subscriptions.is_assigned(tp) and self._subscriptions.is_offset_reset_needed(tp): self._reset_offset(tp) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tp']]
def connect(self, target, acceptor, wrapper=None): """ Initiate a connection from the tendril manager's endpoint. Once the connection is completed, a Tendril object will be created and passed to the given acceptor. :param target: The target of the connection attempt. :param acceptor: A callable which will initialize the state of the new Tendril object. :param wrapper: A callable taking, as its first argument, a socket.socket object. The callable must return a valid proxy for the socket.socket object, which will subsequently be used to communicate on the connection. For passing extra arguments to the acceptor or the wrapper, see the ``TendrilPartial`` class; for chaining together multiple wrappers, see the ``WrapperChain`` class. """ if not self.running: raise ValueError("TendrilManager not running") # Check the target address fam = utils.addr_info(target) # Verify that we're in the right family if self.addr_family != fam: raise ValueError("address family mismatch")
def function[connect, parameter[self, target, acceptor, wrapper]]: constant[ Initiate a connection from the tendril manager's endpoint. Once the connection is completed, a Tendril object will be created and passed to the given acceptor. :param target: The target of the connection attempt. :param acceptor: A callable which will initialize the state of the new Tendril object. :param wrapper: A callable taking, as its first argument, a socket.socket object. The callable must return a valid proxy for the socket.socket object, which will subsequently be used to communicate on the connection. For passing extra arguments to the acceptor or the wrapper, see the ``TendrilPartial`` class; for chaining together multiple wrappers, see the ``WrapperChain`` class. ] if <ast.UnaryOp object at 0x7da204963430> begin[:] <ast.Raise object at 0x7da204963e80> variable[fam] assign[=] call[name[utils].addr_info, parameter[name[target]]] if compare[name[self].addr_family not_equal[!=] name[fam]] begin[:] <ast.Raise object at 0x7da204962a70>
keyword[def] identifier[connect] ( identifier[self] , identifier[target] , identifier[acceptor] , identifier[wrapper] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[running] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[fam] = identifier[utils] . identifier[addr_info] ( identifier[target] ) keyword[if] identifier[self] . identifier[addr_family] != identifier[fam] : keyword[raise] identifier[ValueError] ( literal[string] )
def connect(self, target, acceptor, wrapper=None): """ Initiate a connection from the tendril manager's endpoint. Once the connection is completed, a Tendril object will be created and passed to the given acceptor. :param target: The target of the connection attempt. :param acceptor: A callable which will initialize the state of the new Tendril object. :param wrapper: A callable taking, as its first argument, a socket.socket object. The callable must return a valid proxy for the socket.socket object, which will subsequently be used to communicate on the connection. For passing extra arguments to the acceptor or the wrapper, see the ``TendrilPartial`` class; for chaining together multiple wrappers, see the ``WrapperChain`` class. """ if not self.running: raise ValueError('TendrilManager not running') # depends on [control=['if'], data=[]] # Check the target address fam = utils.addr_info(target) # Verify that we're in the right family if self.addr_family != fam: raise ValueError('address family mismatch') # depends on [control=['if'], data=[]]
def get(self, terrain, num=1): """ Add a certain number of resources to the trade from getter->giver :param terrain: resource type, models.Terrain :param num: number to add, int :return: None """ for _ in range(num): logging.debug('terrain={}'.format(terrain)) self._get.append(terrain)
def function[get, parameter[self, terrain, num]]: constant[ Add a certain number of resources to the trade from getter->giver :param terrain: resource type, models.Terrain :param num: number to add, int :return: None ] for taget[name[_]] in starred[call[name[range], parameter[name[num]]]] begin[:] call[name[logging].debug, parameter[call[constant[terrain={}].format, parameter[name[terrain]]]]] call[name[self]._get.append, parameter[name[terrain]]]
keyword[def] identifier[get] ( identifier[self] , identifier[terrain] , identifier[num] = literal[int] ): literal[string] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[num] ): identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[terrain] )) identifier[self] . identifier[_get] . identifier[append] ( identifier[terrain] )
def get(self, terrain, num=1): """ Add a certain number of resources to the trade from getter->giver :param terrain: resource type, models.Terrain :param num: number to add, int :return: None """ for _ in range(num): logging.debug('terrain={}'.format(terrain)) self._get.append(terrain) # depends on [control=['for'], data=[]]
def final_bearing(self, format='numeric'): """Calculate final bearing between locations in segments. Args: format (str): Format of the bearing string to return Returns: list of list of float: Groups of bearings between points in segments """ bearings = [] for segment in self: if len(segment) < 2: bearings.append([]) else: bearings.append(segment.final_bearing(format)) return bearings
def function[final_bearing, parameter[self, format]]: constant[Calculate final bearing between locations in segments. Args: format (str): Format of the bearing string to return Returns: list of list of float: Groups of bearings between points in segments ] variable[bearings] assign[=] list[[]] for taget[name[segment]] in starred[name[self]] begin[:] if compare[call[name[len], parameter[name[segment]]] less[<] constant[2]] begin[:] call[name[bearings].append, parameter[list[[]]]] return[name[bearings]]
keyword[def] identifier[final_bearing] ( identifier[self] , identifier[format] = literal[string] ): literal[string] identifier[bearings] =[] keyword[for] identifier[segment] keyword[in] identifier[self] : keyword[if] identifier[len] ( identifier[segment] )< literal[int] : identifier[bearings] . identifier[append] ([]) keyword[else] : identifier[bearings] . identifier[append] ( identifier[segment] . identifier[final_bearing] ( identifier[format] )) keyword[return] identifier[bearings]
def final_bearing(self, format='numeric'): """Calculate final bearing between locations in segments. Args: format (str): Format of the bearing string to return Returns: list of list of float: Groups of bearings between points in segments """ bearings = [] for segment in self: if len(segment) < 2: bearings.append([]) # depends on [control=['if'], data=[]] else: bearings.append(segment.final_bearing(format)) # depends on [control=['for'], data=['segment']] return bearings
def ignore_missing_email_protection_eku_cb(ok, ctx): """ For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify(). The latter requires that ExtendedKeyUsage extension, if present, contains 'emailProtection' OID. (Is it because S/MIME is/was the primary use case for PKCS7?) We do not want to fail the verification in this case. At present, M2Crypto lacks possibility of removing or modifying an existing extension. Let's assign a custom verification callback. """ # The error we want to ignore is indicated by X509_V_ERR_INVALID_PURPOSE. err = ctx.get_error() if err != m2.X509_V_ERR_INVALID_PURPOSE: return ok # PKCS7_verify() has this requriement only for the signing certificate. # Do not modify the behavior for certificates upper in the chain. if ctx.get_error_depth() > 0: return ok # There is another cause of ERR_INVALID_PURPOSE: incompatible keyUsage. # Do not modify the default behavior in this case. cert = ctx.get_current_cert() try: key_usage = cert.get_ext('keyUsage').get_value() if 'digitalSignature' not in key_usage \ and 'nonRepudiation' not in key_usage: return ok except LookupError: pass # Here, keyUsage is either absent, or contains the needed bit(s). # So ERR_INVALID_PURPOSE is caused by EKU not containing 'emailProtection'. # Ignore this error. return 1
def function[ignore_missing_email_protection_eku_cb, parameter[ok, ctx]]: constant[ For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify(). The latter requires that ExtendedKeyUsage extension, if present, contains 'emailProtection' OID. (Is it because S/MIME is/was the primary use case for PKCS7?) We do not want to fail the verification in this case. At present, M2Crypto lacks possibility of removing or modifying an existing extension. Let's assign a custom verification callback. ] variable[err] assign[=] call[name[ctx].get_error, parameter[]] if compare[name[err] not_equal[!=] name[m2].X509_V_ERR_INVALID_PURPOSE] begin[:] return[name[ok]] if compare[call[name[ctx].get_error_depth, parameter[]] greater[>] constant[0]] begin[:] return[name[ok]] variable[cert] assign[=] call[name[ctx].get_current_cert, parameter[]] <ast.Try object at 0x7da1b0b05ed0> return[constant[1]]
keyword[def] identifier[ignore_missing_email_protection_eku_cb] ( identifier[ok] , identifier[ctx] ): literal[string] identifier[err] = identifier[ctx] . identifier[get_error] () keyword[if] identifier[err] != identifier[m2] . identifier[X509_V_ERR_INVALID_PURPOSE] : keyword[return] identifier[ok] keyword[if] identifier[ctx] . identifier[get_error_depth] ()> literal[int] : keyword[return] identifier[ok] identifier[cert] = identifier[ctx] . identifier[get_current_cert] () keyword[try] : identifier[key_usage] = identifier[cert] . identifier[get_ext] ( literal[string] ). identifier[get_value] () keyword[if] literal[string] keyword[not] keyword[in] identifier[key_usage] keyword[and] literal[string] keyword[not] keyword[in] identifier[key_usage] : keyword[return] identifier[ok] keyword[except] identifier[LookupError] : keyword[pass] keyword[return] literal[int]
def ignore_missing_email_protection_eku_cb(ok, ctx): """ For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify(). The latter requires that ExtendedKeyUsage extension, if present, contains 'emailProtection' OID. (Is it because S/MIME is/was the primary use case for PKCS7?) We do not want to fail the verification in this case. At present, M2Crypto lacks possibility of removing or modifying an existing extension. Let's assign a custom verification callback. """ # The error we want to ignore is indicated by X509_V_ERR_INVALID_PURPOSE. err = ctx.get_error() if err != m2.X509_V_ERR_INVALID_PURPOSE: return ok # depends on [control=['if'], data=[]] # PKCS7_verify() has this requriement only for the signing certificate. # Do not modify the behavior for certificates upper in the chain. if ctx.get_error_depth() > 0: return ok # depends on [control=['if'], data=[]] # There is another cause of ERR_INVALID_PURPOSE: incompatible keyUsage. # Do not modify the default behavior in this case. cert = ctx.get_current_cert() try: key_usage = cert.get_ext('keyUsage').get_value() if 'digitalSignature' not in key_usage and 'nonRepudiation' not in key_usage: return ok # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except LookupError: pass # depends on [control=['except'], data=[]] # Here, keyUsage is either absent, or contains the needed bit(s). # So ERR_INVALID_PURPOSE is caused by EKU not containing 'emailProtection'. # Ignore this error. return 1
def add( self, symbol, base_value, dimensions, tex_repr=None, offset=None, prefixable=False, ): """ Add a symbol to this registry. Parameters ---------- symbol : str The name of the unit base_value : float The scaling from the units value to the equivalent SI unit with the same dimensions dimensions : expr The dimensions of the unit tex_repr : str, optional The LaTeX representation of the unit. If not provided a LaTeX representation is automatically generated from the name of the unit. offset : float, optional If set, the zero-point offset to apply to the unit to convert to SI. This is mostly used for units like Farhenheit and Celcius that are not defined on an absolute scale. prefixable : bool If True, then SI-prefix versions of the unit will be created along with the unit itself. """ from unyt.unit_object import _validate_dimensions self._unit_system_id = None # Validate if not isinstance(base_value, float): raise UnitParseError( "base_value (%s) must be a float, got a %s." % (base_value, type(base_value)) ) if offset is not None: if not isinstance(offset, float): raise UnitParseError( "offset value (%s) must be a float, got a %s." % (offset, type(offset)) ) else: offset = 0.0 _validate_dimensions(dimensions) if tex_repr is None: # make educated guess that will look nice in most cases tex_repr = r"\rm{" + symbol.replace("_", r"\ ") + "}" # Add to lut self.lut[symbol] = (base_value, dimensions, offset, tex_repr, prefixable)
def function[add, parameter[self, symbol, base_value, dimensions, tex_repr, offset, prefixable]]: constant[ Add a symbol to this registry. Parameters ---------- symbol : str The name of the unit base_value : float The scaling from the units value to the equivalent SI unit with the same dimensions dimensions : expr The dimensions of the unit tex_repr : str, optional The LaTeX representation of the unit. If not provided a LaTeX representation is automatically generated from the name of the unit. offset : float, optional If set, the zero-point offset to apply to the unit to convert to SI. This is mostly used for units like Farhenheit and Celcius that are not defined on an absolute scale. prefixable : bool If True, then SI-prefix versions of the unit will be created along with the unit itself. ] from relative_module[unyt.unit_object] import module[_validate_dimensions] name[self]._unit_system_id assign[=] constant[None] if <ast.UnaryOp object at 0x7da1b111af50> begin[:] <ast.Raise object at 0x7da1b111ae30> if compare[name[offset] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da1b111bd00> begin[:] <ast.Raise object at 0x7da1b1119f00> call[name[_validate_dimensions], parameter[name[dimensions]]] if compare[name[tex_repr] is constant[None]] begin[:] variable[tex_repr] assign[=] binary_operation[binary_operation[constant[\rm{] + call[name[symbol].replace, parameter[constant[_], constant[\ ]]]] + constant[}]] call[name[self].lut][name[symbol]] assign[=] tuple[[<ast.Name object at 0x7da1b11d02e0>, <ast.Name object at 0x7da1b11d27a0>, <ast.Name object at 0x7da1b11d13f0>, <ast.Name object at 0x7da1b11d15d0>, <ast.Name object at 0x7da1b11d03a0>]]
keyword[def] identifier[add] ( identifier[self] , identifier[symbol] , identifier[base_value] , identifier[dimensions] , identifier[tex_repr] = keyword[None] , identifier[offset] = keyword[None] , identifier[prefixable] = keyword[False] , ): literal[string] keyword[from] identifier[unyt] . identifier[unit_object] keyword[import] identifier[_validate_dimensions] identifier[self] . identifier[_unit_system_id] = keyword[None] keyword[if] keyword[not] identifier[isinstance] ( identifier[base_value] , identifier[float] ): keyword[raise] identifier[UnitParseError] ( literal[string] %( identifier[base_value] , identifier[type] ( identifier[base_value] )) ) keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[isinstance] ( identifier[offset] , identifier[float] ): keyword[raise] identifier[UnitParseError] ( literal[string] %( identifier[offset] , identifier[type] ( identifier[offset] )) ) keyword[else] : identifier[offset] = literal[int] identifier[_validate_dimensions] ( identifier[dimensions] ) keyword[if] identifier[tex_repr] keyword[is] keyword[None] : identifier[tex_repr] = literal[string] + identifier[symbol] . identifier[replace] ( literal[string] , literal[string] )+ literal[string] identifier[self] . identifier[lut] [ identifier[symbol] ]=( identifier[base_value] , identifier[dimensions] , identifier[offset] , identifier[tex_repr] , identifier[prefixable] )
def add(self, symbol, base_value, dimensions, tex_repr=None, offset=None, prefixable=False): """ Add a symbol to this registry. Parameters ---------- symbol : str The name of the unit base_value : float The scaling from the units value to the equivalent SI unit with the same dimensions dimensions : expr The dimensions of the unit tex_repr : str, optional The LaTeX representation of the unit. If not provided a LaTeX representation is automatically generated from the name of the unit. offset : float, optional If set, the zero-point offset to apply to the unit to convert to SI. This is mostly used for units like Farhenheit and Celcius that are not defined on an absolute scale. prefixable : bool If True, then SI-prefix versions of the unit will be created along with the unit itself. """ from unyt.unit_object import _validate_dimensions self._unit_system_id = None # Validate if not isinstance(base_value, float): raise UnitParseError('base_value (%s) must be a float, got a %s.' % (base_value, type(base_value))) # depends on [control=['if'], data=[]] if offset is not None: if not isinstance(offset, float): raise UnitParseError('offset value (%s) must be a float, got a %s.' % (offset, type(offset))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['offset']] else: offset = 0.0 _validate_dimensions(dimensions) if tex_repr is None: # make educated guess that will look nice in most cases tex_repr = '\\rm{' + symbol.replace('_', '\\ ') + '}' # depends on [control=['if'], data=['tex_repr']] # Add to lut self.lut[symbol] = (base_value, dimensions, offset, tex_repr, prefixable)
def sendPygletEvent(self,event_type,args,window=None): """ Handles a pyglet event. This method is called by :py:meth:`PengWindow.dispatch_event()` and handles all events. See :py:meth:`registerEventHandler()` for how to listen to these events. This method should be used to send pyglet events. For new code, it is recommended to use :py:meth:`sendEvent()` instead. For "tunneling" pyglet events, use event names of the format ``pyglet:<event>`` and for the data use ``{"args":<args as list>,"window":<window object or none>,"src":<event source>,"event_type":<event type>}`` Note that you should send pyglet events only via this method, the above event will be sent automatically. Do not use this method to send custom events, use :py:meth:`sendEvent` instead. """ args = list(args) self.sendEvent("pyglet:%s"%event_type,{"peng":self,"args":args,"window":window,"src":self,"event_type":event_type}) self.sendEvent("peng3d:pyglet",{"peng":self,"args":args,"window":window,"src":self,"event_type":event_type}) if event_type not in ["on_draw","on_mouse_motion"] and self.cfg["debug.events.dump"]: print("Event %s with args %s"%(event_type,args)) if event_type in self.pygletEventHandlers: for whandler in self.pygletEventHandlers[event_type]: # This allows for proper collection of deleted handler methods by using weak references handler = whandler() if handler is None: del self.pygletEventHandlers[event_type][self.pygletEventHandlers[event_type].index(whandler)] handler(*args)
def function[sendPygletEvent, parameter[self, event_type, args, window]]: constant[ Handles a pyglet event. This method is called by :py:meth:`PengWindow.dispatch_event()` and handles all events. See :py:meth:`registerEventHandler()` for how to listen to these events. This method should be used to send pyglet events. For new code, it is recommended to use :py:meth:`sendEvent()` instead. For "tunneling" pyglet events, use event names of the format ``pyglet:<event>`` and for the data use ``{"args":<args as list>,"window":<window object or none>,"src":<event source>,"event_type":<event type>}`` Note that you should send pyglet events only via this method, the above event will be sent automatically. Do not use this method to send custom events, use :py:meth:`sendEvent` instead. ] variable[args] assign[=] call[name[list], parameter[name[args]]] call[name[self].sendEvent, parameter[binary_operation[constant[pyglet:%s] <ast.Mod object at 0x7da2590d6920> name[event_type]], dictionary[[<ast.Constant object at 0x7da1b01923b0>, <ast.Constant object at 0x7da1b0192350>, <ast.Constant object at 0x7da1b0192260>, <ast.Constant object at 0x7da1b0193340>, <ast.Constant object at 0x7da1b0193310>], [<ast.Name object at 0x7da1b01938b0>, <ast.Name object at 0x7da1b0193880>, <ast.Name object at 0x7da1b0193850>, <ast.Name object at 0x7da1b0193820>, <ast.Name object at 0x7da1b01937f0>]]]] call[name[self].sendEvent, parameter[constant[peng3d:pyglet], dictionary[[<ast.Constant object at 0x7da1b0190eb0>, <ast.Constant object at 0x7da1b0192290>, <ast.Constant object at 0x7da1b0192380>, <ast.Constant object at 0x7da1b0190040>, <ast.Constant object at 0x7da1b0192110>], [<ast.Name object at 0x7da1b0192230>, <ast.Name object at 0x7da1b0192200>, <ast.Name object at 0x7da1b0193e20>, <ast.Name object at 0x7da1b0193df0>, <ast.Name object at 0x7da1b0193dc0>]]]] if <ast.BoolOp object at 0x7da1b01901c0> begin[:] call[name[print], parameter[binary_operation[constant[Event %s with args %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0191f60>, <ast.Name object at 0x7da1b0191ea0>]]]]] if compare[name[event_type] in name[self].pygletEventHandlers] begin[:] for taget[name[whandler]] in starred[call[name[self].pygletEventHandlers][name[event_type]]] begin[:] variable[handler] assign[=] call[name[whandler], parameter[]] if compare[name[handler] is constant[None]] begin[:] <ast.Delete object at 0x7da1b0191e40> call[name[handler], parameter[<ast.Starred object at 0x7da1b0191b10>]]
keyword[def] identifier[sendPygletEvent] ( identifier[self] , identifier[event_type] , identifier[args] , identifier[window] = keyword[None] ): literal[string] identifier[args] = identifier[list] ( identifier[args] ) identifier[self] . identifier[sendEvent] ( literal[string] % identifier[event_type] ,{ literal[string] : identifier[self] , literal[string] : identifier[args] , literal[string] : identifier[window] , literal[string] : identifier[self] , literal[string] : identifier[event_type] }) identifier[self] . identifier[sendEvent] ( literal[string] ,{ literal[string] : identifier[self] , literal[string] : identifier[args] , literal[string] : identifier[window] , literal[string] : identifier[self] , literal[string] : identifier[event_type] }) keyword[if] identifier[event_type] keyword[not] keyword[in] [ literal[string] , literal[string] ] keyword[and] identifier[self] . identifier[cfg] [ literal[string] ]: identifier[print] ( literal[string] %( identifier[event_type] , identifier[args] )) keyword[if] identifier[event_type] keyword[in] identifier[self] . identifier[pygletEventHandlers] : keyword[for] identifier[whandler] keyword[in] identifier[self] . identifier[pygletEventHandlers] [ identifier[event_type] ]: identifier[handler] = identifier[whandler] () keyword[if] identifier[handler] keyword[is] keyword[None] : keyword[del] identifier[self] . identifier[pygletEventHandlers] [ identifier[event_type] ][ identifier[self] . identifier[pygletEventHandlers] [ identifier[event_type] ]. identifier[index] ( identifier[whandler] )] identifier[handler] (* identifier[args] )
def sendPygletEvent(self, event_type, args, window=None): """ Handles a pyglet event. This method is called by :py:meth:`PengWindow.dispatch_event()` and handles all events. See :py:meth:`registerEventHandler()` for how to listen to these events. This method should be used to send pyglet events. For new code, it is recommended to use :py:meth:`sendEvent()` instead. For "tunneling" pyglet events, use event names of the format ``pyglet:<event>`` and for the data use ``{"args":<args as list>,"window":<window object or none>,"src":<event source>,"event_type":<event type>}`` Note that you should send pyglet events only via this method, the above event will be sent automatically. Do not use this method to send custom events, use :py:meth:`sendEvent` instead. """ args = list(args) self.sendEvent('pyglet:%s' % event_type, {'peng': self, 'args': args, 'window': window, 'src': self, 'event_type': event_type}) self.sendEvent('peng3d:pyglet', {'peng': self, 'args': args, 'window': window, 'src': self, 'event_type': event_type}) if event_type not in ['on_draw', 'on_mouse_motion'] and self.cfg['debug.events.dump']: print('Event %s with args %s' % (event_type, args)) # depends on [control=['if'], data=[]] if event_type in self.pygletEventHandlers: for whandler in self.pygletEventHandlers[event_type]: # This allows for proper collection of deleted handler methods by using weak references handler = whandler() if handler is None: del self.pygletEventHandlers[event_type][self.pygletEventHandlers[event_type].index(whandler)] # depends on [control=['if'], data=[]] handler(*args) # depends on [control=['for'], data=['whandler']] # depends on [control=['if'], data=['event_type']]
def remove(self, doc_type, doc_ids, **kwargs): """ Implements call to remove the documents from the index """ try: # ignore is flagged as an unexpected-keyword-arg; ES python client documents that it can be used # pylint: disable=unexpected-keyword-arg actions = [] for doc_id in doc_ids: log.debug("Removing document of type %s and index %s", doc_type, doc_id) action = { '_op_type': 'delete', "_index": self.index_name, "_type": doc_type, "_id": doc_id } actions.append(action) bulk(self._es, actions, **kwargs) except BulkIndexError as ex: valid_errors = [error for error in ex.errors if error['delete']['status'] != 404] if valid_errors: log.exception("An error occurred while removing documents from the index.") raise
def function[remove, parameter[self, doc_type, doc_ids]]: constant[ Implements call to remove the documents from the index ] <ast.Try object at 0x7da1b00df940>
keyword[def] identifier[remove] ( identifier[self] , identifier[doc_type] , identifier[doc_ids] ,** identifier[kwargs] ): literal[string] keyword[try] : identifier[actions] =[] keyword[for] identifier[doc_id] keyword[in] identifier[doc_ids] : identifier[log] . identifier[debug] ( literal[string] , identifier[doc_type] , identifier[doc_id] ) identifier[action] ={ literal[string] : literal[string] , literal[string] : identifier[self] . identifier[index_name] , literal[string] : identifier[doc_type] , literal[string] : identifier[doc_id] } identifier[actions] . identifier[append] ( identifier[action] ) identifier[bulk] ( identifier[self] . identifier[_es] , identifier[actions] ,** identifier[kwargs] ) keyword[except] identifier[BulkIndexError] keyword[as] identifier[ex] : identifier[valid_errors] =[ identifier[error] keyword[for] identifier[error] keyword[in] identifier[ex] . identifier[errors] keyword[if] identifier[error] [ literal[string] ][ literal[string] ]!= literal[int] ] keyword[if] identifier[valid_errors] : identifier[log] . identifier[exception] ( literal[string] ) keyword[raise]
def remove(self, doc_type, doc_ids, **kwargs): """ Implements call to remove the documents from the index """ try: # ignore is flagged as an unexpected-keyword-arg; ES python client documents that it can be used # pylint: disable=unexpected-keyword-arg actions = [] for doc_id in doc_ids: log.debug('Removing document of type %s and index %s', doc_type, doc_id) action = {'_op_type': 'delete', '_index': self.index_name, '_type': doc_type, '_id': doc_id} actions.append(action) # depends on [control=['for'], data=['doc_id']] bulk(self._es, actions, **kwargs) # depends on [control=['try'], data=[]] except BulkIndexError as ex: valid_errors = [error for error in ex.errors if error['delete']['status'] != 404] if valid_errors: log.exception('An error occurred while removing documents from the index.') raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']]
def getTransitionUsers(obj, action_id, last_user=False): """ This function returns a list with the users who have done the transition. :action_id: a sring as the transition id. :last_user: a boolean to return only the last user triggering the transition or all of them. :returns: a list of user ids. """ workflow = getToolByName(obj, 'portal_workflow') users = [] try: # https://jira.bikalabs.com/browse/LIMS-2242: # Sometimes the workflow history is inexplicably missing! review_history = list(workflow.getInfoFor(obj, 'review_history')) except WorkflowException: logger.error( "workflow history is inexplicably missing." " https://jira.bikalabs.com/browse/LIMS-2242") return users # invert the list, so we always see the most recent matching event review_history.reverse() for event in review_history: if event.get('action', '') == action_id: value = event.get('actor', '') users.append(value) if last_user: return users return users
def function[getTransitionUsers, parameter[obj, action_id, last_user]]: constant[ This function returns a list with the users who have done the transition. :action_id: a sring as the transition id. :last_user: a boolean to return only the last user triggering the transition or all of them. :returns: a list of user ids. ] variable[workflow] assign[=] call[name[getToolByName], parameter[name[obj], constant[portal_workflow]]] variable[users] assign[=] list[[]] <ast.Try object at 0x7da1b23475b0> call[name[review_history].reverse, parameter[]] for taget[name[event]] in starred[name[review_history]] begin[:] if compare[call[name[event].get, parameter[constant[action], constant[]]] equal[==] name[action_id]] begin[:] variable[value] assign[=] call[name[event].get, parameter[constant[actor], constant[]]] call[name[users].append, parameter[name[value]]] if name[last_user] begin[:] return[name[users]] return[name[users]]
keyword[def] identifier[getTransitionUsers] ( identifier[obj] , identifier[action_id] , identifier[last_user] = keyword[False] ): literal[string] identifier[workflow] = identifier[getToolByName] ( identifier[obj] , literal[string] ) identifier[users] =[] keyword[try] : identifier[review_history] = identifier[list] ( identifier[workflow] . identifier[getInfoFor] ( identifier[obj] , literal[string] )) keyword[except] identifier[WorkflowException] : identifier[logger] . identifier[error] ( literal[string] literal[string] ) keyword[return] identifier[users] identifier[review_history] . identifier[reverse] () keyword[for] identifier[event] keyword[in] identifier[review_history] : keyword[if] identifier[event] . identifier[get] ( literal[string] , literal[string] )== identifier[action_id] : identifier[value] = identifier[event] . identifier[get] ( literal[string] , literal[string] ) identifier[users] . identifier[append] ( identifier[value] ) keyword[if] identifier[last_user] : keyword[return] identifier[users] keyword[return] identifier[users]
def getTransitionUsers(obj, action_id, last_user=False): """ This function returns a list with the users who have done the transition. :action_id: a sring as the transition id. :last_user: a boolean to return only the last user triggering the transition or all of them. :returns: a list of user ids. """ workflow = getToolByName(obj, 'portal_workflow') users = [] try: # https://jira.bikalabs.com/browse/LIMS-2242: # Sometimes the workflow history is inexplicably missing! review_history = list(workflow.getInfoFor(obj, 'review_history')) # depends on [control=['try'], data=[]] except WorkflowException: logger.error('workflow history is inexplicably missing. https://jira.bikalabs.com/browse/LIMS-2242') return users # depends on [control=['except'], data=[]] # invert the list, so we always see the most recent matching event review_history.reverse() for event in review_history: if event.get('action', '') == action_id: value = event.get('actor', '') users.append(value) if last_user: return users # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['event']] return users
def get_extended_map(self): """ Creates a copy of this map which includes all non-abstract configurations in their extended form. :return: Copy of this map. :rtype: ContainerMap """ map_copy = self.__class__(self.name) map_copy.update_from_obj(self, copy=True, update_containers=False) for c_name, c_config in self: map_copy._containers[c_name] = self.get_extended(c_config) map_copy._extended = True return map_copy
def function[get_extended_map, parameter[self]]: constant[ Creates a copy of this map which includes all non-abstract configurations in their extended form. :return: Copy of this map. :rtype: ContainerMap ] variable[map_copy] assign[=] call[name[self].__class__, parameter[name[self].name]] call[name[map_copy].update_from_obj, parameter[name[self]]] for taget[tuple[[<ast.Name object at 0x7da20e9b0df0>, <ast.Name object at 0x7da20e9b1b10>]]] in starred[name[self]] begin[:] call[name[map_copy]._containers][name[c_name]] assign[=] call[name[self].get_extended, parameter[name[c_config]]] name[map_copy]._extended assign[=] constant[True] return[name[map_copy]]
keyword[def] identifier[get_extended_map] ( identifier[self] ): literal[string] identifier[map_copy] = identifier[self] . identifier[__class__] ( identifier[self] . identifier[name] ) identifier[map_copy] . identifier[update_from_obj] ( identifier[self] , identifier[copy] = keyword[True] , identifier[update_containers] = keyword[False] ) keyword[for] identifier[c_name] , identifier[c_config] keyword[in] identifier[self] : identifier[map_copy] . identifier[_containers] [ identifier[c_name] ]= identifier[self] . identifier[get_extended] ( identifier[c_config] ) identifier[map_copy] . identifier[_extended] = keyword[True] keyword[return] identifier[map_copy]
def get_extended_map(self): """ Creates a copy of this map which includes all non-abstract configurations in their extended form. :return: Copy of this map. :rtype: ContainerMap """ map_copy = self.__class__(self.name) map_copy.update_from_obj(self, copy=True, update_containers=False) for (c_name, c_config) in self: map_copy._containers[c_name] = self.get_extended(c_config) # depends on [control=['for'], data=[]] map_copy._extended = True return map_copy
def modify_log_flags(self, settings): """Modifies the debug or release logger flags. in settings of type str The flags settings string. See iprt/log.h for details. To target the release logger, prefix the string with "release:". """ if not isinstance(settings, basestring): raise TypeError("settings can only be an instance of type basestring") self._call("modifyLogFlags", in_p=[settings])
def function[modify_log_flags, parameter[self, settings]]: constant[Modifies the debug or release logger flags. in settings of type str The flags settings string. See iprt/log.h for details. To target the release logger, prefix the string with "release:". ] if <ast.UnaryOp object at 0x7da204345150> begin[:] <ast.Raise object at 0x7da2043474f0> call[name[self]._call, parameter[constant[modifyLogFlags]]]
keyword[def] identifier[modify_log_flags] ( identifier[self] , identifier[settings] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[settings] , identifier[basestring] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[settings] ])
def modify_log_flags(self, settings): """Modifies the debug or release logger flags. in settings of type str The flags settings string. See iprt/log.h for details. To target the release logger, prefix the string with "release:". """ if not isinstance(settings, basestring): raise TypeError('settings can only be an instance of type basestring') # depends on [control=['if'], data=[]] self._call('modifyLogFlags', in_p=[settings])
def movie_body_count_r_classify(data_set='movie_body_count'): """Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R.""" data = movie_body_count()['Y'] import pandas as pd import numpy as np X = data[['Year', 'Body_Count']] Y = data['MPAA_Rating']=='R' # set label to be positive for R rated films. # Create series of movie genres with the relevant index s = data['Genre'].str.split('|').apply(pd.Series, 1).stack() s.index = s.index.droplevel(-1) # to line up with df's index # Extract from the series the unique list of genres. genres = s.unique() # For each genre extract the indices where it is present and add a column to X for genre in genres: index = s[s==genre].index.tolist() values = pd.Series(np.zeros(X.shape[0]), index=X.index) values[index] = 1 X[genre] = values return data_details_return({'X': X, 'Y': Y, 'info' : "Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R. In this variant we aim to classify whether the film is rated R or not depending on the genre, the years and the body count.", }, data_set)
def function[movie_body_count_r_classify, parameter[data_set]]: constant[Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R.] variable[data] assign[=] call[call[name[movie_body_count], parameter[]]][constant[Y]] import module[pandas] as alias[pd] import module[numpy] as alias[np] variable[X] assign[=] call[name[data]][list[[<ast.Constant object at 0x7da1b26ad810>, <ast.Constant object at 0x7da1b26af040>]]] variable[Y] assign[=] compare[call[name[data]][constant[MPAA_Rating]] equal[==] constant[R]] variable[s] assign[=] call[call[call[call[name[data]][constant[Genre]].str.split, parameter[constant[|]]].apply, parameter[name[pd].Series, constant[1]]].stack, parameter[]] name[s].index assign[=] call[name[s].index.droplevel, parameter[<ast.UnaryOp object at 0x7da20c76d180>]] variable[genres] assign[=] call[name[s].unique, parameter[]] for taget[name[genre]] in starred[name[genres]] begin[:] variable[index] assign[=] call[call[name[s]][compare[name[s] equal[==] name[genre]]].index.tolist, parameter[]] variable[values] assign[=] call[name[pd].Series, parameter[call[name[np].zeros, parameter[call[name[X].shape][constant[0]]]]]] call[name[values]][name[index]] assign[=] constant[1] call[name[X]][name[genre]] assign[=] name[values] return[call[name[data_details_return], parameter[dictionary[[<ast.Constant object at 0x7da1b0fe67d0>, <ast.Constant object at 0x7da1b0fe6ad0>, <ast.Constant object at 0x7da1b0fe7f10>], [<ast.Name object at 0x7da1b0fe79d0>, <ast.Name object at 0x7da1b0fe78e0>, <ast.Constant object at 0x7da1b0fe4fa0>]], name[data_set]]]]
keyword[def] identifier[movie_body_count_r_classify] ( identifier[data_set] = literal[string] ): literal[string] identifier[data] = identifier[movie_body_count] ()[ literal[string] ] keyword[import] identifier[pandas] keyword[as] identifier[pd] keyword[import] identifier[numpy] keyword[as] identifier[np] identifier[X] = identifier[data] [[ literal[string] , literal[string] ]] identifier[Y] = identifier[data] [ literal[string] ]== literal[string] identifier[s] = identifier[data] [ literal[string] ]. identifier[str] . identifier[split] ( literal[string] ). identifier[apply] ( identifier[pd] . identifier[Series] , literal[int] ). identifier[stack] () identifier[s] . identifier[index] = identifier[s] . identifier[index] . identifier[droplevel] (- literal[int] ) identifier[genres] = identifier[s] . identifier[unique] () keyword[for] identifier[genre] keyword[in] identifier[genres] : identifier[index] = identifier[s] [ identifier[s] == identifier[genre] ]. identifier[index] . identifier[tolist] () identifier[values] = identifier[pd] . identifier[Series] ( identifier[np] . identifier[zeros] ( identifier[X] . identifier[shape] [ literal[int] ]), identifier[index] = identifier[X] . identifier[index] ) identifier[values] [ identifier[index] ]= literal[int] identifier[X] [ identifier[genre] ]= identifier[values] keyword[return] identifier[data_details_return] ({ literal[string] : identifier[X] , literal[string] : identifier[Y] , literal[string] : literal[string] , }, identifier[data_set] )
def movie_body_count_r_classify(data_set='movie_body_count'): """Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R.""" data = movie_body_count()['Y'] import pandas as pd import numpy as np X = data[['Year', 'Body_Count']] Y = data['MPAA_Rating'] == 'R' # set label to be positive for R rated films. # Create series of movie genres with the relevant index s = data['Genre'].str.split('|').apply(pd.Series, 1).stack() s.index = s.index.droplevel(-1) # to line up with df's index # Extract from the series the unique list of genres. genres = s.unique() # For each genre extract the indices where it is present and add a column to X for genre in genres: index = s[s == genre].index.tolist() values = pd.Series(np.zeros(X.shape[0]), index=X.index) values[index] = 1 X[genre] = values # depends on [control=['for'], data=['genre']] return data_details_return({'X': X, 'Y': Y, 'info': 'Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R. In this variant we aim to classify whether the film is rated R or not depending on the genre, the years and the body count.'}, data_set)
def style_checkboxes(widget): """ Iterates over widget children to change checkboxes stylesheet. The default rendering of checkboxes does not allow to tell a focused one from an unfocused one. """ ww = widget.findChildren(QCheckBox) for w in ww: w.setStyleSheet("QCheckBox:focus {border: 1px solid #000000;}")
def function[style_checkboxes, parameter[widget]]: constant[ Iterates over widget children to change checkboxes stylesheet. The default rendering of checkboxes does not allow to tell a focused one from an unfocused one. ] variable[ww] assign[=] call[name[widget].findChildren, parameter[name[QCheckBox]]] for taget[name[w]] in starred[name[ww]] begin[:] call[name[w].setStyleSheet, parameter[constant[QCheckBox:focus {border: 1px solid #000000;}]]]
keyword[def] identifier[style_checkboxes] ( identifier[widget] ): literal[string] identifier[ww] = identifier[widget] . identifier[findChildren] ( identifier[QCheckBox] ) keyword[for] identifier[w] keyword[in] identifier[ww] : identifier[w] . identifier[setStyleSheet] ( literal[string] )
def style_checkboxes(widget): """ Iterates over widget children to change checkboxes stylesheet. The default rendering of checkboxes does not allow to tell a focused one from an unfocused one. """ ww = widget.findChildren(QCheckBox) for w in ww: w.setStyleSheet('QCheckBox:focus {border: 1px solid #000000;}') # depends on [control=['for'], data=['w']]
def bck_chunk(self): """ Returns the chunk backward from this chunk in the list of free chunks. """ raise NotImplementedError("%s not implemented for %s" % (self.bck_chunk.__func__.__name__, self.__class__.__name__))
def function[bck_chunk, parameter[self]]: constant[ Returns the chunk backward from this chunk in the list of free chunks. ] <ast.Raise object at 0x7da18ede6e30>
keyword[def] identifier[bck_chunk] ( identifier[self] ): literal[string] keyword[raise] identifier[NotImplementedError] ( literal[string] %( identifier[self] . identifier[bck_chunk] . identifier[__func__] . identifier[__name__] , identifier[self] . identifier[__class__] . identifier[__name__] ))
def bck_chunk(self): """ Returns the chunk backward from this chunk in the list of free chunks. """ raise NotImplementedError('%s not implemented for %s' % (self.bck_chunk.__func__.__name__, self.__class__.__name__))
def calculate(self, **state): """ Calculate the density at the specified temperature, pressure, and composition. :param T: [K] temperature :param P: [Pa] pressure :param x: [mole fraction] dictionary of compounds and mole fractions :returns: [kg/m3] density The **state parameter contains the keyword argument(s) specified above\ that are used to describe the state of the material. """ super().calculate(**state) mm_average = 0.0 for compound, molefraction in state["x"].items(): mm_average += molefraction * mm(compound) mm_average /= 1000.0 return mm_average * state["P"] / R / state["T"]
def function[calculate, parameter[self]]: constant[ Calculate the density at the specified temperature, pressure, and composition. :param T: [K] temperature :param P: [Pa] pressure :param x: [mole fraction] dictionary of compounds and mole fractions :returns: [kg/m3] density The **state parameter contains the keyword argument(s) specified above that are used to describe the state of the material. ] call[call[name[super], parameter[]].calculate, parameter[]] variable[mm_average] assign[=] constant[0.0] for taget[tuple[[<ast.Name object at 0x7da1b00f4970>, <ast.Name object at 0x7da1b00f4910>]]] in starred[call[call[name[state]][constant[x]].items, parameter[]]] begin[:] <ast.AugAssign object at 0x7da1b00f4af0> <ast.AugAssign object at 0x7da1b00f7bb0> return[binary_operation[binary_operation[binary_operation[name[mm_average] * call[name[state]][constant[P]]] / name[R]] / call[name[state]][constant[T]]]]
keyword[def] identifier[calculate] ( identifier[self] ,** identifier[state] ): literal[string] identifier[super] (). identifier[calculate] (** identifier[state] ) identifier[mm_average] = literal[int] keyword[for] identifier[compound] , identifier[molefraction] keyword[in] identifier[state] [ literal[string] ]. identifier[items] (): identifier[mm_average] += identifier[molefraction] * identifier[mm] ( identifier[compound] ) identifier[mm_average] /= literal[int] keyword[return] identifier[mm_average] * identifier[state] [ literal[string] ]/ identifier[R] / identifier[state] [ literal[string] ]
def calculate(self, **state): """ Calculate the density at the specified temperature, pressure, and composition. :param T: [K] temperature :param P: [Pa] pressure :param x: [mole fraction] dictionary of compounds and mole fractions :returns: [kg/m3] density The **state parameter contains the keyword argument(s) specified above that are used to describe the state of the material. """ super().calculate(**state) mm_average = 0.0 for (compound, molefraction) in state['x'].items(): mm_average += molefraction * mm(compound) # depends on [control=['for'], data=[]] mm_average /= 1000.0 return mm_average * state['P'] / R / state['T']
def parsebam(self): """ Parse the dictionaries of the sorted bam files extracted using pysam """ # Threading is actually the worst - need multiprocessing to make this work at all logging.info('Parsing BAM files') # The sample objects are too big to get pickled. To hack our way around this, try to dump the sample object to # json, and have the processing function turn the object into a dictionary. json_files = list() with tempfile.TemporaryDirectory() as tmpdir: best_assemblies = list() sample_names = list() for sample in self.runmetadata: json_name = os.path.join(tmpdir, '{sn}.json'.format(sn=sample.name)) best_assemblies.append(sample.general.bestassemblyfile) sample_names.append(sample.name) with open(json_name, 'w') as f: json.dump(sample[self.analysistype].dump(), f, sort_keys=True, indent=4) json_files.append(json_name) p = multiprocessing.Pool(processes=self.cpus) analysis_type_list = [self.analysistype] * len(self.runmetadata) iupac_list = [self.iupac] * len(self.runmetadata) cutoff_list = [self.cutoff] * len(self.runmetadata) depth_list = [self.averagedepth] * len(self.runmetadata) allow_soft_clip_list = [self.allow_soft_clips] * len(self.runmetadata) sample_results = p.starmap(Sippr.parse_one_sample, zip(json_files, sample_names, best_assemblies, analysis_type_list, iupac_list, cutoff_list, depth_list, allow_soft_clip_list)) p.close() p.join() # Since we had to json-ize the sample objects, we now need to update the metadata for everything. for sample in self.runmetadata: sample[self.analysistype].faidict = dict() sample[self.analysistype].results = dict() sample[self.analysistype].avgdepth = dict() sample[self.analysistype].resultssnp = dict() sample[self.analysistype].snplocations = dict() sample[self.analysistype].resultsgap = dict() sample[self.analysistype].gaplocations = dict() sample[self.analysistype].sequences = dict() sample[self.analysistype].maxcoverage = dict() sample[self.analysistype].mincoverage = dict() sample[self.analysistype].standarddev = dict() # Figure out which of the sample results to use. for sample_result in sample_results: if sample_result['name'] == sample.name: sample[self.analysistype].faidict = sample_result['faidict'] sample[self.analysistype].results = sample_result['results'] sample[self.analysistype].avgdepth = sample_result['avgdepth'] sample[self.analysistype].resultssnp = sample_result['resultssnp'] sample[self.analysistype].snplocations = sample_result['snplocations'] sample[self.analysistype].resultsgap = sample_result['resultsgap'] sample[self.analysistype].gaplocations = sample_result['gaplocations'] sample[self.analysistype].sequences = sample_result['sequences'] sample[self.analysistype].maxcoverage = sample_result['maxcoverage'] sample[self.analysistype].mincoverage = sample_result['mincoverage'] sample[self.analysistype].standarddev = sample_result['standarddev'] logging.info('Done parsing BAM files')
def function[parsebam, parameter[self]]: constant[ Parse the dictionaries of the sorted bam files extracted using pysam ] call[name[logging].info, parameter[constant[Parsing BAM files]]] variable[json_files] assign[=] call[name[list], parameter[]] with call[name[tempfile].TemporaryDirectory, parameter[]] begin[:] variable[best_assemblies] assign[=] call[name[list], parameter[]] variable[sample_names] assign[=] call[name[list], parameter[]] for taget[name[sample]] in starred[name[self].runmetadata] begin[:] variable[json_name] assign[=] call[name[os].path.join, parameter[name[tmpdir], call[constant[{sn}.json].format, parameter[]]]] call[name[best_assemblies].append, parameter[name[sample].general.bestassemblyfile]] call[name[sample_names].append, parameter[name[sample].name]] with call[name[open], parameter[name[json_name], constant[w]]] begin[:] call[name[json].dump, parameter[call[call[name[sample]][name[self].analysistype].dump, parameter[]], name[f]]] call[name[json_files].append, parameter[name[json_name]]] variable[p] assign[=] call[name[multiprocessing].Pool, parameter[]] variable[analysis_type_list] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b1ff72b0>]] * call[name[len], parameter[name[self].runmetadata]]] variable[iupac_list] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b1ff6b90>]] * call[name[len], parameter[name[self].runmetadata]]] variable[cutoff_list] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b1ff7340>]] * call[name[len], parameter[name[self].runmetadata]]] variable[depth_list] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b1ff70a0>]] * call[name[len], parameter[name[self].runmetadata]]] variable[allow_soft_clip_list] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b1ff6f50>]] * call[name[len], parameter[name[self].runmetadata]]] variable[sample_results] assign[=] call[name[p].starmap, parameter[name[Sippr].parse_one_sample, call[name[zip], parameter[name[json_files], name[sample_names], name[best_assemblies], name[analysis_type_list], name[iupac_list], name[cutoff_list], name[depth_list], name[allow_soft_clip_list]]]]] call[name[p].close, parameter[]] call[name[p].join, parameter[]] for taget[name[sample]] in starred[name[self].runmetadata] begin[:] call[name[sample]][name[self].analysistype].faidict assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].results assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].avgdepth assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].resultssnp assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].snplocations assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].resultsgap assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].gaplocations assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].sequences assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].maxcoverage assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].mincoverage assign[=] call[name[dict], parameter[]] call[name[sample]][name[self].analysistype].standarddev assign[=] call[name[dict], parameter[]] for taget[name[sample_result]] in starred[name[sample_results]] begin[:] if compare[call[name[sample_result]][constant[name]] equal[==] name[sample].name] begin[:] call[name[sample]][name[self].analysistype].faidict assign[=] call[name[sample_result]][constant[faidict]] call[name[sample]][name[self].analysistype].results assign[=] call[name[sample_result]][constant[results]] call[name[sample]][name[self].analysistype].avgdepth assign[=] call[name[sample_result]][constant[avgdepth]] call[name[sample]][name[self].analysistype].resultssnp assign[=] call[name[sample_result]][constant[resultssnp]] call[name[sample]][name[self].analysistype].snplocations assign[=] call[name[sample_result]][constant[snplocations]] call[name[sample]][name[self].analysistype].resultsgap assign[=] call[name[sample_result]][constant[resultsgap]] call[name[sample]][name[self].analysistype].gaplocations assign[=] call[name[sample_result]][constant[gaplocations]] call[name[sample]][name[self].analysistype].sequences assign[=] call[name[sample_result]][constant[sequences]] call[name[sample]][name[self].analysistype].maxcoverage assign[=] call[name[sample_result]][constant[maxcoverage]] call[name[sample]][name[self].analysistype].mincoverage assign[=] call[name[sample_result]][constant[mincoverage]] call[name[sample]][name[self].analysistype].standarddev assign[=] call[name[sample_result]][constant[standarddev]] call[name[logging].info, parameter[constant[Done parsing BAM files]]]
keyword[def] identifier[parsebam] ( identifier[self] ): literal[string] identifier[logging] . identifier[info] ( literal[string] ) identifier[json_files] = identifier[list] () keyword[with] identifier[tempfile] . identifier[TemporaryDirectory] () keyword[as] identifier[tmpdir] : identifier[best_assemblies] = identifier[list] () identifier[sample_names] = identifier[list] () keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[runmetadata] : identifier[json_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmpdir] , literal[string] . identifier[format] ( identifier[sn] = identifier[sample] . identifier[name] )) identifier[best_assemblies] . identifier[append] ( identifier[sample] . identifier[general] . identifier[bestassemblyfile] ) identifier[sample_names] . identifier[append] ( identifier[sample] . identifier[name] ) keyword[with] identifier[open] ( identifier[json_name] , literal[string] ) keyword[as] identifier[f] : identifier[json] . identifier[dump] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[dump] (), identifier[f] , identifier[sort_keys] = keyword[True] , identifier[indent] = literal[int] ) identifier[json_files] . identifier[append] ( identifier[json_name] ) identifier[p] = identifier[multiprocessing] . identifier[Pool] ( identifier[processes] = identifier[self] . identifier[cpus] ) identifier[analysis_type_list] =[ identifier[self] . identifier[analysistype] ]* identifier[len] ( identifier[self] . identifier[runmetadata] ) identifier[iupac_list] =[ identifier[self] . identifier[iupac] ]* identifier[len] ( identifier[self] . identifier[runmetadata] ) identifier[cutoff_list] =[ identifier[self] . identifier[cutoff] ]* identifier[len] ( identifier[self] . identifier[runmetadata] ) identifier[depth_list] =[ identifier[self] . identifier[averagedepth] ]* identifier[len] ( identifier[self] . identifier[runmetadata] ) identifier[allow_soft_clip_list] =[ identifier[self] . identifier[allow_soft_clips] ]* identifier[len] ( identifier[self] . identifier[runmetadata] ) identifier[sample_results] = identifier[p] . identifier[starmap] ( identifier[Sippr] . identifier[parse_one_sample] , identifier[zip] ( identifier[json_files] , identifier[sample_names] , identifier[best_assemblies] , identifier[analysis_type_list] , identifier[iupac_list] , identifier[cutoff_list] , identifier[depth_list] , identifier[allow_soft_clip_list] )) identifier[p] . identifier[close] () identifier[p] . identifier[join] () keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[runmetadata] : identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[faidict] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[results] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[avgdepth] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[resultssnp] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[snplocations] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[resultsgap] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[gaplocations] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[sequences] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[maxcoverage] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[mincoverage] = identifier[dict] () identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[standarddev] = identifier[dict] () keyword[for] identifier[sample_result] keyword[in] identifier[sample_results] : keyword[if] identifier[sample_result] [ literal[string] ]== identifier[sample] . identifier[name] : identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[faidict] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[results] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[avgdepth] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[resultssnp] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[snplocations] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[resultsgap] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[gaplocations] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[sequences] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[maxcoverage] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[mincoverage] = identifier[sample_result] [ literal[string] ] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[standarddev] = identifier[sample_result] [ literal[string] ] identifier[logging] . identifier[info] ( literal[string] )
def parsebam(self): """ Parse the dictionaries of the sorted bam files extracted using pysam """ # Threading is actually the worst - need multiprocessing to make this work at all logging.info('Parsing BAM files') # The sample objects are too big to get pickled. To hack our way around this, try to dump the sample object to # json, and have the processing function turn the object into a dictionary. json_files = list() with tempfile.TemporaryDirectory() as tmpdir: best_assemblies = list() sample_names = list() for sample in self.runmetadata: json_name = os.path.join(tmpdir, '{sn}.json'.format(sn=sample.name)) best_assemblies.append(sample.general.bestassemblyfile) sample_names.append(sample.name) with open(json_name, 'w') as f: json.dump(sample[self.analysistype].dump(), f, sort_keys=True, indent=4) # depends on [control=['with'], data=['f']] json_files.append(json_name) # depends on [control=['for'], data=['sample']] p = multiprocessing.Pool(processes=self.cpus) analysis_type_list = [self.analysistype] * len(self.runmetadata) iupac_list = [self.iupac] * len(self.runmetadata) cutoff_list = [self.cutoff] * len(self.runmetadata) depth_list = [self.averagedepth] * len(self.runmetadata) allow_soft_clip_list = [self.allow_soft_clips] * len(self.runmetadata) sample_results = p.starmap(Sippr.parse_one_sample, zip(json_files, sample_names, best_assemblies, analysis_type_list, iupac_list, cutoff_list, depth_list, allow_soft_clip_list)) p.close() p.join() # depends on [control=['with'], data=['tmpdir']] # Since we had to json-ize the sample objects, we now need to update the metadata for everything. for sample in self.runmetadata: sample[self.analysistype].faidict = dict() sample[self.analysistype].results = dict() sample[self.analysistype].avgdepth = dict() sample[self.analysistype].resultssnp = dict() sample[self.analysistype].snplocations = dict() sample[self.analysistype].resultsgap = dict() sample[self.analysistype].gaplocations = dict() sample[self.analysistype].sequences = dict() sample[self.analysistype].maxcoverage = dict() sample[self.analysistype].mincoverage = dict() sample[self.analysistype].standarddev = dict() # Figure out which of the sample results to use. for sample_result in sample_results: if sample_result['name'] == sample.name: sample[self.analysistype].faidict = sample_result['faidict'] sample[self.analysistype].results = sample_result['results'] sample[self.analysistype].avgdepth = sample_result['avgdepth'] sample[self.analysistype].resultssnp = sample_result['resultssnp'] sample[self.analysistype].snplocations = sample_result['snplocations'] sample[self.analysistype].resultsgap = sample_result['resultsgap'] sample[self.analysistype].gaplocations = sample_result['gaplocations'] sample[self.analysistype].sequences = sample_result['sequences'] sample[self.analysistype].maxcoverage = sample_result['maxcoverage'] sample[self.analysistype].mincoverage = sample_result['mincoverage'] sample[self.analysistype].standarddev = sample_result['standarddev'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sample_result']] # depends on [control=['for'], data=['sample']] logging.info('Done parsing BAM files')
def pop(self): """Return the popped value. Raise KeyError if empty.""" it = iter(self) try: value = next(it) except StopIteration: raise KeyError self.discard(value) return value
def function[pop, parameter[self]]: constant[Return the popped value. Raise KeyError if empty.] variable[it] assign[=] call[name[iter], parameter[name[self]]] <ast.Try object at 0x7da1b2344c10> call[name[self].discard, parameter[name[value]]] return[name[value]]
keyword[def] identifier[pop] ( identifier[self] ): literal[string] identifier[it] = identifier[iter] ( identifier[self] ) keyword[try] : identifier[value] = identifier[next] ( identifier[it] ) keyword[except] identifier[StopIteration] : keyword[raise] identifier[KeyError] identifier[self] . identifier[discard] ( identifier[value] ) keyword[return] identifier[value]
def pop(self): """Return the popped value. Raise KeyError if empty.""" it = iter(self) try: value = next(it) # depends on [control=['try'], data=[]] except StopIteration: raise KeyError # depends on [control=['except'], data=[]] self.discard(value) return value
def xatom(self, atom): """(atom)->return the atom at the other end of this bond or None if atom is not part of this bond""" handle = atom.handle if handle == self.atoms[0].handle: return self.atoms[1] elif handle == self.atoms[1].handle: return self.atoms[0] return None
def function[xatom, parameter[self, atom]]: constant[(atom)->return the atom at the other end of this bond or None if atom is not part of this bond] variable[handle] assign[=] name[atom].handle if compare[name[handle] equal[==] call[name[self].atoms][constant[0]].handle] begin[:] return[call[name[self].atoms][constant[1]]] return[constant[None]]
keyword[def] identifier[xatom] ( identifier[self] , identifier[atom] ): literal[string] identifier[handle] = identifier[atom] . identifier[handle] keyword[if] identifier[handle] == identifier[self] . identifier[atoms] [ literal[int] ]. identifier[handle] : keyword[return] identifier[self] . identifier[atoms] [ literal[int] ] keyword[elif] identifier[handle] == identifier[self] . identifier[atoms] [ literal[int] ]. identifier[handle] : keyword[return] identifier[self] . identifier[atoms] [ literal[int] ] keyword[return] keyword[None]
def xatom(self, atom): """(atom)->return the atom at the other end of this bond or None if atom is not part of this bond""" handle = atom.handle if handle == self.atoms[0].handle: return self.atoms[1] # depends on [control=['if'], data=[]] elif handle == self.atoms[1].handle: return self.atoms[0] # depends on [control=['if'], data=[]] return None
def _parse_version(version): """ Parse a version string. Args: version (str): A string representing a version e.g. '1.9rc2' Returns: tuple: major, minor, patch parts cast as integer and whether or not it was a pre-release version. """ parsed_version = parse_version(version) return tuple( int(dot_version) for dot_version in parsed_version.base_version.split('.') ) + (parsed_version.is_prerelease,)
def function[_parse_version, parameter[version]]: constant[ Parse a version string. Args: version (str): A string representing a version e.g. '1.9rc2' Returns: tuple: major, minor, patch parts cast as integer and whether or not it was a pre-release version. ] variable[parsed_version] assign[=] call[name[parse_version], parameter[name[version]]] return[binary_operation[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20c6a8d30>]] + tuple[[<ast.Attribute object at 0x7da20c6a8c40>]]]]
keyword[def] identifier[_parse_version] ( identifier[version] ): literal[string] identifier[parsed_version] = identifier[parse_version] ( identifier[version] ) keyword[return] identifier[tuple] ( identifier[int] ( identifier[dot_version] ) keyword[for] identifier[dot_version] keyword[in] identifier[parsed_version] . identifier[base_version] . identifier[split] ( literal[string] ) )+( identifier[parsed_version] . identifier[is_prerelease] ,)
def _parse_version(version): """ Parse a version string. Args: version (str): A string representing a version e.g. '1.9rc2' Returns: tuple: major, minor, patch parts cast as integer and whether or not it was a pre-release version. """ parsed_version = parse_version(version) return tuple((int(dot_version) for dot_version in parsed_version.base_version.split('.'))) + (parsed_version.is_prerelease,)
def fcoe_fcoe_fabric_map_fcoe_fabric_map_vlan(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe") fcoe_fabric_map = ET.SubElement(fcoe, "fcoe-fabric-map") fcoe_fabric_map_name_key = ET.SubElement(fcoe_fabric_map, "fcoe-fabric-map-name") fcoe_fabric_map_name_key.text = kwargs.pop('fcoe_fabric_map_name') fcoe_fabric_map_vlan = ET.SubElement(fcoe_fabric_map, "fcoe-fabric-map-vlan") fcoe_fabric_map_vlan.text = kwargs.pop('fcoe_fabric_map_vlan') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[fcoe_fcoe_fabric_map_fcoe_fabric_map_vlan, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[fcoe] assign[=] call[name[ET].SubElement, parameter[name[config], constant[fcoe]]] variable[fcoe_fabric_map] assign[=] call[name[ET].SubElement, parameter[name[fcoe], constant[fcoe-fabric-map]]] variable[fcoe_fabric_map_name_key] assign[=] call[name[ET].SubElement, parameter[name[fcoe_fabric_map], constant[fcoe-fabric-map-name]]] name[fcoe_fabric_map_name_key].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_fabric_map_name]]] variable[fcoe_fabric_map_vlan] assign[=] call[name[ET].SubElement, parameter[name[fcoe_fabric_map], constant[fcoe-fabric-map-vlan]]] name[fcoe_fabric_map_vlan].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_fabric_map_vlan]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[fcoe_fcoe_fabric_map_fcoe_fabric_map_vlan] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[fcoe] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[fcoe_fabric_map] = identifier[ET] . identifier[SubElement] ( identifier[fcoe] , literal[string] ) identifier[fcoe_fabric_map_name_key] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_fabric_map] , literal[string] ) identifier[fcoe_fabric_map_name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[fcoe_fabric_map_vlan] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_fabric_map] , literal[string] ) identifier[fcoe_fabric_map_vlan] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def fcoe_fcoe_fabric_map_fcoe_fabric_map_vlan(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') fcoe = ET.SubElement(config, 'fcoe', xmlns='urn:brocade.com:mgmt:brocade-fcoe') fcoe_fabric_map = ET.SubElement(fcoe, 'fcoe-fabric-map') fcoe_fabric_map_name_key = ET.SubElement(fcoe_fabric_map, 'fcoe-fabric-map-name') fcoe_fabric_map_name_key.text = kwargs.pop('fcoe_fabric_map_name') fcoe_fabric_map_vlan = ET.SubElement(fcoe_fabric_map, 'fcoe-fabric-map-vlan') fcoe_fabric_map_vlan.text = kwargs.pop('fcoe_fabric_map_vlan') callback = kwargs.pop('callback', self._callback) return callback(config)
def __get_yubico_users(username): ''' Grab the YubiKey Client ID & Secret Key ''' user = {} try: if __opts__['yubico_users'].get(username, None): (user['id'], user['key']) = list(__opts__['yubico_users'][username].values()) else: return None except KeyError: return None return user
def function[__get_yubico_users, parameter[username]]: constant[ Grab the YubiKey Client ID & Secret Key ] variable[user] assign[=] dictionary[[], []] <ast.Try object at 0x7da1b1c64f10> return[name[user]]
keyword[def] identifier[__get_yubico_users] ( identifier[username] ): literal[string] identifier[user] ={} keyword[try] : keyword[if] identifier[__opts__] [ literal[string] ]. identifier[get] ( identifier[username] , keyword[None] ): ( identifier[user] [ literal[string] ], identifier[user] [ literal[string] ])= identifier[list] ( identifier[__opts__] [ literal[string] ][ identifier[username] ]. identifier[values] ()) keyword[else] : keyword[return] keyword[None] keyword[except] identifier[KeyError] : keyword[return] keyword[None] keyword[return] identifier[user]
def __get_yubico_users(username): """ Grab the YubiKey Client ID & Secret Key """ user = {} try: if __opts__['yubico_users'].get(username, None): (user['id'], user['key']) = list(__opts__['yubico_users'][username].values()) # depends on [control=['if'], data=[]] else: return None # depends on [control=['try'], data=[]] except KeyError: return None # depends on [control=['except'], data=[]] return user
def getRootElement(self): """Get the root element of the document (doc->children is a list containing possibly comments, PIs, etc ...). """ ret = libxml2mod.xmlDocGetRootElement(self._o) if ret is None:raise treeError('xmlDocGetRootElement() failed') __tmp = xmlNode(_obj=ret) return __tmp
def function[getRootElement, parameter[self]]: constant[Get the root element of the document (doc->children is a list containing possibly comments, PIs, etc ...). ] variable[ret] assign[=] call[name[libxml2mod].xmlDocGetRootElement, parameter[name[self]._o]] if compare[name[ret] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1fa7ca0> variable[__tmp] assign[=] call[name[xmlNode], parameter[]] return[name[__tmp]]
keyword[def] identifier[getRootElement] ( identifier[self] ): literal[string] identifier[ret] = identifier[libxml2mod] . identifier[xmlDocGetRootElement] ( identifier[self] . identifier[_o] ) keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] ) identifier[__tmp] = identifier[xmlNode] ( identifier[_obj] = identifier[ret] ) keyword[return] identifier[__tmp]
def getRootElement(self): """Get the root element of the document (doc->children is a list containing possibly comments, PIs, etc ...). """ ret = libxml2mod.xmlDocGetRootElement(self._o) if ret is None: raise treeError('xmlDocGetRootElement() failed') # depends on [control=['if'], data=[]] __tmp = xmlNode(_obj=ret) return __tmp
def merge_datetime(date, time='', date_format='%d/%m/%Y', time_format='%H:%M'): """Create ``datetime`` object from date and time strings.""" day = datetime.strptime(date, date_format) if time: time = datetime.strptime(time, time_format) time = datetime.time(time) day = datetime.date(day) day = datetime.combine(day, time) return day
def function[merge_datetime, parameter[date, time, date_format, time_format]]: constant[Create ``datetime`` object from date and time strings.] variable[day] assign[=] call[name[datetime].strptime, parameter[name[date], name[date_format]]] if name[time] begin[:] variable[time] assign[=] call[name[datetime].strptime, parameter[name[time], name[time_format]]] variable[time] assign[=] call[name[datetime].time, parameter[name[time]]] variable[day] assign[=] call[name[datetime].date, parameter[name[day]]] variable[day] assign[=] call[name[datetime].combine, parameter[name[day], name[time]]] return[name[day]]
keyword[def] identifier[merge_datetime] ( identifier[date] , identifier[time] = literal[string] , identifier[date_format] = literal[string] , identifier[time_format] = literal[string] ): literal[string] identifier[day] = identifier[datetime] . identifier[strptime] ( identifier[date] , identifier[date_format] ) keyword[if] identifier[time] : identifier[time] = identifier[datetime] . identifier[strptime] ( identifier[time] , identifier[time_format] ) identifier[time] = identifier[datetime] . identifier[time] ( identifier[time] ) identifier[day] = identifier[datetime] . identifier[date] ( identifier[day] ) identifier[day] = identifier[datetime] . identifier[combine] ( identifier[day] , identifier[time] ) keyword[return] identifier[day]
def merge_datetime(date, time='', date_format='%d/%m/%Y', time_format='%H:%M'): """Create ``datetime`` object from date and time strings.""" day = datetime.strptime(date, date_format) if time: time = datetime.strptime(time, time_format) time = datetime.time(time) day = datetime.date(day) day = datetime.combine(day, time) # depends on [control=['if'], data=[]] return day
def ensure_tuple(obj): """Try and make the given argument into a tuple.""" if obj is None: return tuple() if isinstance(obj, Iterable) and not isinstance(obj, six.string_types): return tuple(obj) return obj,
def function[ensure_tuple, parameter[obj]]: constant[Try and make the given argument into a tuple.] if compare[name[obj] is constant[None]] begin[:] return[call[name[tuple], parameter[]]] if <ast.BoolOp object at 0x7da20c7cbc10> begin[:] return[call[name[tuple], parameter[name[obj]]]] return[tuple[[<ast.Name object at 0x7da20c7caa40>]]]
keyword[def] identifier[ensure_tuple] ( identifier[obj] ): literal[string] keyword[if] identifier[obj] keyword[is] keyword[None] : keyword[return] identifier[tuple] () keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Iterable] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[six] . identifier[string_types] ): keyword[return] identifier[tuple] ( identifier[obj] ) keyword[return] identifier[obj] ,
def ensure_tuple(obj): """Try and make the given argument into a tuple.""" if obj is None: return tuple() # depends on [control=['if'], data=[]] if isinstance(obj, Iterable) and (not isinstance(obj, six.string_types)): return tuple(obj) # depends on [control=['if'], data=[]] return (obj,)
def add_request(self, request): """Add a request to the queue. Args: request (:class:`nyawc.http.Request`): The request to add. Returns: :class:`nyawc.QueueItem`: The created queue item. """ queue_item = QueueItem(request, Response(request.url)) self.add(queue_item) return queue_item
def function[add_request, parameter[self, request]]: constant[Add a request to the queue. Args: request (:class:`nyawc.http.Request`): The request to add. Returns: :class:`nyawc.QueueItem`: The created queue item. ] variable[queue_item] assign[=] call[name[QueueItem], parameter[name[request], call[name[Response], parameter[name[request].url]]]] call[name[self].add, parameter[name[queue_item]]] return[name[queue_item]]
keyword[def] identifier[add_request] ( identifier[self] , identifier[request] ): literal[string] identifier[queue_item] = identifier[QueueItem] ( identifier[request] , identifier[Response] ( identifier[request] . identifier[url] )) identifier[self] . identifier[add] ( identifier[queue_item] ) keyword[return] identifier[queue_item]
def add_request(self, request): """Add a request to the queue. Args: request (:class:`nyawc.http.Request`): The request to add. Returns: :class:`nyawc.QueueItem`: The created queue item. """ queue_item = QueueItem(request, Response(request.url)) self.add(queue_item) return queue_item
def cmd(self, args=None, interact=True): """Process command-line arguments.""" if args is None: parsed_args = arguments.parse_args() else: parsed_args = arguments.parse_args(args) self.exit_code = 0 with self.handling_exceptions(): self.use_args(parsed_args, interact, original_args=args) self.exit_on_error()
def function[cmd, parameter[self, args, interact]]: constant[Process command-line arguments.] if compare[name[args] is constant[None]] begin[:] variable[parsed_args] assign[=] call[name[arguments].parse_args, parameter[]] name[self].exit_code assign[=] constant[0] with call[name[self].handling_exceptions, parameter[]] begin[:] call[name[self].use_args, parameter[name[parsed_args], name[interact]]] call[name[self].exit_on_error, parameter[]]
keyword[def] identifier[cmd] ( identifier[self] , identifier[args] = keyword[None] , identifier[interact] = keyword[True] ): literal[string] keyword[if] identifier[args] keyword[is] keyword[None] : identifier[parsed_args] = identifier[arguments] . identifier[parse_args] () keyword[else] : identifier[parsed_args] = identifier[arguments] . identifier[parse_args] ( identifier[args] ) identifier[self] . identifier[exit_code] = literal[int] keyword[with] identifier[self] . identifier[handling_exceptions] (): identifier[self] . identifier[use_args] ( identifier[parsed_args] , identifier[interact] , identifier[original_args] = identifier[args] ) identifier[self] . identifier[exit_on_error] ()
def cmd(self, args=None, interact=True): """Process command-line arguments.""" if args is None: parsed_args = arguments.parse_args() # depends on [control=['if'], data=[]] else: parsed_args = arguments.parse_args(args) self.exit_code = 0 with self.handling_exceptions(): self.use_args(parsed_args, interact, original_args=args) # depends on [control=['with'], data=[]] self.exit_on_error()
def confd_state_ha_mode(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") ha = ET.SubElement(confd_state, "ha") mode = ET.SubElement(ha, "mode") mode.text = kwargs.pop('mode') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[confd_state_ha_mode, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[confd_state] assign[=] call[name[ET].SubElement, parameter[name[config], constant[confd-state]]] variable[ha] assign[=] call[name[ET].SubElement, parameter[name[confd_state], constant[ha]]] variable[mode] assign[=] call[name[ET].SubElement, parameter[name[ha], constant[mode]]] name[mode].text assign[=] call[name[kwargs].pop, parameter[constant[mode]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[confd_state_ha_mode] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[confd_state] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[ha] = identifier[ET] . identifier[SubElement] ( identifier[confd_state] , literal[string] ) identifier[mode] = identifier[ET] . identifier[SubElement] ( identifier[ha] , literal[string] ) identifier[mode] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def confd_state_ha_mode(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') confd_state = ET.SubElement(config, 'confd-state', xmlns='http://tail-f.com/yang/confd-monitoring') ha = ET.SubElement(confd_state, 'ha') mode = ET.SubElement(ha, 'mode') mode.text = kwargs.pop('mode') callback = kwargs.pop('callback', self._callback) return callback(config)
def plot(self, figure=None): """Main plotter""" sns.set_style("whitegrid") sns.set_context("paper", font_scale=0.8) if figure is None: figure = plt.gcf() nconfounds = len(self.confounds) nspikes = len(self.spikes) nrows = 1 + nconfounds + nspikes # Create grid grid = mgs.GridSpec(nrows, 1, wspace=0.0, hspace=0.05, height_ratios=[1] * (nrows - 1) + [5]) grid_id = 0 for tsz, name, iszs in self.spikes: spikesplot(tsz, title=name, outer_gs=grid[grid_id], tr=self.tr, zscored=iszs) grid_id += 1 if self.confounds: palette = color_palette("husl", nconfounds) for i, (name, kwargs) in enumerate(self.confounds.items()): tseries = kwargs.pop('values') confoundplot( tseries, grid[grid_id], tr=self.tr, color=palette[i], name=name, **kwargs) grid_id += 1 plot_carpet(self.func_file, self.seg_data, subplot=grid[-1], tr=self.tr) # spikesplot_cb([0.7, 0.78, 0.2, 0.008]) return figure
def function[plot, parameter[self, figure]]: constant[Main plotter] call[name[sns].set_style, parameter[constant[whitegrid]]] call[name[sns].set_context, parameter[constant[paper]]] if compare[name[figure] is constant[None]] begin[:] variable[figure] assign[=] call[name[plt].gcf, parameter[]] variable[nconfounds] assign[=] call[name[len], parameter[name[self].confounds]] variable[nspikes] assign[=] call[name[len], parameter[name[self].spikes]] variable[nrows] assign[=] binary_operation[binary_operation[constant[1] + name[nconfounds]] + name[nspikes]] variable[grid] assign[=] call[name[mgs].GridSpec, parameter[name[nrows], constant[1]]] variable[grid_id] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da1b0608820>, <ast.Name object at 0x7da1b0608b80>, <ast.Name object at 0x7da1b06084f0>]]] in starred[name[self].spikes] begin[:] call[name[spikesplot], parameter[name[tsz]]] <ast.AugAssign object at 0x7da1b0608460> if name[self].confounds begin[:] variable[palette] assign[=] call[name[color_palette], parameter[constant[husl], name[nconfounds]]] for taget[tuple[[<ast.Name object at 0x7da1b060ad10>, <ast.Tuple object at 0x7da1b060ab90>]]] in starred[call[name[enumerate], parameter[call[name[self].confounds.items, parameter[]]]]] begin[:] variable[tseries] assign[=] call[name[kwargs].pop, parameter[constant[values]]] call[name[confoundplot], parameter[name[tseries], call[name[grid]][name[grid_id]]]] <ast.AugAssign object at 0x7da204961c60> call[name[plot_carpet], parameter[name[self].func_file, name[self].seg_data]] return[name[figure]]
keyword[def] identifier[plot] ( identifier[self] , identifier[figure] = keyword[None] ): literal[string] identifier[sns] . identifier[set_style] ( literal[string] ) identifier[sns] . identifier[set_context] ( literal[string] , identifier[font_scale] = literal[int] ) keyword[if] identifier[figure] keyword[is] keyword[None] : identifier[figure] = identifier[plt] . identifier[gcf] () identifier[nconfounds] = identifier[len] ( identifier[self] . identifier[confounds] ) identifier[nspikes] = identifier[len] ( identifier[self] . identifier[spikes] ) identifier[nrows] = literal[int] + identifier[nconfounds] + identifier[nspikes] identifier[grid] = identifier[mgs] . identifier[GridSpec] ( identifier[nrows] , literal[int] , identifier[wspace] = literal[int] , identifier[hspace] = literal[int] , identifier[height_ratios] =[ literal[int] ]*( identifier[nrows] - literal[int] )+[ literal[int] ]) identifier[grid_id] = literal[int] keyword[for] identifier[tsz] , identifier[name] , identifier[iszs] keyword[in] identifier[self] . identifier[spikes] : identifier[spikesplot] ( identifier[tsz] , identifier[title] = identifier[name] , identifier[outer_gs] = identifier[grid] [ identifier[grid_id] ], identifier[tr] = identifier[self] . identifier[tr] , identifier[zscored] = identifier[iszs] ) identifier[grid_id] += literal[int] keyword[if] identifier[self] . identifier[confounds] : identifier[palette] = identifier[color_palette] ( literal[string] , identifier[nconfounds] ) keyword[for] identifier[i] ,( identifier[name] , identifier[kwargs] ) keyword[in] identifier[enumerate] ( identifier[self] . identifier[confounds] . identifier[items] ()): identifier[tseries] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[confoundplot] ( identifier[tseries] , identifier[grid] [ identifier[grid_id] ], identifier[tr] = identifier[self] . identifier[tr] , identifier[color] = identifier[palette] [ identifier[i] ], identifier[name] = identifier[name] ,** identifier[kwargs] ) identifier[grid_id] += literal[int] identifier[plot_carpet] ( identifier[self] . identifier[func_file] , identifier[self] . identifier[seg_data] , identifier[subplot] = identifier[grid] [- literal[int] ], identifier[tr] = identifier[self] . identifier[tr] ) keyword[return] identifier[figure]
def plot(self, figure=None): """Main plotter""" sns.set_style('whitegrid') sns.set_context('paper', font_scale=0.8) if figure is None: figure = plt.gcf() # depends on [control=['if'], data=['figure']] nconfounds = len(self.confounds) nspikes = len(self.spikes) nrows = 1 + nconfounds + nspikes # Create grid grid = mgs.GridSpec(nrows, 1, wspace=0.0, hspace=0.05, height_ratios=[1] * (nrows - 1) + [5]) grid_id = 0 for (tsz, name, iszs) in self.spikes: spikesplot(tsz, title=name, outer_gs=grid[grid_id], tr=self.tr, zscored=iszs) grid_id += 1 # depends on [control=['for'], data=[]] if self.confounds: palette = color_palette('husl', nconfounds) # depends on [control=['if'], data=[]] for (i, (name, kwargs)) in enumerate(self.confounds.items()): tseries = kwargs.pop('values') confoundplot(tseries, grid[grid_id], tr=self.tr, color=palette[i], name=name, **kwargs) grid_id += 1 # depends on [control=['for'], data=[]] plot_carpet(self.func_file, self.seg_data, subplot=grid[-1], tr=self.tr) # spikesplot_cb([0.7, 0.78, 0.2, 0.008]) return figure
def _handleInvertAxesSelected(self, evt): """Called when the invert all menu item is selected""" if len(self._axisId) == 0: return for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): self._menu.Check(self._axisId[i], False) else: self._menu.Check(self._axisId[i], True) self._toolbar.set_active(self.getActiveAxes()) evt.Skip()
def function[_handleInvertAxesSelected, parameter[self, evt]]: constant[Called when the invert all menu item is selected] if compare[call[name[len], parameter[name[self]._axisId]] equal[==] constant[0]] begin[:] return[None] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]._axisId]]]]] begin[:] if call[name[self]._menu.IsChecked, parameter[call[name[self]._axisId][name[i]]]] begin[:] call[name[self]._menu.Check, parameter[call[name[self]._axisId][name[i]], constant[False]]] call[name[self]._toolbar.set_active, parameter[call[name[self].getActiveAxes, parameter[]]]] call[name[evt].Skip, parameter[]]
keyword[def] identifier[_handleInvertAxesSelected] ( identifier[self] , identifier[evt] ): literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[_axisId] )== literal[int] : keyword[return] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[_axisId] )): keyword[if] identifier[self] . identifier[_menu] . identifier[IsChecked] ( identifier[self] . identifier[_axisId] [ identifier[i] ]): identifier[self] . identifier[_menu] . identifier[Check] ( identifier[self] . identifier[_axisId] [ identifier[i] ], keyword[False] ) keyword[else] : identifier[self] . identifier[_menu] . identifier[Check] ( identifier[self] . identifier[_axisId] [ identifier[i] ], keyword[True] ) identifier[self] . identifier[_toolbar] . identifier[set_active] ( identifier[self] . identifier[getActiveAxes] ()) identifier[evt] . identifier[Skip] ()
def _handleInvertAxesSelected(self, evt): """Called when the invert all menu item is selected""" if len(self._axisId) == 0: return # depends on [control=['if'], data=[]] for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): self._menu.Check(self._axisId[i], False) # depends on [control=['if'], data=[]] else: self._menu.Check(self._axisId[i], True) # depends on [control=['for'], data=['i']] self._toolbar.set_active(self.getActiveAxes()) evt.Skip()
def _cosmoid_request(self, resource, cosmoid, **kwargs): """ Maps to the Generic API method for requests who's only parameter is ``cosmoid`` """ params = { 'cosmoid': cosmoid, } params.update(kwargs) return self.make_request(resource, params)
def function[_cosmoid_request, parameter[self, resource, cosmoid]]: constant[ Maps to the Generic API method for requests who's only parameter is ``cosmoid`` ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2047e9150>], [<ast.Name object at 0x7da2047eabf0>]] call[name[params].update, parameter[name[kwargs]]] return[call[name[self].make_request, parameter[name[resource], name[params]]]]
keyword[def] identifier[_cosmoid_request] ( identifier[self] , identifier[resource] , identifier[cosmoid] ,** identifier[kwargs] ): literal[string] identifier[params] ={ literal[string] : identifier[cosmoid] , } identifier[params] . identifier[update] ( identifier[kwargs] ) keyword[return] identifier[self] . identifier[make_request] ( identifier[resource] , identifier[params] )
def _cosmoid_request(self, resource, cosmoid, **kwargs): """ Maps to the Generic API method for requests who's only parameter is ``cosmoid`` """ params = {'cosmoid': cosmoid} params.update(kwargs) return self.make_request(resource, params)
def path(i): """ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 Output from from 'detect_cid_in_current_path' function } """ o=i.get('out','') r=detect_cid_in_current_path(i) if r['return']>0: return r rx=convert_entry_to_cid(r) if rx['return']>0: return rx cuoa=rx['cuoa'] cid=rx['cid'] xcuoa=rx['xcuoa'] xcid=rx['xcid'] # If console, print CIDs if o=='con': out(cuoa) out(cid) out(xcuoa) out(xcid) return r
def function[path, parameter[i]]: constant[ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 Output from from 'detect_cid_in_current_path' function } ] variable[o] assign[=] call[name[i].get, parameter[constant[out], constant[]]] variable[r] assign[=] call[name[detect_cid_in_current_path], parameter[name[i]]] if compare[call[name[r]][constant[return]] greater[>] constant[0]] begin[:] return[name[r]] variable[rx] assign[=] call[name[convert_entry_to_cid], parameter[name[r]]] if compare[call[name[rx]][constant[return]] greater[>] constant[0]] begin[:] return[name[rx]] variable[cuoa] assign[=] call[name[rx]][constant[cuoa]] variable[cid] assign[=] call[name[rx]][constant[cid]] variable[xcuoa] assign[=] call[name[rx]][constant[xcuoa]] variable[xcid] assign[=] call[name[rx]][constant[xcid]] if compare[name[o] equal[==] constant[con]] begin[:] call[name[out], parameter[name[cuoa]]] call[name[out], parameter[name[cid]]] call[name[out], parameter[name[xcuoa]]] call[name[out], parameter[name[xcid]]] return[name[r]]
keyword[def] identifier[path] ( identifier[i] ): literal[string] identifier[o] = identifier[i] . identifier[get] ( literal[string] , literal[string] ) identifier[r] = identifier[detect_cid_in_current_path] ( identifier[i] ) keyword[if] identifier[r] [ literal[string] ]> literal[int] : keyword[return] identifier[r] identifier[rx] = identifier[convert_entry_to_cid] ( identifier[r] ) keyword[if] identifier[rx] [ literal[string] ]> literal[int] : keyword[return] identifier[rx] identifier[cuoa] = identifier[rx] [ literal[string] ] identifier[cid] = identifier[rx] [ literal[string] ] identifier[xcuoa] = identifier[rx] [ literal[string] ] identifier[xcid] = identifier[rx] [ literal[string] ] keyword[if] identifier[o] == literal[string] : identifier[out] ( identifier[cuoa] ) identifier[out] ( identifier[cid] ) identifier[out] ( identifier[xcuoa] ) identifier[out] ( identifier[xcid] ) keyword[return] identifier[r]
def path(i): """ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 Output from from 'detect_cid_in_current_path' function } """ o = i.get('out', '') r = detect_cid_in_current_path(i) if r['return'] > 0: return r # depends on [control=['if'], data=[]] rx = convert_entry_to_cid(r) if rx['return'] > 0: return rx # depends on [control=['if'], data=[]] cuoa = rx['cuoa'] cid = rx['cid'] xcuoa = rx['xcuoa'] xcid = rx['xcid'] # If console, print CIDs if o == 'con': out(cuoa) out(cid) out(xcuoa) out(xcid) # depends on [control=['if'], data=[]] return r
def irfs(self, **kwargs): """ Get the name of IFRs associted with a particular dataset """ dsval = kwargs.get('dataset', self.dataset(**kwargs)) tokens = dsval.split('_') irf_name = "%s_%s_%s" % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])], EVCLASS_NAME_DICTIONARY[tokens[3]], kwargs.get('irf_ver')) return irf_name
def function[irfs, parameter[self]]: constant[ Get the name of IFRs associted with a particular dataset ] variable[dsval] assign[=] call[name[kwargs].get, parameter[constant[dataset], call[name[self].dataset, parameter[]]]] variable[tokens] assign[=] call[name[dsval].split, parameter[constant[_]]] variable[irf_name] assign[=] binary_operation[constant[%s_%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18f58f280>, <ast.Subscript object at 0x7da18f58d2d0>, <ast.Call object at 0x7da18f58c610>]]] return[name[irf_name]]
keyword[def] identifier[irfs] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[dsval] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[dataset] (** identifier[kwargs] )) identifier[tokens] = identifier[dsval] . identifier[split] ( literal[string] ) identifier[irf_name] = literal[string] %( identifier[DATASET_DICTIONARY] [ literal[string] %( identifier[tokens] [ literal[int] ], identifier[tokens] [ literal[int] ])], identifier[EVCLASS_NAME_DICTIONARY] [ identifier[tokens] [ literal[int] ]], identifier[kwargs] . identifier[get] ( literal[string] )) keyword[return] identifier[irf_name]
def irfs(self, **kwargs): """ Get the name of IFRs associted with a particular dataset """ dsval = kwargs.get('dataset', self.dataset(**kwargs)) tokens = dsval.split('_') irf_name = '%s_%s_%s' % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])], EVCLASS_NAME_DICTIONARY[tokens[3]], kwargs.get('irf_ver')) return irf_name
def track_job(job_id): """ Tracking is done by requesting each job and then searching for whether the job has one of the following states: - "RUN", - "PEND", - "SSUSP", - "EXIT" based on the LSF documentation """ cmd = "bjobs -noheader -o stat {}".format(job_id) track_job_proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True) status = track_job_proc.communicate()[0].strip('\n') return status
def function[track_job, parameter[job_id]]: constant[ Tracking is done by requesting each job and then searching for whether the job has one of the following states: - "RUN", - "PEND", - "SSUSP", - "EXIT" based on the LSF documentation ] variable[cmd] assign[=] call[constant[bjobs -noheader -o stat {}].format, parameter[name[job_id]]] variable[track_job_proc] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]] variable[status] assign[=] call[call[call[name[track_job_proc].communicate, parameter[]]][constant[0]].strip, parameter[constant[ ]]] return[name[status]]
keyword[def] identifier[track_job] ( identifier[job_id] ): literal[string] identifier[cmd] = literal[string] . identifier[format] ( identifier[job_id] ) identifier[track_job_proc] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[shell] = keyword[True] ) identifier[status] = identifier[track_job_proc] . identifier[communicate] ()[ literal[int] ]. identifier[strip] ( literal[string] ) keyword[return] identifier[status]
def track_job(job_id): """ Tracking is done by requesting each job and then searching for whether the job has one of the following states: - "RUN", - "PEND", - "SSUSP", - "EXIT" based on the LSF documentation """ cmd = 'bjobs -noheader -o stat {}'.format(job_id) track_job_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) status = track_job_proc.communicate()[0].strip('\n') return status
def set_subparsers_args(self, *args, **kwargs): """ Sets args and kwargs that are passed when creating a subparsers group in an argparse.ArgumentParser i.e. when calling argparser.ArgumentParser.add_subparsers """ self.subparsers_args = args self.subparsers_kwargs = kwargs
def function[set_subparsers_args, parameter[self]]: constant[ Sets args and kwargs that are passed when creating a subparsers group in an argparse.ArgumentParser i.e. when calling argparser.ArgumentParser.add_subparsers ] name[self].subparsers_args assign[=] name[args] name[self].subparsers_kwargs assign[=] name[kwargs]
keyword[def] identifier[set_subparsers_args] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[subparsers_args] = identifier[args] identifier[self] . identifier[subparsers_kwargs] = identifier[kwargs]
def set_subparsers_args(self, *args, **kwargs): """ Sets args and kwargs that are passed when creating a subparsers group in an argparse.ArgumentParser i.e. when calling argparser.ArgumentParser.add_subparsers """ self.subparsers_args = args self.subparsers_kwargs = kwargs
def duration(t, now=None, precision=1, pad=', ', words=None, justnow=datetime.timedelta(seconds=10)): ''' Time delta compared to ``t``. You can override ``now`` to specify what time to compare to. :param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime` object :param now: default ``None``, optionally a :class:`datetime.datetime` object :param precision: default ``1``, number of fragments to return :param words: default ``None``, allow words like "yesterday", if set to ``None`` this will be enabled if ``precision`` is set to ``1`` :param justnow: default ``datetime.timedelta(seconds=10)``, :class:`datetime.timedelta` object passed to :func:`delta` representing tolerance for considering argument ``t`` as meaning 'just now' >>> import time >>> from datetime import datetime >>> print(duration(time.time() + 1)) just now >>> print(duration(time.time() + 11)) 11 seconds from now >>> print(duration(time.time() - 1)) just now >>> print(duration(time.time() - 11)) 11 seconds ago >>> print(duration(time.time() - 3601)) an hour ago >>> print(duration(time.time() - 7201)) 2 hours ago >>> print(duration(time.time() - 1234567)) 2 weeks ago >>> print(duration(time.time() + 7200, precision=1)) 2 hours from now >>> print(duration(time.time() - 1234567, precision=3)) 2 weeks, 6 hours, 56 minutes ago >>> print(duration(datetime(2014, 9, 8), now=datetime(2014, 9, 9))) yesterday >>> print(duration(datetime(2014, 9, 7, 23), now=datetime(2014, 9, 9))) 1 day ago >>> print(duration(datetime(2014, 9, 10), now=datetime(2014, 9, 9))) tomorrow >>> print(duration(datetime(2014, 9, 11, 1), now=datetime(2014, 9, 9, 23))) 1 day from now ''' if words is None: words = precision == 1 t1 = _to_datetime(t) t2 = _to_datetime(now or datetime.datetime.now()) if t1 < t2: format = _('%s ago') else: format = _('%s from now') result, remains = delta(t1, t2, words=words, justnow=justnow) if result in ( _('just now'), _('yesterday'), _('tomorrow'), _('last week'), _('next week'), ): return result elif precision > 1 and remains: t3 = t2 - datetime.timedelta(seconds=remains) return pad.join([ result, duration(t2, t3, precision - 1, pad, words=False), ]) else: return format % (result,)
def function[duration, parameter[t, now, precision, pad, words, justnow]]: constant[ Time delta compared to ``t``. You can override ``now`` to specify what time to compare to. :param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime` object :param now: default ``None``, optionally a :class:`datetime.datetime` object :param precision: default ``1``, number of fragments to return :param words: default ``None``, allow words like "yesterday", if set to ``None`` this will be enabled if ``precision`` is set to ``1`` :param justnow: default ``datetime.timedelta(seconds=10)``, :class:`datetime.timedelta` object passed to :func:`delta` representing tolerance for considering argument ``t`` as meaning 'just now' >>> import time >>> from datetime import datetime >>> print(duration(time.time() + 1)) just now >>> print(duration(time.time() + 11)) 11 seconds from now >>> print(duration(time.time() - 1)) just now >>> print(duration(time.time() - 11)) 11 seconds ago >>> print(duration(time.time() - 3601)) an hour ago >>> print(duration(time.time() - 7201)) 2 hours ago >>> print(duration(time.time() - 1234567)) 2 weeks ago >>> print(duration(time.time() + 7200, precision=1)) 2 hours from now >>> print(duration(time.time() - 1234567, precision=3)) 2 weeks, 6 hours, 56 minutes ago >>> print(duration(datetime(2014, 9, 8), now=datetime(2014, 9, 9))) yesterday >>> print(duration(datetime(2014, 9, 7, 23), now=datetime(2014, 9, 9))) 1 day ago >>> print(duration(datetime(2014, 9, 10), now=datetime(2014, 9, 9))) tomorrow >>> print(duration(datetime(2014, 9, 11, 1), now=datetime(2014, 9, 9, 23))) 1 day from now ] if compare[name[words] is constant[None]] begin[:] variable[words] assign[=] compare[name[precision] equal[==] constant[1]] variable[t1] assign[=] call[name[_to_datetime], parameter[name[t]]] variable[t2] assign[=] call[name[_to_datetime], parameter[<ast.BoolOp object at 0x7da1b24ad360>]] if compare[name[t1] less[<] name[t2]] begin[:] variable[format] assign[=] call[name[_], parameter[constant[%s ago]]] <ast.Tuple object at 0x7da1b24ac310> assign[=] call[name[delta], parameter[name[t1], name[t2]]] if compare[name[result] in tuple[[<ast.Call object at 0x7da1b24af220>, <ast.Call object at 0x7da1b24af310>, <ast.Call object at 0x7da1b24af370>, <ast.Call object at 0x7da1b24aec50>, <ast.Call object at 0x7da1b24aea70>]]] begin[:] return[name[result]]
keyword[def] identifier[duration] ( identifier[t] , identifier[now] = keyword[None] , identifier[precision] = literal[int] , identifier[pad] = literal[string] , identifier[words] = keyword[None] , identifier[justnow] = identifier[datetime] . identifier[timedelta] ( identifier[seconds] = literal[int] )): literal[string] keyword[if] identifier[words] keyword[is] keyword[None] : identifier[words] = identifier[precision] == literal[int] identifier[t1] = identifier[_to_datetime] ( identifier[t] ) identifier[t2] = identifier[_to_datetime] ( identifier[now] keyword[or] identifier[datetime] . identifier[datetime] . identifier[now] ()) keyword[if] identifier[t1] < identifier[t2] : identifier[format] = identifier[_] ( literal[string] ) keyword[else] : identifier[format] = identifier[_] ( literal[string] ) identifier[result] , identifier[remains] = identifier[delta] ( identifier[t1] , identifier[t2] , identifier[words] = identifier[words] , identifier[justnow] = identifier[justnow] ) keyword[if] identifier[result] keyword[in] ( identifier[_] ( literal[string] ), identifier[_] ( literal[string] ), identifier[_] ( literal[string] ), identifier[_] ( literal[string] ), identifier[_] ( literal[string] ), ): keyword[return] identifier[result] keyword[elif] identifier[precision] > literal[int] keyword[and] identifier[remains] : identifier[t3] = identifier[t2] - identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[remains] ) keyword[return] identifier[pad] . identifier[join] ([ identifier[result] , identifier[duration] ( identifier[t2] , identifier[t3] , identifier[precision] - literal[int] , identifier[pad] , identifier[words] = keyword[False] ), ]) keyword[else] : keyword[return] identifier[format] %( identifier[result] ,)
def duration(t, now=None, precision=1, pad=', ', words=None, justnow=datetime.timedelta(seconds=10)): """ Time delta compared to ``t``. You can override ``now`` to specify what time to compare to. :param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime` object :param now: default ``None``, optionally a :class:`datetime.datetime` object :param precision: default ``1``, number of fragments to return :param words: default ``None``, allow words like "yesterday", if set to ``None`` this will be enabled if ``precision`` is set to ``1`` :param justnow: default ``datetime.timedelta(seconds=10)``, :class:`datetime.timedelta` object passed to :func:`delta` representing tolerance for considering argument ``t`` as meaning 'just now' >>> import time >>> from datetime import datetime >>> print(duration(time.time() + 1)) just now >>> print(duration(time.time() + 11)) 11 seconds from now >>> print(duration(time.time() - 1)) just now >>> print(duration(time.time() - 11)) 11 seconds ago >>> print(duration(time.time() - 3601)) an hour ago >>> print(duration(time.time() - 7201)) 2 hours ago >>> print(duration(time.time() - 1234567)) 2 weeks ago >>> print(duration(time.time() + 7200, precision=1)) 2 hours from now >>> print(duration(time.time() - 1234567, precision=3)) 2 weeks, 6 hours, 56 minutes ago >>> print(duration(datetime(2014, 9, 8), now=datetime(2014, 9, 9))) yesterday >>> print(duration(datetime(2014, 9, 7, 23), now=datetime(2014, 9, 9))) 1 day ago >>> print(duration(datetime(2014, 9, 10), now=datetime(2014, 9, 9))) tomorrow >>> print(duration(datetime(2014, 9, 11, 1), now=datetime(2014, 9, 9, 23))) 1 day from now """ if words is None: words = precision == 1 # depends on [control=['if'], data=['words']] t1 = _to_datetime(t) t2 = _to_datetime(now or datetime.datetime.now()) if t1 < t2: format = _('%s ago') # depends on [control=['if'], data=[]] else: format = _('%s from now') (result, remains) = delta(t1, t2, words=words, justnow=justnow) if result in (_('just now'), _('yesterday'), _('tomorrow'), _('last week'), _('next week')): return result # depends on [control=['if'], data=['result']] elif precision > 1 and remains: t3 = t2 - datetime.timedelta(seconds=remains) return pad.join([result, duration(t2, t3, precision - 1, pad, words=False)]) # depends on [control=['if'], data=[]] else: return format % (result,)
def check_is_an_array(var, allow_none=False): """ Calls is_an_array and raises a type error if the check fails. """ if not is_an_array(var, allow_none=allow_none): raise TypeError("var must be a NumPy array, however type(var) is {}" .format(type(var)))
def function[check_is_an_array, parameter[var, allow_none]]: constant[ Calls is_an_array and raises a type error if the check fails. ] if <ast.UnaryOp object at 0x7da1b05d9d50> begin[:] <ast.Raise object at 0x7da1b05d9960>
keyword[def] identifier[check_is_an_array] ( identifier[var] , identifier[allow_none] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[is_an_array] ( identifier[var] , identifier[allow_none] = identifier[allow_none] ): keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[var] )))
def check_is_an_array(var, allow_none=False): """ Calls is_an_array and raises a type error if the check fails. """ if not is_an_array(var, allow_none=allow_none): raise TypeError('var must be a NumPy array, however type(var) is {}'.format(type(var))) # depends on [control=['if'], data=[]]
def iterate_chunks(file, chunk_size): """ Iterate chunks of size chunk_size from a file-like object """ chunk = file.read(chunk_size) while chunk: yield chunk chunk = file.read(chunk_size)
def function[iterate_chunks, parameter[file, chunk_size]]: constant[ Iterate chunks of size chunk_size from a file-like object ] variable[chunk] assign[=] call[name[file].read, parameter[name[chunk_size]]] while name[chunk] begin[:] <ast.Yield object at 0x7da1b11bc4f0> variable[chunk] assign[=] call[name[file].read, parameter[name[chunk_size]]]
keyword[def] identifier[iterate_chunks] ( identifier[file] , identifier[chunk_size] ): literal[string] identifier[chunk] = identifier[file] . identifier[read] ( identifier[chunk_size] ) keyword[while] identifier[chunk] : keyword[yield] identifier[chunk] identifier[chunk] = identifier[file] . identifier[read] ( identifier[chunk_size] )
def iterate_chunks(file, chunk_size): """ Iterate chunks of size chunk_size from a file-like object """ chunk = file.read(chunk_size) while chunk: yield chunk chunk = file.read(chunk_size) # depends on [control=['while'], data=[]]
def factory(arg1, arg2, version=-1, forceviewserveruse=False, windowId=None, uiAutomatorHelper=None): ''' View factory @type arg1: ClassType or dict @type arg2: View instance or AdbClient ''' if DEBUG_VIEW_FACTORY: print >> sys.stderr, "View.factory(%s, %s, %s, %s, %s, %s)" % (arg1, arg2, version, forceviewserveruse, windowId, uiAutomatorHelper) if type(arg1) == types.ClassType: cls = arg1 attrs = None else: cls = None attrs = arg1 if isinstance(arg2, View): view = arg2 device = None else: device = arg2 view = None if attrs and attrs.has_key('class'): clazz = attrs['class'] if DEBUG_VIEW_FACTORY: print >> sys.stderr, " View.factory: creating View with specific class: %s" % clazz if clazz == 'android.widget.TextView': return TextView(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) elif clazz == 'android.widget.EditText': return EditText(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) elif clazz == 'android.widget.ListView': return ListView(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) else: return View(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) elif cls: if view: return cls.__copy(view) else: return cls(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) elif view: return copy.copy(view) else: if DEBUG_VIEW_FACTORY: print >> sys.stderr, " View.factory: creating generic View" return View(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper)
def function[factory, parameter[arg1, arg2, version, forceviewserveruse, windowId, uiAutomatorHelper]]: constant[ View factory @type arg1: ClassType or dict @type arg2: View instance or AdbClient ] if name[DEBUG_VIEW_FACTORY] begin[:] tuple[[<ast.BinOp object at 0x7da1b1d0c9a0>, <ast.BinOp object at 0x7da1b1d0cfd0>]] if compare[call[name[type], parameter[name[arg1]]] equal[==] name[types].ClassType] begin[:] variable[cls] assign[=] name[arg1] variable[attrs] assign[=] constant[None] if call[name[isinstance], parameter[name[arg2], name[View]]] begin[:] variable[view] assign[=] name[arg2] variable[device] assign[=] constant[None] if <ast.BoolOp object at 0x7da1b1d0fa90> begin[:] variable[clazz] assign[=] call[name[attrs]][constant[class]] if name[DEBUG_VIEW_FACTORY] begin[:] tuple[[<ast.BinOp object at 0x7da1b1d0fbb0>, <ast.BinOp object at 0x7da1b1d0e410>]] if compare[name[clazz] equal[==] constant[android.widget.TextView]] begin[:] return[call[name[TextView], parameter[name[attrs], name[device], name[version], name[forceviewserveruse], name[windowId], name[uiAutomatorHelper]]]]
keyword[def] identifier[factory] ( identifier[arg1] , identifier[arg2] , identifier[version] =- literal[int] , identifier[forceviewserveruse] = keyword[False] , identifier[windowId] = keyword[None] , identifier[uiAutomatorHelper] = keyword[None] ): literal[string] keyword[if] identifier[DEBUG_VIEW_FACTORY] : identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] %( identifier[arg1] , identifier[arg2] , identifier[version] , identifier[forceviewserveruse] , identifier[windowId] , identifier[uiAutomatorHelper] ) keyword[if] identifier[type] ( identifier[arg1] )== identifier[types] . identifier[ClassType] : identifier[cls] = identifier[arg1] identifier[attrs] = keyword[None] keyword[else] : identifier[cls] = keyword[None] identifier[attrs] = identifier[arg1] keyword[if] identifier[isinstance] ( identifier[arg2] , identifier[View] ): identifier[view] = identifier[arg2] identifier[device] = keyword[None] keyword[else] : identifier[device] = identifier[arg2] identifier[view] = keyword[None] keyword[if] identifier[attrs] keyword[and] identifier[attrs] . identifier[has_key] ( literal[string] ): identifier[clazz] = identifier[attrs] [ literal[string] ] keyword[if] identifier[DEBUG_VIEW_FACTORY] : identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] % identifier[clazz] keyword[if] identifier[clazz] == literal[string] : keyword[return] identifier[TextView] ( identifier[attrs] , identifier[device] , identifier[version] , identifier[forceviewserveruse] , identifier[windowId] , identifier[uiAutomatorHelper] ) keyword[elif] identifier[clazz] == literal[string] : keyword[return] identifier[EditText] ( identifier[attrs] , identifier[device] , identifier[version] , identifier[forceviewserveruse] , identifier[windowId] , identifier[uiAutomatorHelper] ) keyword[elif] identifier[clazz] == literal[string] : keyword[return] identifier[ListView] ( identifier[attrs] , identifier[device] , identifier[version] , identifier[forceviewserveruse] , identifier[windowId] , identifier[uiAutomatorHelper] ) keyword[else] : keyword[return] identifier[View] ( identifier[attrs] , identifier[device] , identifier[version] , identifier[forceviewserveruse] , identifier[windowId] , identifier[uiAutomatorHelper] ) keyword[elif] identifier[cls] : keyword[if] identifier[view] : keyword[return] identifier[cls] . identifier[__copy] ( identifier[view] ) keyword[else] : keyword[return] identifier[cls] ( identifier[attrs] , identifier[device] , identifier[version] , identifier[forceviewserveruse] , identifier[windowId] , identifier[uiAutomatorHelper] ) keyword[elif] identifier[view] : keyword[return] identifier[copy] . identifier[copy] ( identifier[view] ) keyword[else] : keyword[if] identifier[DEBUG_VIEW_FACTORY] : identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] keyword[return] identifier[View] ( identifier[attrs] , identifier[device] , identifier[version] , identifier[forceviewserveruse] , identifier[windowId] , identifier[uiAutomatorHelper] )
def factory(arg1, arg2, version=-1, forceviewserveruse=False, windowId=None, uiAutomatorHelper=None): """ View factory @type arg1: ClassType or dict @type arg2: View instance or AdbClient """ if DEBUG_VIEW_FACTORY: (print >> sys.stderr, 'View.factory(%s, %s, %s, %s, %s, %s)' % (arg1, arg2, version, forceviewserveruse, windowId, uiAutomatorHelper)) # depends on [control=['if'], data=[]] if type(arg1) == types.ClassType: cls = arg1 attrs = None # depends on [control=['if'], data=[]] else: cls = None attrs = arg1 if isinstance(arg2, View): view = arg2 device = None # depends on [control=['if'], data=[]] else: device = arg2 view = None if attrs and attrs.has_key('class'): clazz = attrs['class'] if DEBUG_VIEW_FACTORY: (print >> sys.stderr, ' View.factory: creating View with specific class: %s' % clazz) # depends on [control=['if'], data=[]] if clazz == 'android.widget.TextView': return TextView(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) # depends on [control=['if'], data=[]] elif clazz == 'android.widget.EditText': return EditText(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) # depends on [control=['if'], data=[]] elif clazz == 'android.widget.ListView': return ListView(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) # depends on [control=['if'], data=[]] else: return View(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) # depends on [control=['if'], data=[]] elif cls: if view: return cls.__copy(view) # depends on [control=['if'], data=[]] else: return cls(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper) # depends on [control=['if'], data=[]] elif view: return copy.copy(view) # depends on [control=['if'], data=[]] else: if DEBUG_VIEW_FACTORY: (print >> sys.stderr, ' View.factory: creating generic View') # depends on [control=['if'], data=[]] return View(attrs, device, version, forceviewserveruse, windowId, uiAutomatorHelper)
def psubscribe(self, pattern, *patterns): """Switch connection to Pub/Sub mode and subscribe to specified patterns. Arguments can be instances of :class:`~aioredis.Channel`. Returns :func:`asyncio.gather()` coroutine which when done will return a list of subscribed :class:`~aioredis.Channel` objects with ``is_pattern`` property set to ``True``. """ conn = self._pool_or_conn return wait_return_channels( conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns), conn, 'pubsub_patterns')
def function[psubscribe, parameter[self, pattern]]: constant[Switch connection to Pub/Sub mode and subscribe to specified patterns. Arguments can be instances of :class:`~aioredis.Channel`. Returns :func:`asyncio.gather()` coroutine which when done will return a list of subscribed :class:`~aioredis.Channel` objects with ``is_pattern`` property set to ``True``. ] variable[conn] assign[=] name[self]._pool_or_conn return[call[name[wait_return_channels], parameter[call[name[conn].execute_pubsub, parameter[constant[b'PSUBSCRIBE'], name[pattern], <ast.Starred object at 0x7da20e9b0040>]], name[conn], constant[pubsub_patterns]]]]
keyword[def] identifier[psubscribe] ( identifier[self] , identifier[pattern] ,* identifier[patterns] ): literal[string] identifier[conn] = identifier[self] . identifier[_pool_or_conn] keyword[return] identifier[wait_return_channels] ( identifier[conn] . identifier[execute_pubsub] ( literal[string] , identifier[pattern] ,* identifier[patterns] ), identifier[conn] , literal[string] )
def psubscribe(self, pattern, *patterns): """Switch connection to Pub/Sub mode and subscribe to specified patterns. Arguments can be instances of :class:`~aioredis.Channel`. Returns :func:`asyncio.gather()` coroutine which when done will return a list of subscribed :class:`~aioredis.Channel` objects with ``is_pattern`` property set to ``True``. """ conn = self._pool_or_conn return wait_return_channels(conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns), conn, 'pubsub_patterns')
def wiki_delete(self, page_id): """Delete a specific page wiki (Requires login) (UNTESTED) (Builder+). Parameters: page_id (int): """ return self._get('wiki_pages/{0}.json'.format(page_id), auth=True, method='DELETE')
def function[wiki_delete, parameter[self, page_id]]: constant[Delete a specific page wiki (Requires login) (UNTESTED) (Builder+). Parameters: page_id (int): ] return[call[name[self]._get, parameter[call[constant[wiki_pages/{0}.json].format, parameter[name[page_id]]]]]]
keyword[def] identifier[wiki_delete] ( identifier[self] , identifier[page_id] ): literal[string] keyword[return] identifier[self] . identifier[_get] ( literal[string] . identifier[format] ( identifier[page_id] ), identifier[auth] = keyword[True] , identifier[method] = literal[string] )
def wiki_delete(self, page_id): """Delete a specific page wiki (Requires login) (UNTESTED) (Builder+). Parameters: page_id (int): """ return self._get('wiki_pages/{0}.json'.format(page_id), auth=True, method='DELETE')
def analyze_sentiment( self, document, encoding_type=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Analyzes the sentiment of the provided text. Example: >>> from google.cloud import language_v1 >>> >>> client = language_v1.LanguageServiceClient() >>> >>> # TODO: Initialize `document`: >>> document = {} >>> >>> response = client.analyze_sentiment(document) Args: document (Union[dict, ~google.cloud.language_v1.types.Document]): Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.types.EncodingType): The encoding type used by the API to calculate sentence offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.language_v1.types.AnalyzeSentimentResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "analyze_sentiment" not in self._inner_api_calls: self._inner_api_calls[ "analyze_sentiment" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.analyze_sentiment, default_retry=self._method_configs["AnalyzeSentiment"].retry, default_timeout=self._method_configs["AnalyzeSentiment"].timeout, client_info=self._client_info, ) request = language_service_pb2.AnalyzeSentimentRequest( document=document, encoding_type=encoding_type ) return self._inner_api_calls["analyze_sentiment"]( request, retry=retry, timeout=timeout, metadata=metadata )
def function[analyze_sentiment, parameter[self, document, encoding_type, retry, timeout, metadata]]: constant[ Analyzes the sentiment of the provided text. Example: >>> from google.cloud import language_v1 >>> >>> client = language_v1.LanguageServiceClient() >>> >>> # TODO: Initialize `document`: >>> document = {} >>> >>> response = client.analyze_sentiment(document) Args: document (Union[dict, ~google.cloud.language_v1.types.Document]): Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.types.EncodingType): The encoding type used by the API to calculate sentence offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.language_v1.types.AnalyzeSentimentResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. ] if compare[constant[analyze_sentiment] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:] call[name[self]._inner_api_calls][constant[analyze_sentiment]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.analyze_sentiment]] variable[request] assign[=] call[name[language_service_pb2].AnalyzeSentimentRequest, parameter[]] return[call[call[name[self]._inner_api_calls][constant[analyze_sentiment]], parameter[name[request]]]]
keyword[def] identifier[analyze_sentiment] ( identifier[self] , identifier[document] , identifier[encoding_type] = keyword[None] , identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] , identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] , identifier[metadata] = keyword[None] , ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] : identifier[self] . identifier[_inner_api_calls] [ literal[string] ]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] ( identifier[self] . identifier[transport] . identifier[analyze_sentiment] , identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] , identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] , identifier[client_info] = identifier[self] . identifier[_client_info] , ) identifier[request] = identifier[language_service_pb2] . identifier[AnalyzeSentimentRequest] ( identifier[document] = identifier[document] , identifier[encoding_type] = identifier[encoding_type] ) keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ]( identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata] )
def analyze_sentiment(self, document, encoding_type=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Analyzes the sentiment of the provided text. Example: >>> from google.cloud import language_v1 >>> >>> client = language_v1.LanguageServiceClient() >>> >>> # TODO: Initialize `document`: >>> document = {} >>> >>> response = client.analyze_sentiment(document) Args: document (Union[dict, ~google.cloud.language_v1.types.Document]): Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.types.EncodingType): The encoding type used by the API to calculate sentence offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.language_v1.types.AnalyzeSentimentResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'analyze_sentiment' not in self._inner_api_calls: self._inner_api_calls['analyze_sentiment'] = google.api_core.gapic_v1.method.wrap_method(self.transport.analyze_sentiment, default_retry=self._method_configs['AnalyzeSentiment'].retry, default_timeout=self._method_configs['AnalyzeSentiment'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]] request = language_service_pb2.AnalyzeSentimentRequest(document=document, encoding_type=encoding_type) return self._inner_api_calls['analyze_sentiment'](request, retry=retry, timeout=timeout, metadata=metadata)
def sync(self): """ :returns: Version sync of preview :rtype: twilio.rest.preview.sync.Sync """ if self._sync is None: self._sync = Sync(self) return self._sync
def function[sync, parameter[self]]: constant[ :returns: Version sync of preview :rtype: twilio.rest.preview.sync.Sync ] if compare[name[self]._sync is constant[None]] begin[:] name[self]._sync assign[=] call[name[Sync], parameter[name[self]]] return[name[self]._sync]
keyword[def] identifier[sync] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_sync] keyword[is] keyword[None] : identifier[self] . identifier[_sync] = identifier[Sync] ( identifier[self] ) keyword[return] identifier[self] . identifier[_sync]
def sync(self): """ :returns: Version sync of preview :rtype: twilio.rest.preview.sync.Sync """ if self._sync is None: self._sync = Sync(self) # depends on [control=['if'], data=[]] return self._sync
def get_submission_archive(self, submissions, sub_folders, aggregations, archive_file=None): """ :param submissions: a list of submissions :param sub_folders: possible values: []: put all submissions in / ['taskid']: put all submissions for each task in a different directory /taskid/ ['username']: put all submissions for each user in a different directory /username/ ['taskid','username']: /taskid/username/ ['username','taskid']: /username/taskid/ :return: a file-like object containing a tgz archive of all the submissions """ tmpfile = archive_file if archive_file is not None else tempfile.TemporaryFile() tar = tarfile.open(fileobj=tmpfile, mode='w:gz') for submission in submissions: submission = self.get_input_from_submission(submission) submission_yaml = io.BytesIO(inginious.common.custom_yaml.dump(submission).encode('utf-8')) # Considering multiple single submissions for each user for username in submission["username"]: # Compute base path in the tar file base_path = "/" for sub_folder in sub_folders: if sub_folder == 'taskid': base_path = submission['taskid'] + base_path elif sub_folder == 'username': base_path = '_' + '-'.join(submission['username']) + base_path base_path = base_path[1:] elif sub_folder == 'aggregation': if username in aggregations: if aggregations[username] is None: # If classrooms are not used, and user is not grouped, his classroom is replaced by None base_path = '_' + '-'.join(submission['username']) + base_path base_path = base_path[1:] else: base_path = (aggregations[username]["description"] + " (" + str(aggregations[username]["_id"]) + ")").replace(" ", "_") + base_path base_path = '/' + base_path base_path = base_path[1:] submission_yaml_fname = base_path + str(submission["_id"]) + '/submission.test' # Avoid putting two times the same submission on the same place if submission_yaml_fname not in tar.getnames(): info = tarfile.TarInfo(name=submission_yaml_fname) info.size = submission_yaml.getbuffer().nbytes info.mtime = time.mktime(submission["submitted_on"].timetuple()) # Add file in tar archive tar.addfile(info, fileobj=submission_yaml) # If there is an archive, add it too if 'archive' in submission and submission['archive'] is not None and submission['archive'] != "": subfile = self._gridfs.get(submission['archive']) subtar = tarfile.open(fileobj=subfile, mode="r:gz") for member in subtar.getmembers(): subtarfile = subtar.extractfile(member) member.name = base_path + str(submission["_id"]) + "/archive/" + member.name tar.addfile(member, subtarfile) subtar.close() subfile.close() # If there files that were uploaded by the student, add them if submission['input'] is not None: for pid, problem in submission['input'].items(): # If problem is a dict, it is a file (from the specification of the problems) if isinstance(problem, dict): # Get the extension (match extensions with more than one dot too) DOUBLE_EXTENSIONS = ['.tar.gz', '.tar.bz2', '.tar.bz', '.tar.xz'] ext = "" if not problem['filename'].endswith(tuple(DOUBLE_EXTENSIONS)): _, ext = os.path.splitext(problem['filename']) else: for t_ext in DOUBLE_EXTENSIONS: if problem['filename'].endswith(t_ext): ext = t_ext subfile = io.BytesIO(problem['value']) taskfname = base_path + str(submission["_id"]) + '/uploaded_files/' + pid + ext # Generate file info info = tarfile.TarInfo(name=taskfname) info.size = subfile.getbuffer().nbytes info.mtime = time.mktime(submission["submitted_on"].timetuple()) # Add file in tar archive tar.addfile(info, fileobj=subfile) # Close tarfile and put tempfile cursor at 0 tar.close() tmpfile.seek(0) return tmpfile
def function[get_submission_archive, parameter[self, submissions, sub_folders, aggregations, archive_file]]: constant[ :param submissions: a list of submissions :param sub_folders: possible values: []: put all submissions in / ['taskid']: put all submissions for each task in a different directory /taskid/ ['username']: put all submissions for each user in a different directory /username/ ['taskid','username']: /taskid/username/ ['username','taskid']: /username/taskid/ :return: a file-like object containing a tgz archive of all the submissions ] variable[tmpfile] assign[=] <ast.IfExp object at 0x7da1b1734610> variable[tar] assign[=] call[name[tarfile].open, parameter[]] for taget[name[submission]] in starred[name[submissions]] begin[:] variable[submission] assign[=] call[name[self].get_input_from_submission, parameter[name[submission]]] variable[submission_yaml] assign[=] call[name[io].BytesIO, parameter[call[call[name[inginious].common.custom_yaml.dump, parameter[name[submission]]].encode, parameter[constant[utf-8]]]]] for taget[name[username]] in starred[call[name[submission]][constant[username]]] begin[:] variable[base_path] assign[=] constant[/] for taget[name[sub_folder]] in starred[name[sub_folders]] begin[:] if compare[name[sub_folder] equal[==] constant[taskid]] begin[:] variable[base_path] assign[=] binary_operation[call[name[submission]][constant[taskid]] + name[base_path]] variable[base_path] assign[=] binary_operation[constant[/] + name[base_path]] variable[base_path] assign[=] call[name[base_path]][<ast.Slice object at 0x7da1b17374c0>] variable[submission_yaml_fname] assign[=] binary_operation[binary_operation[name[base_path] + call[name[str], parameter[call[name[submission]][constant[_id]]]]] + constant[/submission.test]] if compare[name[submission_yaml_fname] <ast.NotIn object at 0x7da2590d7190> call[name[tar].getnames, parameter[]]] begin[:] variable[info] assign[=] call[name[tarfile].TarInfo, parameter[]] name[info].size assign[=] call[name[submission_yaml].getbuffer, parameter[]].nbytes name[info].mtime assign[=] call[name[time].mktime, parameter[call[call[name[submission]][constant[submitted_on]].timetuple, parameter[]]]] call[name[tar].addfile, parameter[name[info]]] if <ast.BoolOp object at 0x7da1b1736770> begin[:] variable[subfile] assign[=] call[name[self]._gridfs.get, parameter[call[name[submission]][constant[archive]]]] variable[subtar] assign[=] call[name[tarfile].open, parameter[]] for taget[name[member]] in starred[call[name[subtar].getmembers, parameter[]]] begin[:] variable[subtarfile] assign[=] call[name[subtar].extractfile, parameter[name[member]]] name[member].name assign[=] binary_operation[binary_operation[binary_operation[name[base_path] + call[name[str], parameter[call[name[submission]][constant[_id]]]]] + constant[/archive/]] + name[member].name] call[name[tar].addfile, parameter[name[member], name[subtarfile]]] call[name[subtar].close, parameter[]] call[name[subfile].close, parameter[]] if compare[call[name[submission]][constant[input]] is_not constant[None]] begin[:] for taget[tuple[[<ast.Name object at 0x7da2054a78e0>, <ast.Name object at 0x7da2054a5ed0>]]] in starred[call[call[name[submission]][constant[input]].items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[problem], name[dict]]] begin[:] variable[DOUBLE_EXTENSIONS] assign[=] list[[<ast.Constant object at 0x7da2054a4070>, <ast.Constant object at 0x7da2054a6140>, <ast.Constant object at 0x7da2054a64d0>, <ast.Constant object at 0x7da2054a4f70>]] variable[ext] assign[=] constant[] if <ast.UnaryOp object at 0x7da2054a5a80> begin[:] <ast.Tuple object at 0x7da2054a5630> assign[=] call[name[os].path.splitext, parameter[call[name[problem]][constant[filename]]]] variable[subfile] assign[=] call[name[io].BytesIO, parameter[call[name[problem]][constant[value]]]] variable[taskfname] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[base_path] + call[name[str], parameter[call[name[submission]][constant[_id]]]]] + constant[/uploaded_files/]] + name[pid]] + name[ext]] variable[info] assign[=] call[name[tarfile].TarInfo, parameter[]] name[info].size assign[=] call[name[subfile].getbuffer, parameter[]].nbytes name[info].mtime assign[=] call[name[time].mktime, parameter[call[call[name[submission]][constant[submitted_on]].timetuple, parameter[]]]] call[name[tar].addfile, parameter[name[info]]] call[name[tar].close, parameter[]] call[name[tmpfile].seek, parameter[constant[0]]] return[name[tmpfile]]
keyword[def] identifier[get_submission_archive] ( identifier[self] , identifier[submissions] , identifier[sub_folders] , identifier[aggregations] , identifier[archive_file] = keyword[None] ): literal[string] identifier[tmpfile] = identifier[archive_file] keyword[if] identifier[archive_file] keyword[is] keyword[not] keyword[None] keyword[else] identifier[tempfile] . identifier[TemporaryFile] () identifier[tar] = identifier[tarfile] . identifier[open] ( identifier[fileobj] = identifier[tmpfile] , identifier[mode] = literal[string] ) keyword[for] identifier[submission] keyword[in] identifier[submissions] : identifier[submission] = identifier[self] . identifier[get_input_from_submission] ( identifier[submission] ) identifier[submission_yaml] = identifier[io] . identifier[BytesIO] ( identifier[inginious] . identifier[common] . identifier[custom_yaml] . identifier[dump] ( identifier[submission] ). identifier[encode] ( literal[string] )) keyword[for] identifier[username] keyword[in] identifier[submission] [ literal[string] ]: identifier[base_path] = literal[string] keyword[for] identifier[sub_folder] keyword[in] identifier[sub_folders] : keyword[if] identifier[sub_folder] == literal[string] : identifier[base_path] = identifier[submission] [ literal[string] ]+ identifier[base_path] keyword[elif] identifier[sub_folder] == literal[string] : identifier[base_path] = literal[string] + literal[string] . identifier[join] ( identifier[submission] [ literal[string] ])+ identifier[base_path] identifier[base_path] = identifier[base_path] [ literal[int] :] keyword[elif] identifier[sub_folder] == literal[string] : keyword[if] identifier[username] keyword[in] identifier[aggregations] : keyword[if] identifier[aggregations] [ identifier[username] ] keyword[is] keyword[None] : identifier[base_path] = literal[string] + literal[string] . identifier[join] ( identifier[submission] [ literal[string] ])+ identifier[base_path] identifier[base_path] = identifier[base_path] [ literal[int] :] keyword[else] : identifier[base_path] =( identifier[aggregations] [ identifier[username] ][ literal[string] ]+ literal[string] + identifier[str] ( identifier[aggregations] [ identifier[username] ][ literal[string] ])+ literal[string] ). identifier[replace] ( literal[string] , literal[string] )+ identifier[base_path] identifier[base_path] = literal[string] + identifier[base_path] identifier[base_path] = identifier[base_path] [ literal[int] :] identifier[submission_yaml_fname] = identifier[base_path] + identifier[str] ( identifier[submission] [ literal[string] ])+ literal[string] keyword[if] identifier[submission_yaml_fname] keyword[not] keyword[in] identifier[tar] . identifier[getnames] (): identifier[info] = identifier[tarfile] . identifier[TarInfo] ( identifier[name] = identifier[submission_yaml_fname] ) identifier[info] . identifier[size] = identifier[submission_yaml] . identifier[getbuffer] (). identifier[nbytes] identifier[info] . identifier[mtime] = identifier[time] . identifier[mktime] ( identifier[submission] [ literal[string] ]. identifier[timetuple] ()) identifier[tar] . identifier[addfile] ( identifier[info] , identifier[fileobj] = identifier[submission_yaml] ) keyword[if] literal[string] keyword[in] identifier[submission] keyword[and] identifier[submission] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and] identifier[submission] [ literal[string] ]!= literal[string] : identifier[subfile] = identifier[self] . identifier[_gridfs] . identifier[get] ( identifier[submission] [ literal[string] ]) identifier[subtar] = identifier[tarfile] . identifier[open] ( identifier[fileobj] = identifier[subfile] , identifier[mode] = literal[string] ) keyword[for] identifier[member] keyword[in] identifier[subtar] . identifier[getmembers] (): identifier[subtarfile] = identifier[subtar] . identifier[extractfile] ( identifier[member] ) identifier[member] . identifier[name] = identifier[base_path] + identifier[str] ( identifier[submission] [ literal[string] ])+ literal[string] + identifier[member] . identifier[name] identifier[tar] . identifier[addfile] ( identifier[member] , identifier[subtarfile] ) identifier[subtar] . identifier[close] () identifier[subfile] . identifier[close] () keyword[if] identifier[submission] [ literal[string] ] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[pid] , identifier[problem] keyword[in] identifier[submission] [ literal[string] ]. identifier[items] (): keyword[if] identifier[isinstance] ( identifier[problem] , identifier[dict] ): identifier[DOUBLE_EXTENSIONS] =[ literal[string] , literal[string] , literal[string] , literal[string] ] identifier[ext] = literal[string] keyword[if] keyword[not] identifier[problem] [ literal[string] ]. identifier[endswith] ( identifier[tuple] ( identifier[DOUBLE_EXTENSIONS] )): identifier[_] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[problem] [ literal[string] ]) keyword[else] : keyword[for] identifier[t_ext] keyword[in] identifier[DOUBLE_EXTENSIONS] : keyword[if] identifier[problem] [ literal[string] ]. identifier[endswith] ( identifier[t_ext] ): identifier[ext] = identifier[t_ext] identifier[subfile] = identifier[io] . identifier[BytesIO] ( identifier[problem] [ literal[string] ]) identifier[taskfname] = identifier[base_path] + identifier[str] ( identifier[submission] [ literal[string] ])+ literal[string] + identifier[pid] + identifier[ext] identifier[info] = identifier[tarfile] . identifier[TarInfo] ( identifier[name] = identifier[taskfname] ) identifier[info] . identifier[size] = identifier[subfile] . identifier[getbuffer] (). identifier[nbytes] identifier[info] . identifier[mtime] = identifier[time] . identifier[mktime] ( identifier[submission] [ literal[string] ]. identifier[timetuple] ()) identifier[tar] . identifier[addfile] ( identifier[info] , identifier[fileobj] = identifier[subfile] ) identifier[tar] . identifier[close] () identifier[tmpfile] . identifier[seek] ( literal[int] ) keyword[return] identifier[tmpfile]
def get_submission_archive(self, submissions, sub_folders, aggregations, archive_file=None): """ :param submissions: a list of submissions :param sub_folders: possible values: []: put all submissions in / ['taskid']: put all submissions for each task in a different directory /taskid/ ['username']: put all submissions for each user in a different directory /username/ ['taskid','username']: /taskid/username/ ['username','taskid']: /username/taskid/ :return: a file-like object containing a tgz archive of all the submissions """ tmpfile = archive_file if archive_file is not None else tempfile.TemporaryFile() tar = tarfile.open(fileobj=tmpfile, mode='w:gz') for submission in submissions: submission = self.get_input_from_submission(submission) submission_yaml = io.BytesIO(inginious.common.custom_yaml.dump(submission).encode('utf-8')) # Considering multiple single submissions for each user for username in submission['username']: # Compute base path in the tar file base_path = '/' for sub_folder in sub_folders: if sub_folder == 'taskid': base_path = submission['taskid'] + base_path # depends on [control=['if'], data=[]] elif sub_folder == 'username': base_path = '_' + '-'.join(submission['username']) + base_path base_path = base_path[1:] # depends on [control=['if'], data=[]] elif sub_folder == 'aggregation': if username in aggregations: if aggregations[username] is None: # If classrooms are not used, and user is not grouped, his classroom is replaced by None base_path = '_' + '-'.join(submission['username']) + base_path base_path = base_path[1:] # depends on [control=['if'], data=[]] else: base_path = (aggregations[username]['description'] + ' (' + str(aggregations[username]['_id']) + ')').replace(' ', '_') + base_path # depends on [control=['if'], data=['username', 'aggregations']] # depends on [control=['if'], data=[]] base_path = '/' + base_path # depends on [control=['for'], data=['sub_folder']] base_path = base_path[1:] submission_yaml_fname = base_path + str(submission['_id']) + '/submission.test' # Avoid putting two times the same submission on the same place if submission_yaml_fname not in tar.getnames(): info = tarfile.TarInfo(name=submission_yaml_fname) info.size = submission_yaml.getbuffer().nbytes info.mtime = time.mktime(submission['submitted_on'].timetuple()) # Add file in tar archive tar.addfile(info, fileobj=submission_yaml) # If there is an archive, add it too if 'archive' in submission and submission['archive'] is not None and (submission['archive'] != ''): subfile = self._gridfs.get(submission['archive']) subtar = tarfile.open(fileobj=subfile, mode='r:gz') for member in subtar.getmembers(): subtarfile = subtar.extractfile(member) member.name = base_path + str(submission['_id']) + '/archive/' + member.name tar.addfile(member, subtarfile) # depends on [control=['for'], data=['member']] subtar.close() subfile.close() # depends on [control=['if'], data=[]] # If there files that were uploaded by the student, add them if submission['input'] is not None: for (pid, problem) in submission['input'].items(): # If problem is a dict, it is a file (from the specification of the problems) if isinstance(problem, dict): # Get the extension (match extensions with more than one dot too) DOUBLE_EXTENSIONS = ['.tar.gz', '.tar.bz2', '.tar.bz', '.tar.xz'] ext = '' if not problem['filename'].endswith(tuple(DOUBLE_EXTENSIONS)): (_, ext) = os.path.splitext(problem['filename']) # depends on [control=['if'], data=[]] else: for t_ext in DOUBLE_EXTENSIONS: if problem['filename'].endswith(t_ext): ext = t_ext # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t_ext']] subfile = io.BytesIO(problem['value']) taskfname = base_path + str(submission['_id']) + '/uploaded_files/' + pid + ext # Generate file info info = tarfile.TarInfo(name=taskfname) info.size = subfile.getbuffer().nbytes info.mtime = time.mktime(submission['submitted_on'].timetuple()) # Add file in tar archive tar.addfile(info, fileobj=subfile) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['submission_yaml_fname']] # depends on [control=['for'], data=['username']] # depends on [control=['for'], data=['submission']] # Close tarfile and put tempfile cursor at 0 tar.close() tmpfile.seek(0) return tmpfile
def create(self, issue_id=None, repo_slug=None, **kwargs): """ Add an issue comment to one of your repositories. Each issue comment require only the content data field the system autopopulate the rest. """ issue_id = issue_id or self.issue_id repo_slug = repo_slug or self.bitbucket.repo_slug or '' url = self.bitbucket.url('CREATE_COMMENT', username=self.bitbucket.username, repo_slug=repo_slug, issue_id=issue_id) return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs)
def function[create, parameter[self, issue_id, repo_slug]]: constant[ Add an issue comment to one of your repositories. Each issue comment require only the content data field the system autopopulate the rest. ] variable[issue_id] assign[=] <ast.BoolOp object at 0x7da1b1a44af0> variable[repo_slug] assign[=] <ast.BoolOp object at 0x7da1b1a47250> variable[url] assign[=] call[name[self].bitbucket.url, parameter[constant[CREATE_COMMENT]]] return[call[name[self].bitbucket.dispatch, parameter[constant[POST], name[url]]]]
keyword[def] identifier[create] ( identifier[self] , identifier[issue_id] = keyword[None] , identifier[repo_slug] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[issue_id] = identifier[issue_id] keyword[or] identifier[self] . identifier[issue_id] identifier[repo_slug] = identifier[repo_slug] keyword[or] identifier[self] . identifier[bitbucket] . identifier[repo_slug] keyword[or] literal[string] identifier[url] = identifier[self] . identifier[bitbucket] . identifier[url] ( literal[string] , identifier[username] = identifier[self] . identifier[bitbucket] . identifier[username] , identifier[repo_slug] = identifier[repo_slug] , identifier[issue_id] = identifier[issue_id] ) keyword[return] identifier[self] . identifier[bitbucket] . identifier[dispatch] ( literal[string] , identifier[url] , identifier[auth] = identifier[self] . identifier[bitbucket] . identifier[auth] ,** identifier[kwargs] )
def create(self, issue_id=None, repo_slug=None, **kwargs): """ Add an issue comment to one of your repositories. Each issue comment require only the content data field the system autopopulate the rest. """ issue_id = issue_id or self.issue_id repo_slug = repo_slug or self.bitbucket.repo_slug or '' url = self.bitbucket.url('CREATE_COMMENT', username=self.bitbucket.username, repo_slug=repo_slug, issue_id=issue_id) return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs)
def _str_desc(self, reader): """String containing information about the current GO DAG.""" data_version = reader.data_version if data_version is not None: data_version = data_version.replace("releases/", "") desc = "{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms".format( OBO=reader.obo_file, FMT=reader.format_version, REL=data_version, N=len(self)) if reader.optobj: desc = "{D}; optional_attrs({A})".format(D=desc, A=" ".join(sorted(reader.optobj.optional_attrs))) return desc
def function[_str_desc, parameter[self, reader]]: constant[String containing information about the current GO DAG.] variable[data_version] assign[=] name[reader].data_version if compare[name[data_version] is_not constant[None]] begin[:] variable[data_version] assign[=] call[name[data_version].replace, parameter[constant[releases/], constant[]]] variable[desc] assign[=] call[constant[{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms].format, parameter[]] if name[reader].optobj begin[:] variable[desc] assign[=] call[constant[{D}; optional_attrs({A})].format, parameter[]] return[name[desc]]
keyword[def] identifier[_str_desc] ( identifier[self] , identifier[reader] ): literal[string] identifier[data_version] = identifier[reader] . identifier[data_version] keyword[if] identifier[data_version] keyword[is] keyword[not] keyword[None] : identifier[data_version] = identifier[data_version] . identifier[replace] ( literal[string] , literal[string] ) identifier[desc] = literal[string] . identifier[format] ( identifier[OBO] = identifier[reader] . identifier[obo_file] , identifier[FMT] = identifier[reader] . identifier[format_version] , identifier[REL] = identifier[data_version] , identifier[N] = identifier[len] ( identifier[self] )) keyword[if] identifier[reader] . identifier[optobj] : identifier[desc] = literal[string] . identifier[format] ( identifier[D] = identifier[desc] , identifier[A] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[reader] . identifier[optobj] . identifier[optional_attrs] ))) keyword[return] identifier[desc]
def _str_desc(self, reader): """String containing information about the current GO DAG.""" data_version = reader.data_version if data_version is not None: data_version = data_version.replace('releases/', '') # depends on [control=['if'], data=['data_version']] desc = '{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms'.format(OBO=reader.obo_file, FMT=reader.format_version, REL=data_version, N=len(self)) if reader.optobj: desc = '{D}; optional_attrs({A})'.format(D=desc, A=' '.join(sorted(reader.optobj.optional_attrs))) # depends on [control=['if'], data=[]] return desc
def FilterFnTable(fn_table, symbol): """Remove a specific symbol from a fn_table.""" new_table = list() for entry in fn_table: # symbol[0] is a str with the symbol name if entry[0] != symbol: new_table.append(entry) return new_table
def function[FilterFnTable, parameter[fn_table, symbol]]: constant[Remove a specific symbol from a fn_table.] variable[new_table] assign[=] call[name[list], parameter[]] for taget[name[entry]] in starred[name[fn_table]] begin[:] if compare[call[name[entry]][constant[0]] not_equal[!=] name[symbol]] begin[:] call[name[new_table].append, parameter[name[entry]]] return[name[new_table]]
keyword[def] identifier[FilterFnTable] ( identifier[fn_table] , identifier[symbol] ): literal[string] identifier[new_table] = identifier[list] () keyword[for] identifier[entry] keyword[in] identifier[fn_table] : keyword[if] identifier[entry] [ literal[int] ]!= identifier[symbol] : identifier[new_table] . identifier[append] ( identifier[entry] ) keyword[return] identifier[new_table]
def FilterFnTable(fn_table, symbol): """Remove a specific symbol from a fn_table.""" new_table = list() for entry in fn_table: # symbol[0] is a str with the symbol name if entry[0] != symbol: new_table.append(entry) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']] return new_table
def hmget(self, key, *fields): """ Returns the values associated with the specified `fields` in a hash. For every ``field`` that does not exist in the hash, :data:`None` is returned. Because a non-existing keys are treated as empty hashes, calling :meth:`hmget` against a non-existing key will return a list of :data:`None` values. .. note:: *Time complexity*: ``O(N)`` where ``N`` is the number of fields being requested. :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :param fields: iterable of field names to retrieve :returns: a :class:`dict` of field name to value mappings for each of the requested fields :rtype: dict """ def format_response(val_array): return dict(zip(fields, val_array)) command = [b'HMGET', key] command.extend(fields) return self._execute(command, format_callback=format_response)
def function[hmget, parameter[self, key]]: constant[ Returns the values associated with the specified `fields` in a hash. For every ``field`` that does not exist in the hash, :data:`None` is returned. Because a non-existing keys are treated as empty hashes, calling :meth:`hmget` against a non-existing key will return a list of :data:`None` values. .. note:: *Time complexity*: ``O(N)`` where ``N`` is the number of fields being requested. :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :param fields: iterable of field names to retrieve :returns: a :class:`dict` of field name to value mappings for each of the requested fields :rtype: dict ] def function[format_response, parameter[val_array]]: return[call[name[dict], parameter[call[name[zip], parameter[name[fields], name[val_array]]]]]] variable[command] assign[=] list[[<ast.Constant object at 0x7da18dc059c0>, <ast.Name object at 0x7da18dc04760>]] call[name[command].extend, parameter[name[fields]]] return[call[name[self]._execute, parameter[name[command]]]]
keyword[def] identifier[hmget] ( identifier[self] , identifier[key] ,* identifier[fields] ): literal[string] keyword[def] identifier[format_response] ( identifier[val_array] ): keyword[return] identifier[dict] ( identifier[zip] ( identifier[fields] , identifier[val_array] )) identifier[command] =[ literal[string] , identifier[key] ] identifier[command] . identifier[extend] ( identifier[fields] ) keyword[return] identifier[self] . identifier[_execute] ( identifier[command] , identifier[format_callback] = identifier[format_response] )
def hmget(self, key, *fields): """ Returns the values associated with the specified `fields` in a hash. For every ``field`` that does not exist in the hash, :data:`None` is returned. Because a non-existing keys are treated as empty hashes, calling :meth:`hmget` against a non-existing key will return a list of :data:`None` values. .. note:: *Time complexity*: ``O(N)`` where ``N`` is the number of fields being requested. :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :param fields: iterable of field names to retrieve :returns: a :class:`dict` of field name to value mappings for each of the requested fields :rtype: dict """ def format_response(val_array): return dict(zip(fields, val_array)) command = [b'HMGET', key] command.extend(fields) return self._execute(command, format_callback=format_response)
def transition_to_rollback(self): """Transition to rollback""" assert self.state in [AQStateMachineStates.execute, AQStateMachineStates.execute_complete] self.state = AQStateMachineStates.rollback
def function[transition_to_rollback, parameter[self]]: constant[Transition to rollback] assert[compare[name[self].state in list[[<ast.Attribute object at 0x7da1b1473f10>, <ast.Attribute object at 0x7da1b1470eb0>]]]] name[self].state assign[=] name[AQStateMachineStates].rollback
keyword[def] identifier[transition_to_rollback] ( identifier[self] ): literal[string] keyword[assert] identifier[self] . identifier[state] keyword[in] [ identifier[AQStateMachineStates] . identifier[execute] , identifier[AQStateMachineStates] . identifier[execute_complete] ] identifier[self] . identifier[state] = identifier[AQStateMachineStates] . identifier[rollback]
def transition_to_rollback(self): """Transition to rollback""" assert self.state in [AQStateMachineStates.execute, AQStateMachineStates.execute_complete] self.state = AQStateMachineStates.rollback
def _engineServicesRunning(): """ Return true if the engine services are running """ process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE) stdout = process.communicate()[0] result = process.returncode if result != 0: raise RuntimeError("Unable to check for running client job manager") # See if the CJM is running running = False for line in stdout.split("\n"): if "python" in line and "clientjobmanager.client_job_manager" in line: running = True break return running
def function[_engineServicesRunning, parameter[]]: constant[ Return true if the engine services are running ] variable[process] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Constant object at 0x7da18f09fa90>, <ast.Constant object at 0x7da18f09e4d0>]]]] variable[stdout] assign[=] call[call[name[process].communicate, parameter[]]][constant[0]] variable[result] assign[=] name[process].returncode if compare[name[result] not_equal[!=] constant[0]] begin[:] <ast.Raise object at 0x7da18f09e620> variable[running] assign[=] constant[False] for taget[name[line]] in starred[call[name[stdout].split, parameter[constant[ ]]]] begin[:] if <ast.BoolOp object at 0x7da18f09edd0> begin[:] variable[running] assign[=] constant[True] break return[name[running]]
keyword[def] identifier[_engineServicesRunning] (): literal[string] identifier[process] = identifier[subprocess] . identifier[Popen] ([ literal[string] , literal[string] ], identifier[stdout] = identifier[subprocess] . identifier[PIPE] ) identifier[stdout] = identifier[process] . identifier[communicate] ()[ literal[int] ] identifier[result] = identifier[process] . identifier[returncode] keyword[if] identifier[result] != literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[running] = keyword[False] keyword[for] identifier[line] keyword[in] identifier[stdout] . identifier[split] ( literal[string] ): keyword[if] literal[string] keyword[in] identifier[line] keyword[and] literal[string] keyword[in] identifier[line] : identifier[running] = keyword[True] keyword[break] keyword[return] identifier[running]
def _engineServicesRunning(): """ Return true if the engine services are running """ process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE) stdout = process.communicate()[0] result = process.returncode if result != 0: raise RuntimeError('Unable to check for running client job manager') # depends on [control=['if'], data=[]] # See if the CJM is running running = False for line in stdout.split('\n'): if 'python' in line and 'clientjobmanager.client_job_manager' in line: running = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] return running
def update_actualremoterelieve_v1(self): """Constrain the actual relieve discharge to a remote location. Required control parameter: |HighestRemoteDischarge| Required derived parameter: |HighestRemoteSmoothPar| Updated flux sequence: |ActualRemoteRelieve| Basic equation - discontinous: :math:`ActualRemoteRelieve = min(ActualRemoteRelease, HighestRemoteDischarge)` Basic equation - continous: :math:`ActualRemoteRelieve = smooth_min1(ActualRemoteRelieve, HighestRemoteDischarge, HighestRemoteSmoothPar)` Used auxiliary methods: |smooth_min1| |smooth_max1| Note that the given continous basic equation is a simplification of the complete algorithm to update |ActualRemoteRelieve|, which also makes use of |smooth_max1| to prevent from gaining negative values in a smooth manner. Examples: Prepare a dam model: >>> from hydpy.models.dam import * >>> parameterstep() Prepare a test function object that performs eight examples with |ActualRemoteRelieve| ranging from 0 to 8 m³/s and a fixed initial value of parameter |HighestRemoteDischarge| of 4 m³/s: >>> highestremotedischarge(4.0) >>> from hydpy import UnitTest >>> test = UnitTest(model, model.update_actualremoterelieve_v1, ... last_example=8, ... parseqs=(fluxes.actualremoterelieve,)) >>> test.nexts.actualremoterelieve = range(8) Through setting the value of |HighestRemoteTolerance| to the lowest possible value, there is no smoothing. Instead, the shown relationship agrees with a combination of the discontinuous minimum and maximum function: >>> highestremotetolerance(0.0) >>> derived.highestremotesmoothpar.update() >>> test() | ex. | actualremoterelieve | ----------------------------- | 1 | 0.0 | | 2 | 1.0 | | 3 | 2.0 | | 4 | 3.0 | | 5 | 4.0 | | 6 | 4.0 | | 7 | 4.0 | | 8 | 4.0 | Setting a sensible |HighestRemoteTolerance| value results in a moderate smoothing: >>> highestremotetolerance(0.1) >>> derived.highestremotesmoothpar.update() >>> test() | ex. | actualremoterelieve | ----------------------------- | 1 | 0.0 | | 2 | 0.999999 | | 3 | 1.99995 | | 4 | 2.996577 | | 5 | 3.836069 | | 6 | 3.991578 | | 7 | 3.993418 | | 8 | 3.993442 | Method |update_actualremoterelieve_v1| is defined in a similar way as method |calc_actualremoterelieve_v1|. Please read the documentation on |calc_actualremoterelieve_v1| for further information. """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess d_smooth = der.highestremotesmoothpar d_highest = con.highestremotedischarge d_value = smoothutils.smooth_min1( flu.actualremoterelieve, d_highest, d_smooth) for dummy in range(5): d_smooth /= 5. d_value = smoothutils.smooth_max1( d_value, 0., d_smooth) d_smooth /= 5. d_value = smoothutils.smooth_min1( d_value, d_highest, d_smooth) d_value = min(d_value, flu.actualremoterelieve) d_value = min(d_value, d_highest) flu.actualremoterelieve = max(d_value, 0.)
def function[update_actualremoterelieve_v1, parameter[self]]: constant[Constrain the actual relieve discharge to a remote location. Required control parameter: |HighestRemoteDischarge| Required derived parameter: |HighestRemoteSmoothPar| Updated flux sequence: |ActualRemoteRelieve| Basic equation - discontinous: :math:`ActualRemoteRelieve = min(ActualRemoteRelease, HighestRemoteDischarge)` Basic equation - continous: :math:`ActualRemoteRelieve = smooth_min1(ActualRemoteRelieve, HighestRemoteDischarge, HighestRemoteSmoothPar)` Used auxiliary methods: |smooth_min1| |smooth_max1| Note that the given continous basic equation is a simplification of the complete algorithm to update |ActualRemoteRelieve|, which also makes use of |smooth_max1| to prevent from gaining negative values in a smooth manner. Examples: Prepare a dam model: >>> from hydpy.models.dam import * >>> parameterstep() Prepare a test function object that performs eight examples with |ActualRemoteRelieve| ranging from 0 to 8 m³/s and a fixed initial value of parameter |HighestRemoteDischarge| of 4 m³/s: >>> highestremotedischarge(4.0) >>> from hydpy import UnitTest >>> test = UnitTest(model, model.update_actualremoterelieve_v1, ... last_example=8, ... parseqs=(fluxes.actualremoterelieve,)) >>> test.nexts.actualremoterelieve = range(8) Through setting the value of |HighestRemoteTolerance| to the lowest possible value, there is no smoothing. Instead, the shown relationship agrees with a combination of the discontinuous minimum and maximum function: >>> highestremotetolerance(0.0) >>> derived.highestremotesmoothpar.update() >>> test() | ex. | actualremoterelieve | ----------------------------- | 1 | 0.0 | | 2 | 1.0 | | 3 | 2.0 | | 4 | 3.0 | | 5 | 4.0 | | 6 | 4.0 | | 7 | 4.0 | | 8 | 4.0 | Setting a sensible |HighestRemoteTolerance| value results in a moderate smoothing: >>> highestremotetolerance(0.1) >>> derived.highestremotesmoothpar.update() >>> test() | ex. | actualremoterelieve | ----------------------------- | 1 | 0.0 | | 2 | 0.999999 | | 3 | 1.99995 | | 4 | 2.996577 | | 5 | 3.836069 | | 6 | 3.991578 | | 7 | 3.993418 | | 8 | 3.993442 | Method |update_actualremoterelieve_v1| is defined in a similar way as method |calc_actualremoterelieve_v1|. Please read the documentation on |calc_actualremoterelieve_v1| for further information. ] variable[con] assign[=] name[self].parameters.control.fastaccess variable[der] assign[=] name[self].parameters.derived.fastaccess variable[flu] assign[=] name[self].sequences.fluxes.fastaccess variable[d_smooth] assign[=] name[der].highestremotesmoothpar variable[d_highest] assign[=] name[con].highestremotedischarge variable[d_value] assign[=] call[name[smoothutils].smooth_min1, parameter[name[flu].actualremoterelieve, name[d_highest], name[d_smooth]]] for taget[name[dummy]] in starred[call[name[range], parameter[constant[5]]]] begin[:] <ast.AugAssign object at 0x7da1b0feba30> variable[d_value] assign[=] call[name[smoothutils].smooth_max1, parameter[name[d_value], constant[0.0], name[d_smooth]]] <ast.AugAssign object at 0x7da1b0febca0> variable[d_value] assign[=] call[name[smoothutils].smooth_min1, parameter[name[d_value], name[d_highest], name[d_smooth]]] variable[d_value] assign[=] call[name[min], parameter[name[d_value], name[flu].actualremoterelieve]] variable[d_value] assign[=] call[name[min], parameter[name[d_value], name[d_highest]]] name[flu].actualremoterelieve assign[=] call[name[max], parameter[name[d_value], constant[0.0]]]
keyword[def] identifier[update_actualremoterelieve_v1] ( identifier[self] ): literal[string] identifier[con] = identifier[self] . identifier[parameters] . identifier[control] . identifier[fastaccess] identifier[der] = identifier[self] . identifier[parameters] . identifier[derived] . identifier[fastaccess] identifier[flu] = identifier[self] . identifier[sequences] . identifier[fluxes] . identifier[fastaccess] identifier[d_smooth] = identifier[der] . identifier[highestremotesmoothpar] identifier[d_highest] = identifier[con] . identifier[highestremotedischarge] identifier[d_value] = identifier[smoothutils] . identifier[smooth_min1] ( identifier[flu] . identifier[actualremoterelieve] , identifier[d_highest] , identifier[d_smooth] ) keyword[for] identifier[dummy] keyword[in] identifier[range] ( literal[int] ): identifier[d_smooth] /= literal[int] identifier[d_value] = identifier[smoothutils] . identifier[smooth_max1] ( identifier[d_value] , literal[int] , identifier[d_smooth] ) identifier[d_smooth] /= literal[int] identifier[d_value] = identifier[smoothutils] . identifier[smooth_min1] ( identifier[d_value] , identifier[d_highest] , identifier[d_smooth] ) identifier[d_value] = identifier[min] ( identifier[d_value] , identifier[flu] . identifier[actualremoterelieve] ) identifier[d_value] = identifier[min] ( identifier[d_value] , identifier[d_highest] ) identifier[flu] . identifier[actualremoterelieve] = identifier[max] ( identifier[d_value] , literal[int] )
def update_actualremoterelieve_v1(self): """Constrain the actual relieve discharge to a remote location. Required control parameter: |HighestRemoteDischarge| Required derived parameter: |HighestRemoteSmoothPar| Updated flux sequence: |ActualRemoteRelieve| Basic equation - discontinous: :math:`ActualRemoteRelieve = min(ActualRemoteRelease, HighestRemoteDischarge)` Basic equation - continous: :math:`ActualRemoteRelieve = smooth_min1(ActualRemoteRelieve, HighestRemoteDischarge, HighestRemoteSmoothPar)` Used auxiliary methods: |smooth_min1| |smooth_max1| Note that the given continous basic equation is a simplification of the complete algorithm to update |ActualRemoteRelieve|, which also makes use of |smooth_max1| to prevent from gaining negative values in a smooth manner. Examples: Prepare a dam model: >>> from hydpy.models.dam import * >>> parameterstep() Prepare a test function object that performs eight examples with |ActualRemoteRelieve| ranging from 0 to 8 m³/s and a fixed initial value of parameter |HighestRemoteDischarge| of 4 m³/s: >>> highestremotedischarge(4.0) >>> from hydpy import UnitTest >>> test = UnitTest(model, model.update_actualremoterelieve_v1, ... last_example=8, ... parseqs=(fluxes.actualremoterelieve,)) >>> test.nexts.actualremoterelieve = range(8) Through setting the value of |HighestRemoteTolerance| to the lowest possible value, there is no smoothing. Instead, the shown relationship agrees with a combination of the discontinuous minimum and maximum function: >>> highestremotetolerance(0.0) >>> derived.highestremotesmoothpar.update() >>> test() | ex. | actualremoterelieve | ----------------------------- | 1 | 0.0 | | 2 | 1.0 | | 3 | 2.0 | | 4 | 3.0 | | 5 | 4.0 | | 6 | 4.0 | | 7 | 4.0 | | 8 | 4.0 | Setting a sensible |HighestRemoteTolerance| value results in a moderate smoothing: >>> highestremotetolerance(0.1) >>> derived.highestremotesmoothpar.update() >>> test() | ex. | actualremoterelieve | ----------------------------- | 1 | 0.0 | | 2 | 0.999999 | | 3 | 1.99995 | | 4 | 2.996577 | | 5 | 3.836069 | | 6 | 3.991578 | | 7 | 3.993418 | | 8 | 3.993442 | Method |update_actualremoterelieve_v1| is defined in a similar way as method |calc_actualremoterelieve_v1|. Please read the documentation on |calc_actualremoterelieve_v1| for further information. """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess d_smooth = der.highestremotesmoothpar d_highest = con.highestremotedischarge d_value = smoothutils.smooth_min1(flu.actualremoterelieve, d_highest, d_smooth) for dummy in range(5): d_smooth /= 5.0 d_value = smoothutils.smooth_max1(d_value, 0.0, d_smooth) d_smooth /= 5.0 d_value = smoothutils.smooth_min1(d_value, d_highest, d_smooth) # depends on [control=['for'], data=[]] d_value = min(d_value, flu.actualremoterelieve) d_value = min(d_value, d_highest) flu.actualremoterelieve = max(d_value, 0.0)
def plot_amino_diagrams(self): """ Plotting of amino diagrams - circles with residue name and id, colored according to the residue type. If the protein has more than one chain, chain identity is also included in the plot. The plot is saved as svg file with residue id and chain id as filename for more certain identification. """ for res in self.topology_data.dict_of_plotted_res: try: color = [self.colors_amino_acids[self.amino_acids[res[0]]],'white'] except KeyError: color = ["pink",'white'] plt.figure(figsize=(2.5,2.5)) ring1,_=plt.pie([1], radius=1, startangle=90, colors=color, counterclock=False) plt.axis('equal') plt.setp(ring1, width=1, edgecolor=color[0]) if len(self.topology_data.universe.protein.segments)<=1: #Parameters for amino diagrams without segids plt.text(0,-0.45,res[0]+"\n"+res[1],ha='center',size=36, fontweight="bold") else: #Parameters for amino diagrams with segids plt.text(0,-0.37,res[0]+"\n"+res[1]+" "+res[2],ha='center',size=30, fontweight="bold") #play with the dpi pylab.savefig(str(res[1])+res[2]+".svg", dpi=300, transparent=True)
def function[plot_amino_diagrams, parameter[self]]: constant[ Plotting of amino diagrams - circles with residue name and id, colored according to the residue type. If the protein has more than one chain, chain identity is also included in the plot. The plot is saved as svg file with residue id and chain id as filename for more certain identification. ] for taget[name[res]] in starred[name[self].topology_data.dict_of_plotted_res] begin[:] <ast.Try object at 0x7da18f8136d0> call[name[plt].figure, parameter[]] <ast.Tuple object at 0x7da18f8133d0> assign[=] call[name[plt].pie, parameter[list[[<ast.Constant object at 0x7da18f811f30>]]]] call[name[plt].axis, parameter[constant[equal]]] call[name[plt].setp, parameter[name[ring1]]] if compare[call[name[len], parameter[name[self].topology_data.universe.protein.segments]] less_or_equal[<=] constant[1]] begin[:] call[name[plt].text, parameter[constant[0], <ast.UnaryOp object at 0x7da18f811720>, binary_operation[binary_operation[call[name[res]][constant[0]] + constant[ ]] + call[name[res]][constant[1]]]]] call[name[pylab].savefig, parameter[binary_operation[binary_operation[call[name[str], parameter[call[name[res]][constant[1]]]] + call[name[res]][constant[2]]] + constant[.svg]]]]
keyword[def] identifier[plot_amino_diagrams] ( identifier[self] ): literal[string] keyword[for] identifier[res] keyword[in] identifier[self] . identifier[topology_data] . identifier[dict_of_plotted_res] : keyword[try] : identifier[color] =[ identifier[self] . identifier[colors_amino_acids] [ identifier[self] . identifier[amino_acids] [ identifier[res] [ literal[int] ]]], literal[string] ] keyword[except] identifier[KeyError] : identifier[color] =[ literal[string] , literal[string] ] identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] )) identifier[ring1] , identifier[_] = identifier[plt] . identifier[pie] ([ literal[int] ], identifier[radius] = literal[int] , identifier[startangle] = literal[int] , identifier[colors] = identifier[color] , identifier[counterclock] = keyword[False] ) identifier[plt] . identifier[axis] ( literal[string] ) identifier[plt] . identifier[setp] ( identifier[ring1] , identifier[width] = literal[int] , identifier[edgecolor] = identifier[color] [ literal[int] ]) keyword[if] identifier[len] ( identifier[self] . identifier[topology_data] . identifier[universe] . identifier[protein] . identifier[segments] )<= literal[int] : identifier[plt] . identifier[text] ( literal[int] ,- literal[int] , identifier[res] [ literal[int] ]+ literal[string] + identifier[res] [ literal[int] ], identifier[ha] = literal[string] , identifier[size] = literal[int] , identifier[fontweight] = literal[string] ) keyword[else] : identifier[plt] . identifier[text] ( literal[int] ,- literal[int] , identifier[res] [ literal[int] ]+ literal[string] + identifier[res] [ literal[int] ]+ literal[string] + identifier[res] [ literal[int] ], identifier[ha] = literal[string] , identifier[size] = literal[int] , identifier[fontweight] = literal[string] ) identifier[pylab] . identifier[savefig] ( identifier[str] ( identifier[res] [ literal[int] ])+ identifier[res] [ literal[int] ]+ literal[string] , identifier[dpi] = literal[int] , identifier[transparent] = keyword[True] )
def plot_amino_diagrams(self): """ Plotting of amino diagrams - circles with residue name and id, colored according to the residue type. If the protein has more than one chain, chain identity is also included in the plot. The plot is saved as svg file with residue id and chain id as filename for more certain identification. """ for res in self.topology_data.dict_of_plotted_res: try: color = [self.colors_amino_acids[self.amino_acids[res[0]]], 'white'] # depends on [control=['try'], data=[]] except KeyError: color = ['pink', 'white'] # depends on [control=['except'], data=[]] plt.figure(figsize=(2.5, 2.5)) (ring1, _) = plt.pie([1], radius=1, startangle=90, colors=color, counterclock=False) plt.axis('equal') plt.setp(ring1, width=1, edgecolor=color[0]) if len(self.topology_data.universe.protein.segments) <= 1: #Parameters for amino diagrams without segids plt.text(0, -0.45, res[0] + '\n' + res[1], ha='center', size=36, fontweight='bold') # depends on [control=['if'], data=[]] else: #Parameters for amino diagrams with segids plt.text(0, -0.37, res[0] + '\n' + res[1] + ' ' + res[2], ha='center', size=30, fontweight='bold') #play with the dpi pylab.savefig(str(res[1]) + res[2] + '.svg', dpi=300, transparent=True) # depends on [control=['for'], data=['res']]
def project_layout(proposal, user=None, repo=None, log=None): """ generate the project template proposal is the name of the project, user is an object containing some information about the user. - full name, - github username - email """ proposal = proposal.lower() #context_file = os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/cookiecutter.json') #context = generate_context(context_file) # os.chdir('..') # context['cookiecutter']['full_name'] = user.name # context['cookiecutter']['email'] = user.email # context['cookiecutter']['github_username'] = user.login # context['cookiecutter']['project_name'] = proposal # context['cookiecutter']['repo_name'] = proposal.lower() try: os.mkdir(proposal) except FileExistsError: log.info('Skip directory structure, as project seem to already exists') with open('.gitignore', 'w') as f: f.write(''' *.pyc __pycache__ /build/ /dist/ ''') with open( '/'.join([proposal, '__init__.py']), 'w') as f: f.write(''' """ a simple package """ __version__ = '0.0.1' ''') travis_yml() #generate_files( # repo_dir=os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/'), # context=context # ) log.info('Workig in %s', os.getcwd()) os.listdir('.') subprocess.call(['git','add','.'], ) subprocess.call(['git','commit',"-am'initial commit of %s'" % proposal]) subprocess.call(['git', "push", "origin", "master:master"])
def function[project_layout, parameter[proposal, user, repo, log]]: constant[ generate the project template proposal is the name of the project, user is an object containing some information about the user. - full name, - github username - email ] variable[proposal] assign[=] call[name[proposal].lower, parameter[]] <ast.Try object at 0x7da18bc724d0> with call[name[open], parameter[constant[.gitignore], constant[w]]] begin[:] call[name[f].write, parameter[constant[ *.pyc __pycache__ /build/ /dist/ ]]] with call[name[open], parameter[call[constant[/].join, parameter[list[[<ast.Name object at 0x7da20e9564a0>, <ast.Constant object at 0x7da20e954c70>]]]], constant[w]]] begin[:] call[name[f].write, parameter[constant[ """ a simple package """ __version__ = '0.0.1' ]]] call[name[travis_yml], parameter[]] call[name[log].info, parameter[constant[Workig in %s], call[name[os].getcwd, parameter[]]]] call[name[os].listdir, parameter[constant[.]]] call[name[subprocess].call, parameter[list[[<ast.Constant object at 0x7da20e9b0cd0>, <ast.Constant object at 0x7da20e9b0550>, <ast.Constant object at 0x7da20e9b2890>]]]] call[name[subprocess].call, parameter[list[[<ast.Constant object at 0x7da20e9b3700>, <ast.Constant object at 0x7da20e9b2e60>, <ast.BinOp object at 0x7da20e9b2a40>]]]] call[name[subprocess].call, parameter[list[[<ast.Constant object at 0x7da1b16be1d0>, <ast.Constant object at 0x7da1b16be980>, <ast.Constant object at 0x7da1b16bd660>, <ast.Constant object at 0x7da1b16bc940>]]]]
keyword[def] identifier[project_layout] ( identifier[proposal] , identifier[user] = keyword[None] , identifier[repo] = keyword[None] , identifier[log] = keyword[None] ): literal[string] identifier[proposal] = identifier[proposal] . identifier[lower] () keyword[try] : identifier[os] . identifier[mkdir] ( identifier[proposal] ) keyword[except] identifier[FileExistsError] : identifier[log] . identifier[info] ( literal[string] ) keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( literal[string] ) keyword[with] identifier[open] ( literal[string] . identifier[join] ([ identifier[proposal] , literal[string] ]), literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( literal[string] ) identifier[travis_yml] () identifier[log] . identifier[info] ( literal[string] , identifier[os] . identifier[getcwd] ()) identifier[os] . identifier[listdir] ( literal[string] ) identifier[subprocess] . identifier[call] ([ literal[string] , literal[string] , literal[string] ],) identifier[subprocess] . identifier[call] ([ literal[string] , literal[string] , literal[string] % identifier[proposal] ]) identifier[subprocess] . identifier[call] ([ literal[string] , literal[string] , literal[string] , literal[string] ])
def project_layout(proposal, user=None, repo=None, log=None): """ generate the project template proposal is the name of the project, user is an object containing some information about the user. - full name, - github username - email """ proposal = proposal.lower() #context_file = os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/cookiecutter.json') #context = generate_context(context_file) # os.chdir('..') # context['cookiecutter']['full_name'] = user.name # context['cookiecutter']['email'] = user.email # context['cookiecutter']['github_username'] = user.login # context['cookiecutter']['project_name'] = proposal # context['cookiecutter']['repo_name'] = proposal.lower() try: os.mkdir(proposal) # depends on [control=['try'], data=[]] except FileExistsError: log.info('Skip directory structure, as project seem to already exists') # depends on [control=['except'], data=[]] with open('.gitignore', 'w') as f: f.write('\n*.pyc\n__pycache__\n/build/\n/dist/\n') # depends on [control=['with'], data=['f']] with open('/'.join([proposal, '__init__.py']), 'w') as f: f.write('\n"""\na simple package\n"""\n\n\n__version__ = \'0.0.1\'\n\n ') # depends on [control=['with'], data=['f']] travis_yml() #generate_files( # repo_dir=os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/'), # context=context # ) log.info('Workig in %s', os.getcwd()) os.listdir('.') subprocess.call(['git', 'add', '.']) subprocess.call(['git', 'commit', "-am'initial commit of %s'" % proposal]) subprocess.call(['git', 'push', 'origin', 'master:master'])
def makeAB(self): """ Munge A and B reads into single serial block with only unique fields.""" for fld in self.m_blk_a: compare_fld = fld.upper() if not "RESERVED" in compare_fld and not "CRC" in compare_fld: self.m_req[fld] = self.m_blk_a[fld] for fld in self.m_blk_b: compare_fld = fld.upper() if not "RESERVED" in compare_fld and not "CRC" in compare_fld: self.m_req[fld] = self.m_blk_b[fld] pass
def function[makeAB, parameter[self]]: constant[ Munge A and B reads into single serial block with only unique fields.] for taget[name[fld]] in starred[name[self].m_blk_a] begin[:] variable[compare_fld] assign[=] call[name[fld].upper, parameter[]] if <ast.BoolOp object at 0x7da18bc730a0> begin[:] call[name[self].m_req][name[fld]] assign[=] call[name[self].m_blk_a][name[fld]] for taget[name[fld]] in starred[name[self].m_blk_b] begin[:] variable[compare_fld] assign[=] call[name[fld].upper, parameter[]] if <ast.BoolOp object at 0x7da204565840> begin[:] call[name[self].m_req][name[fld]] assign[=] call[name[self].m_blk_b][name[fld]] pass
keyword[def] identifier[makeAB] ( identifier[self] ): literal[string] keyword[for] identifier[fld] keyword[in] identifier[self] . identifier[m_blk_a] : identifier[compare_fld] = identifier[fld] . identifier[upper] () keyword[if] keyword[not] literal[string] keyword[in] identifier[compare_fld] keyword[and] keyword[not] literal[string] keyword[in] identifier[compare_fld] : identifier[self] . identifier[m_req] [ identifier[fld] ]= identifier[self] . identifier[m_blk_a] [ identifier[fld] ] keyword[for] identifier[fld] keyword[in] identifier[self] . identifier[m_blk_b] : identifier[compare_fld] = identifier[fld] . identifier[upper] () keyword[if] keyword[not] literal[string] keyword[in] identifier[compare_fld] keyword[and] keyword[not] literal[string] keyword[in] identifier[compare_fld] : identifier[self] . identifier[m_req] [ identifier[fld] ]= identifier[self] . identifier[m_blk_b] [ identifier[fld] ] keyword[pass]
def makeAB(self): """ Munge A and B reads into single serial block with only unique fields.""" for fld in self.m_blk_a: compare_fld = fld.upper() if not 'RESERVED' in compare_fld and (not 'CRC' in compare_fld): self.m_req[fld] = self.m_blk_a[fld] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fld']] for fld in self.m_blk_b: compare_fld = fld.upper() if not 'RESERVED' in compare_fld and (not 'CRC' in compare_fld): self.m_req[fld] = self.m_blk_b[fld] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fld']] pass
def build_indentation_list(parser: str = 'github'): r"""Create a data structure that holds the state of indentations. :parameter parser: decides the length of the list. Defaults to ``github``. :type parser: str :returns: indentation_list, a list that contains the state of indentations given a header type. :rtype: list :raises: a built-in exception. """ indentation_list = list() if (parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' or parser == 'redcarpet'): for i in range(0, md_parser[parser]['header']['max_levels']): indentation_list.append(False) return indentation_list
def function[build_indentation_list, parameter[parser]]: constant[Create a data structure that holds the state of indentations. :parameter parser: decides the length of the list. Defaults to ``github``. :type parser: str :returns: indentation_list, a list that contains the state of indentations given a header type. :rtype: list :raises: a built-in exception. ] variable[indentation_list] assign[=] call[name[list], parameter[]] if <ast.BoolOp object at 0x7da1b26aeaa0> begin[:] for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[call[call[name[md_parser]][name[parser]]][constant[header]]][constant[max_levels]]]]] begin[:] call[name[indentation_list].append, parameter[constant[False]]] return[name[indentation_list]]
keyword[def] identifier[build_indentation_list] ( identifier[parser] : identifier[str] = literal[string] ): literal[string] identifier[indentation_list] = identifier[list] () keyword[if] ( identifier[parser] == literal[string] keyword[or] identifier[parser] == literal[string] keyword[or] identifier[parser] == literal[string] keyword[or] identifier[parser] == literal[string] keyword[or] identifier[parser] == literal[string] ): keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[md_parser] [ identifier[parser] ][ literal[string] ][ literal[string] ]): identifier[indentation_list] . identifier[append] ( keyword[False] ) keyword[return] identifier[indentation_list]
def build_indentation_list(parser: str='github'): """Create a data structure that holds the state of indentations. :parameter parser: decides the length of the list. Defaults to ``github``. :type parser: str :returns: indentation_list, a list that contains the state of indentations given a header type. :rtype: list :raises: a built-in exception. """ indentation_list = list() if parser == 'github' or parser == 'cmark' or parser == 'gitlab' or (parser == 'commonmarker') or (parser == 'redcarpet'): for i in range(0, md_parser[parser]['header']['max_levels']): indentation_list.append(False) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] return indentation_list
def fetch_and_filter_tags(self): """ Fetch and filter tags, fetch dates and sort them in time order. """ self.all_tags = self.fetcher.get_all_tags() self.filtered_tags = self.get_filtered_tags(self.all_tags) self.fetch_tags_dates()
def function[fetch_and_filter_tags, parameter[self]]: constant[ Fetch and filter tags, fetch dates and sort them in time order. ] name[self].all_tags assign[=] call[name[self].fetcher.get_all_tags, parameter[]] name[self].filtered_tags assign[=] call[name[self].get_filtered_tags, parameter[name[self].all_tags]] call[name[self].fetch_tags_dates, parameter[]]
keyword[def] identifier[fetch_and_filter_tags] ( identifier[self] ): literal[string] identifier[self] . identifier[all_tags] = identifier[self] . identifier[fetcher] . identifier[get_all_tags] () identifier[self] . identifier[filtered_tags] = identifier[self] . identifier[get_filtered_tags] ( identifier[self] . identifier[all_tags] ) identifier[self] . identifier[fetch_tags_dates] ()
def fetch_and_filter_tags(self): """ Fetch and filter tags, fetch dates and sort them in time order. """ self.all_tags = self.fetcher.get_all_tags() self.filtered_tags = self.get_filtered_tags(self.all_tags) self.fetch_tags_dates()
def folder_cls_from_folder_name(cls, folder_name, locale): """Returns the folder class that matches a localized folder name. locale is a string, e.g. 'da_DK' """ for folder_cls in cls.WELLKNOWN_FOLDERS + NON_DELETEABLE_FOLDERS: if folder_name.lower() in folder_cls.localized_names(locale): return folder_cls raise KeyError()
def function[folder_cls_from_folder_name, parameter[cls, folder_name, locale]]: constant[Returns the folder class that matches a localized folder name. locale is a string, e.g. 'da_DK' ] for taget[name[folder_cls]] in starred[binary_operation[name[cls].WELLKNOWN_FOLDERS + name[NON_DELETEABLE_FOLDERS]]] begin[:] if compare[call[name[folder_name].lower, parameter[]] in call[name[folder_cls].localized_names, parameter[name[locale]]]] begin[:] return[name[folder_cls]] <ast.Raise object at 0x7da20e9545e0>
keyword[def] identifier[folder_cls_from_folder_name] ( identifier[cls] , identifier[folder_name] , identifier[locale] ): literal[string] keyword[for] identifier[folder_cls] keyword[in] identifier[cls] . identifier[WELLKNOWN_FOLDERS] + identifier[NON_DELETEABLE_FOLDERS] : keyword[if] identifier[folder_name] . identifier[lower] () keyword[in] identifier[folder_cls] . identifier[localized_names] ( identifier[locale] ): keyword[return] identifier[folder_cls] keyword[raise] identifier[KeyError] ()
def folder_cls_from_folder_name(cls, folder_name, locale): """Returns the folder class that matches a localized folder name. locale is a string, e.g. 'da_DK' """ for folder_cls in cls.WELLKNOWN_FOLDERS + NON_DELETEABLE_FOLDERS: if folder_name.lower() in folder_cls.localized_names(locale): return folder_cls # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['folder_cls']] raise KeyError()
def retry_on_bad_auth(func): """If bad token or board, try again after clearing relevant cache entries""" @wraps(func) def retry_version(self, *args, **kwargs): while True: try: return func(self, *args, **kwargs) except trolly.ResourceUnavailable: sys.stderr.write('bad request (refresh board id)\n') self._board_id = None self.save_key('board_id', None) except trolly.Unauthorised: sys.stderr.write('bad permissions (refresh token)\n') self._client = None self._token = None self.save_key('token', None) return retry_version
def function[retry_on_bad_auth, parameter[func]]: constant[If bad token or board, try again after clearing relevant cache entries] def function[retry_version, parameter[self]]: while constant[True] begin[:] <ast.Try object at 0x7da20c6c5900> return[name[retry_version]]
keyword[def] identifier[retry_on_bad_auth] ( identifier[func] ): literal[string] @ identifier[wraps] ( identifier[func] ) keyword[def] identifier[retry_version] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): keyword[while] keyword[True] : keyword[try] : keyword[return] identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ) keyword[except] identifier[trolly] . identifier[ResourceUnavailable] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) identifier[self] . identifier[_board_id] = keyword[None] identifier[self] . identifier[save_key] ( literal[string] , keyword[None] ) keyword[except] identifier[trolly] . identifier[Unauthorised] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) identifier[self] . identifier[_client] = keyword[None] identifier[self] . identifier[_token] = keyword[None] identifier[self] . identifier[save_key] ( literal[string] , keyword[None] ) keyword[return] identifier[retry_version]
def retry_on_bad_auth(func): """If bad token or board, try again after clearing relevant cache entries""" @wraps(func) def retry_version(self, *args, **kwargs): while True: try: return func(self, *args, **kwargs) # depends on [control=['try'], data=[]] except trolly.ResourceUnavailable: sys.stderr.write('bad request (refresh board id)\n') self._board_id = None self.save_key('board_id', None) # depends on [control=['except'], data=[]] except trolly.Unauthorised: sys.stderr.write('bad permissions (refresh token)\n') self._client = None self._token = None self.save_key('token', None) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] return retry_version
def peak_signal_to_noise_ratio(true, pred): """Image quality metric based on maximal signal power vs. power of the noise. Args: true: the ground truth image. pred: the predicted image. Returns: peak signal to noise ratio (PSNR) """ return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def function[peak_signal_to_noise_ratio, parameter[true, pred]]: constant[Image quality metric based on maximal signal power vs. power of the noise. Args: true: the ground truth image. pred: the predicted image. Returns: peak signal to noise ratio (PSNR) ] return[binary_operation[binary_operation[constant[10.0] * call[name[tf].log, parameter[binary_operation[constant[1.0] / call[name[mean_squared_error], parameter[name[true], name[pred]]]]]]] / call[name[tf].log, parameter[constant[10.0]]]]]
keyword[def] identifier[peak_signal_to_noise_ratio] ( identifier[true] , identifier[pred] ): literal[string] keyword[return] literal[int] * identifier[tf] . identifier[log] ( literal[int] / identifier[mean_squared_error] ( identifier[true] , identifier[pred] ))/ identifier[tf] . identifier[log] ( literal[int] )
def peak_signal_to_noise_ratio(true, pred): """Image quality metric based on maximal signal power vs. power of the noise. Args: true: the ground truth image. pred: the predicted image. Returns: peak signal to noise ratio (PSNR) """ return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def packrealsize(*properties): ''' Revert to sizefromlen, pack the struct real size (struct._realsize()) to specified property path. Unlike packsize, the size without padding is stored. Often used in nstruct "prepack" parameter. :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and pack the size to specified field. ''' def func(namedstruct): v = namedstruct._target for p in properties[:-1]: v = getattr(v, p) setattr(v, properties[-1], namedstruct._realsize()) return func
def function[packrealsize, parameter[]]: constant[ Revert to sizefromlen, pack the struct real size (struct._realsize()) to specified property path. Unlike packsize, the size without padding is stored. Often used in nstruct "prepack" parameter. :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and pack the size to specified field. ] def function[func, parameter[namedstruct]]: variable[v] assign[=] name[namedstruct]._target for taget[name[p]] in starred[call[name[properties]][<ast.Slice object at 0x7da1b05ffee0>]] begin[:] variable[v] assign[=] call[name[getattr], parameter[name[v], name[p]]] call[name[setattr], parameter[name[v], call[name[properties]][<ast.UnaryOp object at 0x7da1b0549150>], call[name[namedstruct]._realsize, parameter[]]]] return[name[func]]
keyword[def] identifier[packrealsize] (* identifier[properties] ): literal[string] keyword[def] identifier[func] ( identifier[namedstruct] ): identifier[v] = identifier[namedstruct] . identifier[_target] keyword[for] identifier[p] keyword[in] identifier[properties] [:- literal[int] ]: identifier[v] = identifier[getattr] ( identifier[v] , identifier[p] ) identifier[setattr] ( identifier[v] , identifier[properties] [- literal[int] ], identifier[namedstruct] . identifier[_realsize] ()) keyword[return] identifier[func]
def packrealsize(*properties): """ Revert to sizefromlen, pack the struct real size (struct._realsize()) to specified property path. Unlike packsize, the size without padding is stored. Often used in nstruct "prepack" parameter. :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and pack the size to specified field. """ def func(namedstruct): v = namedstruct._target for p in properties[:-1]: v = getattr(v, p) # depends on [control=['for'], data=['p']] setattr(v, properties[-1], namedstruct._realsize()) return func
def pin_direction(self, pin): """Gets the `ahio.Direction` this pin was set to. If you're developing a driver, implement _pin_direction(self, pin) @arg pin the pin you want to see the mode @returns the `ahio.Direction` the pin is set to @throw KeyError if pin isn't mapped. """ if type(pin) is list: return [self.pin_direction(p) for p in pin] pin_id = self._pin_mapping.get(pin, None) if pin_id: return self._pin_direction(pin_id) else: raise KeyError('Requested pin is not mapped: %s' % pin)
def function[pin_direction, parameter[self, pin]]: constant[Gets the `ahio.Direction` this pin was set to. If you're developing a driver, implement _pin_direction(self, pin) @arg pin the pin you want to see the mode @returns the `ahio.Direction` the pin is set to @throw KeyError if pin isn't mapped. ] if compare[call[name[type], parameter[name[pin]]] is name[list]] begin[:] return[<ast.ListComp object at 0x7da18c4cf250>] variable[pin_id] assign[=] call[name[self]._pin_mapping.get, parameter[name[pin], constant[None]]] if name[pin_id] begin[:] return[call[name[self]._pin_direction, parameter[name[pin_id]]]]
keyword[def] identifier[pin_direction] ( identifier[self] , identifier[pin] ): literal[string] keyword[if] identifier[type] ( identifier[pin] ) keyword[is] identifier[list] : keyword[return] [ identifier[self] . identifier[pin_direction] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[pin] ] identifier[pin_id] = identifier[self] . identifier[_pin_mapping] . identifier[get] ( identifier[pin] , keyword[None] ) keyword[if] identifier[pin_id] : keyword[return] identifier[self] . identifier[_pin_direction] ( identifier[pin_id] ) keyword[else] : keyword[raise] identifier[KeyError] ( literal[string] % identifier[pin] )
def pin_direction(self, pin): """Gets the `ahio.Direction` this pin was set to. If you're developing a driver, implement _pin_direction(self, pin) @arg pin the pin you want to see the mode @returns the `ahio.Direction` the pin is set to @throw KeyError if pin isn't mapped. """ if type(pin) is list: return [self.pin_direction(p) for p in pin] # depends on [control=['if'], data=[]] pin_id = self._pin_mapping.get(pin, None) if pin_id: return self._pin_direction(pin_id) # depends on [control=['if'], data=[]] else: raise KeyError('Requested pin is not mapped: %s' % pin)
def make_worksheet_data(headers, worksheet): """ Make data from worksheet """ data = [] row_idx = 1 while row_idx < worksheet.nrows: cell_idx = 0 row_dict = {} while cell_idx < worksheet.ncols: cell_type = worksheet.cell_type(row_idx, cell_idx) if cell_type in VALID_CELL_TYPES: cell_value = worksheet.cell_value(row_idx, cell_idx) try: if cell_type == 2 and cell_value.is_integer(): cell_value = int(cell_value) row_dict[headers[cell_idx]] = cell_value except KeyError: try: column = ascii_uppercase[cell_idx] except IndexError: column = cell_idx puts("There is no header for cell with value '{0}' in column '{1}' of '{2}'" .format( cell_value, column, worksheet.name )) cell_idx += 1 data.append(row_dict) row_idx += 1 # Magic key handling if 'key' in headers.values(): keyed_data = {} for row in data: if 'key' in row.keys(): key = slughifi(row['key']) if keyed_data.get(key): puts("There is already a key named '{0}' with value " "'{1}' in '{2}'. It is being overwritten with " "value '{3}'.".format(key, keyed_data.get(key), worksheet.name, row)) # Magic values worksheet if worksheet.name == "values": value = row.get('value') if value not in ("", None): keyed_data[key] = value else: keyed_data[key] = row data = keyed_data return data
def function[make_worksheet_data, parameter[headers, worksheet]]: constant[ Make data from worksheet ] variable[data] assign[=] list[[]] variable[row_idx] assign[=] constant[1] while compare[name[row_idx] less[<] name[worksheet].nrows] begin[:] variable[cell_idx] assign[=] constant[0] variable[row_dict] assign[=] dictionary[[], []] while compare[name[cell_idx] less[<] name[worksheet].ncols] begin[:] variable[cell_type] assign[=] call[name[worksheet].cell_type, parameter[name[row_idx], name[cell_idx]]] if compare[name[cell_type] in name[VALID_CELL_TYPES]] begin[:] variable[cell_value] assign[=] call[name[worksheet].cell_value, parameter[name[row_idx], name[cell_idx]]] <ast.Try object at 0x7da1b1a1e260> <ast.AugAssign object at 0x7da1b1a1e440> call[name[data].append, parameter[name[row_dict]]] <ast.AugAssign object at 0x7da1b1a1ded0> if compare[constant[key] in call[name[headers].values, parameter[]]] begin[:] variable[keyed_data] assign[=] dictionary[[], []] for taget[name[row]] in starred[name[data]] begin[:] if compare[constant[key] in call[name[row].keys, parameter[]]] begin[:] variable[key] assign[=] call[name[slughifi], parameter[call[name[row]][constant[key]]]] if call[name[keyed_data].get, parameter[name[key]]] begin[:] call[name[puts], parameter[call[constant[There is already a key named '{0}' with value '{1}' in '{2}'. It is being overwritten with value '{3}'.].format, parameter[name[key], call[name[keyed_data].get, parameter[name[key]]], name[worksheet].name, name[row]]]]] if compare[name[worksheet].name equal[==] constant[values]] begin[:] variable[value] assign[=] call[name[row].get, parameter[constant[value]]] if compare[name[value] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b199da50>, <ast.Constant object at 0x7da1b199cc40>]]] begin[:] call[name[keyed_data]][name[key]] assign[=] name[value] variable[data] assign[=] name[keyed_data] return[name[data]]
keyword[def] identifier[make_worksheet_data] ( identifier[headers] , identifier[worksheet] ): literal[string] identifier[data] =[] identifier[row_idx] = literal[int] keyword[while] identifier[row_idx] < identifier[worksheet] . identifier[nrows] : identifier[cell_idx] = literal[int] identifier[row_dict] ={} keyword[while] identifier[cell_idx] < identifier[worksheet] . identifier[ncols] : identifier[cell_type] = identifier[worksheet] . identifier[cell_type] ( identifier[row_idx] , identifier[cell_idx] ) keyword[if] identifier[cell_type] keyword[in] identifier[VALID_CELL_TYPES] : identifier[cell_value] = identifier[worksheet] . identifier[cell_value] ( identifier[row_idx] , identifier[cell_idx] ) keyword[try] : keyword[if] identifier[cell_type] == literal[int] keyword[and] identifier[cell_value] . identifier[is_integer] (): identifier[cell_value] = identifier[int] ( identifier[cell_value] ) identifier[row_dict] [ identifier[headers] [ identifier[cell_idx] ]]= identifier[cell_value] keyword[except] identifier[KeyError] : keyword[try] : identifier[column] = identifier[ascii_uppercase] [ identifier[cell_idx] ] keyword[except] identifier[IndexError] : identifier[column] = identifier[cell_idx] identifier[puts] ( literal[string] . identifier[format] ( identifier[cell_value] , identifier[column] , identifier[worksheet] . identifier[name] )) identifier[cell_idx] += literal[int] identifier[data] . identifier[append] ( identifier[row_dict] ) identifier[row_idx] += literal[int] keyword[if] literal[string] keyword[in] identifier[headers] . identifier[values] (): identifier[keyed_data] ={} keyword[for] identifier[row] keyword[in] identifier[data] : keyword[if] literal[string] keyword[in] identifier[row] . identifier[keys] (): identifier[key] = identifier[slughifi] ( identifier[row] [ literal[string] ]) keyword[if] identifier[keyed_data] . identifier[get] ( identifier[key] ): identifier[puts] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[key] , identifier[keyed_data] . identifier[get] ( identifier[key] ), identifier[worksheet] . identifier[name] , identifier[row] )) keyword[if] identifier[worksheet] . identifier[name] == literal[string] : identifier[value] = identifier[row] . identifier[get] ( literal[string] ) keyword[if] identifier[value] keyword[not] keyword[in] ( literal[string] , keyword[None] ): identifier[keyed_data] [ identifier[key] ]= identifier[value] keyword[else] : identifier[keyed_data] [ identifier[key] ]= identifier[row] identifier[data] = identifier[keyed_data] keyword[return] identifier[data]
def make_worksheet_data(headers, worksheet): """ Make data from worksheet """ data = [] row_idx = 1 while row_idx < worksheet.nrows: cell_idx = 0 row_dict = {} while cell_idx < worksheet.ncols: cell_type = worksheet.cell_type(row_idx, cell_idx) if cell_type in VALID_CELL_TYPES: cell_value = worksheet.cell_value(row_idx, cell_idx) try: if cell_type == 2 and cell_value.is_integer(): cell_value = int(cell_value) # depends on [control=['if'], data=[]] row_dict[headers[cell_idx]] = cell_value # depends on [control=['try'], data=[]] except KeyError: try: column = ascii_uppercase[cell_idx] # depends on [control=['try'], data=[]] except IndexError: column = cell_idx puts("There is no header for cell with value '{0}' in column '{1}' of '{2}'".format(cell_value, column, worksheet.name)) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['cell_type']] cell_idx += 1 # depends on [control=['while'], data=['cell_idx']] data.append(row_dict) row_idx += 1 # depends on [control=['while'], data=['row_idx']] # Magic key handling if 'key' in headers.values(): keyed_data = {} for row in data: if 'key' in row.keys(): key = slughifi(row['key']) if keyed_data.get(key): puts("There is already a key named '{0}' with value '{1}' in '{2}'. It is being overwritten with value '{3}'.".format(key, keyed_data.get(key), worksheet.name, row)) # depends on [control=['if'], data=[]] # Magic values worksheet if worksheet.name == 'values': value = row.get('value') if value not in ('', None): keyed_data[key] = value # depends on [control=['if'], data=['value']] # depends on [control=['if'], data=[]] else: keyed_data[key] = row # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']] data = keyed_data # depends on [control=['if'], data=[]] return data
def indent(self): """ Indents text at cursor position. """ cursor = self.editor.textCursor() assert isinstance(cursor, QtGui.QTextCursor) if cursor.hasSelection(): self.indent_selection(cursor) else: # simply insert indentation at the cursor position tab_len = self.editor.tab_length if cursor.positionInBlock() < self.min_column and not cursor.atBlockEnd(): cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column) cursor.beginEditBlock() if self.editor.use_spaces_instead_of_tabs: nb_space_to_add = tab_len - (cursor.positionInBlock() - self.min_column) % tab_len cursor.insertText(nb_space_to_add * " ") else: cursor.insertText('\t') cursor.endEditBlock() self.editor.setTextCursor(cursor)
def function[indent, parameter[self]]: constant[ Indents text at cursor position. ] variable[cursor] assign[=] call[name[self].editor.textCursor, parameter[]] assert[call[name[isinstance], parameter[name[cursor], name[QtGui].QTextCursor]]] if call[name[cursor].hasSelection, parameter[]] begin[:] call[name[self].indent_selection, parameter[name[cursor]]]
keyword[def] identifier[indent] ( identifier[self] ): literal[string] identifier[cursor] = identifier[self] . identifier[editor] . identifier[textCursor] () keyword[assert] identifier[isinstance] ( identifier[cursor] , identifier[QtGui] . identifier[QTextCursor] ) keyword[if] identifier[cursor] . identifier[hasSelection] (): identifier[self] . identifier[indent_selection] ( identifier[cursor] ) keyword[else] : identifier[tab_len] = identifier[self] . identifier[editor] . identifier[tab_length] keyword[if] identifier[cursor] . identifier[positionInBlock] ()< identifier[self] . identifier[min_column] keyword[and] keyword[not] identifier[cursor] . identifier[atBlockEnd] (): identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[Right] , identifier[cursor] . identifier[MoveAnchor] , identifier[self] . identifier[min_column] ) identifier[cursor] . identifier[beginEditBlock] () keyword[if] identifier[self] . identifier[editor] . identifier[use_spaces_instead_of_tabs] : identifier[nb_space_to_add] = identifier[tab_len] -( identifier[cursor] . identifier[positionInBlock] ()- identifier[self] . identifier[min_column] )% identifier[tab_len] identifier[cursor] . identifier[insertText] ( identifier[nb_space_to_add] * literal[string] ) keyword[else] : identifier[cursor] . identifier[insertText] ( literal[string] ) identifier[cursor] . identifier[endEditBlock] () identifier[self] . identifier[editor] . identifier[setTextCursor] ( identifier[cursor] )
def indent(self): """ Indents text at cursor position. """ cursor = self.editor.textCursor() assert isinstance(cursor, QtGui.QTextCursor) if cursor.hasSelection(): self.indent_selection(cursor) # depends on [control=['if'], data=[]] else: # simply insert indentation at the cursor position tab_len = self.editor.tab_length if cursor.positionInBlock() < self.min_column and (not cursor.atBlockEnd()): cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column) # depends on [control=['if'], data=[]] cursor.beginEditBlock() if self.editor.use_spaces_instead_of_tabs: nb_space_to_add = tab_len - (cursor.positionInBlock() - self.min_column) % tab_len cursor.insertText(nb_space_to_add * ' ') # depends on [control=['if'], data=[]] else: cursor.insertText('\t') cursor.endEditBlock() self.editor.setTextCursor(cursor)
def module(self): """The module specified by the ``library`` attribute.""" if self._module is None: if self.library is None: raise ValueError( "Backend '%s' doesn't specify a library attribute" % self.__class__) try: if '.' in self.library: mod_path, cls_name = self.library.rsplit('.', 1) mod = import_module(mod_path) self._module = getattr(mod, cls_name) else: self._module = import_module(self.library) except (AttributeError, ImportError): raise ValueError("Couldn't load %s backend library" % cls_name) return self._module
def function[module, parameter[self]]: constant[The module specified by the ``library`` attribute.] if compare[name[self]._module is constant[None]] begin[:] if compare[name[self].library is constant[None]] begin[:] <ast.Raise object at 0x7da1b0baa590> <ast.Try object at 0x7da1b0ba8580> return[name[self]._module]
keyword[def] identifier[module] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_module] keyword[is] keyword[None] : keyword[if] identifier[self] . identifier[library] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[__class__] ) keyword[try] : keyword[if] literal[string] keyword[in] identifier[self] . identifier[library] : identifier[mod_path] , identifier[cls_name] = identifier[self] . identifier[library] . identifier[rsplit] ( literal[string] , literal[int] ) identifier[mod] = identifier[import_module] ( identifier[mod_path] ) identifier[self] . identifier[_module] = identifier[getattr] ( identifier[mod] , identifier[cls_name] ) keyword[else] : identifier[self] . identifier[_module] = identifier[import_module] ( identifier[self] . identifier[library] ) keyword[except] ( identifier[AttributeError] , identifier[ImportError] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[cls_name] ) keyword[return] identifier[self] . identifier[_module]
def module(self): """The module specified by the ``library`` attribute.""" if self._module is None: if self.library is None: raise ValueError("Backend '%s' doesn't specify a library attribute" % self.__class__) # depends on [control=['if'], data=[]] try: if '.' in self.library: (mod_path, cls_name) = self.library.rsplit('.', 1) mod = import_module(mod_path) self._module = getattr(mod, cls_name) # depends on [control=['if'], data=[]] else: self._module = import_module(self.library) # depends on [control=['try'], data=[]] except (AttributeError, ImportError): raise ValueError("Couldn't load %s backend library" % cls_name) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return self._module
def list_nodes_min(conn=None, call=None): ''' Return a list of VMs with minimal information CLI Example .. code-block:: bash salt-cloud -f list_nodes_min myopenstack ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_min function must be called with -f or --function.' ) if conn is None: conn = get_conn() ret = {} for node in conn.list_servers(bare=True): ret[node.name] = {'id': node.id, 'state': node.status} return ret
def function[list_nodes_min, parameter[conn, call]]: constant[ Return a list of VMs with minimal information CLI Example .. code-block:: bash salt-cloud -f list_nodes_min myopenstack ] if compare[name[call] equal[==] constant[action]] begin[:] <ast.Raise object at 0x7da1b21ee3b0> if compare[name[conn] is constant[None]] begin[:] variable[conn] assign[=] call[name[get_conn], parameter[]] variable[ret] assign[=] dictionary[[], []] for taget[name[node]] in starred[call[name[conn].list_servers, parameter[]]] begin[:] call[name[ret]][name[node].name] assign[=] dictionary[[<ast.Constant object at 0x7da1b21ede40>, <ast.Constant object at 0x7da1b21efac0>], [<ast.Attribute object at 0x7da1b21ef130>, <ast.Attribute object at 0x7da1b21ee1d0>]] return[name[ret]]
keyword[def] identifier[list_nodes_min] ( identifier[conn] = keyword[None] , identifier[call] = keyword[None] ): literal[string] keyword[if] identifier[call] == literal[string] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] ) keyword[if] identifier[conn] keyword[is] keyword[None] : identifier[conn] = identifier[get_conn] () identifier[ret] ={} keyword[for] identifier[node] keyword[in] identifier[conn] . identifier[list_servers] ( identifier[bare] = keyword[True] ): identifier[ret] [ identifier[node] . identifier[name] ]={ literal[string] : identifier[node] . identifier[id] , literal[string] : identifier[node] . identifier[status] } keyword[return] identifier[ret]
def list_nodes_min(conn=None, call=None): """ Return a list of VMs with minimal information CLI Example .. code-block:: bash salt-cloud -f list_nodes_min myopenstack """ if call == 'action': raise SaltCloudSystemExit('The list_nodes_min function must be called with -f or --function.') # depends on [control=['if'], data=[]] if conn is None: conn = get_conn() # depends on [control=['if'], data=['conn']] ret = {} for node in conn.list_servers(bare=True): ret[node.name] = {'id': node.id, 'state': node.status} # depends on [control=['for'], data=['node']] return ret
def bits_in(length, keyspace): """ |log2(keyspace^length) = bits| -> (#float) number of bits of entropy in @length of characters for a given a @keyspace """ keyspace = len(keyspace) length_per_cycle = 64 if length > length_per_cycle: bits = 0 length_processed = 0 cycles = ceil(length / length_per_cycle) for _ in range(int(cycles)): if length_processed + length_per_cycle > length: length_per_cycle = length - length_processed bits += calc_bits_in(length_per_cycle, keyspace) length_processed += length_per_cycle else: bits = calc_bits_in(length, keyspace) return float(abs(bits))
def function[bits_in, parameter[length, keyspace]]: constant[ |log2(keyspace^length) = bits| -> (#float) number of bits of entropy in @length of characters for a given a @keyspace ] variable[keyspace] assign[=] call[name[len], parameter[name[keyspace]]] variable[length_per_cycle] assign[=] constant[64] if compare[name[length] greater[>] name[length_per_cycle]] begin[:] variable[bits] assign[=] constant[0] variable[length_processed] assign[=] constant[0] variable[cycles] assign[=] call[name[ceil], parameter[binary_operation[name[length] / name[length_per_cycle]]]] for taget[name[_]] in starred[call[name[range], parameter[call[name[int], parameter[name[cycles]]]]]] begin[:] if compare[binary_operation[name[length_processed] + name[length_per_cycle]] greater[>] name[length]] begin[:] variable[length_per_cycle] assign[=] binary_operation[name[length] - name[length_processed]] <ast.AugAssign object at 0x7da1b10a64d0> <ast.AugAssign object at 0x7da1b10a4d90> return[call[name[float], parameter[call[name[abs], parameter[name[bits]]]]]]
keyword[def] identifier[bits_in] ( identifier[length] , identifier[keyspace] ): literal[string] identifier[keyspace] = identifier[len] ( identifier[keyspace] ) identifier[length_per_cycle] = literal[int] keyword[if] identifier[length] > identifier[length_per_cycle] : identifier[bits] = literal[int] identifier[length_processed] = literal[int] identifier[cycles] = identifier[ceil] ( identifier[length] / identifier[length_per_cycle] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[int] ( identifier[cycles] )): keyword[if] identifier[length_processed] + identifier[length_per_cycle] > identifier[length] : identifier[length_per_cycle] = identifier[length] - identifier[length_processed] identifier[bits] += identifier[calc_bits_in] ( identifier[length_per_cycle] , identifier[keyspace] ) identifier[length_processed] += identifier[length_per_cycle] keyword[else] : identifier[bits] = identifier[calc_bits_in] ( identifier[length] , identifier[keyspace] ) keyword[return] identifier[float] ( identifier[abs] ( identifier[bits] ))
def bits_in(length, keyspace): """ |log2(keyspace^length) = bits| -> (#float) number of bits of entropy in @length of characters for a given a @keyspace """ keyspace = len(keyspace) length_per_cycle = 64 if length > length_per_cycle: bits = 0 length_processed = 0 cycles = ceil(length / length_per_cycle) for _ in range(int(cycles)): if length_processed + length_per_cycle > length: length_per_cycle = length - length_processed # depends on [control=['if'], data=['length']] bits += calc_bits_in(length_per_cycle, keyspace) length_processed += length_per_cycle # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['length', 'length_per_cycle']] else: bits = calc_bits_in(length, keyspace) return float(abs(bits))
def get_next_invoke_id(self, addr): """Called by clients to get an unused invoke ID.""" if _debug: StateMachineAccessPoint._debug("get_next_invoke_id") initialID = self.nextInvokeID while 1: invokeID = self.nextInvokeID self.nextInvokeID = (self.nextInvokeID + 1) % 256 # see if we've checked for them all if initialID == self.nextInvokeID: raise RuntimeError("no available invoke ID") for tr in self.clientTransactions: if (invokeID == tr.invokeID) and (addr == tr.pdu_address): break else: break return invokeID
def function[get_next_invoke_id, parameter[self, addr]]: constant[Called by clients to get an unused invoke ID.] if name[_debug] begin[:] call[name[StateMachineAccessPoint]._debug, parameter[constant[get_next_invoke_id]]] variable[initialID] assign[=] name[self].nextInvokeID while constant[1] begin[:] variable[invokeID] assign[=] name[self].nextInvokeID name[self].nextInvokeID assign[=] binary_operation[binary_operation[name[self].nextInvokeID + constant[1]] <ast.Mod object at 0x7da2590d6920> constant[256]] if compare[name[initialID] equal[==] name[self].nextInvokeID] begin[:] <ast.Raise object at 0x7da2041db430> for taget[name[tr]] in starred[name[self].clientTransactions] begin[:] if <ast.BoolOp object at 0x7da2041daa70> begin[:] break return[name[invokeID]]
keyword[def] identifier[get_next_invoke_id] ( identifier[self] , identifier[addr] ): literal[string] keyword[if] identifier[_debug] : identifier[StateMachineAccessPoint] . identifier[_debug] ( literal[string] ) identifier[initialID] = identifier[self] . identifier[nextInvokeID] keyword[while] literal[int] : identifier[invokeID] = identifier[self] . identifier[nextInvokeID] identifier[self] . identifier[nextInvokeID] =( identifier[self] . identifier[nextInvokeID] + literal[int] )% literal[int] keyword[if] identifier[initialID] == identifier[self] . identifier[nextInvokeID] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[for] identifier[tr] keyword[in] identifier[self] . identifier[clientTransactions] : keyword[if] ( identifier[invokeID] == identifier[tr] . identifier[invokeID] ) keyword[and] ( identifier[addr] == identifier[tr] . identifier[pdu_address] ): keyword[break] keyword[else] : keyword[break] keyword[return] identifier[invokeID]
def get_next_invoke_id(self, addr): """Called by clients to get an unused invoke ID.""" if _debug: StateMachineAccessPoint._debug('get_next_invoke_id') # depends on [control=['if'], data=[]] initialID = self.nextInvokeID while 1: invokeID = self.nextInvokeID self.nextInvokeID = (self.nextInvokeID + 1) % 256 # see if we've checked for them all if initialID == self.nextInvokeID: raise RuntimeError('no available invoke ID') # depends on [control=['if'], data=[]] for tr in self.clientTransactions: if invokeID == tr.invokeID and addr == tr.pdu_address: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tr']] else: break # depends on [control=['while'], data=[]] return invokeID
def zero_node(name): ''' Reset performance statistics to zero on the local node. .. code-block:: yaml zero_ats_node: trafficserver.zero_node ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if __opts__['test']: ret['comment'] = 'Zeroing local node statistics' return ret __salt__['trafficserver.zero_node']() ret['result'] = True ret['comment'] = 'Zeroed local node statistics' return ret
def function[zero_node, parameter[name]]: constant[ Reset performance statistics to zero on the local node. .. code-block:: yaml zero_ats_node: trafficserver.zero_node ] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18f811a80>, <ast.Constant object at 0x7da18f813b20>, <ast.Constant object at 0x7da18f813d00>, <ast.Constant object at 0x7da18f812080>], [<ast.Name object at 0x7da18f810130>, <ast.Dict object at 0x7da18f812800>, <ast.Constant object at 0x7da18f8117e0>, <ast.Constant object at 0x7da18f810c40>]] if call[name[__opts__]][constant[test]] begin[:] call[name[ret]][constant[comment]] assign[=] constant[Zeroing local node statistics] return[name[ret]] call[call[name[__salt__]][constant[trafficserver.zero_node]], parameter[]] call[name[ret]][constant[result]] assign[=] constant[True] call[name[ret]][constant[comment]] assign[=] constant[Zeroed local node statistics] return[name[ret]]
keyword[def] identifier[zero_node] ( identifier[name] ): literal[string] identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[None] , literal[string] : literal[string] } keyword[if] identifier[__opts__] [ literal[string] ]: identifier[ret] [ literal[string] ]= literal[string] keyword[return] identifier[ret] identifier[__salt__] [ literal[string] ]() identifier[ret] [ literal[string] ]= keyword[True] identifier[ret] [ literal[string] ]= literal[string] keyword[return] identifier[ret]
def zero_node(name): """ Reset performance statistics to zero on the local node. .. code-block:: yaml zero_ats_node: trafficserver.zero_node """ ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if __opts__['test']: ret['comment'] = 'Zeroing local node statistics' return ret # depends on [control=['if'], data=[]] __salt__['trafficserver.zero_node']() ret['result'] = True ret['comment'] = 'Zeroed local node statistics' return ret
def gui_repaint(self, drawDC=None): """ Performs update of the displayed image on the GUI canvas, using the supplied device context. If drawDC is None, a ClientDC will be used to redraw the image. """ DEBUG_MSG("gui_repaint()", 1, self) if self.IsShownOnScreen(): if drawDC is None: drawDC=wx.ClientDC(self) #drawDC.BeginDrawing() drawDC.DrawBitmap(self.bitmap, 0, 0) #drawDC.EndDrawing() #wx.GetApp().Yield() else: pass
def function[gui_repaint, parameter[self, drawDC]]: constant[ Performs update of the displayed image on the GUI canvas, using the supplied device context. If drawDC is None, a ClientDC will be used to redraw the image. ] call[name[DEBUG_MSG], parameter[constant[gui_repaint()], constant[1], name[self]]] if call[name[self].IsShownOnScreen, parameter[]] begin[:] if compare[name[drawDC] is constant[None]] begin[:] variable[drawDC] assign[=] call[name[wx].ClientDC, parameter[name[self]]] call[name[drawDC].DrawBitmap, parameter[name[self].bitmap, constant[0], constant[0]]]
keyword[def] identifier[gui_repaint] ( identifier[self] , identifier[drawDC] = keyword[None] ): literal[string] identifier[DEBUG_MSG] ( literal[string] , literal[int] , identifier[self] ) keyword[if] identifier[self] . identifier[IsShownOnScreen] (): keyword[if] identifier[drawDC] keyword[is] keyword[None] : identifier[drawDC] = identifier[wx] . identifier[ClientDC] ( identifier[self] ) identifier[drawDC] . identifier[DrawBitmap] ( identifier[self] . identifier[bitmap] , literal[int] , literal[int] ) keyword[else] : keyword[pass]
def gui_repaint(self, drawDC=None): """ Performs update of the displayed image on the GUI canvas, using the supplied device context. If drawDC is None, a ClientDC will be used to redraw the image. """ DEBUG_MSG('gui_repaint()', 1, self) if self.IsShownOnScreen(): if drawDC is None: drawDC = wx.ClientDC(self) # depends on [control=['if'], data=['drawDC']] #drawDC.BeginDrawing() drawDC.DrawBitmap(self.bitmap, 0, 0) # depends on [control=['if'], data=[]] else: #drawDC.EndDrawing() #wx.GetApp().Yield() pass
def from_json(cls, string): """Create AnimeFiles from JSON string.""" obj = json.loads(string) return cls(obj['regexp'], obj['files'])
def function[from_json, parameter[cls, string]]: constant[Create AnimeFiles from JSON string.] variable[obj] assign[=] call[name[json].loads, parameter[name[string]]] return[call[name[cls], parameter[call[name[obj]][constant[regexp]], call[name[obj]][constant[files]]]]]
keyword[def] identifier[from_json] ( identifier[cls] , identifier[string] ): literal[string] identifier[obj] = identifier[json] . identifier[loads] ( identifier[string] ) keyword[return] identifier[cls] ( identifier[obj] [ literal[string] ], identifier[obj] [ literal[string] ])
def from_json(cls, string): """Create AnimeFiles from JSON string.""" obj = json.loads(string) return cls(obj['regexp'], obj['files'])
def unpack(iterable, count, fill=None): """ The iter data unpack function. Example 1: In[1]: source = 'abc' In[2]: a, b = safe_unpack(source, 2) In[3]: print(a, b) a b Example 2: In[1]: source = 'abc' In[2]: a, b, c, d = safe_unpack(source, 4) In[3]: print(a, b, c, d) a b None None """ iterable = list(enumerate(iterable)) cnt = count if count <= len(iterable) else len(iterable) results = [iterable[i][1] for i in range(cnt)] # results[len(results):len(results)] = [fill for i in range(count-cnt)] results = merge(results, [fill for i in range(count-cnt)]) return tuple(results)
def function[unpack, parameter[iterable, count, fill]]: constant[ The iter data unpack function. Example 1: In[1]: source = 'abc' In[2]: a, b = safe_unpack(source, 2) In[3]: print(a, b) a b Example 2: In[1]: source = 'abc' In[2]: a, b, c, d = safe_unpack(source, 4) In[3]: print(a, b, c, d) a b None None ] variable[iterable] assign[=] call[name[list], parameter[call[name[enumerate], parameter[name[iterable]]]]] variable[cnt] assign[=] <ast.IfExp object at 0x7da1b14d1930> variable[results] assign[=] <ast.ListComp object at 0x7da1b14d2ce0> variable[results] assign[=] call[name[merge], parameter[name[results], <ast.ListComp object at 0x7da1b14d2920>]] return[call[name[tuple], parameter[name[results]]]]
keyword[def] identifier[unpack] ( identifier[iterable] , identifier[count] , identifier[fill] = keyword[None] ): literal[string] identifier[iterable] = identifier[list] ( identifier[enumerate] ( identifier[iterable] )) identifier[cnt] = identifier[count] keyword[if] identifier[count] <= identifier[len] ( identifier[iterable] ) keyword[else] identifier[len] ( identifier[iterable] ) identifier[results] =[ identifier[iterable] [ identifier[i] ][ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[cnt] )] identifier[results] = identifier[merge] ( identifier[results] ,[ identifier[fill] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[count] - identifier[cnt] )]) keyword[return] identifier[tuple] ( identifier[results] )
def unpack(iterable, count, fill=None): """ The iter data unpack function. Example 1: In[1]: source = 'abc' In[2]: a, b = safe_unpack(source, 2) In[3]: print(a, b) a b Example 2: In[1]: source = 'abc' In[2]: a, b, c, d = safe_unpack(source, 4) In[3]: print(a, b, c, d) a b None None """ iterable = list(enumerate(iterable)) cnt = count if count <= len(iterable) else len(iterable) results = [iterable[i][1] for i in range(cnt)] # results[len(results):len(results)] = [fill for i in range(count-cnt)] results = merge(results, [fill for i in range(count - cnt)]) return tuple(results)
def next_population(self, population, fitnesses): """Make a new population after each optimization iteration. Args: population: The population current population of solutions. fitnesses: The fitness associated with each solution in the population Returns: list; a list of solutions. """ return common.make_population(self._population_size, self._generate_solution)
def function[next_population, parameter[self, population, fitnesses]]: constant[Make a new population after each optimization iteration. Args: population: The population current population of solutions. fitnesses: The fitness associated with each solution in the population Returns: list; a list of solutions. ] return[call[name[common].make_population, parameter[name[self]._population_size, name[self]._generate_solution]]]
keyword[def] identifier[next_population] ( identifier[self] , identifier[population] , identifier[fitnesses] ): literal[string] keyword[return] identifier[common] . identifier[make_population] ( identifier[self] . identifier[_population_size] , identifier[self] . identifier[_generate_solution] )
def next_population(self, population, fitnesses): """Make a new population after each optimization iteration. Args: population: The population current population of solutions. fitnesses: The fitness associated with each solution in the population Returns: list; a list of solutions. """ return common.make_population(self._population_size, self._generate_solution)
def parse_args(string): """ `"yada hoa" yupi yeah 12 "" None "None"` -> `["yada hoa", "yupi", "yeah", 12, "", None, "None"]` :param str: :return: """ import ast is_quoted = False result_parts = [] current_str = "" while len(string) > 0: if string[0] == "\"": is_quoted = not is_quoted current_str += string[0] elif string[0].isspace(): if is_quoted: current_str += string[0] else: result_parts.append(current_str) current_str = "" # end if else: current_str += string[0] # end if string = string[1:] # end while if current_str: # last part of the array result_parts.append(current_str) # end if for i in range(len(result_parts)): # Will try for each element if it is something pythonic. Parsed type will replace original list element. try: part = ast.literal_eval(result_parts[i]) result_parts[i] = part # write it back. except ValueError: # could not parse -> is string pass # because already is str. # end try # end for return result_parts
def function[parse_args, parameter[string]]: constant[ `"yada hoa" yupi yeah 12 "" None "None"` -> `["yada hoa", "yupi", "yeah", 12, "", None, "None"]` :param str: :return: ] import module[ast] variable[is_quoted] assign[=] constant[False] variable[result_parts] assign[=] list[[]] variable[current_str] assign[=] constant[] while compare[call[name[len], parameter[name[string]]] greater[>] constant[0]] begin[:] if compare[call[name[string]][constant[0]] equal[==] constant["]] begin[:] variable[is_quoted] assign[=] <ast.UnaryOp object at 0x7da1b04b4190> <ast.AugAssign object at 0x7da1b04b5e70> variable[string] assign[=] call[name[string]][<ast.Slice object at 0x7da18f58cca0>] if name[current_str] begin[:] call[name[result_parts].append, parameter[name[current_str]]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[result_parts]]]]]] begin[:] <ast.Try object at 0x7da1b04b6200> return[name[result_parts]]
keyword[def] identifier[parse_args] ( identifier[string] ): literal[string] keyword[import] identifier[ast] identifier[is_quoted] = keyword[False] identifier[result_parts] =[] identifier[current_str] = literal[string] keyword[while] identifier[len] ( identifier[string] )> literal[int] : keyword[if] identifier[string] [ literal[int] ]== literal[string] : identifier[is_quoted] = keyword[not] identifier[is_quoted] identifier[current_str] += identifier[string] [ literal[int] ] keyword[elif] identifier[string] [ literal[int] ]. identifier[isspace] (): keyword[if] identifier[is_quoted] : identifier[current_str] += identifier[string] [ literal[int] ] keyword[else] : identifier[result_parts] . identifier[append] ( identifier[current_str] ) identifier[current_str] = literal[string] keyword[else] : identifier[current_str] += identifier[string] [ literal[int] ] identifier[string] = identifier[string] [ literal[int] :] keyword[if] identifier[current_str] : identifier[result_parts] . identifier[append] ( identifier[current_str] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[result_parts] )): keyword[try] : identifier[part] = identifier[ast] . identifier[literal_eval] ( identifier[result_parts] [ identifier[i] ]) identifier[result_parts] [ identifier[i] ]= identifier[part] keyword[except] identifier[ValueError] : keyword[pass] keyword[return] identifier[result_parts]
def parse_args(string): """ `"yada hoa" yupi yeah 12 "" None "None"` -> `["yada hoa", "yupi", "yeah", 12, "", None, "None"]` :param str: :return: """ import ast is_quoted = False result_parts = [] current_str = '' while len(string) > 0: if string[0] == '"': is_quoted = not is_quoted current_str += string[0] # depends on [control=['if'], data=[]] elif string[0].isspace(): if is_quoted: current_str += string[0] # depends on [control=['if'], data=[]] else: result_parts.append(current_str) current_str = '' # depends on [control=['if'], data=[]] else: # end if current_str += string[0] # end if string = string[1:] # depends on [control=['while'], data=[]] # end while if current_str: # last part of the array result_parts.append(current_str) # depends on [control=['if'], data=[]] # end if for i in range(len(result_parts)): # Will try for each element if it is something pythonic. Parsed type will replace original list element. try: part = ast.literal_eval(result_parts[i]) result_parts[i] = part # write it back. # depends on [control=['try'], data=[]] except ValueError: # could not parse -> is string pass # because already is str. # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']] # end try # end for return result_parts
def adapter_update_nio_binding(self, adapter_number, port_number, nio): """ Update a port NIO binding. :param adapter_number: adapter number :param port_number: port number :param nio: NIO instance to add to the adapter """ if self.ubridge: yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters)
def function[adapter_update_nio_binding, parameter[self, adapter_number, port_number, nio]]: constant[ Update a port NIO binding. :param adapter_number: adapter number :param port_number: port number :param nio: NIO instance to add to the adapter ] if name[self].ubridge begin[:] <ast.YieldFrom object at 0x7da20c993070>
keyword[def] identifier[adapter_update_nio_binding] ( identifier[self] , identifier[adapter_number] , identifier[port_number] , identifier[nio] ): literal[string] keyword[if] identifier[self] . identifier[ubridge] : keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_apply_filters] ( identifier[adapter_number] , identifier[port_number] , identifier[nio] . identifier[filters] )
def adapter_update_nio_binding(self, adapter_number, port_number, nio): """ Update a port NIO binding. :param adapter_number: adapter number :param port_number: port number :param nio: NIO instance to add to the adapter """ if self.ubridge: yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters) # depends on [control=['if'], data=[]]
def renders_impl(self, template_content, context, at_paths=None, at_encoding=anytemplate.compat.ENCODING, **kwargs): """ Render given template string and return the result. :param template_content: Template content :param context: A dict or dict-like object to instantiate given template file :param at_paths: Template search paths :param at_encoding: Template encoding :param kwargs: Keyword arguments passed to the template engine to render templates with specific features enabled. :return: Rendered string """ if "filename" in kwargs: kwargs["filename"] = None kwargs["text"] = template_content if "input_encoding" not in kwargs: kwargs["input_encoding"] = at_encoding.lower() if "output_encoding" not in kwargs: kwargs["output_encoding"] = at_encoding.lower() if at_paths is not None: paths = at_paths + self.lookup_options.get("directories", []) self.lookup_options["directories"] = paths lookup = mako.lookup.TemplateLookup(**self.lookup_options) kwargs["lookup"] = lookup tmpl = mako.template.Template(**kwargs) return _render(tmpl, context)
def function[renders_impl, parameter[self, template_content, context, at_paths, at_encoding]]: constant[ Render given template string and return the result. :param template_content: Template content :param context: A dict or dict-like object to instantiate given template file :param at_paths: Template search paths :param at_encoding: Template encoding :param kwargs: Keyword arguments passed to the template engine to render templates with specific features enabled. :return: Rendered string ] if compare[constant[filename] in name[kwargs]] begin[:] call[name[kwargs]][constant[filename]] assign[=] constant[None] call[name[kwargs]][constant[text]] assign[=] name[template_content] if compare[constant[input_encoding] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[input_encoding]] assign[=] call[name[at_encoding].lower, parameter[]] if compare[constant[output_encoding] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[output_encoding]] assign[=] call[name[at_encoding].lower, parameter[]] if compare[name[at_paths] is_not constant[None]] begin[:] variable[paths] assign[=] binary_operation[name[at_paths] + call[name[self].lookup_options.get, parameter[constant[directories], list[[]]]]] call[name[self].lookup_options][constant[directories]] assign[=] name[paths] variable[lookup] assign[=] call[name[mako].lookup.TemplateLookup, parameter[]] call[name[kwargs]][constant[lookup]] assign[=] name[lookup] variable[tmpl] assign[=] call[name[mako].template.Template, parameter[]] return[call[name[_render], parameter[name[tmpl], name[context]]]]
keyword[def] identifier[renders_impl] ( identifier[self] , identifier[template_content] , identifier[context] , identifier[at_paths] = keyword[None] , identifier[at_encoding] = identifier[anytemplate] . identifier[compat] . identifier[ENCODING] , ** identifier[kwargs] ): literal[string] keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= keyword[None] identifier[kwargs] [ literal[string] ]= identifier[template_content] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[at_encoding] . identifier[lower] () keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[at_encoding] . identifier[lower] () keyword[if] identifier[at_paths] keyword[is] keyword[not] keyword[None] : identifier[paths] = identifier[at_paths] + identifier[self] . identifier[lookup_options] . identifier[get] ( literal[string] ,[]) identifier[self] . identifier[lookup_options] [ literal[string] ]= identifier[paths] identifier[lookup] = identifier[mako] . identifier[lookup] . identifier[TemplateLookup] (** identifier[self] . identifier[lookup_options] ) identifier[kwargs] [ literal[string] ]= identifier[lookup] identifier[tmpl] = identifier[mako] . identifier[template] . identifier[Template] (** identifier[kwargs] ) keyword[return] identifier[_render] ( identifier[tmpl] , identifier[context] )
def renders_impl(self, template_content, context, at_paths=None, at_encoding=anytemplate.compat.ENCODING, **kwargs): """ Render given template string and return the result. :param template_content: Template content :param context: A dict or dict-like object to instantiate given template file :param at_paths: Template search paths :param at_encoding: Template encoding :param kwargs: Keyword arguments passed to the template engine to render templates with specific features enabled. :return: Rendered string """ if 'filename' in kwargs: kwargs['filename'] = None # depends on [control=['if'], data=['kwargs']] kwargs['text'] = template_content if 'input_encoding' not in kwargs: kwargs['input_encoding'] = at_encoding.lower() # depends on [control=['if'], data=['kwargs']] if 'output_encoding' not in kwargs: kwargs['output_encoding'] = at_encoding.lower() # depends on [control=['if'], data=['kwargs']] if at_paths is not None: paths = at_paths + self.lookup_options.get('directories', []) self.lookup_options['directories'] = paths lookup = mako.lookup.TemplateLookup(**self.lookup_options) kwargs['lookup'] = lookup # depends on [control=['if'], data=['at_paths']] tmpl = mako.template.Template(**kwargs) return _render(tmpl, context)
def generate_and_cache(path=user_path): ''' Generate category ranges and save to userlevel cache file. :param path: path to userlevel cache file :type path: str :returns: category ranges dict :rtype: dict of RangeGroup ''' data = tools.generate() if not path: return data try: directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) with open(path, 'wb') as f: pickle.dump((data_version, module_version, data), f) except (PermissionError, ValueError) as e: warnings.warn('Unable to write cache file "%s": %s' % (path, e)) return data
def function[generate_and_cache, parameter[path]]: constant[ Generate category ranges and save to userlevel cache file. :param path: path to userlevel cache file :type path: str :returns: category ranges dict :rtype: dict of RangeGroup ] variable[data] assign[=] call[name[tools].generate, parameter[]] if <ast.UnaryOp object at 0x7da1b0a21a20> begin[:] return[name[data]] <ast.Try object at 0x7da1b0a235e0> return[name[data]]
keyword[def] identifier[generate_and_cache] ( identifier[path] = identifier[user_path] ): literal[string] identifier[data] = identifier[tools] . identifier[generate] () keyword[if] keyword[not] identifier[path] : keyword[return] identifier[data] keyword[try] : identifier[directory] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[path] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[directory] ): identifier[os] . identifier[makedirs] ( identifier[directory] ) keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] : identifier[pickle] . identifier[dump] (( identifier[data_version] , identifier[module_version] , identifier[data] ), identifier[f] ) keyword[except] ( identifier[PermissionError] , identifier[ValueError] ) keyword[as] identifier[e] : identifier[warnings] . identifier[warn] ( literal[string] %( identifier[path] , identifier[e] )) keyword[return] identifier[data]
def generate_and_cache(path=user_path): """ Generate category ranges and save to userlevel cache file. :param path: path to userlevel cache file :type path: str :returns: category ranges dict :rtype: dict of RangeGroup """ data = tools.generate() if not path: return data # depends on [control=['if'], data=[]] try: directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) # depends on [control=['if'], data=[]] with open(path, 'wb') as f: pickle.dump((data_version, module_version, data), f) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]] except (PermissionError, ValueError) as e: warnings.warn('Unable to write cache file "%s": %s' % (path, e)) # depends on [control=['except'], data=['e']] return data
def get_projects(format): """Gets projects data from fablabs.io.""" projects_json = data_from_fablabs_io(fablabs_io_projects_api_url_v0) projects = {} project_url = "https://www.fablabs.io/projects/" fablabs = get_labs(format="object") # Load all the FabLabs for i in projects_json["projects"]: i = i["projects"] current_project = Project() current_project.id = i["id"] current_project.title = i["title"] current_project.description = i["description"] current_project.github = i["github"] current_project.web = i["web"] current_project.dropbox = i["dropbox"] current_project.bitbucket = i["bitbucket"] current_project.lab_id = i["lab_id"] # Add the lab of the project if i["lab_id"] is not None: for k in fablabs: if fablabs[k].id == i["lab_id"]: current_project.lab = fablabs[k] else: current_project.lab = None current_project.owner_id = i["owner_id"] current_project.created_at = i["created_at"] current_project.updated_at = i["updated_at"] current_project.vimeo = i["vimeo"] current_project.flickr = i["flickr"] current_project.youtube = i["youtube"] current_project.drive = i["drive"] current_project.twitter = i["twitter"] current_project.facebook = i["facebook"] current_project.googleplus = i["googleplus"] current_project.instagram = i["instagram"] current_project.status = i["status"] current_project.version = i["version"] current_project.faq = i["faq"] current_project.scope = i["scope"] current_project.community = i["community"] current_project.lookingfor = i["lookingfor"] current_project.cover = i["cover"] url = project_url + str(current_project.id) current_project.url = url # Add the project projects[current_project.id] = current_project # Return a dictiornary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in projects: project_dict = projects[j].__dict__ # Convert the lab from a Fab Lab object to a dict if project_dict["lab"] is not None: project_dict["lab"] = project_dict["lab"].__dict__ output[j] = project_dict # Return a geojson, only for projects linked to a lab elif format.lower() == "geojson" or format.lower() == "geo": projects_list = [] for p in projects: if projects[p].lab_id is not None: single_project = projects[p].__dict__ if projects[p].lab is not None: single_project["lab"] = single_project["lab"].__dict__ for l in fablabs: single_lab = fablabs[l].__dict__ if single_lab["id"] == single_project["lab_id"]: project_lab = Feature( type="Feature", geometry=Point((single_lab["latitude"], single_lab["longitude"])), properties=single_project) projects_list.append(project_lab) output = dumps(FeatureCollection(projects_list)) # Return an object elif format.lower() == "object" or format.lower() == "obj": output = projects # Default: return an object else: output = projects # Return a proper json if format.lower() == "json": output = json.dumps(output) return output
def function[get_projects, parameter[format]]: constant[Gets projects data from fablabs.io.] variable[projects_json] assign[=] call[name[data_from_fablabs_io], parameter[name[fablabs_io_projects_api_url_v0]]] variable[projects] assign[=] dictionary[[], []] variable[project_url] assign[=] constant[https://www.fablabs.io/projects/] variable[fablabs] assign[=] call[name[get_labs], parameter[]] for taget[name[i]] in starred[call[name[projects_json]][constant[projects]]] begin[:] variable[i] assign[=] call[name[i]][constant[projects]] variable[current_project] assign[=] call[name[Project], parameter[]] name[current_project].id assign[=] call[name[i]][constant[id]] name[current_project].title assign[=] call[name[i]][constant[title]] name[current_project].description assign[=] call[name[i]][constant[description]] name[current_project].github assign[=] call[name[i]][constant[github]] name[current_project].web assign[=] call[name[i]][constant[web]] name[current_project].dropbox assign[=] call[name[i]][constant[dropbox]] name[current_project].bitbucket assign[=] call[name[i]][constant[bitbucket]] name[current_project].lab_id assign[=] call[name[i]][constant[lab_id]] if compare[call[name[i]][constant[lab_id]] is_not constant[None]] begin[:] for taget[name[k]] in starred[name[fablabs]] begin[:] if compare[call[name[fablabs]][name[k]].id equal[==] call[name[i]][constant[lab_id]]] begin[:] name[current_project].lab assign[=] call[name[fablabs]][name[k]] name[current_project].owner_id assign[=] call[name[i]][constant[owner_id]] name[current_project].created_at assign[=] call[name[i]][constant[created_at]] name[current_project].updated_at assign[=] call[name[i]][constant[updated_at]] name[current_project].vimeo assign[=] call[name[i]][constant[vimeo]] name[current_project].flickr assign[=] call[name[i]][constant[flickr]] name[current_project].youtube assign[=] call[name[i]][constant[youtube]] name[current_project].drive assign[=] call[name[i]][constant[drive]] name[current_project].twitter assign[=] call[name[i]][constant[twitter]] name[current_project].facebook assign[=] call[name[i]][constant[facebook]] name[current_project].googleplus assign[=] call[name[i]][constant[googleplus]] name[current_project].instagram assign[=] call[name[i]][constant[instagram]] name[current_project].status assign[=] call[name[i]][constant[status]] name[current_project].version assign[=] call[name[i]][constant[version]] name[current_project].faq assign[=] call[name[i]][constant[faq]] name[current_project].scope assign[=] call[name[i]][constant[scope]] name[current_project].community assign[=] call[name[i]][constant[community]] name[current_project].lookingfor assign[=] call[name[i]][constant[lookingfor]] name[current_project].cover assign[=] call[name[i]][constant[cover]] variable[url] assign[=] binary_operation[name[project_url] + call[name[str], parameter[name[current_project].id]]] name[current_project].url assign[=] name[url] call[name[projects]][name[current_project].id] assign[=] name[current_project] if <ast.BoolOp object at 0x7da1b168c1c0> begin[:] variable[output] assign[=] dictionary[[], []] for taget[name[j]] in starred[name[projects]] begin[:] variable[project_dict] assign[=] call[name[projects]][name[j]].__dict__ if compare[call[name[project_dict]][constant[lab]] is_not constant[None]] begin[:] call[name[project_dict]][constant[lab]] assign[=] call[name[project_dict]][constant[lab]].__dict__ call[name[output]][name[j]] assign[=] name[project_dict] if compare[call[name[format].lower, parameter[]] equal[==] constant[json]] begin[:] variable[output] assign[=] call[name[json].dumps, parameter[name[output]]] return[name[output]]
keyword[def] identifier[get_projects] ( identifier[format] ): literal[string] identifier[projects_json] = identifier[data_from_fablabs_io] ( identifier[fablabs_io_projects_api_url_v0] ) identifier[projects] ={} identifier[project_url] = literal[string] identifier[fablabs] = identifier[get_labs] ( identifier[format] = literal[string] ) keyword[for] identifier[i] keyword[in] identifier[projects_json] [ literal[string] ]: identifier[i] = identifier[i] [ literal[string] ] identifier[current_project] = identifier[Project] () identifier[current_project] . identifier[id] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[title] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[description] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[github] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[web] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[dropbox] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[bitbucket] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[lab_id] = identifier[i] [ literal[string] ] keyword[if] identifier[i] [ literal[string] ] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[k] keyword[in] identifier[fablabs] : keyword[if] identifier[fablabs] [ identifier[k] ]. identifier[id] == identifier[i] [ literal[string] ]: identifier[current_project] . identifier[lab] = identifier[fablabs] [ identifier[k] ] keyword[else] : identifier[current_project] . identifier[lab] = keyword[None] identifier[current_project] . identifier[owner_id] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[created_at] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[updated_at] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[vimeo] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[flickr] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[youtube] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[drive] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[twitter] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[facebook] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[googleplus] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[instagram] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[status] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[version] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[faq] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[scope] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[community] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[lookingfor] = identifier[i] [ literal[string] ] identifier[current_project] . identifier[cover] = identifier[i] [ literal[string] ] identifier[url] = identifier[project_url] + identifier[str] ( identifier[current_project] . identifier[id] ) identifier[current_project] . identifier[url] = identifier[url] identifier[projects] [ identifier[current_project] . identifier[id] ]= identifier[current_project] keyword[if] identifier[format] . identifier[lower] ()== literal[string] keyword[or] identifier[format] . identifier[lower] ()== literal[string] : identifier[output] ={} keyword[for] identifier[j] keyword[in] identifier[projects] : identifier[project_dict] = identifier[projects] [ identifier[j] ]. identifier[__dict__] keyword[if] identifier[project_dict] [ literal[string] ] keyword[is] keyword[not] keyword[None] : identifier[project_dict] [ literal[string] ]= identifier[project_dict] [ literal[string] ]. identifier[__dict__] identifier[output] [ identifier[j] ]= identifier[project_dict] keyword[elif] identifier[format] . identifier[lower] ()== literal[string] keyword[or] identifier[format] . identifier[lower] ()== literal[string] : identifier[projects_list] =[] keyword[for] identifier[p] keyword[in] identifier[projects] : keyword[if] identifier[projects] [ identifier[p] ]. identifier[lab_id] keyword[is] keyword[not] keyword[None] : identifier[single_project] = identifier[projects] [ identifier[p] ]. identifier[__dict__] keyword[if] identifier[projects] [ identifier[p] ]. identifier[lab] keyword[is] keyword[not] keyword[None] : identifier[single_project] [ literal[string] ]= identifier[single_project] [ literal[string] ]. identifier[__dict__] keyword[for] identifier[l] keyword[in] identifier[fablabs] : identifier[single_lab] = identifier[fablabs] [ identifier[l] ]. identifier[__dict__] keyword[if] identifier[single_lab] [ literal[string] ]== identifier[single_project] [ literal[string] ]: identifier[project_lab] = identifier[Feature] ( identifier[type] = literal[string] , identifier[geometry] = identifier[Point] (( identifier[single_lab] [ literal[string] ], identifier[single_lab] [ literal[string] ])), identifier[properties] = identifier[single_project] ) identifier[projects_list] . identifier[append] ( identifier[project_lab] ) identifier[output] = identifier[dumps] ( identifier[FeatureCollection] ( identifier[projects_list] )) keyword[elif] identifier[format] . identifier[lower] ()== literal[string] keyword[or] identifier[format] . identifier[lower] ()== literal[string] : identifier[output] = identifier[projects] keyword[else] : identifier[output] = identifier[projects] keyword[if] identifier[format] . identifier[lower] ()== literal[string] : identifier[output] = identifier[json] . identifier[dumps] ( identifier[output] ) keyword[return] identifier[output]
def get_projects(format): """Gets projects data from fablabs.io.""" projects_json = data_from_fablabs_io(fablabs_io_projects_api_url_v0) projects = {} project_url = 'https://www.fablabs.io/projects/' fablabs = get_labs(format='object') # Load all the FabLabs for i in projects_json['projects']: i = i['projects'] current_project = Project() current_project.id = i['id'] current_project.title = i['title'] current_project.description = i['description'] current_project.github = i['github'] current_project.web = i['web'] current_project.dropbox = i['dropbox'] current_project.bitbucket = i['bitbucket'] current_project.lab_id = i['lab_id'] # Add the lab of the project if i['lab_id'] is not None: for k in fablabs: if fablabs[k].id == i['lab_id']: current_project.lab = fablabs[k] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]] else: current_project.lab = None current_project.owner_id = i['owner_id'] current_project.created_at = i['created_at'] current_project.updated_at = i['updated_at'] current_project.vimeo = i['vimeo'] current_project.flickr = i['flickr'] current_project.youtube = i['youtube'] current_project.drive = i['drive'] current_project.twitter = i['twitter'] current_project.facebook = i['facebook'] current_project.googleplus = i['googleplus'] current_project.instagram = i['instagram'] current_project.status = i['status'] current_project.version = i['version'] current_project.faq = i['faq'] current_project.scope = i['scope'] current_project.community = i['community'] current_project.lookingfor = i['lookingfor'] current_project.cover = i['cover'] url = project_url + str(current_project.id) current_project.url = url # Add the project projects[current_project.id] = current_project # depends on [control=['for'], data=['i']] # Return a dictiornary / json if format.lower() == 'dict' or format.lower() == 'json': output = {} for j in projects: project_dict = projects[j].__dict__ # Convert the lab from a Fab Lab object to a dict if project_dict['lab'] is not None: project_dict['lab'] = project_dict['lab'].__dict__ # depends on [control=['if'], data=[]] output[j] = project_dict # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]] # Return a geojson, only for projects linked to a lab elif format.lower() == 'geojson' or format.lower() == 'geo': projects_list = [] for p in projects: if projects[p].lab_id is not None: single_project = projects[p].__dict__ if projects[p].lab is not None: single_project['lab'] = single_project['lab'].__dict__ # depends on [control=['if'], data=[]] for l in fablabs: single_lab = fablabs[l].__dict__ if single_lab['id'] == single_project['lab_id']: project_lab = Feature(type='Feature', geometry=Point((single_lab['latitude'], single_lab['longitude'])), properties=single_project) projects_list.append(project_lab) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['l']] output = dumps(FeatureCollection(projects_list)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]] # Return an object elif format.lower() == 'object' or format.lower() == 'obj': output = projects # depends on [control=['if'], data=[]] else: # Default: return an object output = projects # Return a proper json if format.lower() == 'json': output = json.dumps(output) # depends on [control=['if'], data=[]] return output
def handle_program_options(): """ Uses the built-in argparse module to handle command-line options for the program. :return: The gathered command-line options specified by the user :rtype: argparse.ArgumentParser """ parser = argparse.ArgumentParser(description="Convert Sanger-sequencing \ derived data files for use with the \ metagenomics analysis program QIIME, by \ extracting Sample ID information, adding\ barcodes and primers to the sequence \ data, and outputting a mapping file and\ single FASTA-formatted sequence file \ formed by concatenating all input data.") parser.add_argument('-i', '--input_dir', required=True, help="The directory containing sequence data files. \ Assumes all data files are placed in this \ directory. For files organized within folders by\ sample, use -s in addition.") parser.add_argument('-m', '--map_file', default='map.txt', help="QIIME-formatted mapping file linking Sample IDs \ with barcodes and primers.") parser.add_argument('-o', '--output', default='output.fasta', metavar='OUTPUT_FILE', help="Single file containing all sequence data found \ in input_dir, FASTA-formatted with barcode and \ primer preprended to sequence. If the -q option \ is passed, any quality data will also be output \ to a single file of the same name with a .qual \ extension.") parser.add_argument('-b', '--barcode_length', type=int, default=12, help="Length of the generated barcode sequences. \ Default is 12 (QIIME default), minimum is 8.") parser.add_argument('-q', '--qual', action='store_true', default=False, help="Instruct the program to look for quality \ input files") parser.add_argument('-u', '--utf16', action='store_true', default=False, help="UTF-16 encoded input files") parser.add_argument('-t', '--treatment', help="Inserts an additional column into the mapping \ file specifying some treatment or other variable\ that separates the current set of sequences \ from any other set of seqeunces. For example:\ -t DiseaseState=healthy") # data input options sidGroup = parser.add_mutually_exclusive_group(required=True) sidGroup.add_argument('-d', '--identifier_pattern', action=ValidateIDPattern, nargs=2, metavar=('SEPARATOR', 'FIELD_NUMBER'), help="Indicates how to extract the Sample ID from \ the description line. Specify two things: \ 1. Field separator, 2. Field number of Sample \ ID (1 or greater). If the separator is a space \ or tab, use \s or \\t respectively. \ Example: >ka-SampleID-2091, use -i - 2, \ indicating - is the separator and the Sample ID\ is field #2.") sidGroup.add_argument('-f', '--filename_sample_id', action='store_true', default=False, help='Specify that the program should\ the name of each fasta file as the Sample ID for use\ in the mapping file. This is meant to be used when \ all sequence data for a sample is stored in a single\ file.') return parser.parse_args()
def function[handle_program_options, parameter[]]: constant[ Uses the built-in argparse module to handle command-line options for the program. :return: The gathered command-line options specified by the user :rtype: argparse.ArgumentParser ] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[parser].add_argument, parameter[constant[-i], constant[--input_dir]]] call[name[parser].add_argument, parameter[constant[-m], constant[--map_file]]] call[name[parser].add_argument, parameter[constant[-o], constant[--output]]] call[name[parser].add_argument, parameter[constant[-b], constant[--barcode_length]]] call[name[parser].add_argument, parameter[constant[-q], constant[--qual]]] call[name[parser].add_argument, parameter[constant[-u], constant[--utf16]]] call[name[parser].add_argument, parameter[constant[-t], constant[--treatment]]] variable[sidGroup] assign[=] call[name[parser].add_mutually_exclusive_group, parameter[]] call[name[sidGroup].add_argument, parameter[constant[-d], constant[--identifier_pattern]]] call[name[sidGroup].add_argument, parameter[constant[-f], constant[--filename_sample_id]]] return[call[name[parser].parse_args, parameter[]]]
keyword[def] identifier[handle_program_options] (): literal[string] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = literal[string] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = literal[string] , identifier[metavar] = literal[string] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[help] = literal[string] ) identifier[sidGroup] = identifier[parser] . identifier[add_mutually_exclusive_group] ( identifier[required] = keyword[True] ) identifier[sidGroup] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = identifier[ValidateIDPattern] , identifier[nargs] = literal[int] , identifier[metavar] =( literal[string] , literal[string] ), identifier[help] = literal[string] ) identifier[sidGroup] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) keyword[return] identifier[parser] . identifier[parse_args] ()
def handle_program_options(): """ Uses the built-in argparse module to handle command-line options for the program. :return: The gathered command-line options specified by the user :rtype: argparse.ArgumentParser """ parser = argparse.ArgumentParser(description='Convert Sanger-sequencing derived data files for use with the metagenomics analysis program QIIME, by extracting Sample ID information, adding barcodes and primers to the sequence data, and outputting a mapping file and single FASTA-formatted sequence file formed by concatenating all input data.') parser.add_argument('-i', '--input_dir', required=True, help='The directory containing sequence data files. Assumes all data files are placed in this directory. For files organized within folders by sample, use -s in addition.') parser.add_argument('-m', '--map_file', default='map.txt', help='QIIME-formatted mapping file linking Sample IDs with barcodes and primers.') parser.add_argument('-o', '--output', default='output.fasta', metavar='OUTPUT_FILE', help='Single file containing all sequence data found in input_dir, FASTA-formatted with barcode and primer preprended to sequence. If the -q option is passed, any quality data will also be output to a single file of the same name with a .qual extension.') parser.add_argument('-b', '--barcode_length', type=int, default=12, help='Length of the generated barcode sequences. Default is 12 (QIIME default), minimum is 8.') parser.add_argument('-q', '--qual', action='store_true', default=False, help='Instruct the program to look for quality input files') parser.add_argument('-u', '--utf16', action='store_true', default=False, help='UTF-16 encoded input files') parser.add_argument('-t', '--treatment', help='Inserts an additional column into the mapping file specifying some treatment or other variable that separates the current set of sequences from any other set of seqeunces. For example: -t DiseaseState=healthy') # data input options sidGroup = parser.add_mutually_exclusive_group(required=True) sidGroup.add_argument('-d', '--identifier_pattern', action=ValidateIDPattern, nargs=2, metavar=('SEPARATOR', 'FIELD_NUMBER'), help='Indicates how to extract the Sample ID from the description line. Specify two things: 1. Field separator, 2. Field number of Sample ID (1 or greater). If the separator is a space or tab, use \\s or \\t respectively. Example: >ka-SampleID-2091, use -i - 2, indicating - is the separator and the Sample ID is field #2.') sidGroup.add_argument('-f', '--filename_sample_id', action='store_true', default=False, help='Specify that the program should the name of each fasta file as the Sample ID for use in the mapping file. This is meant to be used when all sequence data for a sample is stored in a single file.') return parser.parse_args()
def make_tuple(stream, tuple_key, values, roots=None): """Creates a HeronTuple :param stream: protobuf message ``StreamId`` :param tuple_key: tuple id :param values: a list of values :param roots: a list of protobuf message ``RootId`` """ component_name = stream.component_name stream_id = stream.id gen_task = roots[0].taskid if roots is not None and len(roots) > 0 else None return HeronTuple(id=str(tuple_key), component=component_name, stream=stream_id, task=gen_task, values=values, creation_time=time.time(), roots=roots)
def function[make_tuple, parameter[stream, tuple_key, values, roots]]: constant[Creates a HeronTuple :param stream: protobuf message ``StreamId`` :param tuple_key: tuple id :param values: a list of values :param roots: a list of protobuf message ``RootId`` ] variable[component_name] assign[=] name[stream].component_name variable[stream_id] assign[=] name[stream].id variable[gen_task] assign[=] <ast.IfExp object at 0x7da18dc065f0> return[call[name[HeronTuple], parameter[]]]
keyword[def] identifier[make_tuple] ( identifier[stream] , identifier[tuple_key] , identifier[values] , identifier[roots] = keyword[None] ): literal[string] identifier[component_name] = identifier[stream] . identifier[component_name] identifier[stream_id] = identifier[stream] . identifier[id] identifier[gen_task] = identifier[roots] [ literal[int] ]. identifier[taskid] keyword[if] identifier[roots] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[roots] )> literal[int] keyword[else] keyword[None] keyword[return] identifier[HeronTuple] ( identifier[id] = identifier[str] ( identifier[tuple_key] ), identifier[component] = identifier[component_name] , identifier[stream] = identifier[stream_id] , identifier[task] = identifier[gen_task] , identifier[values] = identifier[values] , identifier[creation_time] = identifier[time] . identifier[time] (), identifier[roots] = identifier[roots] )
def make_tuple(stream, tuple_key, values, roots=None): """Creates a HeronTuple :param stream: protobuf message ``StreamId`` :param tuple_key: tuple id :param values: a list of values :param roots: a list of protobuf message ``RootId`` """ component_name = stream.component_name stream_id = stream.id gen_task = roots[0].taskid if roots is not None and len(roots) > 0 else None return HeronTuple(id=str(tuple_key), component=component_name, stream=stream_id, task=gen_task, values=values, creation_time=time.time(), roots=roots)
def start(self): """Start the firewall.""" self.clear() self.setDefaultPolicy() self.acceptIcmp() self.acceptInput('lo')
def function[start, parameter[self]]: constant[Start the firewall.] call[name[self].clear, parameter[]] call[name[self].setDefaultPolicy, parameter[]] call[name[self].acceptIcmp, parameter[]] call[name[self].acceptInput, parameter[constant[lo]]]
keyword[def] identifier[start] ( identifier[self] ): literal[string] identifier[self] . identifier[clear] () identifier[self] . identifier[setDefaultPolicy] () identifier[self] . identifier[acceptIcmp] () identifier[self] . identifier[acceptInput] ( literal[string] )
def start(self): """Start the firewall.""" self.clear() self.setDefaultPolicy() self.acceptIcmp() self.acceptInput('lo')
def create_widget(self): """ Create the underlying widget. """ d = self.declaration self.widget = WebView(self.get_context(), None, d.style)
def function[create_widget, parameter[self]]: constant[ Create the underlying widget. ] variable[d] assign[=] name[self].declaration name[self].widget assign[=] call[name[WebView], parameter[call[name[self].get_context, parameter[]], constant[None], name[d].style]]
keyword[def] identifier[create_widget] ( identifier[self] ): literal[string] identifier[d] = identifier[self] . identifier[declaration] identifier[self] . identifier[widget] = identifier[WebView] ( identifier[self] . identifier[get_context] (), keyword[None] , identifier[d] . identifier[style] )
def create_widget(self): """ Create the underlying widget. """ d = self.declaration self.widget = WebView(self.get_context(), None, d.style)
def on_channel_open(self, channel): """ Input channel creation callback Queue declaration done here Args: channel: input channel """ self.in_channel.exchange_declare(exchange='input_exc', type='topic', durable=True) channel.queue_declare(callback=self.on_input_queue_declare, queue=self.INPUT_QUEUE_NAME)
def function[on_channel_open, parameter[self, channel]]: constant[ Input channel creation callback Queue declaration done here Args: channel: input channel ] call[name[self].in_channel.exchange_declare, parameter[]] call[name[channel].queue_declare, parameter[]]
keyword[def] identifier[on_channel_open] ( identifier[self] , identifier[channel] ): literal[string] identifier[self] . identifier[in_channel] . identifier[exchange_declare] ( identifier[exchange] = literal[string] , identifier[type] = literal[string] , identifier[durable] = keyword[True] ) identifier[channel] . identifier[queue_declare] ( identifier[callback] = identifier[self] . identifier[on_input_queue_declare] , identifier[queue] = identifier[self] . identifier[INPUT_QUEUE_NAME] )
def on_channel_open(self, channel): """ Input channel creation callback Queue declaration done here Args: channel: input channel """ self.in_channel.exchange_declare(exchange='input_exc', type='topic', durable=True) channel.queue_declare(callback=self.on_input_queue_declare, queue=self.INPUT_QUEUE_NAME)
def featureclass_to_json(fc): """converts a feature class to JSON""" if arcpyFound == False: raise Exception("ArcPy is required to use this function") desc = arcpy.Describe(fc) if desc.dataType == "Table" or desc.dataType == "TableView": return recordset_to_json(table=fc) else: return arcpy.FeatureSet(fc).JSON
def function[featureclass_to_json, parameter[fc]]: constant[converts a feature class to JSON] if compare[name[arcpyFound] equal[==] constant[False]] begin[:] <ast.Raise object at 0x7da1b124c490> variable[desc] assign[=] call[name[arcpy].Describe, parameter[name[fc]]] if <ast.BoolOp object at 0x7da1b124cc10> begin[:] return[call[name[recordset_to_json], parameter[]]]
keyword[def] identifier[featureclass_to_json] ( identifier[fc] ): literal[string] keyword[if] identifier[arcpyFound] == keyword[False] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[desc] = identifier[arcpy] . identifier[Describe] ( identifier[fc] ) keyword[if] identifier[desc] . identifier[dataType] == literal[string] keyword[or] identifier[desc] . identifier[dataType] == literal[string] : keyword[return] identifier[recordset_to_json] ( identifier[table] = identifier[fc] ) keyword[else] : keyword[return] identifier[arcpy] . identifier[FeatureSet] ( identifier[fc] ). identifier[JSON]
def featureclass_to_json(fc): """converts a feature class to JSON""" if arcpyFound == False: raise Exception('ArcPy is required to use this function') # depends on [control=['if'], data=[]] desc = arcpy.Describe(fc) if desc.dataType == 'Table' or desc.dataType == 'TableView': return recordset_to_json(table=fc) # depends on [control=['if'], data=[]] else: return arcpy.FeatureSet(fc).JSON
def register(config, pconn): """ Do registration using basic auth """ username = config.username password = config.password authmethod = config.authmethod auto_config = config.auto_config if not username and not password and not auto_config and authmethod == 'BASIC': logger.debug('Username and password must be defined in configuration file with BASIC authentication method.') return False return pconn.register()
def function[register, parameter[config, pconn]]: constant[ Do registration using basic auth ] variable[username] assign[=] name[config].username variable[password] assign[=] name[config].password variable[authmethod] assign[=] name[config].authmethod variable[auto_config] assign[=] name[config].auto_config if <ast.BoolOp object at 0x7da20c993310> begin[:] call[name[logger].debug, parameter[constant[Username and password must be defined in configuration file with BASIC authentication method.]]] return[constant[False]] return[call[name[pconn].register, parameter[]]]
keyword[def] identifier[register] ( identifier[config] , identifier[pconn] ): literal[string] identifier[username] = identifier[config] . identifier[username] identifier[password] = identifier[config] . identifier[password] identifier[authmethod] = identifier[config] . identifier[authmethod] identifier[auto_config] = identifier[config] . identifier[auto_config] keyword[if] keyword[not] identifier[username] keyword[and] keyword[not] identifier[password] keyword[and] keyword[not] identifier[auto_config] keyword[and] identifier[authmethod] == literal[string] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] keyword[False] keyword[return] identifier[pconn] . identifier[register] ()
def register(config, pconn): """ Do registration using basic auth """ username = config.username password = config.password authmethod = config.authmethod auto_config = config.auto_config if not username and (not password) and (not auto_config) and (authmethod == 'BASIC'): logger.debug('Username and password must be defined in configuration file with BASIC authentication method.') return False # depends on [control=['if'], data=[]] return pconn.register()
def nvmlDeviceGetName(handle): r""" /** * Retrieves the name of this device. * * For all products. * * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not * exceed 64 characters in length (including the NULL terminator). See \ref * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. * * @param device The identifier of the target device * @param name Reference in which to return the product name * @param length The maximum allowed length of the string returned in \a name * * @return * - \ref NVML_SUCCESS if \a name has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetName """ c_name = create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE) fn = _nvmlGetFunctionPointer("nvmlDeviceGetName") ret = fn(handle, c_name, c_uint(NVML_DEVICE_NAME_BUFFER_SIZE)) _nvmlCheckReturn(ret) return bytes_to_str(c_name.value)
def function[nvmlDeviceGetName, parameter[handle]]: constant[ /** * Retrieves the name of this device. * * For all products. * * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not * exceed 64 characters in length (including the NULL terminator). See \ref * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. * * @param device The identifier of the target device * @param name Reference in which to return the product name * @param length The maximum allowed length of the string returned in \a name * * @return * - \ref NVML_SUCCESS if \a name has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetName ] variable[c_name] assign[=] call[name[create_string_buffer], parameter[name[NVML_DEVICE_NAME_BUFFER_SIZE]]] variable[fn] assign[=] call[name[_nvmlGetFunctionPointer], parameter[constant[nvmlDeviceGetName]]] variable[ret] assign[=] call[name[fn], parameter[name[handle], name[c_name], call[name[c_uint], parameter[name[NVML_DEVICE_NAME_BUFFER_SIZE]]]]] call[name[_nvmlCheckReturn], parameter[name[ret]]] return[call[name[bytes_to_str], parameter[name[c_name].value]]]
keyword[def] identifier[nvmlDeviceGetName] ( identifier[handle] ): literal[string] identifier[c_name] = identifier[create_string_buffer] ( identifier[NVML_DEVICE_NAME_BUFFER_SIZE] ) identifier[fn] = identifier[_nvmlGetFunctionPointer] ( literal[string] ) identifier[ret] = identifier[fn] ( identifier[handle] , identifier[c_name] , identifier[c_uint] ( identifier[NVML_DEVICE_NAME_BUFFER_SIZE] )) identifier[_nvmlCheckReturn] ( identifier[ret] ) keyword[return] identifier[bytes_to_str] ( identifier[c_name] . identifier[value] )
def nvmlDeviceGetName(handle): """ /** * Retrieves the name of this device. * * For all products. * * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not * exceed 64 characters in length (including the NULL terminator). See \\ref * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. * * @param device The identifier of the target device * @param name Reference in which to return the product name * @param length The maximum allowed length of the string returned in \\a name * * @return * - \\ref NVML_SUCCESS if \\a name has been set * - \\ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \\ref NVML_ERROR_INVALID_ARGUMENT if \\a device is invalid, or \\a name is NULL * - \\ref NVML_ERROR_INSUFFICIENT_SIZE if \\a length is too small * - \\ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \\ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetName """ c_name = create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE) fn = _nvmlGetFunctionPointer('nvmlDeviceGetName') ret = fn(handle, c_name, c_uint(NVML_DEVICE_NAME_BUFFER_SIZE)) _nvmlCheckReturn(ret) return bytes_to_str(c_name.value)
def project_groups(self, key, limit=99999, filter_str=None): """ Get Project Groups :param key: :param limit: OPTIONAL: The limit of the number of groups to return, this may be restricted by fixed system limits. Default by built-in method: 99999 :param filter_str: OPTIONAL: group filter string :return: """ url = 'rest/api/1.0/projects/{key}/permissions/groups'.format(key=key) params = {} if limit: params['limit'] = limit if filter_str: params['filter'] = filter_str return (self.get(url, params=params) or {}).get('values')
def function[project_groups, parameter[self, key, limit, filter_str]]: constant[ Get Project Groups :param key: :param limit: OPTIONAL: The limit of the number of groups to return, this may be restricted by fixed system limits. Default by built-in method: 99999 :param filter_str: OPTIONAL: group filter string :return: ] variable[url] assign[=] call[constant[rest/api/1.0/projects/{key}/permissions/groups].format, parameter[]] variable[params] assign[=] dictionary[[], []] if name[limit] begin[:] call[name[params]][constant[limit]] assign[=] name[limit] if name[filter_str] begin[:] call[name[params]][constant[filter]] assign[=] name[filter_str] return[call[<ast.BoolOp object at 0x7da18f09c760>.get, parameter[constant[values]]]]
keyword[def] identifier[project_groups] ( identifier[self] , identifier[key] , identifier[limit] = literal[int] , identifier[filter_str] = keyword[None] ): literal[string] identifier[url] = literal[string] . identifier[format] ( identifier[key] = identifier[key] ) identifier[params] ={} keyword[if] identifier[limit] : identifier[params] [ literal[string] ]= identifier[limit] keyword[if] identifier[filter_str] : identifier[params] [ literal[string] ]= identifier[filter_str] keyword[return] ( identifier[self] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] ) keyword[or] {}). identifier[get] ( literal[string] )
def project_groups(self, key, limit=99999, filter_str=None): """ Get Project Groups :param key: :param limit: OPTIONAL: The limit of the number of groups to return, this may be restricted by fixed system limits. Default by built-in method: 99999 :param filter_str: OPTIONAL: group filter string :return: """ url = 'rest/api/1.0/projects/{key}/permissions/groups'.format(key=key) params = {} if limit: params['limit'] = limit # depends on [control=['if'], data=[]] if filter_str: params['filter'] = filter_str # depends on [control=['if'], data=[]] return (self.get(url, params=params) or {}).get('values')
def _find_class(self, class_name): "Resolve the class from the name." classes = {} classes.update(globals()) classes.update(self.INSTANCE_CLASSES) logger.debug(f'looking up class: {class_name}') cls = classes[class_name] logger.debug(f'found class: {cls}') return cls
def function[_find_class, parameter[self, class_name]]: constant[Resolve the class from the name.] variable[classes] assign[=] dictionary[[], []] call[name[classes].update, parameter[call[name[globals], parameter[]]]] call[name[classes].update, parameter[name[self].INSTANCE_CLASSES]] call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b0f43bb0>]] variable[cls] assign[=] call[name[classes]][name[class_name]] call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b0f43df0>]] return[name[cls]]
keyword[def] identifier[_find_class] ( identifier[self] , identifier[class_name] ): literal[string] identifier[classes] ={} identifier[classes] . identifier[update] ( identifier[globals] ()) identifier[classes] . identifier[update] ( identifier[self] . identifier[INSTANCE_CLASSES] ) identifier[logger] . identifier[debug] ( literal[string] ) identifier[cls] = identifier[classes] [ identifier[class_name] ] identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[cls]
def _find_class(self, class_name): """Resolve the class from the name.""" classes = {} classes.update(globals()) classes.update(self.INSTANCE_CLASSES) logger.debug(f'looking up class: {class_name}') cls = classes[class_name] logger.debug(f'found class: {cls}') return cls
def per_from_id(flavors=chat_flavors+inline_flavors): """ :param flavors: ``all`` or a list of flavors :return: a seeder function that returns the from id only if the message flavor is in ``flavors``. """ return _wrap_none(lambda msg: msg['from']['id'] if flavors == 'all' or flavor(msg) in flavors else None)
def function[per_from_id, parameter[flavors]]: constant[ :param flavors: ``all`` or a list of flavors :return: a seeder function that returns the from id only if the message flavor is in ``flavors``. ] return[call[name[_wrap_none], parameter[<ast.Lambda object at 0x7da1b1c7e890>]]]
keyword[def] identifier[per_from_id] ( identifier[flavors] = identifier[chat_flavors] + identifier[inline_flavors] ): literal[string] keyword[return] identifier[_wrap_none] ( keyword[lambda] identifier[msg] : identifier[msg] [ literal[string] ][ literal[string] ] keyword[if] identifier[flavors] == literal[string] keyword[or] identifier[flavor] ( identifier[msg] ) keyword[in] identifier[flavors] keyword[else] keyword[None] )
def per_from_id(flavors=chat_flavors + inline_flavors): """ :param flavors: ``all`` or a list of flavors :return: a seeder function that returns the from id only if the message flavor is in ``flavors``. """ return _wrap_none(lambda msg: msg['from']['id'] if flavors == 'all' or flavor(msg) in flavors else None)
def __grabHotkey(self, key, modifiers, window): """ Grab a specific hotkey in the given window """ logger.debug("Grabbing hotkey: %r %r", modifiers, key) try: keycode = self.__lookupKeyCode(key) mask = 0 for mod in modifiers: mask |= self.modMasks[mod] window.grab_key(keycode, mask, True, X.GrabModeAsync, X.GrabModeAsync) if Key.NUMLOCK in self.modMasks: window.grab_key(keycode, mask|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync) if Key.CAPSLOCK in self.modMasks: window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK], True, X.GrabModeAsync, X.GrabModeAsync) if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks: window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync) except Exception as e: logger.warning("Failed to grab hotkey %r %r: %s", modifiers, key, str(e))
def function[__grabHotkey, parameter[self, key, modifiers, window]]: constant[ Grab a specific hotkey in the given window ] call[name[logger].debug, parameter[constant[Grabbing hotkey: %r %r], name[modifiers], name[key]]] <ast.Try object at 0x7da18dc9beb0>
keyword[def] identifier[__grabHotkey] ( identifier[self] , identifier[key] , identifier[modifiers] , identifier[window] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] , identifier[modifiers] , identifier[key] ) keyword[try] : identifier[keycode] = identifier[self] . identifier[__lookupKeyCode] ( identifier[key] ) identifier[mask] = literal[int] keyword[for] identifier[mod] keyword[in] identifier[modifiers] : identifier[mask] |= identifier[self] . identifier[modMasks] [ identifier[mod] ] identifier[window] . identifier[grab_key] ( identifier[keycode] , identifier[mask] , keyword[True] , identifier[X] . identifier[GrabModeAsync] , identifier[X] . identifier[GrabModeAsync] ) keyword[if] identifier[Key] . identifier[NUMLOCK] keyword[in] identifier[self] . identifier[modMasks] : identifier[window] . identifier[grab_key] ( identifier[keycode] , identifier[mask] | identifier[self] . identifier[modMasks] [ identifier[Key] . identifier[NUMLOCK] ], keyword[True] , identifier[X] . identifier[GrabModeAsync] , identifier[X] . identifier[GrabModeAsync] ) keyword[if] identifier[Key] . identifier[CAPSLOCK] keyword[in] identifier[self] . identifier[modMasks] : identifier[window] . identifier[grab_key] ( identifier[keycode] , identifier[mask] | identifier[self] . identifier[modMasks] [ identifier[Key] . identifier[CAPSLOCK] ], keyword[True] , identifier[X] . identifier[GrabModeAsync] , identifier[X] . identifier[GrabModeAsync] ) keyword[if] identifier[Key] . identifier[CAPSLOCK] keyword[in] identifier[self] . identifier[modMasks] keyword[and] identifier[Key] . identifier[NUMLOCK] keyword[in] identifier[self] . identifier[modMasks] : identifier[window] . identifier[grab_key] ( identifier[keycode] , identifier[mask] | identifier[self] . identifier[modMasks] [ identifier[Key] . identifier[CAPSLOCK] ]| identifier[self] . identifier[modMasks] [ identifier[Key] . identifier[NUMLOCK] ], keyword[True] , identifier[X] . identifier[GrabModeAsync] , identifier[X] . identifier[GrabModeAsync] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[warning] ( literal[string] , identifier[modifiers] , identifier[key] , identifier[str] ( identifier[e] ))
def __grabHotkey(self, key, modifiers, window): """ Grab a specific hotkey in the given window """ logger.debug('Grabbing hotkey: %r %r', modifiers, key) try: keycode = self.__lookupKeyCode(key) mask = 0 for mod in modifiers: mask |= self.modMasks[mod] # depends on [control=['for'], data=['mod']] window.grab_key(keycode, mask, True, X.GrabModeAsync, X.GrabModeAsync) if Key.NUMLOCK in self.modMasks: window.grab_key(keycode, mask | self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync) # depends on [control=['if'], data=[]] if Key.CAPSLOCK in self.modMasks: window.grab_key(keycode, mask | self.modMasks[Key.CAPSLOCK], True, X.GrabModeAsync, X.GrabModeAsync) # depends on [control=['if'], data=[]] if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks: window.grab_key(keycode, mask | self.modMasks[Key.CAPSLOCK] | self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception as e: logger.warning('Failed to grab hotkey %r %r: %s', modifiers, key, str(e)) # depends on [control=['except'], data=['e']]
def _rottrip(ang: np.ndarray) -> np.ndarray: """ transformation matrix Parameters ---------- ang : N x 3 numpy.ndarray angle to transform (radians) """ ang = ang.squeeze() if ang.size > 1: raise ValueError('only one angle allowed at a time') return np.array([[np.cos(ang), np.sin(ang), 0], [-np.sin(ang), np.cos(ang), 0], [0, 0, 1]])
def function[_rottrip, parameter[ang]]: constant[ transformation matrix Parameters ---------- ang : N x 3 numpy.ndarray angle to transform (radians) ] variable[ang] assign[=] call[name[ang].squeeze, parameter[]] if compare[name[ang].size greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b1295240> return[call[name[np].array, parameter[list[[<ast.List object at 0x7da1b1295210>, <ast.List object at 0x7da1b1295000>, <ast.List object at 0x7da1b1294be0>]]]]]
keyword[def] identifier[_rottrip] ( identifier[ang] : identifier[np] . identifier[ndarray] )-> identifier[np] . identifier[ndarray] : literal[string] identifier[ang] = identifier[ang] . identifier[squeeze] () keyword[if] identifier[ang] . identifier[size] > literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[np] . identifier[array] ([[ identifier[np] . identifier[cos] ( identifier[ang] ), identifier[np] . identifier[sin] ( identifier[ang] ), literal[int] ], [- identifier[np] . identifier[sin] ( identifier[ang] ), identifier[np] . identifier[cos] ( identifier[ang] ), literal[int] ], [ literal[int] , literal[int] , literal[int] ]])
def _rottrip(ang: np.ndarray) -> np.ndarray: """ transformation matrix Parameters ---------- ang : N x 3 numpy.ndarray angle to transform (radians) """ ang = ang.squeeze() if ang.size > 1: raise ValueError('only one angle allowed at a time') # depends on [control=['if'], data=[]] return np.array([[np.cos(ang), np.sin(ang), 0], [-np.sin(ang), np.cos(ang), 0], [0, 0, 1]])
def current_snapshot(id_or_symbol): """ 获得当前市场快照数据。只能在日内交易阶段调用,获取当日调用时点的市场快照数据。 市场快照数据记录了每日从开盘到当前的数据信息,可以理解为一个动态的day bar数据。 在目前分钟回测中,快照数据为当日所有分钟线累积而成,一般情况下,最后一个分钟线获取到的快照数据应当与当日的日线行情保持一致。 需要注意,在实盘模拟中,该函数返回的是调用当时的市场快照情况,所以在同一个handle_bar中不同时点调用可能返回的数据不同。 如果当日截止到调用时候对应股票没有任何成交,那么snapshot中的close, high, low, last几个价格水平都将以0表示。 :param str id_or_symbol: 合约代码或简称 :return: :class:`~Snapshot` :example: 在handle_bar中调用该函数,假设策略当前时间是20160104 09:33: .. code-block:: python3 :linenos: [In] logger.info(current_snapshot('000001.XSHE')) [Out] 2016-01-04 09:33:00.00 INFO Snapshot(order_book_id: '000001.XSHE', datetime: datetime.datetime(2016, 1, 4, 9, 33), open: 10.0, high: 10.025, low: 9.9667, last: 9.9917, volume: 2050320, total_turnover: 20485195, prev_close: 9.99) """ env = Environment.get_instance() frequency = env.config.base.frequency order_book_id = assure_order_book_id(id_or_symbol) dt = env.calendar_dt if env.config.base.run_type == RUN_TYPE.BACKTEST: if ExecutionContext.phase() == EXECUTION_PHASE.BEFORE_TRADING: dt = env.data_proxy.get_previous_trading_date(env.trading_dt.date()) return env.data_proxy.current_snapshot(order_book_id, "1d", dt) elif ExecutionContext.phase() == EXECUTION_PHASE.AFTER_TRADING: return env.data_proxy.current_snapshot(order_book_id, "1d", dt) # PT、实盘直接取最新快照,忽略 frequency, dt 参数 return env.data_proxy.current_snapshot(order_book_id, frequency, dt)
def function[current_snapshot, parameter[id_or_symbol]]: constant[ 获得当前市场快照数据。只能在日内交易阶段调用,获取当日调用时点的市场快照数据。 市场快照数据记录了每日从开盘到当前的数据信息,可以理解为一个动态的day bar数据。 在目前分钟回测中,快照数据为当日所有分钟线累积而成,一般情况下,最后一个分钟线获取到的快照数据应当与当日的日线行情保持一致。 需要注意,在实盘模拟中,该函数返回的是调用当时的市场快照情况,所以在同一个handle_bar中不同时点调用可能返回的数据不同。 如果当日截止到调用时候对应股票没有任何成交,那么snapshot中的close, high, low, last几个价格水平都将以0表示。 :param str id_or_symbol: 合约代码或简称 :return: :class:`~Snapshot` :example: 在handle_bar中调用该函数,假设策略当前时间是20160104 09:33: .. code-block:: python3 :linenos: [In] logger.info(current_snapshot('000001.XSHE')) [Out] 2016-01-04 09:33:00.00 INFO Snapshot(order_book_id: '000001.XSHE', datetime: datetime.datetime(2016, 1, 4, 9, 33), open: 10.0, high: 10.025, low: 9.9667, last: 9.9917, volume: 2050320, total_turnover: 20485195, prev_close: 9.99) ] variable[env] assign[=] call[name[Environment].get_instance, parameter[]] variable[frequency] assign[=] name[env].config.base.frequency variable[order_book_id] assign[=] call[name[assure_order_book_id], parameter[name[id_or_symbol]]] variable[dt] assign[=] name[env].calendar_dt if compare[name[env].config.base.run_type equal[==] name[RUN_TYPE].BACKTEST] begin[:] if compare[call[name[ExecutionContext].phase, parameter[]] equal[==] name[EXECUTION_PHASE].BEFORE_TRADING] begin[:] variable[dt] assign[=] call[name[env].data_proxy.get_previous_trading_date, parameter[call[name[env].trading_dt.date, parameter[]]]] return[call[name[env].data_proxy.current_snapshot, parameter[name[order_book_id], constant[1d], name[dt]]]] return[call[name[env].data_proxy.current_snapshot, parameter[name[order_book_id], name[frequency], name[dt]]]]
keyword[def] identifier[current_snapshot] ( identifier[id_or_symbol] ): literal[string] identifier[env] = identifier[Environment] . identifier[get_instance] () identifier[frequency] = identifier[env] . identifier[config] . identifier[base] . identifier[frequency] identifier[order_book_id] = identifier[assure_order_book_id] ( identifier[id_or_symbol] ) identifier[dt] = identifier[env] . identifier[calendar_dt] keyword[if] identifier[env] . identifier[config] . identifier[base] . identifier[run_type] == identifier[RUN_TYPE] . identifier[BACKTEST] : keyword[if] identifier[ExecutionContext] . identifier[phase] ()== identifier[EXECUTION_PHASE] . identifier[BEFORE_TRADING] : identifier[dt] = identifier[env] . identifier[data_proxy] . identifier[get_previous_trading_date] ( identifier[env] . identifier[trading_dt] . identifier[date] ()) keyword[return] identifier[env] . identifier[data_proxy] . identifier[current_snapshot] ( identifier[order_book_id] , literal[string] , identifier[dt] ) keyword[elif] identifier[ExecutionContext] . identifier[phase] ()== identifier[EXECUTION_PHASE] . identifier[AFTER_TRADING] : keyword[return] identifier[env] . identifier[data_proxy] . identifier[current_snapshot] ( identifier[order_book_id] , literal[string] , identifier[dt] ) keyword[return] identifier[env] . identifier[data_proxy] . identifier[current_snapshot] ( identifier[order_book_id] , identifier[frequency] , identifier[dt] )
def current_snapshot(id_or_symbol): """ 获得当前市场快照数据。只能在日内交易阶段调用,获取当日调用时点的市场快照数据。 市场快照数据记录了每日从开盘到当前的数据信息,可以理解为一个动态的day bar数据。 在目前分钟回测中,快照数据为当日所有分钟线累积而成,一般情况下,最后一个分钟线获取到的快照数据应当与当日的日线行情保持一致。 需要注意,在实盘模拟中,该函数返回的是调用当时的市场快照情况,所以在同一个handle_bar中不同时点调用可能返回的数据不同。 如果当日截止到调用时候对应股票没有任何成交,那么snapshot中的close, high, low, last几个价格水平都将以0表示。 :param str id_or_symbol: 合约代码或简称 :return: :class:`~Snapshot` :example: 在handle_bar中调用该函数,假设策略当前时间是20160104 09:33: .. code-block:: python3 :linenos: [In] logger.info(current_snapshot('000001.XSHE')) [Out] 2016-01-04 09:33:00.00 INFO Snapshot(order_book_id: '000001.XSHE', datetime: datetime.datetime(2016, 1, 4, 9, 33), open: 10.0, high: 10.025, low: 9.9667, last: 9.9917, volume: 2050320, total_turnover: 20485195, prev_close: 9.99) """ env = Environment.get_instance() frequency = env.config.base.frequency order_book_id = assure_order_book_id(id_or_symbol) dt = env.calendar_dt if env.config.base.run_type == RUN_TYPE.BACKTEST: if ExecutionContext.phase() == EXECUTION_PHASE.BEFORE_TRADING: dt = env.data_proxy.get_previous_trading_date(env.trading_dt.date()) return env.data_proxy.current_snapshot(order_book_id, '1d', dt) # depends on [control=['if'], data=[]] elif ExecutionContext.phase() == EXECUTION_PHASE.AFTER_TRADING: return env.data_proxy.current_snapshot(order_book_id, '1d', dt) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # PT、实盘直接取最新快照,忽略 frequency, dt 参数 return env.data_proxy.current_snapshot(order_book_id, frequency, dt)
def _mb_model(self, beta, mini_batch): """ Creates the structure of the model (model matrices etc) for mini batch model Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags) """ Y = np.array(self.data[self.max_lag:]) sample = np.random.choice(len(Y), mini_batch, replace=False) Y = Y[sample] X = self.X[:, sample] # Transform latent variables z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) bias = z[self.latent_variables.z_indices['Bias']['start']:self.latent_variables.z_indices['Bias']['end']+1] bias = np.reshape(bias, (-1, self.units)) output_bias = z[self.latent_variables.z_indices['Output bias']['start']] input_weights = z[self.latent_variables.z_indices['Input weight']['start']:self.latent_variables.z_indices['Input weight']['end']+1] input_weights = np.reshape(input_weights, (-1, self.units)) output_weights = z[self.latent_variables.z_indices['Output weight']['start']:self.latent_variables.z_indices['Output weight']['end']+1] # Construct neural network h = self.activation(X.T.dot(input_weights) + bias[0]) if self.layers > 1: hidden_weights = z[self.latent_variables.z_indices['Hidden weight']['start']:self.latent_variables.z_indices['Hidden weight']['end']+1] hidden_weights = np.reshape(hidden_weights, (self.layers-1, self.units, -1)) for k in range(0, self.layers-1): h = self.activation(h.dot(hidden_weights[k]) + bias[1+k]) return h.dot(output_weights) + output_bias, Y
def function[_mb_model, parameter[self, beta, mini_batch]]: constant[ Creates the structure of the model (model matrices etc) for mini batch model Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags) ] variable[Y] assign[=] call[name[np].array, parameter[call[name[self].data][<ast.Slice object at 0x7da2044c0d60>]]] variable[sample] assign[=] call[name[np].random.choice, parameter[call[name[len], parameter[name[Y]]], name[mini_batch]]] variable[Y] assign[=] call[name[Y]][name[sample]] variable[X] assign[=] call[name[self].X][tuple[[<ast.Slice object at 0x7da2044c3e20>, <ast.Name object at 0x7da2044c27a0>]]] variable[z] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da2044c2c80>]] variable[bias] assign[=] call[name[z]][<ast.Slice object at 0x7da20c6c6470>] variable[bias] assign[=] call[name[np].reshape, parameter[name[bias], tuple[[<ast.UnaryOp object at 0x7da20c6c5d20>, <ast.Attribute object at 0x7da20c6c4e20>]]]] variable[output_bias] assign[=] call[name[z]][call[call[name[self].latent_variables.z_indices][constant[Output bias]]][constant[start]]] variable[input_weights] assign[=] call[name[z]][<ast.Slice object at 0x7da20c6c5420>] variable[input_weights] assign[=] call[name[np].reshape, parameter[name[input_weights], tuple[[<ast.UnaryOp object at 0x7da20c6c4a30>, <ast.Attribute object at 0x7da20cabf730>]]]] variable[output_weights] assign[=] call[name[z]][<ast.Slice object at 0x7da20cabf1c0>] variable[h] assign[=] call[name[self].activation, parameter[binary_operation[call[name[X].T.dot, parameter[name[input_weights]]] + call[name[bias]][constant[0]]]]] if compare[name[self].layers greater[>] constant[1]] begin[:] variable[hidden_weights] assign[=] call[name[z]][<ast.Slice object at 0x7da18f00d840>] variable[hidden_weights] assign[=] call[name[np].reshape, parameter[name[hidden_weights], tuple[[<ast.BinOp object at 0x7da18f00d9f0>, <ast.Attribute object at 0x7da18f00fd30>, <ast.UnaryOp object at 0x7da18f00d600>]]]] for taget[name[k]] in starred[call[name[range], parameter[constant[0], binary_operation[name[self].layers - constant[1]]]]] begin[:] variable[h] assign[=] call[name[self].activation, parameter[binary_operation[call[name[h].dot, parameter[call[name[hidden_weights]][name[k]]]] + call[name[bias]][binary_operation[constant[1] + name[k]]]]]] return[tuple[[<ast.BinOp object at 0x7da18f00e980>, <ast.Name object at 0x7da18f00de70>]]]
keyword[def] identifier[_mb_model] ( identifier[self] , identifier[beta] , identifier[mini_batch] ): literal[string] identifier[Y] = identifier[np] . identifier[array] ( identifier[self] . identifier[data] [ identifier[self] . identifier[max_lag] :]) identifier[sample] = identifier[np] . identifier[random] . identifier[choice] ( identifier[len] ( identifier[Y] ), identifier[mini_batch] , identifier[replace] = keyword[False] ) identifier[Y] = identifier[Y] [ identifier[sample] ] identifier[X] = identifier[self] . identifier[X] [:, identifier[sample] ] identifier[z] = identifier[np] . identifier[array] ([ identifier[self] . identifier[latent_variables] . identifier[z_list] [ identifier[k] ]. identifier[prior] . identifier[transform] ( identifier[beta] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[beta] . identifier[shape] [ literal[int] ])]) identifier[bias] = identifier[z] [ identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]: identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]+ literal[int] ] identifier[bias] = identifier[np] . identifier[reshape] ( identifier[bias] ,(- literal[int] , identifier[self] . identifier[units] )) identifier[output_bias] = identifier[z] [ identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]] identifier[input_weights] = identifier[z] [ identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]: identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]+ literal[int] ] identifier[input_weights] = identifier[np] . identifier[reshape] ( identifier[input_weights] ,(- literal[int] , identifier[self] . identifier[units] )) identifier[output_weights] = identifier[z] [ identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]: identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]+ literal[int] ] identifier[h] = identifier[self] . identifier[activation] ( identifier[X] . identifier[T] . identifier[dot] ( identifier[input_weights] )+ identifier[bias] [ literal[int] ]) keyword[if] identifier[self] . identifier[layers] > literal[int] : identifier[hidden_weights] = identifier[z] [ identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]: identifier[self] . identifier[latent_variables] . identifier[z_indices] [ literal[string] ][ literal[string] ]+ literal[int] ] identifier[hidden_weights] = identifier[np] . identifier[reshape] ( identifier[hidden_weights] ,( identifier[self] . identifier[layers] - literal[int] , identifier[self] . identifier[units] ,- literal[int] )) keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[layers] - literal[int] ): identifier[h] = identifier[self] . identifier[activation] ( identifier[h] . identifier[dot] ( identifier[hidden_weights] [ identifier[k] ])+ identifier[bias] [ literal[int] + identifier[k] ]) keyword[return] identifier[h] . identifier[dot] ( identifier[output_weights] )+ identifier[output_bias] , identifier[Y]
def _mb_model(self, beta, mini_batch): """ Creates the structure of the model (model matrices etc) for mini batch model Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags) """ Y = np.array(self.data[self.max_lag:]) sample = np.random.choice(len(Y), mini_batch, replace=False) Y = Y[sample] X = self.X[:, sample] # Transform latent variables z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) bias = z[self.latent_variables.z_indices['Bias']['start']:self.latent_variables.z_indices['Bias']['end'] + 1] bias = np.reshape(bias, (-1, self.units)) output_bias = z[self.latent_variables.z_indices['Output bias']['start']] input_weights = z[self.latent_variables.z_indices['Input weight']['start']:self.latent_variables.z_indices['Input weight']['end'] + 1] input_weights = np.reshape(input_weights, (-1, self.units)) output_weights = z[self.latent_variables.z_indices['Output weight']['start']:self.latent_variables.z_indices['Output weight']['end'] + 1] # Construct neural network h = self.activation(X.T.dot(input_weights) + bias[0]) if self.layers > 1: hidden_weights = z[self.latent_variables.z_indices['Hidden weight']['start']:self.latent_variables.z_indices['Hidden weight']['end'] + 1] hidden_weights = np.reshape(hidden_weights, (self.layers - 1, self.units, -1)) for k in range(0, self.layers - 1): h = self.activation(h.dot(hidden_weights[k]) + bias[1 + k]) # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]] return (h.dot(output_weights) + output_bias, Y)
def adsorb_both_surfaces(self, molecule, repeat=None, min_lw=5.0, reorient=True, find_args={}): """ Function that generates all adsorption structures for a given molecular adsorbate on both surfaces of a slab. This is useful for calculating surface energy where both surfaces need to be equivalent or if we want to calculate nonpolar systems. Args: molecule (Molecule): molecule corresponding to adsorbate repeat (3-tuple or list): repeat argument for supercell generation min_lw (float): minimum length and width of the slab, only used if repeat is None reorient (bool): flag on whether or not to reorient adsorbate along the miller index find_args (dict): dictionary of arguments to be passed to the call to self.find_adsorption_sites, e.g. {"distance":2.0} """ # Get the adsorbed surfaces first adslabs = self.generate_adsorption_structures(molecule, repeat=repeat, min_lw=min_lw, reorient=reorient, find_args=find_args) new_adslabs = [] for adslab in adslabs: # Find the adsorbate sites and indices in each slab symmetric, adsorbates, indices = False, [], [] for i, site in enumerate(adslab.sites): if site.surface_properties == "adsorbate": adsorbates.append(site) indices.append(i) # Start with the clean slab adslab.remove_sites(indices) slab = adslab.copy() # For each site, we add it back to the slab along with a # symmetrically equivalent position on the other side of # the slab using symmetry operations for adsorbate in adsorbates: p2 = adslab.get_symmetric_site(adsorbate.frac_coords) slab.append(adsorbate.specie, p2, properties={"surface_properties": "adsorbate"}) slab.append(adsorbate.specie, adsorbate.frac_coords, properties={"surface_properties": "adsorbate"}) new_adslabs.append(slab) return new_adslabs
def function[adsorb_both_surfaces, parameter[self, molecule, repeat, min_lw, reorient, find_args]]: constant[ Function that generates all adsorption structures for a given molecular adsorbate on both surfaces of a slab. This is useful for calculating surface energy where both surfaces need to be equivalent or if we want to calculate nonpolar systems. Args: molecule (Molecule): molecule corresponding to adsorbate repeat (3-tuple or list): repeat argument for supercell generation min_lw (float): minimum length and width of the slab, only used if repeat is None reorient (bool): flag on whether or not to reorient adsorbate along the miller index find_args (dict): dictionary of arguments to be passed to the call to self.find_adsorption_sites, e.g. {"distance":2.0} ] variable[adslabs] assign[=] call[name[self].generate_adsorption_structures, parameter[name[molecule]]] variable[new_adslabs] assign[=] list[[]] for taget[name[adslab]] in starred[name[adslabs]] begin[:] <ast.Tuple object at 0x7da2047e9060> assign[=] tuple[[<ast.Constant object at 0x7da2047ea470>, <ast.List object at 0x7da2047e8580>, <ast.List object at 0x7da2047e92d0>]] for taget[tuple[[<ast.Name object at 0x7da2047e9780>, <ast.Name object at 0x7da2047e8850>]]] in starred[call[name[enumerate], parameter[name[adslab].sites]]] begin[:] if compare[name[site].surface_properties equal[==] constant[adsorbate]] begin[:] call[name[adsorbates].append, parameter[name[site]]] call[name[indices].append, parameter[name[i]]] call[name[adslab].remove_sites, parameter[name[indices]]] variable[slab] assign[=] call[name[adslab].copy, parameter[]] for taget[name[adsorbate]] in starred[name[adsorbates]] begin[:] variable[p2] assign[=] call[name[adslab].get_symmetric_site, parameter[name[adsorbate].frac_coords]] call[name[slab].append, parameter[name[adsorbate].specie, name[p2]]] call[name[slab].append, parameter[name[adsorbate].specie, name[adsorbate].frac_coords]] call[name[new_adslabs].append, parameter[name[slab]]] return[name[new_adslabs]]
keyword[def] identifier[adsorb_both_surfaces] ( identifier[self] , identifier[molecule] , identifier[repeat] = keyword[None] , identifier[min_lw] = literal[int] , identifier[reorient] = keyword[True] , identifier[find_args] ={}): literal[string] identifier[adslabs] = identifier[self] . identifier[generate_adsorption_structures] ( identifier[molecule] , identifier[repeat] = identifier[repeat] , identifier[min_lw] = identifier[min_lw] , identifier[reorient] = identifier[reorient] , identifier[find_args] = identifier[find_args] ) identifier[new_adslabs] =[] keyword[for] identifier[adslab] keyword[in] identifier[adslabs] : identifier[symmetric] , identifier[adsorbates] , identifier[indices] = keyword[False] ,[],[] keyword[for] identifier[i] , identifier[site] keyword[in] identifier[enumerate] ( identifier[adslab] . identifier[sites] ): keyword[if] identifier[site] . identifier[surface_properties] == literal[string] : identifier[adsorbates] . identifier[append] ( identifier[site] ) identifier[indices] . identifier[append] ( identifier[i] ) identifier[adslab] . identifier[remove_sites] ( identifier[indices] ) identifier[slab] = identifier[adslab] . identifier[copy] () keyword[for] identifier[adsorbate] keyword[in] identifier[adsorbates] : identifier[p2] = identifier[adslab] . identifier[get_symmetric_site] ( identifier[adsorbate] . identifier[frac_coords] ) identifier[slab] . identifier[append] ( identifier[adsorbate] . identifier[specie] , identifier[p2] , identifier[properties] ={ literal[string] : literal[string] }) identifier[slab] . identifier[append] ( identifier[adsorbate] . identifier[specie] , identifier[adsorbate] . identifier[frac_coords] , identifier[properties] ={ literal[string] : literal[string] }) identifier[new_adslabs] . identifier[append] ( identifier[slab] ) keyword[return] identifier[new_adslabs]
def adsorb_both_surfaces(self, molecule, repeat=None, min_lw=5.0, reorient=True, find_args={}): """ Function that generates all adsorption structures for a given molecular adsorbate on both surfaces of a slab. This is useful for calculating surface energy where both surfaces need to be equivalent or if we want to calculate nonpolar systems. Args: molecule (Molecule): molecule corresponding to adsorbate repeat (3-tuple or list): repeat argument for supercell generation min_lw (float): minimum length and width of the slab, only used if repeat is None reorient (bool): flag on whether or not to reorient adsorbate along the miller index find_args (dict): dictionary of arguments to be passed to the call to self.find_adsorption_sites, e.g. {"distance":2.0} """ # Get the adsorbed surfaces first adslabs = self.generate_adsorption_structures(molecule, repeat=repeat, min_lw=min_lw, reorient=reorient, find_args=find_args) new_adslabs = [] for adslab in adslabs: # Find the adsorbate sites and indices in each slab (symmetric, adsorbates, indices) = (False, [], []) for (i, site) in enumerate(adslab.sites): if site.surface_properties == 'adsorbate': adsorbates.append(site) indices.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # Start with the clean slab adslab.remove_sites(indices) slab = adslab.copy() # For each site, we add it back to the slab along with a # symmetrically equivalent position on the other side of # the slab using symmetry operations for adsorbate in adsorbates: p2 = adslab.get_symmetric_site(adsorbate.frac_coords) slab.append(adsorbate.specie, p2, properties={'surface_properties': 'adsorbate'}) slab.append(adsorbate.specie, adsorbate.frac_coords, properties={'surface_properties': 'adsorbate'}) # depends on [control=['for'], data=['adsorbate']] new_adslabs.append(slab) # depends on [control=['for'], data=['adslab']] return new_adslabs