code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def decode_date(self, val):
    """Tries to decode strings that look like dates into datetime objects."""
    # Cheap pre-filter: only strings shaped roughly like ISO dates
    # (two dashes, longer than "YYYY-MM-DD" minus one) are worth parsing.
    looks_like_date = (
        isinstance(val, basestring)
        and val.count('-') == 2
        and len(val) > 9
    )
    if not looks_like_date:
        return val
    try:
        parsed = dateutil.parser.parse(val)
    except (TypeError, ValueError):
        # Not actually a date — hand the original value back untouched.
        return val
    # Check for UTC: GAE datetimes are offset-naive, so strip the tzinfo
    # from explicitly-UTC timestamps.
    if val.endswith(('+00:00', '-00:00', 'Z')):
        parsed = parsed.replace(tzinfo=None)
    return parsed
def function[decode_date, parameter[self, val]]: constant[Tries to decode strings that look like dates into datetime objects.] if <ast.BoolOp object at 0x7da1b02097e0> begin[:] <ast.Try object at 0x7da1b0209ea0> return[name[val]]
keyword[def] identifier[decode_date] ( identifier[self] , identifier[val] ): literal[string] keyword[if] identifier[isinstance] ( identifier[val] , identifier[basestring] ) keyword[and] identifier[val] . identifier[count] ( literal[string] )== literal[int] keyword[and] identifier[len] ( identifier[val] )> literal[int] : keyword[try] : identifier[dt] = identifier[dateutil] . identifier[parser] . identifier[parse] ( identifier[val] ) keyword[if] identifier[val] . identifier[endswith] (( literal[string] , literal[string] , literal[string] )): identifier[dt] = identifier[dt] . identifier[replace] ( identifier[tzinfo] = keyword[None] ) keyword[return] identifier[dt] keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[pass] keyword[return] identifier[val]
def decode_date(self, val): """Tries to decode strings that look like dates into datetime objects.""" if isinstance(val, basestring) and val.count('-') == 2 and (len(val) > 9): try: dt = dateutil.parser.parse(val) # Check for UTC. if val.endswith(('+00:00', '-00:00', 'Z')): # Then remove tzinfo for gae, which is offset-naive. dt = dt.replace(tzinfo=None) # depends on [control=['if'], data=[]] return dt # depends on [control=['try'], data=[]] except (TypeError, ValueError): pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return val
def write_toml(self, data, path=None):
    """Writes the given data structure out as TOML.

    :param data: Pipfile-shaped mapping (with "packages" /
        "dev-packages" sections) to serialize.
    :param path: Destination file; defaults to ``self.pipfile_location``.
    """
    if path is None:
        path = self.pipfile_location
    data = convert_toml_outline_tables(data)
    try:
        formatted_data = tomlkit.dumps(data).rstrip()
    except Exception:
        # tomlkit could not dump the raw structure directly; rebuild a
        # fresh document section by section instead.
        document = tomlkit.document()
        for section in ("packages", "dev-packages"):
            document[section] = tomlkit.container.Table()
            # Convert things to inline tables — fancy :)
            for package in data.get(section, {}):
                if hasattr(data[section][package], "keys"):
                    # Dict-like specifier (version, markers, ...) becomes
                    # an inline table; plain strings stay strings.
                    table = tomlkit.inline_table()
                    table.update(data[section][package])
                    document[section][package] = table
                else:
                    document[section][package] = tomlkit.string(
                        data[section][package])
        formatted_data = tomlkit.dumps(document).rstrip()
    # Preserve the Pipfile's original newline style when writing back to
    # it; any other destination gets the default newlines.
    if (
        vistir.compat.Path(path).absolute()
        == vistir.compat.Path(self.pipfile_location).absolute()
    ):
        newlines = self._pipfile_newlines
    else:
        newlines = DEFAULT_NEWLINES
    formatted_data = cleanup_toml(formatted_data)
    with io.open(path, "w", newline=newlines) as f:
        f.write(formatted_data)
    # pipfile is mutated!
    self.clear_pipfile_cache()
def function[write_toml, parameter[self, data, path]]: constant[Writes the given data structure out as TOML.] if compare[name[path] is constant[None]] begin[:] variable[path] assign[=] name[self].pipfile_location variable[data] assign[=] call[name[convert_toml_outline_tables], parameter[name[data]]] <ast.Try object at 0x7da1b1e8eda0> if compare[call[call[name[vistir].compat.Path, parameter[name[path]]].absolute, parameter[]] equal[==] call[call[name[vistir].compat.Path, parameter[name[self].pipfile_location]].absolute, parameter[]]] begin[:] variable[newlines] assign[=] name[self]._pipfile_newlines variable[formatted_data] assign[=] call[name[cleanup_toml], parameter[name[formatted_data]]] with call[name[io].open, parameter[name[path], constant[w]]] begin[:] call[name[f].write, parameter[name[formatted_data]]] call[name[self].clear_pipfile_cache, parameter[]]
keyword[def] identifier[write_toml] ( identifier[self] , identifier[data] , identifier[path] = keyword[None] ): literal[string] keyword[if] identifier[path] keyword[is] keyword[None] : identifier[path] = identifier[self] . identifier[pipfile_location] identifier[data] = identifier[convert_toml_outline_tables] ( identifier[data] ) keyword[try] : identifier[formatted_data] = identifier[tomlkit] . identifier[dumps] ( identifier[data] ). identifier[rstrip] () keyword[except] identifier[Exception] : identifier[document] = identifier[tomlkit] . identifier[document] () keyword[for] identifier[section] keyword[in] ( literal[string] , literal[string] ): identifier[document] [ identifier[section] ]= identifier[tomlkit] . identifier[container] . identifier[Table] () keyword[for] identifier[package] keyword[in] identifier[data] . identifier[get] ( identifier[section] ,{}): keyword[if] identifier[hasattr] ( identifier[data] [ identifier[section] ][ identifier[package] ], literal[string] ): identifier[table] = identifier[tomlkit] . identifier[inline_table] () identifier[table] . identifier[update] ( identifier[data] [ identifier[section] ][ identifier[package] ]) identifier[document] [ identifier[section] ][ identifier[package] ]= identifier[table] keyword[else] : identifier[document] [ identifier[section] ][ identifier[package] ]= identifier[tomlkit] . identifier[string] ( identifier[data] [ identifier[section] ][ identifier[package] ]) identifier[formatted_data] = identifier[tomlkit] . identifier[dumps] ( identifier[document] ). identifier[rstrip] () keyword[if] ( identifier[vistir] . identifier[compat] . identifier[Path] ( identifier[path] ). identifier[absolute] () == identifier[vistir] . identifier[compat] . identifier[Path] ( identifier[self] . identifier[pipfile_location] ). identifier[absolute] () ): identifier[newlines] = identifier[self] . 
identifier[_pipfile_newlines] keyword[else] : identifier[newlines] = identifier[DEFAULT_NEWLINES] identifier[formatted_data] = identifier[cleanup_toml] ( identifier[formatted_data] ) keyword[with] identifier[io] . identifier[open] ( identifier[path] , literal[string] , identifier[newline] = identifier[newlines] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[formatted_data] ) identifier[self] . identifier[clear_pipfile_cache] ()
def write_toml(self, data, path=None): """Writes the given data structure out as TOML.""" if path is None: path = self.pipfile_location # depends on [control=['if'], data=['path']] data = convert_toml_outline_tables(data) try: formatted_data = tomlkit.dumps(data).rstrip() # depends on [control=['try'], data=[]] except Exception: document = tomlkit.document() for section in ('packages', 'dev-packages'): document[section] = tomlkit.container.Table() # Convert things to inline tables — fancy :) for package in data.get(section, {}): if hasattr(data[section][package], 'keys'): table = tomlkit.inline_table() table.update(data[section][package]) document[section][package] = table # depends on [control=['if'], data=[]] else: document[section][package] = tomlkit.string(data[section][package]) # depends on [control=['for'], data=['package']] # depends on [control=['for'], data=['section']] formatted_data = tomlkit.dumps(document).rstrip() # depends on [control=['except'], data=[]] if vistir.compat.Path(path).absolute() == vistir.compat.Path(self.pipfile_location).absolute(): newlines = self._pipfile_newlines # depends on [control=['if'], data=[]] else: newlines = DEFAULT_NEWLINES formatted_data = cleanup_toml(formatted_data) with io.open(path, 'w', newline=newlines) as f: f.write(formatted_data) # depends on [control=['with'], data=['f']] # pipfile is mutated! self.clear_pipfile_cache()
def _get_baremetal_connections(self, port,
                               only_active_switch=False,
                               from_segment=False):
    """Get switch ips and interfaces from baremetal transaction.

    This method is used to extract switch/interface information
    from transactions where VNIC_TYPE is baremetal.

    :param port: Received port transaction
    :param only_active_switch: Indicator for selecting connections
        with switches that are active
    :param from_segment: only return interfaces from the
        segment/transaction as opposed to say port channels which
        are learned.
    :Returns: list of switch_ip, intf_type, port_id, is_native
    """
    connections = []
    # Subports of a trunk are tagged (non-native); a standalone port is
    # treated as native.
    is_native = False if self.trunk.is_trunk_subport(port) else True
    all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
    for link_info in all_link_info:
        # Extract port info
        intf_type, port = nexus_help.split_interface_name(
            link_info['port_id'])
        # Determine if this switch is to be skipped
        switch_info = self._get_baremetal_switch_info(
            link_info)
        if not switch_info:
            continue
        switch_ip = switch_info['switch_ip']
        # If not for Nexus
        if not self._switch_defined(switch_ip):
            continue
        # Requested connections for only active switches
        if (only_active_switch and
                not self.is_switch_active(switch_ip)):
            continue
        ch_grp = 0
        if not from_segment:
            # Look up whether this interface was learned as a member of
            # a port-channel; if so, report the channel group interface
            # instead of the physical one.
            try:
                reserved = nxos_db.get_switch_if_host_mappings(
                    switch_ip,
                    nexus_help.format_interface_name(
                        intf_type, port))
                if reserved[0].ch_grp > 0:
                    ch_grp = reserved[0].ch_grp
                    intf_type, port = nexus_help.split_interface_name(
                        '', ch_grp)
            except excep.NexusHostMappingNotFound:
                # No mapping recorded yet — keep the physical interface.
                pass
        connections.append((switch_ip, intf_type, port,
                            is_native, ch_grp))
    return connections
def function[_get_baremetal_connections, parameter[self, port, only_active_switch, from_segment]]: constant[Get switch ips and interfaces from baremetal transaction. This method is used to extract switch/interface information from transactions where VNIC_TYPE is baremetal. :param port: Received port transaction :param only_active_switch: Indicator for selecting connections with switches that are active :param from_segment: only return interfaces from the segment/transaction as opposed to say port channels which are learned. :Returns: list of switch_ip, intf_type, port_id, is_native ] variable[connections] assign[=] list[[]] variable[is_native] assign[=] <ast.IfExp object at 0x7da1b1ba9d50> variable[all_link_info] assign[=] call[call[name[port]][name[bc].portbindings.PROFILE]][constant[local_link_information]] for taget[name[link_info]] in starred[name[all_link_info]] begin[:] <ast.Tuple object at 0x7da1b1ba9090> assign[=] call[name[nexus_help].split_interface_name, parameter[call[name[link_info]][constant[port_id]]]] variable[switch_info] assign[=] call[name[self]._get_baremetal_switch_info, parameter[name[link_info]]] if <ast.UnaryOp object at 0x7da18bc709a0> begin[:] continue variable[switch_ip] assign[=] call[name[switch_info]][constant[switch_ip]] if <ast.UnaryOp object at 0x7da18bc73ca0> begin[:] continue if <ast.BoolOp object at 0x7da18bc708b0> begin[:] continue variable[ch_grp] assign[=] constant[0] if <ast.UnaryOp object at 0x7da1b1c62380> begin[:] <ast.Try object at 0x7da1b1c61240> call[name[connections].append, parameter[tuple[[<ast.Name object at 0x7da1b1baa4d0>, <ast.Name object at 0x7da1b1baa200>, <ast.Name object at 0x7da1b1baa110>, <ast.Name object at 0x7da1b1baa140>, <ast.Name object at 0x7da1b1baa170>]]]] return[name[connections]]
keyword[def] identifier[_get_baremetal_connections] ( identifier[self] , identifier[port] , identifier[only_active_switch] = keyword[False] , identifier[from_segment] = keyword[False] ): literal[string] identifier[connections] =[] identifier[is_native] = keyword[False] keyword[if] identifier[self] . identifier[trunk] . identifier[is_trunk_subport] ( identifier[port] ) keyword[else] keyword[True] identifier[all_link_info] = identifier[port] [ identifier[bc] . identifier[portbindings] . identifier[PROFILE] ][ literal[string] ] keyword[for] identifier[link_info] keyword[in] identifier[all_link_info] : identifier[intf_type] , identifier[port] = identifier[nexus_help] . identifier[split_interface_name] ( identifier[link_info] [ literal[string] ]) identifier[switch_info] = identifier[self] . identifier[_get_baremetal_switch_info] ( identifier[link_info] ) keyword[if] keyword[not] identifier[switch_info] : keyword[continue] identifier[switch_ip] = identifier[switch_info] [ literal[string] ] keyword[if] keyword[not] identifier[self] . identifier[_switch_defined] ( identifier[switch_ip] ): keyword[continue] keyword[if] ( identifier[only_active_switch] keyword[and] keyword[not] identifier[self] . identifier[is_switch_active] ( identifier[switch_ip] )): keyword[continue] identifier[ch_grp] = literal[int] keyword[if] keyword[not] identifier[from_segment] : keyword[try] : identifier[reserved] = identifier[nxos_db] . identifier[get_switch_if_host_mappings] ( identifier[switch_ip] , identifier[nexus_help] . identifier[format_interface_name] ( identifier[intf_type] , identifier[port] )) keyword[if] identifier[reserved] [ literal[int] ]. identifier[ch_grp] > literal[int] : identifier[ch_grp] = identifier[reserved] [ literal[int] ]. identifier[ch_grp] identifier[intf_type] , identifier[port] = identifier[nexus_help] . identifier[split_interface_name] ( literal[string] , identifier[ch_grp] ) keyword[except] identifier[excep] . 
identifier[NexusHostMappingNotFound] : keyword[pass] identifier[connections] . identifier[append] (( identifier[switch_ip] , identifier[intf_type] , identifier[port] , identifier[is_native] , identifier[ch_grp] )) keyword[return] identifier[connections]
def _get_baremetal_connections(self, port, only_active_switch=False, from_segment=False): """Get switch ips and interfaces from baremetal transaction. This method is used to extract switch/interface information from transactions where VNIC_TYPE is baremetal. :param port: Received port transaction :param only_active_switch: Indicator for selecting connections with switches that are active :param from_segment: only return interfaces from the segment/transaction as opposed to say port channels which are learned. :Returns: list of switch_ip, intf_type, port_id, is_native """ connections = [] is_native = False if self.trunk.is_trunk_subport(port) else True all_link_info = port[bc.portbindings.PROFILE]['local_link_information'] for link_info in all_link_info: # Extract port info (intf_type, port) = nexus_help.split_interface_name(link_info['port_id']) # Determine if this switch is to be skipped switch_info = self._get_baremetal_switch_info(link_info) if not switch_info: continue # depends on [control=['if'], data=[]] switch_ip = switch_info['switch_ip'] # If not for Nexus if not self._switch_defined(switch_ip): continue # depends on [control=['if'], data=[]] # Requested connections for only active switches if only_active_switch and (not self.is_switch_active(switch_ip)): continue # depends on [control=['if'], data=[]] ch_grp = 0 if not from_segment: try: reserved = nxos_db.get_switch_if_host_mappings(switch_ip, nexus_help.format_interface_name(intf_type, port)) if reserved[0].ch_grp > 0: ch_grp = reserved[0].ch_grp (intf_type, port) = nexus_help.split_interface_name('', ch_grp) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except excep.NexusHostMappingNotFound: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] connections.append((switch_ip, intf_type, port, is_native, ch_grp)) # depends on [control=['for'], data=['link_info']] return connections
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple

    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> np.allclose(R0, R1)
    True
    >>> angles = (4*math.pi) * (np.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not np.allclose(R0, R1): print(axes, "failed")
    """
    # Accept either the string form ('sxyz', ...) or the already-encoded
    # 4-tuple (firstaxis, parity, repetition, frame).
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    i = firstaxis
    j = _NEXT_AXIS[i + parity]
    k = _NEXT_AXIS[i - parity + 1]
    # Only the rotational 3x3 part of the (possibly 4x4) matrix is used.
    M = np.array(matrix, dtype=np.float64, copy=False)[:3, :3]
    if repetition:
        # Repeating-axis sequences (e.g. 'xyx'): gimbal lock when the
        # middle angle is near 0 or pi (sy ~ 0).
        sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k])
        if sy > _EPS:
            ax = math.atan2(M[i, j], M[i, k])
            ay = math.atan2(sy, M[i, i])
            az = math.atan2(M[j, i], -M[k, i])
        else:
            # Degenerate case: only the sum/difference of the outer
            # angles is determined, so the last angle is set to 0.
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(sy, M[i, i])
            az = 0.0
    else:
        # Non-repeating sequences (e.g. 'xyz'): gimbal lock when the
        # middle angle is near +/- pi/2 (cy ~ 0).
        cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i])
        if cy > _EPS:
            ax = math.atan2(M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2(M[j, i], M[i, i])
        else:
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0
    # Undo the sign/ordering conventions encoded in the axis tuple.
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
def function[euler_from_matrix, parameter[matrix, axes]]: constant[Return Euler angles from rotation matrix for specified axis sequence. axes : One of 24 axis sequences as string or encoded tuple Note that many Euler angle triplets can describe one matrix. >>> R0 = euler_matrix(1, 2, 3, 'syxz') >>> al, be, ga = euler_from_matrix(R0, 'syxz') >>> R1 = euler_matrix(al, be, ga, 'syxz') >>> np.allclose(R0, R1) True >>> angles = (4*math.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R0 = euler_matrix(axes=axes, *angles) ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) ... if not np.allclose(R0, R1): print(axes, "failed") ] <ast.Try object at 0x7da207f997b0> variable[i] assign[=] name[firstaxis] variable[j] assign[=] call[name[_NEXT_AXIS]][binary_operation[name[i] + name[parity]]] variable[k] assign[=] call[name[_NEXT_AXIS]][binary_operation[binary_operation[name[i] - name[parity]] + constant[1]]] variable[M] assign[=] call[call[name[np].array, parameter[name[matrix]]]][tuple[[<ast.Slice object at 0x7da207f9b730>, <ast.Slice object at 0x7da207f9a950>]]] if name[repetition] begin[:] variable[sy] assign[=] call[name[math].sqrt, parameter[binary_operation[binary_operation[call[name[M]][tuple[[<ast.Name object at 0x7da207f99420>, <ast.Name object at 0x7da207f98a90>]]] * call[name[M]][tuple[[<ast.Name object at 0x7da207f987f0>, <ast.Name object at 0x7da207f99120>]]]] + binary_operation[call[name[M]][tuple[[<ast.Name object at 0x7da207f98c10>, <ast.Name object at 0x7da207f99db0>]]] * call[name[M]][tuple[[<ast.Name object at 0x7da207f98850>, <ast.Name object at 0x7da207f9ba30>]]]]]]] if compare[name[sy] greater[>] name[_EPS]] begin[:] variable[ax] assign[=] call[name[math].atan2, parameter[call[name[M]][tuple[[<ast.Name object at 0x7da207f99b70>, <ast.Name object at 0x7da207f99270>]]], call[name[M]][tuple[[<ast.Name object at 0x7da207f99390>, <ast.Name object at 0x7da207f98ca0>]]]]] variable[ay] assign[=] call[name[math].atan2, 
parameter[name[sy], call[name[M]][tuple[[<ast.Name object at 0x7da207f9a530>, <ast.Name object at 0x7da207f9a980>]]]]] variable[az] assign[=] call[name[math].atan2, parameter[call[name[M]][tuple[[<ast.Name object at 0x7da207f988b0>, <ast.Name object at 0x7da207f9b400>]]], <ast.UnaryOp object at 0x7da207f98220>]] if name[parity] begin[:] <ast.Tuple object at 0x7da2044c3400> assign[=] tuple[[<ast.UnaryOp object at 0x7da2044c2740>, <ast.UnaryOp object at 0x7da2044c2b00>, <ast.UnaryOp object at 0x7da2044c0880>]] if name[frame] begin[:] <ast.Tuple object at 0x7da2044c0250> assign[=] tuple[[<ast.Name object at 0x7da2044c3040>, <ast.Name object at 0x7da2044c2a10>]] return[tuple[[<ast.Name object at 0x7da2044c33d0>, <ast.Name object at 0x7da2044c3ee0>, <ast.Name object at 0x7da2044c13c0>]]]
keyword[def] identifier[euler_from_matrix] ( identifier[matrix] , identifier[axes] = literal[string] ): literal[string] keyword[try] : identifier[firstaxis] , identifier[parity] , identifier[repetition] , identifier[frame] = identifier[_AXES2TUPLE] [ identifier[axes] . identifier[lower] ()] keyword[except] ( identifier[AttributeError] , identifier[KeyError] ): identifier[_TUPLE2AXES] [ identifier[axes] ] identifier[firstaxis] , identifier[parity] , identifier[repetition] , identifier[frame] = identifier[axes] identifier[i] = identifier[firstaxis] identifier[j] = identifier[_NEXT_AXIS] [ identifier[i] + identifier[parity] ] identifier[k] = identifier[_NEXT_AXIS] [ identifier[i] - identifier[parity] + literal[int] ] identifier[M] = identifier[np] . identifier[array] ( identifier[matrix] , identifier[dtype] = identifier[np] . identifier[float64] , identifier[copy] = keyword[False] )[: literal[int] ,: literal[int] ] keyword[if] identifier[repetition] : identifier[sy] = identifier[math] . identifier[sqrt] ( identifier[M] [ identifier[i] , identifier[j] ]* identifier[M] [ identifier[i] , identifier[j] ]+ identifier[M] [ identifier[i] , identifier[k] ]* identifier[M] [ identifier[i] , identifier[k] ]) keyword[if] identifier[sy] > identifier[_EPS] : identifier[ax] = identifier[math] . identifier[atan2] ( identifier[M] [ identifier[i] , identifier[j] ], identifier[M] [ identifier[i] , identifier[k] ]) identifier[ay] = identifier[math] . identifier[atan2] ( identifier[sy] , identifier[M] [ identifier[i] , identifier[i] ]) identifier[az] = identifier[math] . identifier[atan2] ( identifier[M] [ identifier[j] , identifier[i] ],- identifier[M] [ identifier[k] , identifier[i] ]) keyword[else] : identifier[ax] = identifier[math] . identifier[atan2] (- identifier[M] [ identifier[j] , identifier[k] ], identifier[M] [ identifier[j] , identifier[j] ]) identifier[ay] = identifier[math] . 
identifier[atan2] ( identifier[sy] , identifier[M] [ identifier[i] , identifier[i] ]) identifier[az] = literal[int] keyword[else] : identifier[cy] = identifier[math] . identifier[sqrt] ( identifier[M] [ identifier[i] , identifier[i] ]* identifier[M] [ identifier[i] , identifier[i] ]+ identifier[M] [ identifier[j] , identifier[i] ]* identifier[M] [ identifier[j] , identifier[i] ]) keyword[if] identifier[cy] > identifier[_EPS] : identifier[ax] = identifier[math] . identifier[atan2] ( identifier[M] [ identifier[k] , identifier[j] ], identifier[M] [ identifier[k] , identifier[k] ]) identifier[ay] = identifier[math] . identifier[atan2] (- identifier[M] [ identifier[k] , identifier[i] ], identifier[cy] ) identifier[az] = identifier[math] . identifier[atan2] ( identifier[M] [ identifier[j] , identifier[i] ], identifier[M] [ identifier[i] , identifier[i] ]) keyword[else] : identifier[ax] = identifier[math] . identifier[atan2] (- identifier[M] [ identifier[j] , identifier[k] ], identifier[M] [ identifier[j] , identifier[j] ]) identifier[ay] = identifier[math] . identifier[atan2] (- identifier[M] [ identifier[k] , identifier[i] ], identifier[cy] ) identifier[az] = literal[int] keyword[if] identifier[parity] : identifier[ax] , identifier[ay] , identifier[az] =- identifier[ax] ,- identifier[ay] ,- identifier[az] keyword[if] identifier[frame] : identifier[ax] , identifier[az] = identifier[az] , identifier[ax] keyword[return] identifier[ax] , identifier[ay] , identifier[az]
def euler_from_matrix(matrix, axes='sxyz'): """Return Euler angles from rotation matrix for specified axis sequence. axes : One of 24 axis sequences as string or encoded tuple Note that many Euler angle triplets can describe one matrix. >>> R0 = euler_matrix(1, 2, 3, 'syxz') >>> al, be, ga = euler_from_matrix(R0, 'syxz') >>> R1 = euler_matrix(al, be, ga, 'syxz') >>> np.allclose(R0, R1) True >>> angles = (4*math.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R0 = euler_matrix(axes=axes, *angles) ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) ... if not np.allclose(R0, R1): print(axes, "failed") """ try: (firstaxis, parity, repetition, frame) = _AXES2TUPLE[axes.lower()] # depends on [control=['try'], data=[]] except (AttributeError, KeyError): _TUPLE2AXES[axes] # validation (firstaxis, parity, repetition, frame) = axes # depends on [control=['except'], data=[]] i = firstaxis j = _NEXT_AXIS[i + parity] k = _NEXT_AXIS[i - parity + 1] M = np.array(matrix, dtype=np.float64, copy=False)[:3, :3] if repetition: sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) if sy > _EPS: ax = math.atan2(M[i, j], M[i, k]) ay = math.atan2(sy, M[i, i]) az = math.atan2(M[j, i], -M[k, i]) # depends on [control=['if'], data=['sy']] else: ax = math.atan2(-M[j, k], M[j, j]) ay = math.atan2(sy, M[i, i]) az = 0.0 # depends on [control=['if'], data=[]] else: cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) if cy > _EPS: ax = math.atan2(M[k, j], M[k, k]) ay = math.atan2(-M[k, i], cy) az = math.atan2(M[j, i], M[i, i]) # depends on [control=['if'], data=['cy']] else: ax = math.atan2(-M[j, k], M[j, j]) ay = math.atan2(-M[k, i], cy) az = 0.0 if parity: (ax, ay, az) = (-ax, -ay, -az) # depends on [control=['if'], data=[]] if frame: (ax, az) = (az, ax) # depends on [control=['if'], data=[]] return (ax, ay, az)
def login(self):
    """ Attempt a login to LiveEdu.tv """
    email = self.get_option("email")
    password = self.get_option("password")
    # Without both credentials there is nothing to do.
    if not (email and password):
        return
    # Fetch the login page first to harvest the CSRF token.
    page = self.session.http.get(self.login_url)
    match = self.csrf_re.search(page.text)
    token = match and match.group(1)
    self.logger.debug("Attempting login as {0} (token={1})", email, token)
    payload = {
        "login": email,
        "password": password,
        "csrfmiddlewaretoken": token,
    }
    response = self.session.http.post(
        self.login_url,
        data=payload,
        allow_redirects=False,
        raise_for_status=False,
        headers={"Referer": self.login_url},
    )
    # A successful login redirects (302); anything else is a failure.
    if response.status_code != 302:
        self.logger.error("Failed to login to LiveEdu account: {0}", email)
def function[login, parameter[self]]: constant[ Attempt a login to LiveEdu.tv ] variable[email] assign[=] call[name[self].get_option, parameter[constant[email]]] variable[password] assign[=] call[name[self].get_option, parameter[constant[password]]] if <ast.BoolOp object at 0x7da18f09cac0> begin[:] variable[res] assign[=] call[name[self].session.http.get, parameter[name[self].login_url]] variable[csrf_match] assign[=] call[name[self].csrf_re.search, parameter[name[res].text]] variable[token] assign[=] <ast.BoolOp object at 0x7da18f09f280> call[name[self].logger.debug, parameter[constant[Attempting login as {0} (token={1})], name[email], name[token]]] variable[res] assign[=] call[name[self].session.http.post, parameter[name[self].login_url]] if compare[name[res].status_code not_equal[!=] constant[302]] begin[:] call[name[self].logger.error, parameter[constant[Failed to login to LiveEdu account: {0}], name[email]]]
keyword[def] identifier[login] ( identifier[self] ): literal[string] identifier[email] = identifier[self] . identifier[get_option] ( literal[string] ) identifier[password] = identifier[self] . identifier[get_option] ( literal[string] ) keyword[if] identifier[email] keyword[and] identifier[password] : identifier[res] = identifier[self] . identifier[session] . identifier[http] . identifier[get] ( identifier[self] . identifier[login_url] ) identifier[csrf_match] = identifier[self] . identifier[csrf_re] . identifier[search] ( identifier[res] . identifier[text] ) identifier[token] = identifier[csrf_match] keyword[and] identifier[csrf_match] . identifier[group] ( literal[int] ) identifier[self] . identifier[logger] . identifier[debug] ( literal[string] , identifier[email] , identifier[token] ) identifier[res] = identifier[self] . identifier[session] . identifier[http] . identifier[post] ( identifier[self] . identifier[login_url] , identifier[data] = identifier[dict] ( identifier[login] = identifier[email] , identifier[password] = identifier[password] , identifier[csrfmiddlewaretoken] = identifier[token] ), identifier[allow_redirects] = keyword[False] , identifier[raise_for_status] = keyword[False] , identifier[headers] ={ literal[string] : identifier[self] . identifier[login_url] }) keyword[if] identifier[res] . identifier[status_code] != literal[int] : identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[email] )
def login(self): """ Attempt a login to LiveEdu.tv """ email = self.get_option('email') password = self.get_option('password') if email and password: res = self.session.http.get(self.login_url) csrf_match = self.csrf_re.search(res.text) token = csrf_match and csrf_match.group(1) self.logger.debug('Attempting login as {0} (token={1})', email, token) res = self.session.http.post(self.login_url, data=dict(login=email, password=password, csrfmiddlewaretoken=token), allow_redirects=False, raise_for_status=False, headers={'Referer': self.login_url}) if res.status_code != 302: self.logger.error('Failed to login to LiveEdu account: {0}', email) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def progress(self, percent, tag, summary):
    """
    Can be overridden or monkey-patched if you want to get progress
    updates yourself.
    """
    # Forward to the registered callback, if any; otherwise updates are
    # silently dropped.
    callback = self.progress_updates
    if callback:
        callback(percent, tag, summary)
def function[progress, parameter[self, percent, tag, summary]]: constant[ Can be overridden or monkey-patched if you want to get progress updates yourself. ] if name[self].progress_updates begin[:] call[name[self].progress_updates, parameter[name[percent], name[tag], name[summary]]]
keyword[def] identifier[progress] ( identifier[self] , identifier[percent] , identifier[tag] , identifier[summary] ): literal[string] keyword[if] identifier[self] . identifier[progress_updates] : identifier[self] . identifier[progress_updates] ( identifier[percent] , identifier[tag] , identifier[summary] )
def progress(self, percent, tag, summary): """ Can be overridden or monkey-patched if you want to get progress updates yourself. """ if self.progress_updates: self.progress_updates(percent, tag, summary) # depends on [control=['if'], data=[]]
def load_config(filename):
    """
    Read config file specified by `filename`

    Parameters
    ----------
    filename : str
        Name of the config file inside the package's ``config``
        directory (resolved relative to the installed ding0 package).
    """
    package_path = ding0.__path__[0]
    config_file = path.join(package_path, 'config', filename)
    # Declared up front so the assignment below updates the module-level
    # flag rather than creating a local.
    global _loaded
    try:
        cfg.read(config_file)
        _loaded = True
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception. Failure is logged
        # (with traceback) rather than raised, as before.
        logger.exception("configfile not found.")
def function[load_config, parameter[filename]]: constant[ Read config file specified by `filename` Parameters ---------- filename : str Description of filename ] variable[package_path] assign[=] call[name[ding0].__path__][constant[0]] variable[FILE] assign[=] call[name[path].join, parameter[name[package_path], constant[config], name[filename]]] <ast.Try object at 0x7da20e74b1f0>
keyword[def] identifier[load_config] ( identifier[filename] ): literal[string] identifier[package_path] = identifier[ding0] . identifier[__path__] [ literal[int] ] identifier[FILE] = identifier[path] . identifier[join] ( identifier[package_path] , literal[string] , identifier[filename] ) keyword[try] : identifier[cfg] . identifier[read] ( identifier[FILE] ) keyword[global] identifier[_loaded] identifier[_loaded] = keyword[True] keyword[except] : identifier[logger] . identifier[exception] ( literal[string] )
def load_config(filename): """ Read config file specified by `filename` Parameters ---------- filename : str Description of filename """ package_path = ding0.__path__[0] FILE = path.join(package_path, 'config', filename) try: cfg.read(FILE) global _loaded _loaded = True # depends on [control=['try'], data=[]] except: logger.exception('configfile not found.') # depends on [control=['except'], data=[]]
def calcPunkProp(sNow):
    """
    Calculates the proportion of punks in the population, given data
    from each type.

    Parameters
    ----------
    sNow : [np.array]
        List of arrays of binary data, representing the fashion choice
        of each agent in each type of this market (0=jock, 1=punk).

    Returns
    -------
    FashionMarketInfo
        Market info object carrying the overall punk proportion.
    """
    # Flatten all types' style arrays into one vector and average it;
    # the mean of 0/1 data is exactly the punk share.
    all_styles = np.asarray(sNow).flatten()
    punk_share = np.mean(all_styles)
    return FashionMarketInfo(punk_share)
def function[calcPunkProp, parameter[sNow]]: constant[ Calculates the proportion of punks in the population, given data from each type. Parameters ---------- pNow : [np.array] List of arrays of binary data, representing the fashion choice of each agent in each type of this market (0=jock, 1=punk). pop_size : [int] List with the number of agents of each type in the market. Unused. ] variable[sNowX] assign[=] call[call[name[np].asarray, parameter[name[sNow]]].flatten, parameter[]] variable[pNow] assign[=] call[name[np].mean, parameter[name[sNowX]]] return[call[name[FashionMarketInfo], parameter[name[pNow]]]]
keyword[def] identifier[calcPunkProp] ( identifier[sNow] ): literal[string] identifier[sNowX] = identifier[np] . identifier[asarray] ( identifier[sNow] ). identifier[flatten] () identifier[pNow] = identifier[np] . identifier[mean] ( identifier[sNowX] ) keyword[return] identifier[FashionMarketInfo] ( identifier[pNow] )
def calcPunkProp(sNow): """ Calculates the proportion of punks in the population, given data from each type. Parameters ---------- pNow : [np.array] List of arrays of binary data, representing the fashion choice of each agent in each type of this market (0=jock, 1=punk). pop_size : [int] List with the number of agents of each type in the market. Unused. """ sNowX = np.asarray(sNow).flatten() pNow = np.mean(sNowX) return FashionMarketInfo(pNow)
def reset_for_retry(self): """Reset self for shard retry.""" self.retries += 1 self.last_work_item = "" self.active = True self.result_status = None self.input_finished = False self.counters_map = CountersMap() self.slice_id = 0 self.slice_start_time = None self.slice_request_id = None self.slice_retries = 0 self.acquired_once = False
def function[reset_for_retry, parameter[self]]: constant[Reset self for shard retry.] <ast.AugAssign object at 0x7da1b0578d00> name[self].last_work_item assign[=] constant[] name[self].active assign[=] constant[True] name[self].result_status assign[=] constant[None] name[self].input_finished assign[=] constant[False] name[self].counters_map assign[=] call[name[CountersMap], parameter[]] name[self].slice_id assign[=] constant[0] name[self].slice_start_time assign[=] constant[None] name[self].slice_request_id assign[=] constant[None] name[self].slice_retries assign[=] constant[0] name[self].acquired_once assign[=] constant[False]
keyword[def] identifier[reset_for_retry] ( identifier[self] ): literal[string] identifier[self] . identifier[retries] += literal[int] identifier[self] . identifier[last_work_item] = literal[string] identifier[self] . identifier[active] = keyword[True] identifier[self] . identifier[result_status] = keyword[None] identifier[self] . identifier[input_finished] = keyword[False] identifier[self] . identifier[counters_map] = identifier[CountersMap] () identifier[self] . identifier[slice_id] = literal[int] identifier[self] . identifier[slice_start_time] = keyword[None] identifier[self] . identifier[slice_request_id] = keyword[None] identifier[self] . identifier[slice_retries] = literal[int] identifier[self] . identifier[acquired_once] = keyword[False]
def reset_for_retry(self): """Reset self for shard retry.""" self.retries += 1 self.last_work_item = '' self.active = True self.result_status = None self.input_finished = False self.counters_map = CountersMap() self.slice_id = 0 self.slice_start_time = None self.slice_request_id = None self.slice_retries = 0 self.acquired_once = False
def data_received(self, data): """Called when asyncio.Protocol detects received data from network.""" self.buffer += data.decode() self.log.debug('Received %d bytes from AVR: %s', len(self.buffer), self.buffer) self._assemble_buffer()
def function[data_received, parameter[self, data]]: constant[Called when asyncio.Protocol detects received data from network.] <ast.AugAssign object at 0x7da1b23b2500> call[name[self].log.debug, parameter[constant[Received %d bytes from AVR: %s], call[name[len], parameter[name[self].buffer]], name[self].buffer]] call[name[self]._assemble_buffer, parameter[]]
keyword[def] identifier[data_received] ( identifier[self] , identifier[data] ): literal[string] identifier[self] . identifier[buffer] += identifier[data] . identifier[decode] () identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[len] ( identifier[self] . identifier[buffer] ), identifier[self] . identifier[buffer] ) identifier[self] . identifier[_assemble_buffer] ()
def data_received(self, data): """Called when asyncio.Protocol detects received data from network.""" self.buffer += data.decode() self.log.debug('Received %d bytes from AVR: %s', len(self.buffer), self.buffer) self._assemble_buffer()
def IsAllSpent(self): """ Flag indicating if all balance is spend. Returns: bool: """ for item in self.Items: if item == CoinState.Confirmed: return False return True
def function[IsAllSpent, parameter[self]]: constant[ Flag indicating if all balance is spend. Returns: bool: ] for taget[name[item]] in starred[name[self].Items] begin[:] if compare[name[item] equal[==] name[CoinState].Confirmed] begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[IsAllSpent] ( identifier[self] ): literal[string] keyword[for] identifier[item] keyword[in] identifier[self] . identifier[Items] : keyword[if] identifier[item] == identifier[CoinState] . identifier[Confirmed] : keyword[return] keyword[False] keyword[return] keyword[True]
def IsAllSpent(self): """ Flag indicating if all balance is spend. Returns: bool: """ for item in self.Items: if item == CoinState.Confirmed: return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] return True
def _do_fail_callback( self, reason, msg, reply_cb, inform_cb, user_data, timeout_handle): """Do callback for a failed request.""" # this may also result in reply_cb being None if no # reply_cb was passed to the request method if reply_cb is None: # this happens if no reply_cb was passed in to the request return reason_msg = Message.reply(msg.name, "fail", reason, mid=msg.mid) try: if user_data is None: reply_cb(reason_msg) else: reply_cb(reason_msg, *user_data) except Exception: e_type, e_value, trace = sys.exc_info() exc_reason = "\n".join(traceback.format_exception( e_type, e_value, trace, self._tb_limit)) self._logger.error("Callback reply during failure %s, %s FAIL: %s" % (reason, msg.name, exc_reason))
def function[_do_fail_callback, parameter[self, reason, msg, reply_cb, inform_cb, user_data, timeout_handle]]: constant[Do callback for a failed request.] if compare[name[reply_cb] is constant[None]] begin[:] return[None] variable[reason_msg] assign[=] call[name[Message].reply, parameter[name[msg].name, constant[fail], name[reason]]] <ast.Try object at 0x7da1b05d8940>
keyword[def] identifier[_do_fail_callback] ( identifier[self] , identifier[reason] , identifier[msg] , identifier[reply_cb] , identifier[inform_cb] , identifier[user_data] , identifier[timeout_handle] ): literal[string] keyword[if] identifier[reply_cb] keyword[is] keyword[None] : keyword[return] identifier[reason_msg] = identifier[Message] . identifier[reply] ( identifier[msg] . identifier[name] , literal[string] , identifier[reason] , identifier[mid] = identifier[msg] . identifier[mid] ) keyword[try] : keyword[if] identifier[user_data] keyword[is] keyword[None] : identifier[reply_cb] ( identifier[reason_msg] ) keyword[else] : identifier[reply_cb] ( identifier[reason_msg] ,* identifier[user_data] ) keyword[except] identifier[Exception] : identifier[e_type] , identifier[e_value] , identifier[trace] = identifier[sys] . identifier[exc_info] () identifier[exc_reason] = literal[string] . identifier[join] ( identifier[traceback] . identifier[format_exception] ( identifier[e_type] , identifier[e_value] , identifier[trace] , identifier[self] . identifier[_tb_limit] )) identifier[self] . identifier[_logger] . identifier[error] ( literal[string] % ( identifier[reason] , identifier[msg] . identifier[name] , identifier[exc_reason] ))
def _do_fail_callback(self, reason, msg, reply_cb, inform_cb, user_data, timeout_handle): """Do callback for a failed request.""" # this may also result in reply_cb being None if no # reply_cb was passed to the request method if reply_cb is None: # this happens if no reply_cb was passed in to the request return # depends on [control=['if'], data=[]] reason_msg = Message.reply(msg.name, 'fail', reason, mid=msg.mid) try: if user_data is None: reply_cb(reason_msg) # depends on [control=['if'], data=[]] else: reply_cb(reason_msg, *user_data) # depends on [control=['try'], data=[]] except Exception: (e_type, e_value, trace) = sys.exc_info() exc_reason = '\n'.join(traceback.format_exception(e_type, e_value, trace, self._tb_limit)) self._logger.error('Callback reply during failure %s, %s FAIL: %s' % (reason, msg.name, exc_reason)) # depends on [control=['except'], data=[]]
def eulerian_tour_undirected(graph): """Eulerian tour on an undirected graph :param graph: directed graph in listlist format, cannot be listdict :assumes: graph is eulerian :returns: eulerian cycle as a vertex list :complexity: `O(|V|+|E|)` """ P = [] Q = [0] R = [] succ = [0] * len(graph) seen = [set() for _ in graph] while Q: node = Q.pop() P.append(node) while succ[node] < len(graph[node]): neighbor = graph[node][succ[node]] succ[node] += 1 if neighbor not in seen[node]: seen[neighbor].add(node) R.append(neighbor) node = neighbor while R: Q.append(R.pop()) return P
def function[eulerian_tour_undirected, parameter[graph]]: constant[Eulerian tour on an undirected graph :param graph: directed graph in listlist format, cannot be listdict :assumes: graph is eulerian :returns: eulerian cycle as a vertex list :complexity: `O(|V|+|E|)` ] variable[P] assign[=] list[[]] variable[Q] assign[=] list[[<ast.Constant object at 0x7da20c9914e0>]] variable[R] assign[=] list[[]] variable[succ] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20c991d20>]] * call[name[len], parameter[name[graph]]]] variable[seen] assign[=] <ast.ListComp object at 0x7da20c991840> while name[Q] begin[:] variable[node] assign[=] call[name[Q].pop, parameter[]] call[name[P].append, parameter[name[node]]] while compare[call[name[succ]][name[node]] less[<] call[name[len], parameter[call[name[graph]][name[node]]]]] begin[:] variable[neighbor] assign[=] call[call[name[graph]][name[node]]][call[name[succ]][name[node]]] <ast.AugAssign object at 0x7da20cabcee0> if compare[name[neighbor] <ast.NotIn object at 0x7da2590d7190> call[name[seen]][name[node]]] begin[:] call[call[name[seen]][name[neighbor]].add, parameter[name[node]]] call[name[R].append, parameter[name[neighbor]]] variable[node] assign[=] name[neighbor] while name[R] begin[:] call[name[Q].append, parameter[call[name[R].pop, parameter[]]]] return[name[P]]
keyword[def] identifier[eulerian_tour_undirected] ( identifier[graph] ): literal[string] identifier[P] =[] identifier[Q] =[ literal[int] ] identifier[R] =[] identifier[succ] =[ literal[int] ]* identifier[len] ( identifier[graph] ) identifier[seen] =[ identifier[set] () keyword[for] identifier[_] keyword[in] identifier[graph] ] keyword[while] identifier[Q] : identifier[node] = identifier[Q] . identifier[pop] () identifier[P] . identifier[append] ( identifier[node] ) keyword[while] identifier[succ] [ identifier[node] ]< identifier[len] ( identifier[graph] [ identifier[node] ]): identifier[neighbor] = identifier[graph] [ identifier[node] ][ identifier[succ] [ identifier[node] ]] identifier[succ] [ identifier[node] ]+= literal[int] keyword[if] identifier[neighbor] keyword[not] keyword[in] identifier[seen] [ identifier[node] ]: identifier[seen] [ identifier[neighbor] ]. identifier[add] ( identifier[node] ) identifier[R] . identifier[append] ( identifier[neighbor] ) identifier[node] = identifier[neighbor] keyword[while] identifier[R] : identifier[Q] . identifier[append] ( identifier[R] . identifier[pop] ()) keyword[return] identifier[P]
def eulerian_tour_undirected(graph): """Eulerian tour on an undirected graph :param graph: directed graph in listlist format, cannot be listdict :assumes: graph is eulerian :returns: eulerian cycle as a vertex list :complexity: `O(|V|+|E|)` """ P = [] Q = [0] R = [] succ = [0] * len(graph) seen = [set() for _ in graph] while Q: node = Q.pop() P.append(node) while succ[node] < len(graph[node]): neighbor = graph[node][succ[node]] succ[node] += 1 if neighbor not in seen[node]: seen[neighbor].add(node) R.append(neighbor) node = neighbor # depends on [control=['if'], data=['neighbor']] # depends on [control=['while'], data=[]] while R: Q.append(R.pop()) # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]] return P
def endpoint(self, *args): """endpoint: Decorates a function to make it a CLI endpoint The function must be called do_<some>_<action> and accept one 'args' parameter. It will be converted into a ./cli some action commandline endpoint. A set of Arguments can be passed to the decorator, the syntax is the same than the argparse add_argument function. """ # Decorator function def decorator(func): func_name = func.__name__ func_name = func_name.replace("do_", "") actions = func_name.split("_") cmd_parser = None sub = self.subparsers wcount = 0 # For each word in the command we build the parsing tree for word in actions: parser_name = '_'.join(actions[:wcount+1]) # If the parser exist, we use it, otherwise we create it if self.parsers.has_key(parser_name): cmd_parser = self.parsers[parser_name] else: cmd_parser = sub.add_parser(word) self.parsers[parser_name] = cmd_parser # We don't want to add a subparser to the final endpoint, # since it would require a void positional argument and # fuck up the whole thing. if wcount != len(actions) - 1: # Same that with the parsers, it it exist we use it # otherwise we create it. It avoids overwrites if self.parsers.has_key("sub_"+parser_name): sub = self.parsers["sub_"+parser_name] else: sub = cmd_parser.add_subparsers() self.parsers["sub_"+parser_name] = sub wcount += 1 # Bind the endpoint to the function cmd_parser.set_defaults(func=func) # We add the arguments to the function for argument in args: if type(argument) == Argument: cmd_parser.add_argument(*argument.args, **argument.kwargs) elif type(argument) == ExclusiveGroup: group = cmd_parser.add_mutually_exclusive_group(required=argument.required) for arg in argument.args: group.add_argument(*arg.args, **arg.kwargs) # Standard inner function def inner(*args, **kwargs): return func(*args, **kwargs) return inner return decorator
def function[endpoint, parameter[self]]: constant[endpoint: Decorates a function to make it a CLI endpoint The function must be called do_<some>_<action> and accept one 'args' parameter. It will be converted into a ./cli some action commandline endpoint. A set of Arguments can be passed to the decorator, the syntax is the same than the argparse add_argument function. ] def function[decorator, parameter[func]]: variable[func_name] assign[=] name[func].__name__ variable[func_name] assign[=] call[name[func_name].replace, parameter[constant[do_], constant[]]] variable[actions] assign[=] call[name[func_name].split, parameter[constant[_]]] variable[cmd_parser] assign[=] constant[None] variable[sub] assign[=] name[self].subparsers variable[wcount] assign[=] constant[0] for taget[name[word]] in starred[name[actions]] begin[:] variable[parser_name] assign[=] call[constant[_].join, parameter[call[name[actions]][<ast.Slice object at 0x7da1b1325360>]]] if call[name[self].parsers.has_key, parameter[name[parser_name]]] begin[:] variable[cmd_parser] assign[=] call[name[self].parsers][name[parser_name]] if compare[name[wcount] not_equal[!=] binary_operation[call[name[len], parameter[name[actions]]] - constant[1]]] begin[:] if call[name[self].parsers.has_key, parameter[binary_operation[constant[sub_] + name[parser_name]]]] begin[:] variable[sub] assign[=] call[name[self].parsers][binary_operation[constant[sub_] + name[parser_name]]] <ast.AugAssign object at 0x7da18f812140> call[name[cmd_parser].set_defaults, parameter[]] for taget[name[argument]] in starred[name[args]] begin[:] if compare[call[name[type], parameter[name[argument]]] equal[==] name[Argument]] begin[:] call[name[cmd_parser].add_argument, parameter[<ast.Starred object at 0x7da20c991c60>]] def function[inner, parameter[]]: return[call[name[func], parameter[<ast.Starred object at 0x7da18c4ce530>]]] return[name[inner]] return[name[decorator]]
keyword[def] identifier[endpoint] ( identifier[self] ,* identifier[args] ): literal[string] keyword[def] identifier[decorator] ( identifier[func] ): identifier[func_name] = identifier[func] . identifier[__name__] identifier[func_name] = identifier[func_name] . identifier[replace] ( literal[string] , literal[string] ) identifier[actions] = identifier[func_name] . identifier[split] ( literal[string] ) identifier[cmd_parser] = keyword[None] identifier[sub] = identifier[self] . identifier[subparsers] identifier[wcount] = literal[int] keyword[for] identifier[word] keyword[in] identifier[actions] : identifier[parser_name] = literal[string] . identifier[join] ( identifier[actions] [: identifier[wcount] + literal[int] ]) keyword[if] identifier[self] . identifier[parsers] . identifier[has_key] ( identifier[parser_name] ): identifier[cmd_parser] = identifier[self] . identifier[parsers] [ identifier[parser_name] ] keyword[else] : identifier[cmd_parser] = identifier[sub] . identifier[add_parser] ( identifier[word] ) identifier[self] . identifier[parsers] [ identifier[parser_name] ]= identifier[cmd_parser] keyword[if] identifier[wcount] != identifier[len] ( identifier[actions] )- literal[int] : keyword[if] identifier[self] . identifier[parsers] . identifier[has_key] ( literal[string] + identifier[parser_name] ): identifier[sub] = identifier[self] . identifier[parsers] [ literal[string] + identifier[parser_name] ] keyword[else] : identifier[sub] = identifier[cmd_parser] . identifier[add_subparsers] () identifier[self] . identifier[parsers] [ literal[string] + identifier[parser_name] ]= identifier[sub] identifier[wcount] += literal[int] identifier[cmd_parser] . identifier[set_defaults] ( identifier[func] = identifier[func] ) keyword[for] identifier[argument] keyword[in] identifier[args] : keyword[if] identifier[type] ( identifier[argument] )== identifier[Argument] : identifier[cmd_parser] . identifier[add_argument] (* identifier[argument] . 
identifier[args] ,** identifier[argument] . identifier[kwargs] ) keyword[elif] identifier[type] ( identifier[argument] )== identifier[ExclusiveGroup] : identifier[group] = identifier[cmd_parser] . identifier[add_mutually_exclusive_group] ( identifier[required] = identifier[argument] . identifier[required] ) keyword[for] identifier[arg] keyword[in] identifier[argument] . identifier[args] : identifier[group] . identifier[add_argument] (* identifier[arg] . identifier[args] ,** identifier[arg] . identifier[kwargs] ) keyword[def] identifier[inner] (* identifier[args] ,** identifier[kwargs] ): keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[inner] keyword[return] identifier[decorator]
def endpoint(self, *args): """endpoint: Decorates a function to make it a CLI endpoint The function must be called do_<some>_<action> and accept one 'args' parameter. It will be converted into a ./cli some action commandline endpoint. A set of Arguments can be passed to the decorator, the syntax is the same than the argparse add_argument function. """ # Decorator function def decorator(func): func_name = func.__name__ func_name = func_name.replace('do_', '') actions = func_name.split('_') cmd_parser = None sub = self.subparsers wcount = 0 # For each word in the command we build the parsing tree for word in actions: parser_name = '_'.join(actions[:wcount + 1]) # If the parser exist, we use it, otherwise we create it if self.parsers.has_key(parser_name): cmd_parser = self.parsers[parser_name] # depends on [control=['if'], data=[]] else: cmd_parser = sub.add_parser(word) self.parsers[parser_name] = cmd_parser # We don't want to add a subparser to the final endpoint, # since it would require a void positional argument and # fuck up the whole thing. if wcount != len(actions) - 1: # Same that with the parsers, it it exist we use it # otherwise we create it. 
It avoids overwrites if self.parsers.has_key('sub_' + parser_name): sub = self.parsers['sub_' + parser_name] # depends on [control=['if'], data=[]] else: sub = cmd_parser.add_subparsers() self.parsers['sub_' + parser_name] = sub # depends on [control=['if'], data=[]] wcount += 1 # depends on [control=['for'], data=['word']] # Bind the endpoint to the function cmd_parser.set_defaults(func=func) # We add the arguments to the function for argument in args: if type(argument) == Argument: cmd_parser.add_argument(*argument.args, **argument.kwargs) # depends on [control=['if'], data=[]] elif type(argument) == ExclusiveGroup: group = cmd_parser.add_mutually_exclusive_group(required=argument.required) for arg in argument.args: group.add_argument(*arg.args, **arg.kwargs) # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['argument']] # Standard inner function def inner(*args, **kwargs): return func(*args, **kwargs) return inner return decorator
def pretokenized_tfrecord_dataset(filenames, text2self, eos_included, repeat, batch_size, sequence_length): """Reads tensor2tensor-style data files. The dataset is defined by sets of TFRecord files of TFExample protos. There should be a "targets" feature (a 1d tensor of integers) If not text2self, there should also be an "inputs" feature. Other features get ignored. eos_included specifies whether the inputs and targets were written with an EOS token, as in tensor2tensor Args: filenames: a list of strings text2self: a boolean eos_included: a boolean repeat: a boolean batch_size: an integer sequence_length: an integer Returns: A tf.data.Dataset of batches """ dataset = tf.data.TFRecordDataset(filenames, buffer_size=64 * 1024 * 1024) if repeat: dataset = dataset.repeat() keys = ["targets"] if text2self else ["inputs", "targets"] def decode_example(serialized_example): """Return a dict of Tensors from a serialized tensorflow.Example.""" data_fields = {} data_items_to_decoders = {} for k in keys: data_fields[k] = tf.VarLenFeature(tf.int64) data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k) decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder( data_fields, data_items_to_decoders) decode_items = list(sorted(data_items_to_decoders)) decoded = decoder.decode(serialized_example, items=decode_items) if not eos_included: decoded = [tf.concat([v, [1]], 0) for v in decoded] return dict(zip(decode_items, decoded)) dataset = dataset.map(decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE) return pack_and_batch(dataset, batch_size, sequence_length)
def function[pretokenized_tfrecord_dataset, parameter[filenames, text2self, eos_included, repeat, batch_size, sequence_length]]: constant[Reads tensor2tensor-style data files. The dataset is defined by sets of TFRecord files of TFExample protos. There should be a "targets" feature (a 1d tensor of integers) If not text2self, there should also be an "inputs" feature. Other features get ignored. eos_included specifies whether the inputs and targets were written with an EOS token, as in tensor2tensor Args: filenames: a list of strings text2self: a boolean eos_included: a boolean repeat: a boolean batch_size: an integer sequence_length: an integer Returns: A tf.data.Dataset of batches ] variable[dataset] assign[=] call[name[tf].data.TFRecordDataset, parameter[name[filenames]]] if name[repeat] begin[:] variable[dataset] assign[=] call[name[dataset].repeat, parameter[]] variable[keys] assign[=] <ast.IfExp object at 0x7da20c7c8790> def function[decode_example, parameter[serialized_example]]: constant[Return a dict of Tensors from a serialized tensorflow.Example.] 
variable[data_fields] assign[=] dictionary[[], []] variable[data_items_to_decoders] assign[=] dictionary[[], []] for taget[name[k]] in starred[name[keys]] begin[:] call[name[data_fields]][name[k]] assign[=] call[name[tf].VarLenFeature, parameter[name[tf].int64]] call[name[data_items_to_decoders]][name[k]] assign[=] call[name[tf].contrib.slim.tfexample_decoder.Tensor, parameter[name[k]]] variable[decoder] assign[=] call[name[tf].contrib.slim.tfexample_decoder.TFExampleDecoder, parameter[name[data_fields], name[data_items_to_decoders]]] variable[decode_items] assign[=] call[name[list], parameter[call[name[sorted], parameter[name[data_items_to_decoders]]]]] variable[decoded] assign[=] call[name[decoder].decode, parameter[name[serialized_example]]] if <ast.UnaryOp object at 0x7da20c7c9c90> begin[:] variable[decoded] assign[=] <ast.ListComp object at 0x7da20c7c84f0> return[call[name[dict], parameter[call[name[zip], parameter[name[decode_items], name[decoded]]]]]] variable[dataset] assign[=] call[name[dataset].map, parameter[name[decode_example]]] return[call[name[pack_and_batch], parameter[name[dataset], name[batch_size], name[sequence_length]]]]
keyword[def] identifier[pretokenized_tfrecord_dataset] ( identifier[filenames] , identifier[text2self] , identifier[eos_included] , identifier[repeat] , identifier[batch_size] , identifier[sequence_length] ): literal[string] identifier[dataset] = identifier[tf] . identifier[data] . identifier[TFRecordDataset] ( identifier[filenames] , identifier[buffer_size] = literal[int] * literal[int] * literal[int] ) keyword[if] identifier[repeat] : identifier[dataset] = identifier[dataset] . identifier[repeat] () identifier[keys] =[ literal[string] ] keyword[if] identifier[text2self] keyword[else] [ literal[string] , literal[string] ] keyword[def] identifier[decode_example] ( identifier[serialized_example] ): literal[string] identifier[data_fields] ={} identifier[data_items_to_decoders] ={} keyword[for] identifier[k] keyword[in] identifier[keys] : identifier[data_fields] [ identifier[k] ]= identifier[tf] . identifier[VarLenFeature] ( identifier[tf] . identifier[int64] ) identifier[data_items_to_decoders] [ identifier[k] ]= identifier[tf] . identifier[contrib] . identifier[slim] . identifier[tfexample_decoder] . identifier[Tensor] ( identifier[k] ) identifier[decoder] = identifier[tf] . identifier[contrib] . identifier[slim] . identifier[tfexample_decoder] . identifier[TFExampleDecoder] ( identifier[data_fields] , identifier[data_items_to_decoders] ) identifier[decode_items] = identifier[list] ( identifier[sorted] ( identifier[data_items_to_decoders] )) identifier[decoded] = identifier[decoder] . identifier[decode] ( identifier[serialized_example] , identifier[items] = identifier[decode_items] ) keyword[if] keyword[not] identifier[eos_included] : identifier[decoded] =[ identifier[tf] . identifier[concat] ([ identifier[v] ,[ literal[int] ]], literal[int] ) keyword[for] identifier[v] keyword[in] identifier[decoded] ] keyword[return] identifier[dict] ( identifier[zip] ( identifier[decode_items] , identifier[decoded] )) identifier[dataset] = identifier[dataset] . 
identifier[map] ( identifier[decode_example] , identifier[num_parallel_calls] = identifier[tf] . identifier[data] . identifier[experimental] . identifier[AUTOTUNE] ) keyword[return] identifier[pack_and_batch] ( identifier[dataset] , identifier[batch_size] , identifier[sequence_length] )
def pretokenized_tfrecord_dataset(filenames, text2self, eos_included, repeat, batch_size, sequence_length): """Reads tensor2tensor-style data files. The dataset is defined by sets of TFRecord files of TFExample protos. There should be a "targets" feature (a 1d tensor of integers) If not text2self, there should also be an "inputs" feature. Other features get ignored. eos_included specifies whether the inputs and targets were written with an EOS token, as in tensor2tensor Args: filenames: a list of strings text2self: a boolean eos_included: a boolean repeat: a boolean batch_size: an integer sequence_length: an integer Returns: A tf.data.Dataset of batches """ dataset = tf.data.TFRecordDataset(filenames, buffer_size=64 * 1024 * 1024) if repeat: dataset = dataset.repeat() # depends on [control=['if'], data=[]] keys = ['targets'] if text2self else ['inputs', 'targets'] def decode_example(serialized_example): """Return a dict of Tensors from a serialized tensorflow.Example.""" data_fields = {} data_items_to_decoders = {} for k in keys: data_fields[k] = tf.VarLenFeature(tf.int64) data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k) # depends on [control=['for'], data=['k']] decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(data_fields, data_items_to_decoders) decode_items = list(sorted(data_items_to_decoders)) decoded = decoder.decode(serialized_example, items=decode_items) if not eos_included: decoded = [tf.concat([v, [1]], 0) for v in decoded] # depends on [control=['if'], data=[]] return dict(zip(decode_items, decoded)) dataset = dataset.map(decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE) return pack_and_batch(dataset, batch_size, sequence_length)
def _call_cli(self, command, cwd=None, universal_newlines=False, redirect_stderr=False): """ Executes the given command, internally using Popen. The output of stdout and stderr are returned as a tuple. The returned tuple looks like: (stdout, stderr, returncode) Parameters ---------- command: string The command to execute. cwd: string Change the working directory of the program to the specified path. universal_newlines: boolean Enable the universal_newlines feature of Popen. redirect_stderr: boolean If True, redirect stderr into stdout """ command = str(command.encode("utf-8").decode("ascii", "ignore")) env = os.environ.copy() env.update(self.envvars) stderr = STDOUT if redirect_stderr else PIPE proc = Popen(shlex.split(command), stdout=PIPE, stderr=stderr, cwd=cwd, universal_newlines=universal_newlines, env=env) stdout, stderr = proc.communicate() return (stdout, stderr, proc.returncode)
def function[_call_cli, parameter[self, command, cwd, universal_newlines, redirect_stderr]]: constant[ Executes the given command, internally using Popen. The output of stdout and stderr are returned as a tuple. The returned tuple looks like: (stdout, stderr, returncode) Parameters ---------- command: string The command to execute. cwd: string Change the working directory of the program to the specified path. universal_newlines: boolean Enable the universal_newlines feature of Popen. redirect_stderr: boolean If True, redirect stderr into stdout ] variable[command] assign[=] call[name[str], parameter[call[call[name[command].encode, parameter[constant[utf-8]]].decode, parameter[constant[ascii], constant[ignore]]]]] variable[env] assign[=] call[name[os].environ.copy, parameter[]] call[name[env].update, parameter[name[self].envvars]] variable[stderr] assign[=] <ast.IfExp object at 0x7da1b1a1e8c0> variable[proc] assign[=] call[name[Popen], parameter[call[name[shlex].split, parameter[name[command]]]]] <ast.Tuple object at 0x7da1b1a1dcc0> assign[=] call[name[proc].communicate, parameter[]] return[tuple[[<ast.Name object at 0x7da1b1a1f8b0>, <ast.Name object at 0x7da1b1a1df60>, <ast.Attribute object at 0x7da1b1a1e6e0>]]]
keyword[def] identifier[_call_cli] ( identifier[self] , identifier[command] , identifier[cwd] = keyword[None] , identifier[universal_newlines] = keyword[False] , identifier[redirect_stderr] = keyword[False] ): literal[string] identifier[command] = identifier[str] ( identifier[command] . identifier[encode] ( literal[string] ). identifier[decode] ( literal[string] , literal[string] )) identifier[env] = identifier[os] . identifier[environ] . identifier[copy] () identifier[env] . identifier[update] ( identifier[self] . identifier[envvars] ) identifier[stderr] = identifier[STDOUT] keyword[if] identifier[redirect_stderr] keyword[else] identifier[PIPE] identifier[proc] = identifier[Popen] ( identifier[shlex] . identifier[split] ( identifier[command] ), identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[stderr] , identifier[cwd] = identifier[cwd] , identifier[universal_newlines] = identifier[universal_newlines] , identifier[env] = identifier[env] ) identifier[stdout] , identifier[stderr] = identifier[proc] . identifier[communicate] () keyword[return] ( identifier[stdout] , identifier[stderr] , identifier[proc] . identifier[returncode] )
def _call_cli(self, command, cwd=None, universal_newlines=False, redirect_stderr=False): """ Executes the given command, internally using Popen. The output of stdout and stderr are returned as a tuple. The returned tuple looks like: (stdout, stderr, returncode) Parameters ---------- command: string The command to execute. cwd: string Change the working directory of the program to the specified path. universal_newlines: boolean Enable the universal_newlines feature of Popen. redirect_stderr: boolean If True, redirect stderr into stdout """ command = str(command.encode('utf-8').decode('ascii', 'ignore')) env = os.environ.copy() env.update(self.envvars) stderr = STDOUT if redirect_stderr else PIPE proc = Popen(shlex.split(command), stdout=PIPE, stderr=stderr, cwd=cwd, universal_newlines=universal_newlines, env=env) (stdout, stderr) = proc.communicate() return (stdout, stderr, proc.returncode)
def convert(self, value, param, ctx):
    """Return the appropriate integer value. If a non-integer is
    provided, attempt a name-based lookup and return the primary key.
    """
    # Ensure that None is passed through without trying to
    # do anything.
    if value is None:
        return None

    # If we were already given an integer, do nothing.
    # This ensures that the convert method is idempotent.
    if isinstance(value, int):
        return value

    # Do we have a string that contains only digits?
    # If so, then convert it to an integer and return it.
    if re.match(r'^\d+$', value):
        return int(value)

    # Special case to allow disassociations
    if value == 'null':
        return value

    # Okay, we have a non-numeric string, so we need the resource class.
    # Look it up lazily here (rather than unconditionally at the top)
    # so the fast paths above skip the lookup cost and any lookup errors.
    resource = tower_cli.get_resource(self.resource_name)

    # Try to do a name-based lookup on the resource, and return back
    # the ID that we get from that.
    #
    # This has the chance of erroring out, which is fine.
    try:
        debug.log('The %s field is given as a name; '
                  'looking it up.' % param.name, header='details')
        lookup_data = {resource.identity[-1]: value}
        rel = resource.get(**lookup_data)
    except exc.MultipleResults:
        raise exc.MultipleRelatedError(
            'Cannot look up {0} exclusively by name, because multiple {0} '
            'objects exist with that name.\n'
            'Please send an ID. You can get the ID for the {0} you want '
            'with:\n'
            ' tower-cli {0} list --name "{1}"'.format(self.resource_name,
                                                     value),
        )
    except exc.TowerCLIError as ex:
        raise exc.RelatedError('Could not get %s. %s' %
                               (self.resource_name, str(ex)))

    # Done! Return the ID.
    return rel['id']
def function[convert, parameter[self, value, param, ctx]]: constant[Return the appropriate integer value. If a non-integer is provided, attempt a name-based lookup and return the primary key. ] variable[resource] assign[=] call[name[tower_cli].get_resource, parameter[name[self].resource_name]] if compare[name[value] is constant[None]] begin[:] return[constant[None]] if call[name[isinstance], parameter[name[value], name[int]]] begin[:] return[name[value]] if call[name[re].match, parameter[constant[^[\d]+$], name[value]]] begin[:] return[call[name[int], parameter[name[value]]]] if compare[name[value] equal[==] constant[null]] begin[:] return[name[value]] <ast.Try object at 0x7da20c795c30> return[call[name[rel]][constant[id]]]
keyword[def] identifier[convert] ( identifier[self] , identifier[value] , identifier[param] , identifier[ctx] ): literal[string] identifier[resource] = identifier[tower_cli] . identifier[get_resource] ( identifier[self] . identifier[resource_name] ) keyword[if] identifier[value] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[if] identifier[isinstance] ( identifier[value] , identifier[int] ): keyword[return] identifier[value] keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[value] ): keyword[return] identifier[int] ( identifier[value] ) keyword[if] identifier[value] == literal[string] : keyword[return] identifier[value] keyword[try] : identifier[debug] . identifier[log] ( literal[string] literal[string] % identifier[param] . identifier[name] , identifier[header] = literal[string] ) identifier[lookup_data] ={ identifier[resource] . identifier[identity] [- literal[int] ]: identifier[value] } identifier[rel] = identifier[resource] . identifier[get] (** identifier[lookup_data] ) keyword[except] identifier[exc] . identifier[MultipleResults] : keyword[raise] identifier[exc] . identifier[MultipleRelatedError] ( literal[string] literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[self] . identifier[resource_name] , identifier[value] ), ) keyword[except] identifier[exc] . identifier[TowerCLIError] keyword[as] identifier[ex] : keyword[raise] identifier[exc] . identifier[RelatedError] ( literal[string] % ( identifier[self] . identifier[resource_name] , identifier[str] ( identifier[ex] ))) keyword[return] identifier[rel] [ literal[string] ]
def convert(self, value, param, ctx): """Return the appropriate integer value. If a non-integer is provided, attempt a name-based lookup and return the primary key. """ resource = tower_cli.get_resource(self.resource_name) # Ensure that None is passed through without trying to # do anything. if value is None: return None # depends on [control=['if'], data=[]] # If we were already given an integer, do nothing. # This ensures that the convert method is idempotent. if isinstance(value, int): return value # depends on [control=['if'], data=[]] # Do we have a string that contains only digits? # If so, then convert it to an integer and return it. if re.match('^[\\d]+$', value): return int(value) # depends on [control=['if'], data=[]] # Special case to allow disassociations if value == 'null': return value # depends on [control=['if'], data=['value']] # Okay, we have a string. Try to do a name-based lookup on the # resource, and return back the ID that we get from that. # # This has the chance of erroring out, which is fine. try: debug.log('The %s field is given as a name; looking it up.' % param.name, header='details') lookup_data = {resource.identity[-1]: value} rel = resource.get(**lookup_data) # depends on [control=['try'], data=[]] except exc.MultipleResults: raise exc.MultipleRelatedError('Cannot look up {0} exclusively by name, because multiple {0} objects exist with that name.\nPlease send an ID. You can get the ID for the {0} you want with:\n tower-cli {0} list --name "{1}"'.format(self.resource_name, value)) # depends on [control=['except'], data=[]] except exc.TowerCLIError as ex: raise exc.RelatedError('Could not get %s. %s' % (self.resource_name, str(ex))) # depends on [control=['except'], data=['ex']] # Done! Return the ID. return rel['id']
def mb_filter(fastq, cores):
    ''' Filters umis with non-ACGT bases
    Expects formatted fastq files.

    Reads are processed in chunks of 10,000, fanned out across *cores*
    worker processes, and surviving reads are written to stdout.
    '''
    # partial() with no bound arguments is equivalent to umi_filter
    # itself; kept for interface stability with the other filters.
    filter_mb = partial(umi_filter)
    p = multiprocessing.Pool(cores)
    try:
        chunks = tz.partition_all(10000, read_fastq(fastq))
        bigchunks = tz.partition_all(cores, chunks)
        for bigchunk in bigchunks:
            for chunk in p.map(filter_mb, list(bigchunk)):
                for read in chunk:
                    sys.stdout.write(read)
    finally:
        # Release the worker processes; the original version leaked the
        # pool until interpreter exit.
        p.close()
        p.join()
def function[mb_filter, parameter[fastq, cores]]: constant[ Filters umis with non-ACGT bases Expects formatted fastq files. ] variable[filter_mb] assign[=] call[name[partial], parameter[name[umi_filter]]] variable[p] assign[=] call[name[multiprocessing].Pool, parameter[name[cores]]] variable[chunks] assign[=] call[name[tz].partition_all, parameter[constant[10000], call[name[read_fastq], parameter[name[fastq]]]]] variable[bigchunks] assign[=] call[name[tz].partition_all, parameter[name[cores], name[chunks]]] for taget[name[bigchunk]] in starred[name[bigchunks]] begin[:] for taget[name[chunk]] in starred[call[name[p].map, parameter[name[filter_mb], call[name[list], parameter[name[bigchunk]]]]]] begin[:] for taget[name[read]] in starred[name[chunk]] begin[:] call[name[sys].stdout.write, parameter[name[read]]]
keyword[def] identifier[mb_filter] ( identifier[fastq] , identifier[cores] ): literal[string] identifier[filter_mb] = identifier[partial] ( identifier[umi_filter] ) identifier[p] = identifier[multiprocessing] . identifier[Pool] ( identifier[cores] ) identifier[chunks] = identifier[tz] . identifier[partition_all] ( literal[int] , identifier[read_fastq] ( identifier[fastq] )) identifier[bigchunks] = identifier[tz] . identifier[partition_all] ( identifier[cores] , identifier[chunks] ) keyword[for] identifier[bigchunk] keyword[in] identifier[bigchunks] : keyword[for] identifier[chunk] keyword[in] identifier[p] . identifier[map] ( identifier[filter_mb] , identifier[list] ( identifier[bigchunk] )): keyword[for] identifier[read] keyword[in] identifier[chunk] : identifier[sys] . identifier[stdout] . identifier[write] ( identifier[read] )
def mb_filter(fastq, cores): """ Filters umis with non-ACGT bases Expects formatted fastq files. """ filter_mb = partial(umi_filter) p = multiprocessing.Pool(cores) chunks = tz.partition_all(10000, read_fastq(fastq)) bigchunks = tz.partition_all(cores, chunks) for bigchunk in bigchunks: for chunk in p.map(filter_mb, list(bigchunk)): for read in chunk: sys.stdout.write(read) # depends on [control=['for'], data=['read']] # depends on [control=['for'], data=['chunk']] # depends on [control=['for'], data=['bigchunk']]
def comp_listing(request, directory_slug=None):
    """
    Output the list of HTML templates and subdirectories in the COMPS_DIR
    """
    working_dir = settings.COMPS_DIR
    if directory_slug:
        working_dir = os.path.join(working_dir, directory_slug)

    entries = os.listdir(working_dir)
    # Files ending in .html become the template list; every entry that
    # is not a regular file is treated as a subdirectory.
    templates = sorted(
        entry for entry in entries
        if os.path.splitext(entry)[1] == '.html')
    directories = sorted(
        entry for entry in entries
        if not os.path.isfile(os.path.join(working_dir, entry)))

    context = {
        'directories': directories,
        'templates': templates,
        'subdirectory': directory_slug,
    }
    return render(request, "comps/comp_listing.html", context)
def function[comp_listing, parameter[request, directory_slug]]: constant[ Output the list of HTML templates and subdirectories in the COMPS_DIR ] variable[context] assign[=] dictionary[[], []] variable[working_dir] assign[=] name[settings].COMPS_DIR if name[directory_slug] begin[:] variable[working_dir] assign[=] call[name[os].path.join, parameter[name[working_dir], name[directory_slug]]] variable[dirnames] assign[=] list[[]] variable[templates] assign[=] list[[]] variable[items] assign[=] call[name[os].listdir, parameter[name[working_dir]]] variable[templates] assign[=] <ast.ListComp object at 0x7da18f09e830> variable[dirnames] assign[=] <ast.ListComp object at 0x7da20e9b15a0> call[name[templates].sort, parameter[]] call[name[dirnames].sort, parameter[]] call[name[context]][constant[directories]] assign[=] name[dirnames] call[name[context]][constant[templates]] assign[=] name[templates] call[name[context]][constant[subdirectory]] assign[=] name[directory_slug] return[call[name[render], parameter[name[request], constant[comps/comp_listing.html], name[context]]]]
keyword[def] identifier[comp_listing] ( identifier[request] , identifier[directory_slug] = keyword[None] ): literal[string] identifier[context] ={} identifier[working_dir] = identifier[settings] . identifier[COMPS_DIR] keyword[if] identifier[directory_slug] : identifier[working_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[working_dir] , identifier[directory_slug] ) identifier[dirnames] =[] identifier[templates] =[] identifier[items] = identifier[os] . identifier[listdir] ( identifier[working_dir] ) identifier[templates] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[items] keyword[if] identifier[os] . identifier[path] . identifier[splitext] ( identifier[x] )[ literal[int] ]== literal[string] ] identifier[dirnames] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[items] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[working_dir] , identifier[x] ))] identifier[templates] . identifier[sort] () identifier[dirnames] . identifier[sort] () identifier[context] [ literal[string] ]= identifier[dirnames] identifier[context] [ literal[string] ]= identifier[templates] identifier[context] [ literal[string] ]= identifier[directory_slug] keyword[return] identifier[render] ( identifier[request] , literal[string] , identifier[context] )
def comp_listing(request, directory_slug=None): """ Output the list of HTML templates and subdirectories in the COMPS_DIR """ context = {} working_dir = settings.COMPS_DIR if directory_slug: working_dir = os.path.join(working_dir, directory_slug) # depends on [control=['if'], data=[]] dirnames = [] templates = [] items = os.listdir(working_dir) templates = [x for x in items if os.path.splitext(x)[1] == '.html'] dirnames = [x for x in items if not os.path.isfile(os.path.join(working_dir, x))] templates.sort() dirnames.sort() context['directories'] = dirnames context['templates'] = templates context['subdirectory'] = directory_slug return render(request, 'comps/comp_listing.html', context)
def add_stats_plot(self):
    """Plots alignment stats as bargraph."""
    # Category order is significant for the plot legend, hence the
    # ordered mapping built from explicit pairs.
    categories = OrderedDict([
        ('species_a', {'color': '#437bb1', 'name': 'Species a'}),
        ('species_b', {'color': '#b1084c', 'name': 'Species b'}),
        ('ambiguous', {'color': '#333333', 'name': 'Ambiguous'}),
    ])
    config = {
        'id': "disambiguated_alignments",
        'title': "Disambiguate: Alignment Counts",
        'cpswitch_counts_label': "# Reads",
        'ylab': "# Reads",
    }
    self.add_section(plot=bargraph.plot(self.data, categories, config))
def function[add_stats_plot, parameter[self]]: constant[Plots alignment stats as bargraph.] variable[keys] assign[=] call[name[OrderedDict], parameter[]] call[name[keys]][constant[species_a]] assign[=] dictionary[[<ast.Constant object at 0x7da18ede60e0>, <ast.Constant object at 0x7da18c4cc130>], [<ast.Constant object at 0x7da18c4cdab0>, <ast.Constant object at 0x7da18c4ccd00>]] call[name[keys]][constant[species_b]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cce80>, <ast.Constant object at 0x7da18c4cf4f0>], [<ast.Constant object at 0x7da18c4cf700>, <ast.Constant object at 0x7da18c4ccca0>]] call[name[keys]][constant[ambiguous]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf070>, <ast.Constant object at 0x7da18c4cc730>], [<ast.Constant object at 0x7da18c4cd750>, <ast.Constant object at 0x7da18c4cfdf0>]] variable[plot_config] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf340>, <ast.Constant object at 0x7da18c4cf8e0>, <ast.Constant object at 0x7da18c4cf250>, <ast.Constant object at 0x7da18c4ce260>], [<ast.Constant object at 0x7da18c4cfb50>, <ast.Constant object at 0x7da18c4ce770>, <ast.Constant object at 0x7da18c4cc2e0>, <ast.Constant object at 0x7da18c4cd2d0>]] call[name[self].add_section, parameter[]]
keyword[def] identifier[add_stats_plot] ( identifier[self] ): literal[string] identifier[keys] = identifier[OrderedDict] () identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] } identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] } identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] } identifier[plot_config] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[self] . identifier[add_section] ( identifier[plot] = identifier[bargraph] . identifier[plot] ( identifier[self] . identifier[data] , identifier[keys] , identifier[plot_config] ) )
def add_stats_plot(self): """Plots alignment stats as bargraph.""" keys = OrderedDict() keys['species_a'] = {'color': '#437bb1', 'name': 'Species a'} keys['species_b'] = {'color': '#b1084c', 'name': 'Species b'} keys['ambiguous'] = {'color': '#333333', 'name': 'Ambiguous'} plot_config = {'id': 'disambiguated_alignments', 'title': 'Disambiguate: Alignment Counts', 'cpswitch_counts_label': '# Reads', 'ylab': '# Reads'} self.add_section(plot=bargraph.plot(self.data, keys, plot_config))
def get_identity(self):
    """ The assertion can contain zero or more attributeStatements

    Walks every assertion (and any advice assertions it carries) and
    merges their attribute statements into a single dict. Returns an
    empty dict when no attribute statements are present.
    """
    ava = {}
    for _assertion in self.assertions:
        if _assertion.advice:
            if _assertion.advice.assertion:
                for tmp_assertion in _assertion.advice.assertion:
                    if tmp_assertion.attribute_statement:
                        # Advice assertions are expected to carry at most
                        # one attribute statement.
                        assert len(tmp_assertion.attribute_statement) == 1
                        ava.update(self.read_attribute_statement(
                            tmp_assertion.attribute_statement[0]))
        if _assertion.attribute_statement:
            # Fixed: log the count for the assertion currently being
            # processed (the original read self.assertion, which may not
            # exist or may not match _assertion).
            logger.debug("Assertion contains %s attribute statement(s)",
                         len(_assertion.attribute_statement))
            for _attr_statem in _assertion.attribute_statement:
                logger.debug("Attribute Statement: %s" % (_attr_statem,))
                ava.update(self.read_attribute_statement(_attr_statem))
    if not ava:
        logger.debug("Assertion contains no attribute statements")
    return ava
def function[get_identity, parameter[self]]: constant[ The assertion can contain zero or more attributeStatements ] variable[ava] assign[=] dictionary[[], []] for taget[name[_assertion]] in starred[name[self].assertions] begin[:] if name[_assertion].advice begin[:] if name[_assertion].advice.assertion begin[:] for taget[name[tmp_assertion]] in starred[name[_assertion].advice.assertion] begin[:] if name[tmp_assertion].attribute_statement begin[:] assert[compare[call[name[len], parameter[name[tmp_assertion].attribute_statement]] equal[==] constant[1]]] call[name[ava].update, parameter[call[name[self].read_attribute_statement, parameter[call[name[tmp_assertion].attribute_statement][constant[0]]]]]] if name[_assertion].attribute_statement begin[:] call[name[logger].debug, parameter[constant[Assertion contains %s attribute statement(s)], call[name[len], parameter[name[self].assertion.attribute_statement]]]] for taget[name[_attr_statem]] in starred[name[_assertion].attribute_statement] begin[:] call[name[logger].debug, parameter[binary_operation[constant[Attribute Statement: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204961e10>]]]]] call[name[ava].update, parameter[call[name[self].read_attribute_statement, parameter[name[_attr_statem]]]]] if <ast.UnaryOp object at 0x7da2049621a0> begin[:] call[name[logger].debug, parameter[constant[Assertion contains no attribute statements]]] return[name[ava]]
keyword[def] identifier[get_identity] ( identifier[self] ): literal[string] identifier[ava] ={} keyword[for] identifier[_assertion] keyword[in] identifier[self] . identifier[assertions] : keyword[if] identifier[_assertion] . identifier[advice] : keyword[if] identifier[_assertion] . identifier[advice] . identifier[assertion] : keyword[for] identifier[tmp_assertion] keyword[in] identifier[_assertion] . identifier[advice] . identifier[assertion] : keyword[if] identifier[tmp_assertion] . identifier[attribute_statement] : keyword[assert] identifier[len] ( identifier[tmp_assertion] . identifier[attribute_statement] )== literal[int] identifier[ava] . identifier[update] ( identifier[self] . identifier[read_attribute_statement] ( identifier[tmp_assertion] . identifier[attribute_statement] [ literal[int] ])) keyword[if] identifier[_assertion] . identifier[attribute_statement] : identifier[logger] . identifier[debug] ( literal[string] , ( identifier[len] ( identifier[self] . identifier[assertion] . identifier[attribute_statement] ))) keyword[for] identifier[_attr_statem] keyword[in] identifier[_assertion] . identifier[attribute_statement] : identifier[logger] . identifier[debug] ( literal[string] %( identifier[_attr_statem] ,)) identifier[ava] . identifier[update] ( identifier[self] . identifier[read_attribute_statement] ( identifier[_attr_statem] )) keyword[if] keyword[not] identifier[ava] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[ava]
def get_identity(self): """ The assertion can contain zero or more attributeStatements """ ava = {} for _assertion in self.assertions: if _assertion.advice: if _assertion.advice.assertion: for tmp_assertion in _assertion.advice.assertion: if tmp_assertion.attribute_statement: assert len(tmp_assertion.attribute_statement) == 1 ava.update(self.read_attribute_statement(tmp_assertion.attribute_statement[0])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tmp_assertion']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if _assertion.attribute_statement: logger.debug('Assertion contains %s attribute statement(s)', len(self.assertion.attribute_statement)) for _attr_statem in _assertion.attribute_statement: logger.debug('Attribute Statement: %s' % (_attr_statem,)) ava.update(self.read_attribute_statement(_attr_statem)) # depends on [control=['for'], data=['_attr_statem']] # depends on [control=['if'], data=[]] if not ava: logger.debug('Assertion contains no attribute statements') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_assertion']] return ava
def setbpf(self, bpf):
    """Set number of bits per float output.

    Caps *bpf* at ``self.BPF`` and derives ``self._rng_n``, the number
    of RNG words (``RNG_RANGE_BITS`` bits each) needed to cover the
    requested precision.
    """
    self._bpf = min(bpf, self.BPF)
    # Integer ceiling division. The original used float division via
    # int(x / y), which can lose precision for very large operands.
    self._rng_n = (self._bpf + self.RNG_RANGE_BITS - 1) // self.RNG_RANGE_BITS
def function[setbpf, parameter[self, bpf]]: constant[Set number of bits per float output] name[self]._bpf assign[=] call[name[min], parameter[name[bpf], name[self].BPF]] name[self]._rng_n assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[name[self]._bpf + name[self].RNG_RANGE_BITS] - constant[1]] / name[self].RNG_RANGE_BITS]]]
keyword[def] identifier[setbpf] ( identifier[self] , identifier[bpf] ): literal[string] identifier[self] . identifier[_bpf] = identifier[min] ( identifier[bpf] , identifier[self] . identifier[BPF] ) identifier[self] . identifier[_rng_n] = identifier[int] (( identifier[self] . identifier[_bpf] + identifier[self] . identifier[RNG_RANGE_BITS] - literal[int] )/ identifier[self] . identifier[RNG_RANGE_BITS] )
def setbpf(self, bpf): """Set number of bits per float output""" self._bpf = min(bpf, self.BPF) self._rng_n = int((self._bpf + self.RNG_RANGE_BITS - 1) / self.RNG_RANGE_BITS)
def on(self, event, handler):
    """Attaches the handler to the specified event.

    @param event: event to attach the handler to. Any object can be
        passed as event, but string is preferable. If qcore.EnumBase
        instance is passed, its name is used as event key.
    @param handler: event handler.
    @return: self, so calls like this can be chained together.
    """
    self.get_or_create(event).subscribe(handler)
    return self
def function[on, parameter[self, event, handler]]: constant[Attaches the handler to the specified event. @param event: event to attach the handler to. Any object can be passed as event, but string is preferable. If qcore.EnumBase instance is passed, its name is used as event key. @param handler: event handler. @return: self, so calls like this can be chained together. ] variable[event_hook] assign[=] call[name[self].get_or_create, parameter[name[event]]] call[name[event_hook].subscribe, parameter[name[handler]]] return[name[self]]
keyword[def] identifier[on] ( identifier[self] , identifier[event] , identifier[handler] ): literal[string] identifier[event_hook] = identifier[self] . identifier[get_or_create] ( identifier[event] ) identifier[event_hook] . identifier[subscribe] ( identifier[handler] ) keyword[return] identifier[self]
def on(self, event, handler): """Attaches the handler to the specified event. @param event: event to attach the handler to. Any object can be passed as event, but string is preferable. If qcore.EnumBase instance is passed, its name is used as event key. @param handler: event handler. @return: self, so calls like this can be chained together. """ event_hook = self.get_or_create(event) event_hook.subscribe(handler) return self
def _get_resources_to_remove(resource, template): """ Given a resource and a template being removed, identify the resource attribtes which can be removed. """ type_ids = [tmpltype.id for tmpltype in template.templatetypes] node_attr_ids = dict([(ra.attr_id, ra) for ra in resource.attributes]) attrs_to_remove = [] attrs_to_keep = [] for nt in resource.types: if nt.templatetype.id in type_ids: for ta in nt.templatetype.typeattrs: if node_attr_ids.get(ta.attr_id): attrs_to_remove.append(node_attr_ids[ta.attr_id]) else: for ta in nt.templatetype.typeattrs: if node_attr_ids.get(ta.attr_id): attrs_to_keep.append(node_attr_ids[ta.attr_id]) #remove any of the attributes marked for deletion as they are #marked for keeping based on being in another type. final_attrs_to_remove = set(attrs_to_remove) - set(attrs_to_keep) return list(final_attrs_to_remove)
def function[_get_resources_to_remove, parameter[resource, template]]: constant[ Given a resource and a template being removed, identify the resource attribtes which can be removed. ] variable[type_ids] assign[=] <ast.ListComp object at 0x7da18f811210> variable[node_attr_ids] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18f812a40>]] variable[attrs_to_remove] assign[=] list[[]] variable[attrs_to_keep] assign[=] list[[]] for taget[name[nt]] in starred[name[resource].types] begin[:] if compare[name[nt].templatetype.id in name[type_ids]] begin[:] for taget[name[ta]] in starred[name[nt].templatetype.typeattrs] begin[:] if call[name[node_attr_ids].get, parameter[name[ta].attr_id]] begin[:] call[name[attrs_to_remove].append, parameter[call[name[node_attr_ids]][name[ta].attr_id]]] variable[final_attrs_to_remove] assign[=] binary_operation[call[name[set], parameter[name[attrs_to_remove]]] - call[name[set], parameter[name[attrs_to_keep]]]] return[call[name[list], parameter[name[final_attrs_to_remove]]]]
keyword[def] identifier[_get_resources_to_remove] ( identifier[resource] , identifier[template] ): literal[string] identifier[type_ids] =[ identifier[tmpltype] . identifier[id] keyword[for] identifier[tmpltype] keyword[in] identifier[template] . identifier[templatetypes] ] identifier[node_attr_ids] = identifier[dict] ([( identifier[ra] . identifier[attr_id] , identifier[ra] ) keyword[for] identifier[ra] keyword[in] identifier[resource] . identifier[attributes] ]) identifier[attrs_to_remove] =[] identifier[attrs_to_keep] =[] keyword[for] identifier[nt] keyword[in] identifier[resource] . identifier[types] : keyword[if] identifier[nt] . identifier[templatetype] . identifier[id] keyword[in] identifier[type_ids] : keyword[for] identifier[ta] keyword[in] identifier[nt] . identifier[templatetype] . identifier[typeattrs] : keyword[if] identifier[node_attr_ids] . identifier[get] ( identifier[ta] . identifier[attr_id] ): identifier[attrs_to_remove] . identifier[append] ( identifier[node_attr_ids] [ identifier[ta] . identifier[attr_id] ]) keyword[else] : keyword[for] identifier[ta] keyword[in] identifier[nt] . identifier[templatetype] . identifier[typeattrs] : keyword[if] identifier[node_attr_ids] . identifier[get] ( identifier[ta] . identifier[attr_id] ): identifier[attrs_to_keep] . identifier[append] ( identifier[node_attr_ids] [ identifier[ta] . identifier[attr_id] ]) identifier[final_attrs_to_remove] = identifier[set] ( identifier[attrs_to_remove] )- identifier[set] ( identifier[attrs_to_keep] ) keyword[return] identifier[list] ( identifier[final_attrs_to_remove] )
def _get_resources_to_remove(resource, template): """ Given a resource and a template being removed, identify the resource attribtes which can be removed. """ type_ids = [tmpltype.id for tmpltype in template.templatetypes] node_attr_ids = dict([(ra.attr_id, ra) for ra in resource.attributes]) attrs_to_remove = [] attrs_to_keep = [] for nt in resource.types: if nt.templatetype.id in type_ids: for ta in nt.templatetype.typeattrs: if node_attr_ids.get(ta.attr_id): attrs_to_remove.append(node_attr_ids[ta.attr_id]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ta']] # depends on [control=['if'], data=[]] else: for ta in nt.templatetype.typeattrs: if node_attr_ids.get(ta.attr_id): attrs_to_keep.append(node_attr_ids[ta.attr_id]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ta']] # depends on [control=['for'], data=['nt']] #remove any of the attributes marked for deletion as they are #marked for keeping based on being in another type. final_attrs_to_remove = set(attrs_to_remove) - set(attrs_to_keep) return list(final_attrs_to_remove)
def list_attached_partitions(self, name=None, status=None):
    """
    Return the partitions to which this storage group is currently
    attached, optionally filtered by partition name and status.

    Authorization requirements:

    * Object-access permission to this storage group.
    * Task permission to the "Configure Storage - System Programmer"
      task.

    Parameters:

      name (:term:`string`): Filter pattern (regular expression) to
        limit returned partitions to those with a matching name, or
        `None` for no name filtering.

      status (:term:`string`): Filter string to limit returned
        partitions to those with a matching status (must be a valid
        partition status property value), or `None` for no status
        filtering.

    Returns:

      List of :class:`~zhmcclient.Partition` objects for the attached
      partitions, each with a minimal set of properties ('object-id',
      'name', 'status').

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    # Assemble the query string from whichever filters were supplied.
    query_parms = []
    if name is not None:
        self.manager._append_query_parms(query_parms, 'name', name)
    if status is not None:
        self.manager._append_query_parms(query_parms, 'status', status)
    query = '&'.join(query_parms)
    if query:
        query = '?{}'.format(query)

    uri = '{}/operations/get-partitions{}'.format(self.uri, query)
    result = self.manager.session.get(uri)

    # Wrap each returned property dict in a Partition resource object
    # obtained from this storage group's CPC.
    partition_manager = self.cpc.partitions
    return [
        partition_manager.resource_object(props['object-uri'], props)
        for props in result['partitions']
    ]
def function[list_attached_partitions, parameter[self, name, status]]: constant[ Return the partitions to which this storage group is currently attached, optionally filtered by partition name and status. Authorization requirements: * Object-access permission to this storage group. * Task permission to the "Configure Storage - System Programmer" task. Parameters: name (:term:`string`): Filter pattern (regular expression) to limit returned partitions to those that have a matching name. If `None`, no filtering for the partition name takes place. status (:term:`string`): Filter string to limit returned partitions to those that have a matching status. The value must be a valid partition status property value. If `None`, no filtering for the partition status takes place. Returns: List of :class:`~zhmcclient.Partition` objects representing the partitions to whivch this storage group is currently attached, with a minimal set of properties ('object-id', 'name', 'status'). Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` ] variable[query_parms] assign[=] list[[]] if compare[name[name] is_not constant[None]] begin[:] call[name[self].manager._append_query_parms, parameter[name[query_parms], constant[name], name[name]]] if compare[name[status] is_not constant[None]] begin[:] call[name[self].manager._append_query_parms, parameter[name[query_parms], constant[status], name[status]]] variable[query_parms_str] assign[=] call[constant[&].join, parameter[name[query_parms]]] if name[query_parms_str] begin[:] variable[query_parms_str] assign[=] call[constant[?{}].format, parameter[name[query_parms_str]]] variable[uri] assign[=] call[constant[{}/operations/get-partitions{}].format, parameter[name[self].uri, name[query_parms_str]]] variable[sg_cpc] assign[=] name[self].cpc variable[part_mgr] assign[=] name[sg_cpc].partitions variable[result] assign[=] call[name[self].manager.session.get, 
parameter[name[uri]]] variable[props_list] assign[=] call[name[result]][constant[partitions]] variable[part_list] assign[=] list[[]] for taget[name[props]] in starred[name[props_list]] begin[:] variable[part] assign[=] call[name[part_mgr].resource_object, parameter[call[name[props]][constant[object-uri]], name[props]]] call[name[part_list].append, parameter[name[part]]] return[name[part_list]]
keyword[def] identifier[list_attached_partitions] ( identifier[self] , identifier[name] = keyword[None] , identifier[status] = keyword[None] ): literal[string] identifier[query_parms] =[] keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[manager] . identifier[_append_query_parms] ( identifier[query_parms] , literal[string] , identifier[name] ) keyword[if] identifier[status] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[manager] . identifier[_append_query_parms] ( identifier[query_parms] , literal[string] , identifier[status] ) identifier[query_parms_str] = literal[string] . identifier[join] ( identifier[query_parms] ) keyword[if] identifier[query_parms_str] : identifier[query_parms_str] = literal[string] . identifier[format] ( identifier[query_parms_str] ) identifier[uri] = literal[string] . identifier[format] ( identifier[self] . identifier[uri] , identifier[query_parms_str] ) identifier[sg_cpc] = identifier[self] . identifier[cpc] identifier[part_mgr] = identifier[sg_cpc] . identifier[partitions] identifier[result] = identifier[self] . identifier[manager] . identifier[session] . identifier[get] ( identifier[uri] ) identifier[props_list] = identifier[result] [ literal[string] ] identifier[part_list] =[] keyword[for] identifier[props] keyword[in] identifier[props_list] : identifier[part] = identifier[part_mgr] . identifier[resource_object] ( identifier[props] [ literal[string] ], identifier[props] ) identifier[part_list] . identifier[append] ( identifier[part] ) keyword[return] identifier[part_list]
def list_attached_partitions(self, name=None, status=None): """ Return the partitions to which this storage group is currently attached, optionally filtered by partition name and status. Authorization requirements: * Object-access permission to this storage group. * Task permission to the "Configure Storage - System Programmer" task. Parameters: name (:term:`string`): Filter pattern (regular expression) to limit returned partitions to those that have a matching name. If `None`, no filtering for the partition name takes place. status (:term:`string`): Filter string to limit returned partitions to those that have a matching status. The value must be a valid partition status property value. If `None`, no filtering for the partition status takes place. Returns: List of :class:`~zhmcclient.Partition` objects representing the partitions to whivch this storage group is currently attached, with a minimal set of properties ('object-id', 'name', 'status'). Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ query_parms = [] if name is not None: self.manager._append_query_parms(query_parms, 'name', name) # depends on [control=['if'], data=['name']] if status is not None: self.manager._append_query_parms(query_parms, 'status', status) # depends on [control=['if'], data=['status']] query_parms_str = '&'.join(query_parms) if query_parms_str: query_parms_str = '?{}'.format(query_parms_str) # depends on [control=['if'], data=[]] uri = '{}/operations/get-partitions{}'.format(self.uri, query_parms_str) sg_cpc = self.cpc part_mgr = sg_cpc.partitions result = self.manager.session.get(uri) props_list = result['partitions'] part_list = [] for props in props_list: part = part_mgr.resource_object(props['object-uri'], props) part_list.append(part) # depends on [control=['for'], data=['props']] return part_list
def update_lux(self, extend=0):
        """Read the TSL2550D ambient-light sensor and update ``self.lux``.

        Runs one ADC conversion on each of the sensor's two channels and
        converts the pair of counts into a lux estimate, stored (rounded to
        3 decimals) in ``self.lux``; the raw Ch1/Ch0 ratio is stored in
        ``self.light_ratio``.

        Note that this method contains approximately 1 second of total delay
        in standard mode.  This delay is necessary in order to obtain full
        resolution compensated lux values.  Alternatively, the device can be
        put in extended mode (``extend=1``), which drops some resolution in
        favor of shorter delays.

        Args:
            extend: 1 selects extended mode (shorter conversion delay, counts
                scaled by 5, lower resolution); any other value selects
                standard mode.

        Returns:
            The result of ``TCA_select(..., "off")`` after deselecting the
            I2C mux channel.

        Raises:
            SensorError: if the sensor does not report itself powered up.
        """
        DEVICE_REG_OUT = 0x1d  # NOTE(review): unused in this method — presumably a register address; confirm or remove.
        LUX_PWR_ON = 0x03      # power-up command; also the expected status readback
        if extend == 1:
            # Extended mode: shorter integration time, counts scaled by 5.
            LUX_MODE = 0x1d
            delay = .08
            scale = 5
        else:
            # Standard mode: full resolution, longer integration time.
            LUX_MODE = 0x18
            delay = .4
            scale = 1
        LUX_READ_CH0 = 0x43  # command: read channel 0 ADC result
        LUX_READ_CH1 = 0x83  # command: read channel 1 ADC result
        # Select correct I2C mux channel on TCA module
        TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
        # Make sure lux sensor is powered up.
        SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
        lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr,
                                                  LUX_PWR_ON)
        # Check for successful powerup
        if (lux_on == LUX_PWR_ON):
            # Send command to initiate ADC on each channel
            # Read each channel after the new data is ready
            SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
            SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
            sleep(delay)
            adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
            count0 = get_lux_count(adc_ch0) * scale  # 5x for extended mode
            SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
            sleep(delay)
            adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
            count1 = get_lux_count(adc_ch1) * scale  # 5x for extended mode
            # Empirical lux approximation from the two channel counts.
            # NOTE(review): divides by (count0 - count1) — this raises
            # ZeroDivisionError if both channels return equal counts;
            # confirm whether that can occur in practice.
            ratio = count1 / (count0 - count1)
            lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
            self.light_ratio = float(count1)/float(count0)
            print("Light ratio Ch1/Ch0: ", self.light_ratio)
            self.lux = round(lux, 3)
            # Deselect the mux channel before returning.
            return TCA_select(SensorCluster.bus, self.mux_addr, "off")
        else:
            raise SensorError("The lux sensor is powered down.")
def function[update_lux, parameter[self, extend]]: constant[ Communicates with the TSL2550D light sensor and returns a lux value. Note that this method contains approximately 1 second of total delay. This delay is necessary in order to obtain full resolution compensated lux values. Alternatively, the device could be put in extended mode, which drops some resolution in favor of shorter delays. ] variable[DEVICE_REG_OUT] assign[=] constant[29] variable[LUX_PWR_ON] assign[=] constant[3] if compare[name[extend] equal[==] constant[1]] begin[:] variable[LUX_MODE] assign[=] constant[29] variable[delay] assign[=] constant[0.08] variable[scale] assign[=] constant[5] variable[LUX_READ_CH0] assign[=] constant[67] variable[LUX_READ_CH1] assign[=] constant[131] call[name[TCA_select], parameter[name[SensorCluster].bus, name[self].mux_addr, name[SensorCluster].lux_chan]] call[name[SensorCluster].bus.write_byte, parameter[name[SensorCluster].lux_addr, name[LUX_PWR_ON]]] variable[lux_on] assign[=] call[name[SensorCluster].bus.read_byte_data, parameter[name[SensorCluster].lux_addr, name[LUX_PWR_ON]]] if compare[name[lux_on] equal[==] name[LUX_PWR_ON]] begin[:] call[name[SensorCluster].bus.write_byte, parameter[name[SensorCluster].lux_addr, name[LUX_MODE]]] call[name[SensorCluster].bus.write_byte, parameter[name[SensorCluster].lux_addr, name[LUX_READ_CH0]]] call[name[sleep], parameter[name[delay]]] variable[adc_ch0] assign[=] call[name[SensorCluster].bus.read_byte, parameter[name[SensorCluster].lux_addr]] variable[count0] assign[=] binary_operation[call[name[get_lux_count], parameter[name[adc_ch0]]] * name[scale]] call[name[SensorCluster].bus.write_byte, parameter[name[SensorCluster].lux_addr, name[LUX_READ_CH1]]] call[name[sleep], parameter[name[delay]]] variable[adc_ch1] assign[=] call[name[SensorCluster].bus.read_byte, parameter[name[SensorCluster].lux_addr]] variable[count1] assign[=] binary_operation[call[name[get_lux_count], parameter[name[adc_ch1]]] * name[scale]] 
variable[ratio] assign[=] binary_operation[name[count1] / binary_operation[name[count0] - name[count1]]] variable[lux] assign[=] binary_operation[binary_operation[binary_operation[name[count0] - name[count1]] * constant[0.39]] * binary_operation[name[e] ** binary_operation[<ast.UnaryOp object at 0x7da204345f30> * binary_operation[name[ratio] ** constant[2]]]]] name[self].light_ratio assign[=] binary_operation[call[name[float], parameter[name[count1]]] / call[name[float], parameter[name[count0]]]] call[name[print], parameter[constant[Light ratio Ch1/Ch0: ], name[self].light_ratio]] name[self].lux assign[=] call[name[round], parameter[name[lux], constant[3]]] return[call[name[TCA_select], parameter[name[SensorCluster].bus, name[self].mux_addr, constant[off]]]]
keyword[def] identifier[update_lux] ( identifier[self] , identifier[extend] = literal[int] ): literal[string] identifier[DEVICE_REG_OUT] = literal[int] identifier[LUX_PWR_ON] = literal[int] keyword[if] identifier[extend] == literal[int] : identifier[LUX_MODE] = literal[int] identifier[delay] = literal[int] identifier[scale] = literal[int] keyword[else] : identifier[LUX_MODE] = literal[int] identifier[delay] = literal[int] identifier[scale] = literal[int] identifier[LUX_READ_CH0] = literal[int] identifier[LUX_READ_CH1] = literal[int] identifier[TCA_select] ( identifier[SensorCluster] . identifier[bus] , identifier[self] . identifier[mux_addr] , identifier[SensorCluster] . identifier[lux_chan] ) identifier[SensorCluster] . identifier[bus] . identifier[write_byte] ( identifier[SensorCluster] . identifier[lux_addr] , identifier[LUX_PWR_ON] ) identifier[lux_on] = identifier[SensorCluster] . identifier[bus] . identifier[read_byte_data] ( identifier[SensorCluster] . identifier[lux_addr] , identifier[LUX_PWR_ON] ) keyword[if] ( identifier[lux_on] == identifier[LUX_PWR_ON] ): identifier[SensorCluster] . identifier[bus] . identifier[write_byte] ( identifier[SensorCluster] . identifier[lux_addr] , identifier[LUX_MODE] ) identifier[SensorCluster] . identifier[bus] . identifier[write_byte] ( identifier[SensorCluster] . identifier[lux_addr] , identifier[LUX_READ_CH0] ) identifier[sleep] ( identifier[delay] ) identifier[adc_ch0] = identifier[SensorCluster] . identifier[bus] . identifier[read_byte] ( identifier[SensorCluster] . identifier[lux_addr] ) identifier[count0] = identifier[get_lux_count] ( identifier[adc_ch0] )* identifier[scale] identifier[SensorCluster] . identifier[bus] . identifier[write_byte] ( identifier[SensorCluster] . identifier[lux_addr] , identifier[LUX_READ_CH1] ) identifier[sleep] ( identifier[delay] ) identifier[adc_ch1] = identifier[SensorCluster] . identifier[bus] . identifier[read_byte] ( identifier[SensorCluster] . 
identifier[lux_addr] ) identifier[count1] = identifier[get_lux_count] ( identifier[adc_ch1] )* identifier[scale] identifier[ratio] = identifier[count1] /( identifier[count0] - identifier[count1] ) identifier[lux] =( identifier[count0] - identifier[count1] )* literal[int] * identifier[e] **(- literal[int] *( identifier[ratio] ** literal[int] )) identifier[self] . identifier[light_ratio] = identifier[float] ( identifier[count1] )/ identifier[float] ( identifier[count0] ) identifier[print] ( literal[string] , identifier[self] . identifier[light_ratio] ) identifier[self] . identifier[lux] = identifier[round] ( identifier[lux] , literal[int] ) keyword[return] identifier[TCA_select] ( identifier[SensorCluster] . identifier[bus] , identifier[self] . identifier[mux_addr] , literal[string] ) keyword[else] : keyword[raise] identifier[SensorError] ( literal[string] )
def update_lux(self, extend=0): """ Communicates with the TSL2550D light sensor and returns a lux value. Note that this method contains approximately 1 second of total delay. This delay is necessary in order to obtain full resolution compensated lux values. Alternatively, the device could be put in extended mode, which drops some resolution in favor of shorter delays. """ DEVICE_REG_OUT = 29 LUX_PWR_ON = 3 if extend == 1: LUX_MODE = 29 delay = 0.08 scale = 5 # depends on [control=['if'], data=[]] else: LUX_MODE = 24 delay = 0.4 scale = 1 LUX_READ_CH0 = 67 LUX_READ_CH1 = 131 # Select correct I2C mux channel on TCA module TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan) # Make sure lux sensor is powered up. SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON) lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON) # Check for successful powerup if lux_on == LUX_PWR_ON: # Send command to initiate ADC on each channel # Read each channel after the new data is ready SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE) SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0) sleep(delay) adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr) count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1) sleep(delay) adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr) count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode ratio = count1 / (count0 - count1) lux = (count0 - count1) * 0.39 * e ** (-0.181 * ratio ** 2) self.light_ratio = float(count1) / float(count0) print('Light ratio Ch1/Ch0: ', self.light_ratio) self.lux = round(lux, 3) return TCA_select(SensorCluster.bus, self.mux_addr, 'off') # depends on [control=['if'], data=[]] else: raise SensorError('The lux sensor is powered down.')
def _VarintBytes(value):
    """Encode *value* as a protobuf varint and return the raw bytes.

    Only called at startup time, so it does not need to be fast.
    """
    chunks = []
    _EncodeVarint(chunks.append, value)
    return b"".join(chunks)
def function[_VarintBytes, parameter[value]]: constant[Encode the given integer as a varint and return the bytes. This is only called at startup time so it doesn't need to be fast.] variable[pieces] assign[=] list[[]] call[name[_EncodeVarint], parameter[name[pieces].append, name[value]]] return[call[constant[b''].join, parameter[name[pieces]]]]
keyword[def] identifier[_VarintBytes] ( identifier[value] ): literal[string] identifier[pieces] =[] identifier[_EncodeVarint] ( identifier[pieces] . identifier[append] , identifier[value] ) keyword[return] literal[string] . identifier[join] ( identifier[pieces] )
def _VarintBytes(value): """Encode the given integer as a varint and return the bytes. This is only called at startup time so it doesn't need to be fast.""" pieces = [] _EncodeVarint(pieces.append, value) return b''.join(pieces)
def create_config(allow_insecure_config_file=False):
    """Build the DDSClient configuration.

    Layers the global config (/etc/ddsclient.conf) with the user config
    (~/.ddsclient.conf or $DDSCLIENT_CONF).

    :param allow_insecure_config_file: bool: when true, skip the permission
        check on ~/.ddsclient.
    :return: Config with the configuration to use for DDSClient.
    """
    cfg = Config()
    cfg.add_properties(GLOBAL_CONFIG_FILENAME)
    user_conf = get_user_config_filename()
    needs_permission_check = (user_conf == LOCAL_CONFIG_FILENAME
                              and not allow_insecure_config_file)
    if needs_permission_check:
        # Refuse a world-readable ~/.ddsclient unless explicitly allowed.
        verify_file_private(user_conf)
    cfg.add_properties(user_conf)
    return cfg
def function[create_config, parameter[allow_insecure_config_file]]: constant[ Create config based on /etc/ddsclient.conf and ~/.ddsclient.conf($DDSCLIENT_CONF) :param allow_insecure_config_file: bool: when true we will not check ~/.ddsclient permissions. :return: Config with the configuration to use for DDSClient. ] variable[config] assign[=] call[name[Config], parameter[]] call[name[config].add_properties, parameter[name[GLOBAL_CONFIG_FILENAME]]] variable[user_config_filename] assign[=] call[name[get_user_config_filename], parameter[]] if <ast.BoolOp object at 0x7da18ede5e70> begin[:] call[name[verify_file_private], parameter[name[user_config_filename]]] call[name[config].add_properties, parameter[name[user_config_filename]]] return[name[config]]
keyword[def] identifier[create_config] ( identifier[allow_insecure_config_file] = keyword[False] ): literal[string] identifier[config] = identifier[Config] () identifier[config] . identifier[add_properties] ( identifier[GLOBAL_CONFIG_FILENAME] ) identifier[user_config_filename] = identifier[get_user_config_filename] () keyword[if] identifier[user_config_filename] == identifier[LOCAL_CONFIG_FILENAME] keyword[and] keyword[not] identifier[allow_insecure_config_file] : identifier[verify_file_private] ( identifier[user_config_filename] ) identifier[config] . identifier[add_properties] ( identifier[user_config_filename] ) keyword[return] identifier[config]
def create_config(allow_insecure_config_file=False): """ Create config based on /etc/ddsclient.conf and ~/.ddsclient.conf($DDSCLIENT_CONF) :param allow_insecure_config_file: bool: when true we will not check ~/.ddsclient permissions. :return: Config with the configuration to use for DDSClient. """ config = Config() config.add_properties(GLOBAL_CONFIG_FILENAME) user_config_filename = get_user_config_filename() if user_config_filename == LOCAL_CONFIG_FILENAME and (not allow_insecure_config_file): verify_file_private(user_config_filename) # depends on [control=['if'], data=[]] config.add_properties(user_config_filename) return config
async def sysinfo(dev: Device):
    """Print out system information (version, MAC addrs)."""
    system_info = await dev.get_system_info()
    click.echo(system_info)
    # Interface info is fetched only after the system info has been printed,
    # preserving the original output interleaving.
    interface_info = await dev.get_interface_information()
    click.echo(interface_info)
<ast.AsyncFunctionDef object at 0x7da20cabf730>
keyword[async] keyword[def] identifier[sysinfo] ( identifier[dev] : identifier[Device] ): literal[string] identifier[click] . identifier[echo] ( keyword[await] identifier[dev] . identifier[get_system_info] ()) identifier[click] . identifier[echo] ( keyword[await] identifier[dev] . identifier[get_interface_information] ())
async def sysinfo(dev: Device): """Print out system information (version, MAC addrs).""" click.echo(await dev.get_system_info()) click.echo(await dev.get_interface_information())
def v1_tag_list(tags, tag=''):
    '''List all direct children tags of the given parent.

    If no parent is specified, then list all top-level tags.

    The JSON returned for ``/dossier/v1/tags/list/foo/bar`` might look
    like this:

    .. code-block:: python

        {
            'children': [
                {'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'},
            ]
        }
    '''
    parent = tag.decode('utf-8').strip()
    return {'children': tags.list(parent)}
def function[v1_tag_list, parameter[tags, tag]]: constant[List all direct children tags of the given parent. If no parent is specified, then list all top-level tags. The JSON returned for ``/dossier/v1/tags/list/foo/bar`` might look like this: .. code-block:: python { 'children': [ {'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'}, ] } ] variable[tag] assign[=] call[call[name[tag].decode, parameter[constant[utf-8]]].strip, parameter[]] return[dictionary[[<ast.Constant object at 0x7da1b1463be0>], [<ast.Call object at 0x7da1b1461540>]]]
keyword[def] identifier[v1_tag_list] ( identifier[tags] , identifier[tag] = literal[string] ): literal[string] identifier[tag] = identifier[tag] . identifier[decode] ( literal[string] ). identifier[strip] () keyword[return] { literal[string] : identifier[tags] . identifier[list] ( identifier[tag] )}
def v1_tag_list(tags, tag=''): """List all direct children tags of the given parent. If no parent is specified, then list all top-level tags. The JSON returned for ``/dossier/v1/tags/list/foo/bar`` might look like this: .. code-block:: python { 'children': [ {'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'}, ] } """ tag = tag.decode('utf-8').strip() return {'children': tags.list(tag)}
def coord(self, func:CoordFunc, *args, **kwargs)->'Image':
    "Apply `func` to this image's flow in place; same as `image.flow = func(image.flow, image.size)`."
    updated_flow = func(self.flow, *args, **kwargs)
    self.flow = updated_flow
    return self
def function[coord, parameter[self, func]]: constant[Equivalent to `image.flow = func(image.flow, image.size)`.] name[self].flow assign[=] call[name[func], parameter[name[self].flow, <ast.Starred object at 0x7da20e9b2560>]] return[name[self]]
keyword[def] identifier[coord] ( identifier[self] , identifier[func] : identifier[CoordFunc] ,* identifier[args] ,** identifier[kwargs] )-> literal[string] : literal[string] identifier[self] . identifier[flow] = identifier[func] ( identifier[self] . identifier[flow] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[self]
def coord(self, func: CoordFunc, *args, **kwargs) -> 'Image': """Equivalent to `image.flow = func(image.flow, image.size)`.""" self.flow = func(self.flow, *args, **kwargs) return self
def post_attachment(session, thread_id, attachments):
    """ Add a message to a thread """
    # Split the attachment records into the parallel lists the API expects.
    files = [item['file'] for item in attachments]
    filenames = [item['filename'] for item in attachments]
    message_data = {
        'attachments[]': filenames,
    }
    # POST /api/messages/0.1/threads/{thread_id}/messages/
    endpoint = 'threads/{}/messages'.format(thread_id)
    response = make_post_request(session, endpoint,
                                 form_data=message_data, files=files)
    json_data = response.json()
    if response.status_code != 200:
        raise MessageNotCreatedException(message=json_data['message'],
                                         error_code=json_data['error_code'],
                                         request_id=json_data['request_id'])
    return Message(json_data['result'])
def function[post_attachment, parameter[session, thread_id, attachments]]: constant[ Add a message to a thread ] variable[files] assign[=] list[[]] variable[filenames] assign[=] list[[]] for taget[name[attachment]] in starred[name[attachments]] begin[:] call[name[files].append, parameter[call[name[attachment]][constant[file]]]] call[name[filenames].append, parameter[call[name[attachment]][constant[filename]]]] variable[message_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b00f7df0>], [<ast.Name object at 0x7da1b00f5a50>]] variable[endpoint] assign[=] call[constant[threads/{}/messages].format, parameter[name[thread_id]]] variable[response] assign[=] call[name[make_post_request], parameter[name[session], name[endpoint]]] variable[json_data] assign[=] call[name[response].json, parameter[]] if compare[name[response].status_code equal[==] constant[200]] begin[:] return[call[name[Message], parameter[call[name[json_data]][constant[result]]]]]
keyword[def] identifier[post_attachment] ( identifier[session] , identifier[thread_id] , identifier[attachments] ): literal[string] identifier[files] =[] identifier[filenames] =[] keyword[for] identifier[attachment] keyword[in] identifier[attachments] : identifier[files] . identifier[append] ( identifier[attachment] [ literal[string] ]) identifier[filenames] . identifier[append] ( identifier[attachment] [ literal[string] ]) identifier[message_data] ={ literal[string] : identifier[filenames] , } identifier[endpoint] = literal[string] . identifier[format] ( identifier[thread_id] ) identifier[response] = identifier[make_post_request] ( identifier[session] , identifier[endpoint] , identifier[form_data] = identifier[message_data] , identifier[files] = identifier[files] ) identifier[json_data] = identifier[response] . identifier[json] () keyword[if] identifier[response] . identifier[status_code] == literal[int] : keyword[return] identifier[Message] ( identifier[json_data] [ literal[string] ]) keyword[else] : keyword[raise] identifier[MessageNotCreatedException] ( identifier[message] = identifier[json_data] [ literal[string] ], identifier[error_code] = identifier[json_data] [ literal[string] ], identifier[request_id] = identifier[json_data] [ literal[string] ])
def post_attachment(session, thread_id, attachments): """ Add a message to a thread """ files = [] filenames = [] for attachment in attachments: files.append(attachment['file']) filenames.append(attachment['filename']) # depends on [control=['for'], data=['attachment']] message_data = {'attachments[]': filenames} # POST /api/messages/0.1/threads/{thread_id}/messages/ endpoint = 'threads/{}/messages'.format(thread_id) response = make_post_request(session, endpoint, form_data=message_data, files=files) json_data = response.json() if response.status_code == 200: return Message(json_data['result']) # depends on [control=['if'], data=[]] else: raise MessageNotCreatedException(message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id'])
def _maybe_init_user(self): """Returns the ID for the current user, creating the row if needed.""" user_name = os.environ.get('USER', '') or os.environ.get('USERNAME', '') cursor = self._db.cursor() cursor.execute('SELECT user_id FROM Users WHERE user_name = ?', (user_name,)) row = cursor.fetchone() if row: return row[0] user_id = self._create_id() cursor.execute( """ INSERT INTO USERS (user_id, user_name, inserted_time) VALUES (?, ?, ?) """, (user_id, user_name, time.time())) return user_id
def function[_maybe_init_user, parameter[self]]: constant[Returns the ID for the current user, creating the row if needed.] variable[user_name] assign[=] <ast.BoolOp object at 0x7da1b1f99ba0> variable[cursor] assign[=] call[name[self]._db.cursor, parameter[]] call[name[cursor].execute, parameter[constant[SELECT user_id FROM Users WHERE user_name = ?], tuple[[<ast.Name object at 0x7da1b21e0fa0>]]]] variable[row] assign[=] call[name[cursor].fetchone, parameter[]] if name[row] begin[:] return[call[name[row]][constant[0]]] variable[user_id] assign[=] call[name[self]._create_id, parameter[]] call[name[cursor].execute, parameter[constant[ INSERT INTO USERS (user_id, user_name, inserted_time) VALUES (?, ?, ?) ], tuple[[<ast.Name object at 0x7da1b2168fa0>, <ast.Name object at 0x7da1b2168970>, <ast.Call object at 0x7da1b2168e20>]]]] return[name[user_id]]
keyword[def] identifier[_maybe_init_user] ( identifier[self] ): literal[string] identifier[user_name] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ) keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ) identifier[cursor] = identifier[self] . identifier[_db] . identifier[cursor] () identifier[cursor] . identifier[execute] ( literal[string] , ( identifier[user_name] ,)) identifier[row] = identifier[cursor] . identifier[fetchone] () keyword[if] identifier[row] : keyword[return] identifier[row] [ literal[int] ] identifier[user_id] = identifier[self] . identifier[_create_id] () identifier[cursor] . identifier[execute] ( literal[string] , ( identifier[user_id] , identifier[user_name] , identifier[time] . identifier[time] ())) keyword[return] identifier[user_id]
def _maybe_init_user(self): """Returns the ID for the current user, creating the row if needed.""" user_name = os.environ.get('USER', '') or os.environ.get('USERNAME', '') cursor = self._db.cursor() cursor.execute('SELECT user_id FROM Users WHERE user_name = ?', (user_name,)) row = cursor.fetchone() if row: return row[0] # depends on [control=['if'], data=[]] user_id = self._create_id() cursor.execute('\n INSERT INTO USERS (user_id, user_name, inserted_time)\n VALUES (?, ?, ?)\n ', (user_id, user_name, time.time())) return user_id
def parse_yaml_config(args):
    """Parse the yaml config named by ``args.coveralls_yaml``.

    Returns the parsed mapping, or an empty dict when the file is missing
    or empty.  Exits with an error message if the file exists but PyYAML
    is not installed.
    """
    try:
        import yaml
    except ImportError:
        yaml = None
    yml = {}
    try:
        with open(args.coveralls_yaml, 'r') as fp:
            if not yaml:
                raise SystemExit('PyYAML is required for parsing configuration')
            # safe_load: config files never need arbitrary-object
            # construction, and bare yaml.load() without a Loader is
            # rejected by PyYAML >= 6.
            yml = yaml.safe_load(fp)
    except IOError:
        # Missing/unreadable config file is not an error: fall back to {}.
        pass
    return yml or {}
def function[parse_yaml_config, parameter[args]]: constant[Parse yaml config] <ast.Try object at 0x7da1b12aa440> variable[yml] assign[=] dictionary[[], []] <ast.Try object at 0x7da1b12a84f0> variable[yml] assign[=] <ast.BoolOp object at 0x7da1b12ab580> return[name[yml]]
keyword[def] identifier[parse_yaml_config] ( identifier[args] ): literal[string] keyword[try] : keyword[import] identifier[yaml] keyword[except] identifier[ImportError] : identifier[yaml] = keyword[None] identifier[yml] ={} keyword[try] : keyword[with] identifier[open] ( identifier[args] . identifier[coveralls_yaml] , literal[string] ) keyword[as] identifier[fp] : keyword[if] keyword[not] identifier[yaml] : keyword[raise] identifier[SystemExit] ( literal[string] ) identifier[yml] = identifier[yaml] . identifier[load] ( identifier[fp] ) keyword[except] identifier[IOError] : keyword[pass] identifier[yml] = identifier[yml] keyword[or] {} keyword[return] identifier[yml]
def parse_yaml_config(args): """Parse yaml config""" try: import yaml # depends on [control=['try'], data=[]] except ImportError: yaml = None # depends on [control=['except'], data=[]] yml = {} try: with open(args.coveralls_yaml, 'r') as fp: if not yaml: raise SystemExit('PyYAML is required for parsing configuration') # depends on [control=['if'], data=[]] yml = yaml.load(fp) # depends on [control=['with'], data=['fp']] # depends on [control=['try'], data=[]] except IOError: pass # depends on [control=['except'], data=[]] yml = yml or {} return yml
def country_code_for_valid_region(region_code):
    """Returns the country calling code for a specific region.

    For example, this would be 1 for the United States, and 64 for New
    Zealand.  Assumes the region is already valid.

    Arguments:
    region_code -- The region that we want to get the country calling code
        for.

    Returns the country calling code for the region denoted by region_code.
    """
    region_metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
    if region_metadata is not None:
        return region_metadata.country_code
    raise Exception("Invalid region code %s" % region_code)
def function[country_code_for_valid_region, parameter[region_code]]: constant[Returns the country calling code for a specific region. For example, this would be 1 for the United States, and 64 for New Zealand. Assumes the region is already valid. Arguments: region_code -- The region that we want to get the country calling code for. Returns the country calling code for the region denoted by region_code. ] variable[metadata] assign[=] call[name[PhoneMetadata].metadata_for_region, parameter[call[name[region_code].upper, parameter[]], constant[None]]] if compare[name[metadata] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1906230> return[name[metadata].country_code]
keyword[def] identifier[country_code_for_valid_region] ( identifier[region_code] ): literal[string] identifier[metadata] = identifier[PhoneMetadata] . identifier[metadata_for_region] ( identifier[region_code] . identifier[upper] (), keyword[None] ) keyword[if] identifier[metadata] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] % identifier[region_code] ) keyword[return] identifier[metadata] . identifier[country_code]
def country_code_for_valid_region(region_code): """Returns the country calling code for a specific region. For example, this would be 1 for the United States, and 64 for New Zealand. Assumes the region is already valid. Arguments: region_code -- The region that we want to get the country calling code for. Returns the country calling code for the region denoted by region_code. """ metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None) if metadata is None: raise Exception('Invalid region code %s' % region_code) # depends on [control=['if'], data=[]] return metadata.country_code
def score(self, experiment_name):
    """
    Used to mark the current user's experiment variant as "converted"
    e.g., "Suzy, who was shown the large button, just signed up."

    Conversions will *only* be marked for visitors who have been verified
    as humans (to avoid skewing reports with requests from bots and web
    crawlers).

    :param experiment_name the string name of the experiment
    """
    # Fetch the variant once instead of querying the backend twice
    # (the original looked it up again when marking the conversion).
    variant = self._backend.get_variant(self.identity, experiment_name)
    if variant and self.human is True:
        self._backend.mark_conversion(experiment_name, variant)
def function[score, parameter[self, experiment_name]]: constant[ Used to mark the current user's experiment variant as "converted" e.g., "Suzy, who was shown the large button, just signed up." Conversions will *only* be marked for visitors who have been verified as humans (to avoid skewing reports with requests from bots and web crawlers). :param experiment_name the string name of the experiment ] if <ast.BoolOp object at 0x7da1b095e6b0> begin[:] call[name[self]._backend.mark_conversion, parameter[name[experiment_name], call[name[self]._backend.get_variant, parameter[name[self].identity, name[experiment_name]]]]]
keyword[def] identifier[score] ( identifier[self] , identifier[experiment_name] ): literal[string] keyword[if] identifier[self] . identifier[_backend] . identifier[get_variant] ( identifier[self] . identifier[identity] , identifier[experiment_name] ) keyword[and] identifier[self] . identifier[human] keyword[is] keyword[True] : identifier[self] . identifier[_backend] . identifier[mark_conversion] ( identifier[experiment_name] , identifier[self] . identifier[_backend] . identifier[get_variant] ( identifier[self] . identifier[identity] , identifier[experiment_name] ) )
def score(self, experiment_name): """ Used to mark the current user's experiment variant as "converted" e.g., "Suzy, who was shown the large button, just signed up." Conversions will *only* be marked for visitors who have been verified as humans (to avoid skewing reports with requests from bots and web crawlers). :param experiment_name the string name of the experiment """ if self._backend.get_variant(self.identity, experiment_name) and self.human is True: self._backend.mark_conversion(experiment_name, self._backend.get_variant(self.identity, experiment_name)) # depends on [control=['if'], data=[]]
def resume_runs(dirnames, t_output_every, t_upto, parallel=False):
    """Resume many models, and run.

    Parameters
    ----------
    dirnames: list[str]
        List of output directory paths from which to resume.
    output_every: int
        see :class:`Runner`.
    t_upto: float
        Run each model until the time is equal to this
    parallel: bool
        Whether or not to run the models in parallel, using the
        Multiprocessing library. If `True`, the number of concurrent tasks
        will be equal to one less than the number of available cores
        detected.
    """
    # Bind the shared run parameters once; run_func maps the resulting
    # callable over every directory (optionally in parallel).
    resume_and_run = partial(run_model, t_output_every,
                             t_upto=t_upto, force_resume=True)
    run_func(resume_and_run, dirnames, parallel)
def function[resume_runs, parameter[dirnames, t_output_every, t_upto, parallel]]: constant[Resume many models, and run. Parameters ---------- dirnames: list[str] List of output directory paths from which to resume. output_every: int see :class:`Runner`. t_upto: float Run each model until the time is equal to this parallel: bool Whether or not to run the models in parallel, using the Multiprocessing library. If `True`, the number of concurrent tasks will be equal to one less than the number of available cores detected. ] variable[run_model_partial] assign[=] call[name[partial], parameter[name[run_model], name[t_output_every]]] call[name[run_func], parameter[name[run_model_partial], name[dirnames], name[parallel]]]
keyword[def] identifier[resume_runs] ( identifier[dirnames] , identifier[t_output_every] , identifier[t_upto] , identifier[parallel] = keyword[False] ): literal[string] identifier[run_model_partial] = identifier[partial] ( identifier[run_model] , identifier[t_output_every] , identifier[force_resume] = keyword[True] , identifier[t_upto] = identifier[t_upto] ) identifier[run_func] ( identifier[run_model_partial] , identifier[dirnames] , identifier[parallel] )
def resume_runs(dirnames, t_output_every, t_upto, parallel=False): """Resume many models, and run. Parameters ---------- dirnames: list[str] List of output directory paths from which to resume. output_every: int see :class:`Runner`. t_upto: float Run each model until the time is equal to this parallel: bool Whether or not to run the models in parallel, using the Multiprocessing library. If `True`, the number of concurrent tasks will be equal to one less than the number of available cores detected. """ run_model_partial = partial(run_model, t_output_every, force_resume=True, t_upto=t_upto) run_func(run_model_partial, dirnames, parallel)
def generate_kmers(seq, k=4): """Return a generator of all the unique substrings (k-mer or q-gram strings) within a sequence/string Not effiicent for large k and long strings. Doesn't form substrings that are shorter than k, only exactly k-mers Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing. jellyfish is a C implementation of k-mer counting If seq is a string generate a sequence of k-mer string If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings If seq is a sequence of sequences of strings generate a sequence of sequence of generators ... Default k = 4 because that's the length of a gene base-pair? >>> ' '.join(generate_kmers('AGATAGATAGACACAGAAATGGGACCACAC')) 'AGAT GATA ATAG TAGA AGAT GATA ATAG TAGA AGAC GACA ACAC CACA ACAG ... CCAC CACA ACAC' """ if isinstance(seq, basestring): for i in range(len(seq) - k + 1): yield seq[i:i + k] elif isinstance(seq, (int, float, Decimal)): for s in generate_kmers(str(seq)): yield s else: for s in seq: yield generate_kmers(s, k)
def function[generate_kmers, parameter[seq, k]]: constant[Return a generator of all the unique substrings (k-mer or q-gram strings) within a sequence/string Not effiicent for large k and long strings. Doesn't form substrings that are shorter than k, only exactly k-mers Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing. jellyfish is a C implementation of k-mer counting If seq is a string generate a sequence of k-mer string If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings If seq is a sequence of sequences of strings generate a sequence of sequence of generators ... Default k = 4 because that's the length of a gene base-pair? >>> ' '.join(generate_kmers('AGATAGATAGACACAGAAATGGGACCACAC')) 'AGAT GATA ATAG TAGA AGAT GATA ATAG TAGA AGAC GACA ACAC CACA ACAG ... CCAC CACA ACAC' ] if call[name[isinstance], parameter[name[seq], name[basestring]]] begin[:] for taget[name[i]] in starred[call[name[range], parameter[binary_operation[binary_operation[call[name[len], parameter[name[seq]]] - name[k]] + constant[1]]]]] begin[:] <ast.Yield object at 0x7da18fe912a0>
keyword[def] identifier[generate_kmers] ( identifier[seq] , identifier[k] = literal[int] ): literal[string] keyword[if] identifier[isinstance] ( identifier[seq] , identifier[basestring] ): keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[seq] )- identifier[k] + literal[int] ): keyword[yield] identifier[seq] [ identifier[i] : identifier[i] + identifier[k] ] keyword[elif] identifier[isinstance] ( identifier[seq] ,( identifier[int] , identifier[float] , identifier[Decimal] )): keyword[for] identifier[s] keyword[in] identifier[generate_kmers] ( identifier[str] ( identifier[seq] )): keyword[yield] identifier[s] keyword[else] : keyword[for] identifier[s] keyword[in] identifier[seq] : keyword[yield] identifier[generate_kmers] ( identifier[s] , identifier[k] )
def generate_kmers(seq, k=4): """Return a generator of all the unique substrings (k-mer or q-gram strings) within a sequence/string Not effiicent for large k and long strings. Doesn't form substrings that are shorter than k, only exactly k-mers Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing. jellyfish is a C implementation of k-mer counting If seq is a string generate a sequence of k-mer string If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings If seq is a sequence of sequences of strings generate a sequence of sequence of generators ... Default k = 4 because that's the length of a gene base-pair? >>> ' '.join(generate_kmers('AGATAGATAGACACAGAAATGGGACCACAC')) 'AGAT GATA ATAG TAGA AGAT GATA ATAG TAGA AGAC GACA ACAC CACA ACAG ... CCAC CACA ACAC' """ if isinstance(seq, basestring): for i in range(len(seq) - k + 1): yield seq[i:i + k] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] elif isinstance(seq, (int, float, Decimal)): for s in generate_kmers(str(seq)): yield s # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]] else: for s in seq: yield generate_kmers(s, k) # depends on [control=['for'], data=['s']]
def map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d, shape): """For a 1D array that was flattened from a 2D array of shape (rows, columns), map its values back to the \ original 2D array. The pixel coordinate origin is at the top left corner of the 2D array and goes right-wards and downwards, such that for an array of shape (3,3): - pixel 0 of the 1D array will correspond to index [0,0] of the 2D array. - pixel 1 of the 1D array will correspond to index [0,1] of the 2D array. - pixel 4 of the 1D array will correspond to index [1,0] of the 2D array. Parameters ---------- array_1d : ndarray The 1D array of values which are mapped to a 2D array. shape : (int, int) The shape of the 2D array which the pixels are defined on. Returns -------- ndarray A 2D array of values mapped from the 1D array with dimensions (shape). Examples -------- one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]]) array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0]) array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3), one_to_two=one_to_two) """ array_2d = np.zeros(shape) index = 0 for y in range(shape[0]): for x in range(shape[1]): array_2d[y, x] = array_1d[index] index += 1 return array_2d
def function[map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape, parameter[array_1d, shape]]: constant[For a 1D array that was flattened from a 2D array of shape (rows, columns), map its values back to the original 2D array. The pixel coordinate origin is at the top left corner of the 2D array and goes right-wards and downwards, such that for an array of shape (3,3): - pixel 0 of the 1D array will correspond to index [0,0] of the 2D array. - pixel 1 of the 1D array will correspond to index [0,1] of the 2D array. - pixel 4 of the 1D array will correspond to index [1,0] of the 2D array. Parameters ---------- array_1d : ndarray The 1D array of values which are mapped to a 2D array. shape : (int, int) The shape of the 2D array which the pixels are defined on. Returns -------- ndarray A 2D array of values mapped from the 1D array with dimensions (shape). Examples -------- one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]]) array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0]) array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3), one_to_two=one_to_two) ] variable[array_2d] assign[=] call[name[np].zeros, parameter[name[shape]]] variable[index] assign[=] constant[0] for taget[name[y]] in starred[call[name[range], parameter[call[name[shape]][constant[0]]]]] begin[:] for taget[name[x]] in starred[call[name[range], parameter[call[name[shape]][constant[1]]]]] begin[:] call[name[array_2d]][tuple[[<ast.Name object at 0x7da20cabcee0>, <ast.Name object at 0x7da20cabf190>]]] assign[=] call[name[array_1d]][name[index]] <ast.AugAssign object at 0x7da20cabd2a0> return[name[array_2d]]
keyword[def] identifier[map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape] ( identifier[array_1d] , identifier[shape] ): literal[string] identifier[array_2d] = identifier[np] . identifier[zeros] ( identifier[shape] ) identifier[index] = literal[int] keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[shape] [ literal[int] ]): keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[shape] [ literal[int] ]): identifier[array_2d] [ identifier[y] , identifier[x] ]= identifier[array_1d] [ identifier[index] ] identifier[index] += literal[int] keyword[return] identifier[array_2d]
def map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d, shape): """For a 1D array that was flattened from a 2D array of shape (rows, columns), map its values back to the original 2D array. The pixel coordinate origin is at the top left corner of the 2D array and goes right-wards and downwards, such that for an array of shape (3,3): - pixel 0 of the 1D array will correspond to index [0,0] of the 2D array. - pixel 1 of the 1D array will correspond to index [0,1] of the 2D array. - pixel 4 of the 1D array will correspond to index [1,0] of the 2D array. Parameters ---------- array_1d : ndarray The 1D array of values which are mapped to a 2D array. shape : (int, int) The shape of the 2D array which the pixels are defined on. Returns -------- ndarray A 2D array of values mapped from the 1D array with dimensions (shape). Examples -------- one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]]) array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0]) array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3), one_to_two=one_to_two) """ array_2d = np.zeros(shape) index = 0 for y in range(shape[0]): for x in range(shape[1]): array_2d[y, x] = array_1d[index] index += 1 # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']] return array_2d
def unregister(self, event): """ Remove all registered handlers for an event. Silent return when event was not registered. Usage: dispatch.unregister("my_event") dispatch.unregister("my_event") # no-op """ if self.running: raise RuntimeError("Can't unregister while running") self._handlers.pop(event, None)
def function[unregister, parameter[self, event]]: constant[ Remove all registered handlers for an event. Silent return when event was not registered. Usage: dispatch.unregister("my_event") dispatch.unregister("my_event") # no-op ] if name[self].running begin[:] <ast.Raise object at 0x7da20e957c70> call[name[self]._handlers.pop, parameter[name[event], constant[None]]]
keyword[def] identifier[unregister] ( identifier[self] , identifier[event] ): literal[string] keyword[if] identifier[self] . identifier[running] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[self] . identifier[_handlers] . identifier[pop] ( identifier[event] , keyword[None] )
def unregister(self, event): """ Remove all registered handlers for an event. Silent return when event was not registered. Usage: dispatch.unregister("my_event") dispatch.unregister("my_event") # no-op """ if self.running: raise RuntimeError("Can't unregister while running") # depends on [control=['if'], data=[]] self._handlers.pop(event, None)
def _copy(source, destination, ignore=None): """Effective copy""" if os.path.isdir(source): shutil.copytree(source, destination, symlinks=True, ignore=ignore) else: shutil.copy(source, destination) shutil.copystat(source, destination)
def function[_copy, parameter[source, destination, ignore]]: constant[Effective copy] if call[name[os].path.isdir, parameter[name[source]]] begin[:] call[name[shutil].copytree, parameter[name[source], name[destination]]] call[name[shutil].copystat, parameter[name[source], name[destination]]]
keyword[def] identifier[_copy] ( identifier[source] , identifier[destination] , identifier[ignore] = keyword[None] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[source] ): identifier[shutil] . identifier[copytree] ( identifier[source] , identifier[destination] , identifier[symlinks] = keyword[True] , identifier[ignore] = identifier[ignore] ) keyword[else] : identifier[shutil] . identifier[copy] ( identifier[source] , identifier[destination] ) identifier[shutil] . identifier[copystat] ( identifier[source] , identifier[destination] )
def _copy(source, destination, ignore=None): """Effective copy""" if os.path.isdir(source): shutil.copytree(source, destination, symlinks=True, ignore=ignore) # depends on [control=['if'], data=[]] else: shutil.copy(source, destination) shutil.copystat(source, destination)
def atomic_download(handle, download_fn, module_dir, lock_file_timeout_sec=10 * 60): """Returns the path to a Module directory for a given TF-Hub Module handle. Args: handle: (string) Location of a TF-Hub Module. download_fn: Callback function that actually performs download. The callback receives two arguments, handle and the location of a temporary directory to download the content into. module_dir: Directory where to download the module files to. lock_file_timeout_sec: The amount of time we give the current holder of the lock to make progress in downloading a module. If no progress is made, the lock is revoked. Returns: A string containing the path to a TF-Hub Module directory. Raises: ValueError: if the Module is not found. """ lock_file = _lock_filename(module_dir) task_uid = uuid.uuid4().hex lock_contents = _lock_file_contents(task_uid) tmp_dir = _temp_download_dir(module_dir, task_uid) # Attempt to protect against cases of processes being cancelled with # KeyboardInterrupt by using a try/finally clause to remove the lock # and tmp_dir. try: while True: try: tf_utils.atomic_write_string_to_file(lock_file, lock_contents, overwrite=False) # Must test condition again, since another process could have created # the module and deleted the old lock file since last test. if tf_v1.gfile.Exists(module_dir): # Lock file will be deleted in the finally-clause. return module_dir break # Proceed to downloading the module. except tf.errors.OpError: pass # Wait for lock file to disappear. _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec) # At this point we either deleted a lock or a lock got removed by the # owner or another process. Perform one more iteration of the while-loop, # we would either terminate due tf_v1.gfile.Exists(module_dir) or because # we would obtain a lock ourselves, or wait again for the lock to # disappear. # Lock file acquired. 
logging.info("Downloading TF-Hub Module '%s'.", handle) tf_v1.gfile.MakeDirs(tmp_dir) download_fn(handle, tmp_dir) # Write module descriptor to capture information about which module was # downloaded by whom and when. The file stored at the same level as a # directory in order to keep the content of the 'model_dir' exactly as it # was define by the module publisher. # # Note: The descriptor is written purely to help the end-user to identify # which directory belongs to which module. The descriptor is not part of the # module caching protocol and no code in the TF-Hub library reads its # content. _write_module_descriptor_file(handle, module_dir) try: tf_v1.gfile.Rename(tmp_dir, module_dir) logging.info("Downloaded TF-Hub Module '%s'.", handle) except tf.errors.AlreadyExistsError: logging.warning("Module already exists in %s", module_dir) finally: try: # Temp directory is owned by the current process, remove it. tf_v1.gfile.DeleteRecursively(tmp_dir) except tf.errors.NotFoundError: pass try: contents = tf_utils.read_file_to_string(lock_file) except tf.errors.NotFoundError: contents = "" if contents == lock_contents: # Lock file exists and is owned by this process. try: tf_v1.gfile.Remove(lock_file) except tf.errors.NotFoundError: pass return module_dir
def function[atomic_download, parameter[handle, download_fn, module_dir, lock_file_timeout_sec]]: constant[Returns the path to a Module directory for a given TF-Hub Module handle. Args: handle: (string) Location of a TF-Hub Module. download_fn: Callback function that actually performs download. The callback receives two arguments, handle and the location of a temporary directory to download the content into. module_dir: Directory where to download the module files to. lock_file_timeout_sec: The amount of time we give the current holder of the lock to make progress in downloading a module. If no progress is made, the lock is revoked. Returns: A string containing the path to a TF-Hub Module directory. Raises: ValueError: if the Module is not found. ] variable[lock_file] assign[=] call[name[_lock_filename], parameter[name[module_dir]]] variable[task_uid] assign[=] call[name[uuid].uuid4, parameter[]].hex variable[lock_contents] assign[=] call[name[_lock_file_contents], parameter[name[task_uid]]] variable[tmp_dir] assign[=] call[name[_temp_download_dir], parameter[name[module_dir], name[task_uid]]] <ast.Try object at 0x7da1b2011c00> return[name[module_dir]]
keyword[def] identifier[atomic_download] ( identifier[handle] , identifier[download_fn] , identifier[module_dir] , identifier[lock_file_timeout_sec] = literal[int] * literal[int] ): literal[string] identifier[lock_file] = identifier[_lock_filename] ( identifier[module_dir] ) identifier[task_uid] = identifier[uuid] . identifier[uuid4] (). identifier[hex] identifier[lock_contents] = identifier[_lock_file_contents] ( identifier[task_uid] ) identifier[tmp_dir] = identifier[_temp_download_dir] ( identifier[module_dir] , identifier[task_uid] ) keyword[try] : keyword[while] keyword[True] : keyword[try] : identifier[tf_utils] . identifier[atomic_write_string_to_file] ( identifier[lock_file] , identifier[lock_contents] , identifier[overwrite] = keyword[False] ) keyword[if] identifier[tf_v1] . identifier[gfile] . identifier[Exists] ( identifier[module_dir] ): keyword[return] identifier[module_dir] keyword[break] keyword[except] identifier[tf] . identifier[errors] . identifier[OpError] : keyword[pass] identifier[_wait_for_lock_to_disappear] ( identifier[handle] , identifier[lock_file] , identifier[lock_file_timeout_sec] ) identifier[logging] . identifier[info] ( literal[string] , identifier[handle] ) identifier[tf_v1] . identifier[gfile] . identifier[MakeDirs] ( identifier[tmp_dir] ) identifier[download_fn] ( identifier[handle] , identifier[tmp_dir] ) identifier[_write_module_descriptor_file] ( identifier[handle] , identifier[module_dir] ) keyword[try] : identifier[tf_v1] . identifier[gfile] . identifier[Rename] ( identifier[tmp_dir] , identifier[module_dir] ) identifier[logging] . identifier[info] ( literal[string] , identifier[handle] ) keyword[except] identifier[tf] . identifier[errors] . identifier[AlreadyExistsError] : identifier[logging] . identifier[warning] ( literal[string] , identifier[module_dir] ) keyword[finally] : keyword[try] : identifier[tf_v1] . identifier[gfile] . identifier[DeleteRecursively] ( identifier[tmp_dir] ) keyword[except] identifier[tf] . 
identifier[errors] . identifier[NotFoundError] : keyword[pass] keyword[try] : identifier[contents] = identifier[tf_utils] . identifier[read_file_to_string] ( identifier[lock_file] ) keyword[except] identifier[tf] . identifier[errors] . identifier[NotFoundError] : identifier[contents] = literal[string] keyword[if] identifier[contents] == identifier[lock_contents] : keyword[try] : identifier[tf_v1] . identifier[gfile] . identifier[Remove] ( identifier[lock_file] ) keyword[except] identifier[tf] . identifier[errors] . identifier[NotFoundError] : keyword[pass] keyword[return] identifier[module_dir]
def atomic_download(handle, download_fn, module_dir, lock_file_timeout_sec=10 * 60): """Returns the path to a Module directory for a given TF-Hub Module handle. Args: handle: (string) Location of a TF-Hub Module. download_fn: Callback function that actually performs download. The callback receives two arguments, handle and the location of a temporary directory to download the content into. module_dir: Directory where to download the module files to. lock_file_timeout_sec: The amount of time we give the current holder of the lock to make progress in downloading a module. If no progress is made, the lock is revoked. Returns: A string containing the path to a TF-Hub Module directory. Raises: ValueError: if the Module is not found. """ lock_file = _lock_filename(module_dir) task_uid = uuid.uuid4().hex lock_contents = _lock_file_contents(task_uid) tmp_dir = _temp_download_dir(module_dir, task_uid) # Attempt to protect against cases of processes being cancelled with # KeyboardInterrupt by using a try/finally clause to remove the lock # and tmp_dir. try: while True: try: tf_utils.atomic_write_string_to_file(lock_file, lock_contents, overwrite=False) # Must test condition again, since another process could have created # the module and deleted the old lock file since last test. if tf_v1.gfile.Exists(module_dir): # Lock file will be deleted in the finally-clause. return module_dir # depends on [control=['if'], data=[]] break # Proceed to downloading the module. # depends on [control=['try'], data=[]] except tf.errors.OpError: pass # depends on [control=['except'], data=[]] # Wait for lock file to disappear. _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec) # depends on [control=['while'], data=[]] # At this point we either deleted a lock or a lock got removed by the # owner or another process. 
Perform one more iteration of the while-loop, # we would either terminate due tf_v1.gfile.Exists(module_dir) or because # we would obtain a lock ourselves, or wait again for the lock to # disappear. # Lock file acquired. logging.info("Downloading TF-Hub Module '%s'.", handle) tf_v1.gfile.MakeDirs(tmp_dir) download_fn(handle, tmp_dir) # Write module descriptor to capture information about which module was # downloaded by whom and when. The file stored at the same level as a # directory in order to keep the content of the 'model_dir' exactly as it # was define by the module publisher. # # Note: The descriptor is written purely to help the end-user to identify # which directory belongs to which module. The descriptor is not part of the # module caching protocol and no code in the TF-Hub library reads its # content. _write_module_descriptor_file(handle, module_dir) try: tf_v1.gfile.Rename(tmp_dir, module_dir) logging.info("Downloaded TF-Hub Module '%s'.", handle) # depends on [control=['try'], data=[]] except tf.errors.AlreadyExistsError: logging.warning('Module already exists in %s', module_dir) # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]] finally: try: # Temp directory is owned by the current process, remove it. tf_v1.gfile.DeleteRecursively(tmp_dir) # depends on [control=['try'], data=[]] except tf.errors.NotFoundError: pass # depends on [control=['except'], data=[]] try: contents = tf_utils.read_file_to_string(lock_file) # depends on [control=['try'], data=[]] except tf.errors.NotFoundError: contents = '' # depends on [control=['except'], data=[]] if contents == lock_contents: # Lock file exists and is owned by this process. try: tf_v1.gfile.Remove(lock_file) # depends on [control=['try'], data=[]] except tf.errors.NotFoundError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return module_dir
def vote(self, request, pk=None): """ post: A description of the post method on the custom action. """ choice = self.get_object() choice.vote() serializer = self.get_serializer(choice) return Response(serializer.data)
def function[vote, parameter[self, request, pk]]: constant[ post: A description of the post method on the custom action. ] variable[choice] assign[=] call[name[self].get_object, parameter[]] call[name[choice].vote, parameter[]] variable[serializer] assign[=] call[name[self].get_serializer, parameter[name[choice]]] return[call[name[Response], parameter[name[serializer].data]]]
keyword[def] identifier[vote] ( identifier[self] , identifier[request] , identifier[pk] = keyword[None] ): literal[string] identifier[choice] = identifier[self] . identifier[get_object] () identifier[choice] . identifier[vote] () identifier[serializer] = identifier[self] . identifier[get_serializer] ( identifier[choice] ) keyword[return] identifier[Response] ( identifier[serializer] . identifier[data] )
def vote(self, request, pk=None): """ post: A description of the post method on the custom action. """ choice = self.get_object() choice.vote() serializer = self.get_serializer(choice) return Response(serializer.data)
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ """ GET request """ course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False) return self.show_page(course, web.input())
def function[GET_AUTH, parameter[self, courseid]]: constant[ GET request ] <ast.Tuple object at 0x7da18f58ebc0> assign[=] call[name[self].get_course_and_check_rights, parameter[name[courseid]]] return[call[name[self].show_page, parameter[name[course], call[name[web].input, parameter[]]]]]
keyword[def] identifier[GET_AUTH] ( identifier[self] , identifier[courseid] ): literal[string] identifier[course] , identifier[__] = identifier[self] . identifier[get_course_and_check_rights] ( identifier[courseid] , identifier[allow_all_staff] = keyword[False] ) keyword[return] identifier[self] . identifier[show_page] ( identifier[course] , identifier[web] . identifier[input] ())
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ ' GET request ' (course, __) = self.get_course_and_check_rights(courseid, allow_all_staff=False) return self.show_page(course, web.input())
def get_default_download_dir(self, *subdirs): """ Get the download path for a file. If not defined, return default from config. Parameters ========== subdirs: a single (or list of) subfolders under the basepath """ # Look up value for key "path" in the config path = self.get_config_value(self.CONFIG_NAME_PATH) # If not set in config, default to present working directory if path is None: return os.getcwd() return os.path.join(path, *subdirs)
def function[get_default_download_dir, parameter[self]]: constant[ Get the download path for a file. If not defined, return default from config. Parameters ========== subdirs: a single (or list of) subfolders under the basepath ] variable[path] assign[=] call[name[self].get_config_value, parameter[name[self].CONFIG_NAME_PATH]] if compare[name[path] is constant[None]] begin[:] return[call[name[os].getcwd, parameter[]]] return[call[name[os].path.join, parameter[name[path], <ast.Starred object at 0x7da1b212e680>]]]
keyword[def] identifier[get_default_download_dir] ( identifier[self] ,* identifier[subdirs] ): literal[string] identifier[path] = identifier[self] . identifier[get_config_value] ( identifier[self] . identifier[CONFIG_NAME_PATH] ) keyword[if] identifier[path] keyword[is] keyword[None] : keyword[return] identifier[os] . identifier[getcwd] () keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[path] ,* identifier[subdirs] )
def get_default_download_dir(self, *subdirs): """ Get the download path for a file. If not defined, return default from config. Parameters ========== subdirs: a single (or list of) subfolders under the basepath """ # Look up value for key "path" in the config path = self.get_config_value(self.CONFIG_NAME_PATH) # If not set in config, default to present working directory if path is None: return os.getcwd() # depends on [control=['if'], data=[]] return os.path.join(path, *subdirs)
def extern_generator_send(self, context_handle, func, arg): """Given a generator, send it the given value and return a response.""" c = self._ffi.from_handle(context_handle) response = self._ffi.new('PyGeneratorResponse*') try: res = c.from_value(func[0]).send(c.from_value(arg[0])) if isinstance(res, Get): # Get. response.tag = self._lib.Get response.get = ( TypeId(c.to_id(res.product)), c.to_value(res.subject), c.identify(res.subject), ) elif type(res) in (tuple, list): # GetMulti. response.tag = self._lib.GetMulti response.get_multi = ( c.type_ids_buf([TypeId(c.to_id(g.product)) for g in res]), c.vals_buf([c.to_value(g.subject) for g in res]), c.identities_buf([c.identify(g.subject) for g in res]), ) else: # Break. response.tag = self._lib.Broke response.broke = (c.to_value(res),) except Exception as e: # Throw. response.tag = self._lib.Throw val = e val._formatted_exc = traceback.format_exc() response.throw = (c.to_value(val),) return response[0]
def function[extern_generator_send, parameter[self, context_handle, func, arg]]: constant[Given a generator, send it the given value and return a response.] variable[c] assign[=] call[name[self]._ffi.from_handle, parameter[name[context_handle]]] variable[response] assign[=] call[name[self]._ffi.new, parameter[constant[PyGeneratorResponse*]]] <ast.Try object at 0x7da1b22d0250> return[call[name[response]][constant[0]]]
keyword[def] identifier[extern_generator_send] ( identifier[self] , identifier[context_handle] , identifier[func] , identifier[arg] ): literal[string] identifier[c] = identifier[self] . identifier[_ffi] . identifier[from_handle] ( identifier[context_handle] ) identifier[response] = identifier[self] . identifier[_ffi] . identifier[new] ( literal[string] ) keyword[try] : identifier[res] = identifier[c] . identifier[from_value] ( identifier[func] [ literal[int] ]). identifier[send] ( identifier[c] . identifier[from_value] ( identifier[arg] [ literal[int] ])) keyword[if] identifier[isinstance] ( identifier[res] , identifier[Get] ): identifier[response] . identifier[tag] = identifier[self] . identifier[_lib] . identifier[Get] identifier[response] . identifier[get] =( identifier[TypeId] ( identifier[c] . identifier[to_id] ( identifier[res] . identifier[product] )), identifier[c] . identifier[to_value] ( identifier[res] . identifier[subject] ), identifier[c] . identifier[identify] ( identifier[res] . identifier[subject] ), ) keyword[elif] identifier[type] ( identifier[res] ) keyword[in] ( identifier[tuple] , identifier[list] ): identifier[response] . identifier[tag] = identifier[self] . identifier[_lib] . identifier[GetMulti] identifier[response] . identifier[get_multi] =( identifier[c] . identifier[type_ids_buf] ([ identifier[TypeId] ( identifier[c] . identifier[to_id] ( identifier[g] . identifier[product] )) keyword[for] identifier[g] keyword[in] identifier[res] ]), identifier[c] . identifier[vals_buf] ([ identifier[c] . identifier[to_value] ( identifier[g] . identifier[subject] ) keyword[for] identifier[g] keyword[in] identifier[res] ]), identifier[c] . identifier[identities_buf] ([ identifier[c] . identifier[identify] ( identifier[g] . identifier[subject] ) keyword[for] identifier[g] keyword[in] identifier[res] ]), ) keyword[else] : identifier[response] . identifier[tag] = identifier[self] . identifier[_lib] . identifier[Broke] identifier[response] . 
identifier[broke] =( identifier[c] . identifier[to_value] ( identifier[res] ),) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[response] . identifier[tag] = identifier[self] . identifier[_lib] . identifier[Throw] identifier[val] = identifier[e] identifier[val] . identifier[_formatted_exc] = identifier[traceback] . identifier[format_exc] () identifier[response] . identifier[throw] =( identifier[c] . identifier[to_value] ( identifier[val] ),) keyword[return] identifier[response] [ literal[int] ]
def extern_generator_send(self, context_handle, func, arg): """Given a generator, send it the given value and return a response.""" c = self._ffi.from_handle(context_handle) response = self._ffi.new('PyGeneratorResponse*') try: res = c.from_value(func[0]).send(c.from_value(arg[0])) if isinstance(res, Get): # Get. response.tag = self._lib.Get response.get = (TypeId(c.to_id(res.product)), c.to_value(res.subject), c.identify(res.subject)) # depends on [control=['if'], data=[]] elif type(res) in (tuple, list): # GetMulti. response.tag = self._lib.GetMulti response.get_multi = (c.type_ids_buf([TypeId(c.to_id(g.product)) for g in res]), c.vals_buf([c.to_value(g.subject) for g in res]), c.identities_buf([c.identify(g.subject) for g in res])) # depends on [control=['if'], data=[]] else: # Break. response.tag = self._lib.Broke response.broke = (c.to_value(res),) # depends on [control=['try'], data=[]] except Exception as e: # Throw. response.tag = self._lib.Throw val = e val._formatted_exc = traceback.format_exc() response.throw = (c.to_value(val),) # depends on [control=['except'], data=['e']] return response[0]
def _get_logical_raid_levels(self): """Gets the different raid levels configured on a server. :returns a dictionary of logical_raid_levels set to true. Example if raid level 1+0 and 6 are configured, it returns {'logical_raid_level_10': 'true', 'logical_raid_level_6': 'true'} """ logical_drive_details = self._get_logical_drive_resource() raid_level = {} if logical_drive_details: for item in logical_drive_details: if 'Raid' in item: raid_level_var = "logical_raid_level_" + item['Raid'] raid_level.update({raid_level_var: 'true'}) return raid_level if len(raid_level.keys()) > 0 else None
def function[_get_logical_raid_levels, parameter[self]]: constant[Gets the different raid levels configured on a server. :returns a dictionary of logical_raid_levels set to true. Example if raid level 1+0 and 6 are configured, it returns {'logical_raid_level_10': 'true', 'logical_raid_level_6': 'true'} ] variable[logical_drive_details] assign[=] call[name[self]._get_logical_drive_resource, parameter[]] variable[raid_level] assign[=] dictionary[[], []] if name[logical_drive_details] begin[:] for taget[name[item]] in starred[name[logical_drive_details]] begin[:] if compare[constant[Raid] in name[item]] begin[:] variable[raid_level_var] assign[=] binary_operation[constant[logical_raid_level_] + call[name[item]][constant[Raid]]] call[name[raid_level].update, parameter[dictionary[[<ast.Name object at 0x7da1b1a8e2f0>], [<ast.Constant object at 0x7da1b1a8d300>]]]] return[<ast.IfExp object at 0x7da1b1a8dd50>]
keyword[def] identifier[_get_logical_raid_levels] ( identifier[self] ): literal[string] identifier[logical_drive_details] = identifier[self] . identifier[_get_logical_drive_resource] () identifier[raid_level] ={} keyword[if] identifier[logical_drive_details] : keyword[for] identifier[item] keyword[in] identifier[logical_drive_details] : keyword[if] literal[string] keyword[in] identifier[item] : identifier[raid_level_var] = literal[string] + identifier[item] [ literal[string] ] identifier[raid_level] . identifier[update] ({ identifier[raid_level_var] : literal[string] }) keyword[return] identifier[raid_level] keyword[if] identifier[len] ( identifier[raid_level] . identifier[keys] ())> literal[int] keyword[else] keyword[None]
def _get_logical_raid_levels(self): """Gets the different raid levels configured on a server. :returns a dictionary of logical_raid_levels set to true. Example if raid level 1+0 and 6 are configured, it returns {'logical_raid_level_10': 'true', 'logical_raid_level_6': 'true'} """ logical_drive_details = self._get_logical_drive_resource() raid_level = {} if logical_drive_details: for item in logical_drive_details: if 'Raid' in item: raid_level_var = 'logical_raid_level_' + item['Raid'] raid_level.update({raid_level_var: 'true'}) # depends on [control=['if'], data=['item']] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] return raid_level if len(raid_level.keys()) > 0 else None
def raw_data(self, filename): """Return the raw pickled data from `filename`.""" if self.debug and self.debug.should('dataio'): self.debug.write("Reading data from %r" % (filename,)) fdata = open(filename, 'rb') try: data = pickle.load(fdata) finally: fdata.close() return data
def function[raw_data, parameter[self, filename]]: constant[Return the raw pickled data from `filename`.] if <ast.BoolOp object at 0x7da18ede4be0> begin[:] call[name[self].debug.write, parameter[binary_operation[constant[Reading data from %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18ede4520>]]]]] variable[fdata] assign[=] call[name[open], parameter[name[filename], constant[rb]]] <ast.Try object at 0x7da18ede6860> return[name[data]]
keyword[def] identifier[raw_data] ( identifier[self] , identifier[filename] ): literal[string] keyword[if] identifier[self] . identifier[debug] keyword[and] identifier[self] . identifier[debug] . identifier[should] ( literal[string] ): identifier[self] . identifier[debug] . identifier[write] ( literal[string] %( identifier[filename] ,)) identifier[fdata] = identifier[open] ( identifier[filename] , literal[string] ) keyword[try] : identifier[data] = identifier[pickle] . identifier[load] ( identifier[fdata] ) keyword[finally] : identifier[fdata] . identifier[close] () keyword[return] identifier[data]
def raw_data(self, filename): """Return the raw pickled data from `filename`.""" if self.debug and self.debug.should('dataio'): self.debug.write('Reading data from %r' % (filename,)) # depends on [control=['if'], data=[]] fdata = open(filename, 'rb') try: data = pickle.load(fdata) # depends on [control=['try'], data=[]] finally: fdata.close() return data
def parse(self, fo): """ Convert HMS output to motifs Parameters ---------- fo : file-like File object containing HMS output. Returns ------- motifs : list List of Motif instances. """ motifs = [] m = [[float(x) for x in fo.readline().strip().split(" ")] for i in range(4)] matrix = [[m[0][i], m[1][i],m[2][i],m[3][i]] for i in range(len(m[0]))] motifs = [Motif(matrix)] motifs[-1].id = self.name return motifs
def function[parse, parameter[self, fo]]: constant[ Convert HMS output to motifs Parameters ---------- fo : file-like File object containing HMS output. Returns ------- motifs : list List of Motif instances. ] variable[motifs] assign[=] list[[]] variable[m] assign[=] <ast.ListComp object at 0x7da2054a6380> variable[matrix] assign[=] <ast.ListComp object at 0x7da1b10a41f0> variable[motifs] assign[=] list[[<ast.Call object at 0x7da1b10a5330>]] call[name[motifs]][<ast.UnaryOp object at 0x7da1b10a60b0>].id assign[=] name[self].name return[name[motifs]]
keyword[def] identifier[parse] ( identifier[self] , identifier[fo] ): literal[string] identifier[motifs] =[] identifier[m] =[[ identifier[float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[fo] . identifier[readline] (). identifier[strip] (). identifier[split] ( literal[string] )] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] identifier[matrix] =[[ identifier[m] [ literal[int] ][ identifier[i] ], identifier[m] [ literal[int] ][ identifier[i] ], identifier[m] [ literal[int] ][ identifier[i] ], identifier[m] [ literal[int] ][ identifier[i] ]] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[m] [ literal[int] ]))] identifier[motifs] =[ identifier[Motif] ( identifier[matrix] )] identifier[motifs] [- literal[int] ]. identifier[id] = identifier[self] . identifier[name] keyword[return] identifier[motifs]
def parse(self, fo): """ Convert HMS output to motifs Parameters ---------- fo : file-like File object containing HMS output. Returns ------- motifs : list List of Motif instances. """ motifs = [] m = [[float(x) for x in fo.readline().strip().split(' ')] for i in range(4)] matrix = [[m[0][i], m[1][i], m[2][i], m[3][i]] for i in range(len(m[0]))] motifs = [Motif(matrix)] motifs[-1].id = self.name return motifs
def do_options(self, labels, values): """Replace the drop down fields. Parameters ---------- labels : array-like List of strings which will be visible to the user. values : array-like List of values associated with the labels that are hidden from the user. Returns ------- None """ return [dict(label=l, value=v) for l, v in zip(labels, values)]
def function[do_options, parameter[self, labels, values]]: constant[Replace the drop down fields. Parameters ---------- labels : array-like List of strings which will be visible to the user. values : array-like List of values associated with the labels that are hidden from the user. Returns ------- None ] return[<ast.ListComp object at 0x7da18ede5930>]
keyword[def] identifier[do_options] ( identifier[self] , identifier[labels] , identifier[values] ): literal[string] keyword[return] [ identifier[dict] ( identifier[label] = identifier[l] , identifier[value] = identifier[v] ) keyword[for] identifier[l] , identifier[v] keyword[in] identifier[zip] ( identifier[labels] , identifier[values] )]
def do_options(self, labels, values): """Replace the drop down fields. Parameters ---------- labels : array-like List of strings which will be visible to the user. values : array-like List of values associated with the labels that are hidden from the user. Returns ------- None """ return [dict(label=l, value=v) for (l, v) in zip(labels, values)]
def gaps(args): """ %prog gaps agpfile Print out the distribution of gapsizes. Option --merge allows merging of adjacent gaps which is used by tidy(). """ from jcvi.graphics.histogram import loghistogram p = OptionParser(gaps.__doc__) p.add_option("--merge", dest="merge", default=False, action="store_true", help="Merge adjacent gaps (to conform to AGP specification)") p.add_option("--header", default=False, action="store_true", help="Produce an AGP header [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) merge = opts.merge agpfile, = args if merge: merged_agpfile = agpfile.replace(".agp", ".merged.agp") fw = open(merged_agpfile, "w") agp = AGP(agpfile) sizes = [] data = [] # store merged AGPLine's priorities = ("centromere", "telomere", "scaffold", "contig", \ "clone", "fragment") for is_gap, alines in groupby(agp, key=lambda x: (x.object, x.is_gap)): alines = list(alines) is_gap = is_gap[1] if is_gap: gap_size = sum(x.gap_length for x in alines) gap_types = set(x.gap_type for x in alines) for gtype in ("centromere", "telomere"): if gtype in gap_types: gap_size = gtype sizes.append(gap_size) b = deepcopy(alines[0]) b.object_beg = min(x.object_beg for x in alines) b.object_end = max(x.object_end for x in alines) b.gap_length = sum(x.gap_length for x in alines) assert b.gap_length == b.object_end - b.object_beg + 1 b.component_type = 'U' if b.gap_length == 100 else 'N' gtypes = [x.gap_type for x in alines] for gtype in priorities: if gtype in gtypes: b.gap_type = gtype break linkages = [x.linkage for x in alines] for linkage in ("no", "yes"): if linkage in linkages: b.linkage = linkage break alines = [b] data.extend(alines) loghistogram(sizes) if opts.header: AGP.print_header(fw, organism="Medicago truncatula", taxid=3880, source="J. Craig Venter Institute") if merge: for ob, bb in groupby(data, lambda x: x.object): for i, b in enumerate(bb): b.part_number = i + 1 print(b, file=fw) return merged_agpfile
def function[gaps, parameter[args]]: constant[ %prog gaps agpfile Print out the distribution of gapsizes. Option --merge allows merging of adjacent gaps which is used by tidy(). ] from relative_module[jcvi.graphics.histogram] import module[loghistogram] variable[p] assign[=] call[name[OptionParser], parameter[name[gaps].__doc__]] call[name[p].add_option, parameter[constant[--merge]]] call[name[p].add_option, parameter[constant[--header]]] <ast.Tuple object at 0x7da18f00e680> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18f00eef0>]] variable[merge] assign[=] name[opts].merge <ast.Tuple object at 0x7da18f00c3d0> assign[=] name[args] if name[merge] begin[:] variable[merged_agpfile] assign[=] call[name[agpfile].replace, parameter[constant[.agp], constant[.merged.agp]]] variable[fw] assign[=] call[name[open], parameter[name[merged_agpfile], constant[w]]] variable[agp] assign[=] call[name[AGP], parameter[name[agpfile]]] variable[sizes] assign[=] list[[]] variable[data] assign[=] list[[]] variable[priorities] assign[=] tuple[[<ast.Constant object at 0x7da18f00dba0>, <ast.Constant object at 0x7da18f00cc40>, <ast.Constant object at 0x7da18f00c4c0>, <ast.Constant object at 0x7da18f00f430>, <ast.Constant object at 0x7da18f00fa90>, <ast.Constant object at 0x7da18f00c520>]] for taget[tuple[[<ast.Name object at 0x7da18f00e500>, <ast.Name object at 0x7da18f00e3b0>]]] in starred[call[name[groupby], parameter[name[agp]]]] begin[:] variable[alines] assign[=] call[name[list], parameter[name[alines]]] variable[is_gap] assign[=] call[name[is_gap]][constant[1]] if name[is_gap] begin[:] variable[gap_size] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da18f00d750>]] variable[gap_types] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da18f00eaa0>]] for taget[name[gtype]] in 
starred[tuple[[<ast.Constant object at 0x7da18f00db40>, <ast.Constant object at 0x7da18f00c880>]]] begin[:] if compare[name[gtype] in name[gap_types]] begin[:] variable[gap_size] assign[=] name[gtype] call[name[sizes].append, parameter[name[gap_size]]] variable[b] assign[=] call[name[deepcopy], parameter[call[name[alines]][constant[0]]]] name[b].object_beg assign[=] call[name[min], parameter[<ast.GeneratorExp object at 0x7da18f00fd90>]] name[b].object_end assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da18f00f5b0>]] name[b].gap_length assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da18f00d8d0>]] assert[compare[name[b].gap_length equal[==] binary_operation[binary_operation[name[b].object_end - name[b].object_beg] + constant[1]]]] name[b].component_type assign[=] <ast.IfExp object at 0x7da18f00d0f0> variable[gtypes] assign[=] <ast.ListComp object at 0x7da18f00cd30> for taget[name[gtype]] in starred[name[priorities]] begin[:] if compare[name[gtype] in name[gtypes]] begin[:] name[b].gap_type assign[=] name[gtype] break variable[linkages] assign[=] <ast.ListComp object at 0x7da18f00cfd0> for taget[name[linkage]] in starred[tuple[[<ast.Constant object at 0x7da18f00e350>, <ast.Constant object at 0x7da18f00ca30>]]] begin[:] if compare[name[linkage] in name[linkages]] begin[:] name[b].linkage assign[=] name[linkage] break variable[alines] assign[=] list[[<ast.Name object at 0x7da18f00caf0>]] call[name[data].extend, parameter[name[alines]]] call[name[loghistogram], parameter[name[sizes]]] if name[opts].header begin[:] call[name[AGP].print_header, parameter[name[fw]]] if name[merge] begin[:] for taget[tuple[[<ast.Name object at 0x7da18f00f370>, <ast.Name object at 0x7da18f00ce80>]]] in starred[call[name[groupby], parameter[name[data], <ast.Lambda object at 0x7da18f00d060>]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18f00d930>, <ast.Name object at 0x7da18f00eb60>]]] in starred[call[name[enumerate], parameter[name[bb]]]] 
begin[:] name[b].part_number assign[=] binary_operation[name[i] + constant[1]] call[name[print], parameter[name[b]]] return[name[merged_agpfile]]
keyword[def] identifier[gaps] ( identifier[args] ): literal[string] keyword[from] identifier[jcvi] . identifier[graphics] . identifier[histogram] keyword[import] identifier[loghistogram] identifier[p] = identifier[OptionParser] ( identifier[gaps] . identifier[__doc__] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[merge] = identifier[opts] . identifier[merge] identifier[agpfile] ,= identifier[args] keyword[if] identifier[merge] : identifier[merged_agpfile] = identifier[agpfile] . identifier[replace] ( literal[string] , literal[string] ) identifier[fw] = identifier[open] ( identifier[merged_agpfile] , literal[string] ) identifier[agp] = identifier[AGP] ( identifier[agpfile] ) identifier[sizes] =[] identifier[data] =[] identifier[priorities] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) keyword[for] identifier[is_gap] , identifier[alines] keyword[in] identifier[groupby] ( identifier[agp] , identifier[key] = keyword[lambda] identifier[x] :( identifier[x] . identifier[object] , identifier[x] . identifier[is_gap] )): identifier[alines] = identifier[list] ( identifier[alines] ) identifier[is_gap] = identifier[is_gap] [ literal[int] ] keyword[if] identifier[is_gap] : identifier[gap_size] = identifier[sum] ( identifier[x] . 
identifier[gap_length] keyword[for] identifier[x] keyword[in] identifier[alines] ) identifier[gap_types] = identifier[set] ( identifier[x] . identifier[gap_type] keyword[for] identifier[x] keyword[in] identifier[alines] ) keyword[for] identifier[gtype] keyword[in] ( literal[string] , literal[string] ): keyword[if] identifier[gtype] keyword[in] identifier[gap_types] : identifier[gap_size] = identifier[gtype] identifier[sizes] . identifier[append] ( identifier[gap_size] ) identifier[b] = identifier[deepcopy] ( identifier[alines] [ literal[int] ]) identifier[b] . identifier[object_beg] = identifier[min] ( identifier[x] . identifier[object_beg] keyword[for] identifier[x] keyword[in] identifier[alines] ) identifier[b] . identifier[object_end] = identifier[max] ( identifier[x] . identifier[object_end] keyword[for] identifier[x] keyword[in] identifier[alines] ) identifier[b] . identifier[gap_length] = identifier[sum] ( identifier[x] . identifier[gap_length] keyword[for] identifier[x] keyword[in] identifier[alines] ) keyword[assert] identifier[b] . identifier[gap_length] == identifier[b] . identifier[object_end] - identifier[b] . identifier[object_beg] + literal[int] identifier[b] . identifier[component_type] = literal[string] keyword[if] identifier[b] . identifier[gap_length] == literal[int] keyword[else] literal[string] identifier[gtypes] =[ identifier[x] . identifier[gap_type] keyword[for] identifier[x] keyword[in] identifier[alines] ] keyword[for] identifier[gtype] keyword[in] identifier[priorities] : keyword[if] identifier[gtype] keyword[in] identifier[gtypes] : identifier[b] . identifier[gap_type] = identifier[gtype] keyword[break] identifier[linkages] =[ identifier[x] . identifier[linkage] keyword[for] identifier[x] keyword[in] identifier[alines] ] keyword[for] identifier[linkage] keyword[in] ( literal[string] , literal[string] ): keyword[if] identifier[linkage] keyword[in] identifier[linkages] : identifier[b] . 
identifier[linkage] = identifier[linkage] keyword[break] identifier[alines] =[ identifier[b] ] identifier[data] . identifier[extend] ( identifier[alines] ) identifier[loghistogram] ( identifier[sizes] ) keyword[if] identifier[opts] . identifier[header] : identifier[AGP] . identifier[print_header] ( identifier[fw] , identifier[organism] = literal[string] , identifier[taxid] = literal[int] , identifier[source] = literal[string] ) keyword[if] identifier[merge] : keyword[for] identifier[ob] , identifier[bb] keyword[in] identifier[groupby] ( identifier[data] , keyword[lambda] identifier[x] : identifier[x] . identifier[object] ): keyword[for] identifier[i] , identifier[b] keyword[in] identifier[enumerate] ( identifier[bb] ): identifier[b] . identifier[part_number] = identifier[i] + literal[int] identifier[print] ( identifier[b] , identifier[file] = identifier[fw] ) keyword[return] identifier[merged_agpfile]
def gaps(args): """ %prog gaps agpfile Print out the distribution of gapsizes. Option --merge allows merging of adjacent gaps which is used by tidy(). """ from jcvi.graphics.histogram import loghistogram p = OptionParser(gaps.__doc__) p.add_option('--merge', dest='merge', default=False, action='store_true', help='Merge adjacent gaps (to conform to AGP specification)') p.add_option('--header', default=False, action='store_true', help='Produce an AGP header [default: %default]') (opts, args) = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] merge = opts.merge (agpfile,) = args if merge: merged_agpfile = agpfile.replace('.agp', '.merged.agp') fw = open(merged_agpfile, 'w') # depends on [control=['if'], data=[]] agp = AGP(agpfile) sizes = [] data = [] # store merged AGPLine's priorities = ('centromere', 'telomere', 'scaffold', 'contig', 'clone', 'fragment') for (is_gap, alines) in groupby(agp, key=lambda x: (x.object, x.is_gap)): alines = list(alines) is_gap = is_gap[1] if is_gap: gap_size = sum((x.gap_length for x in alines)) gap_types = set((x.gap_type for x in alines)) for gtype in ('centromere', 'telomere'): if gtype in gap_types: gap_size = gtype # depends on [control=['if'], data=['gtype']] # depends on [control=['for'], data=['gtype']] sizes.append(gap_size) b = deepcopy(alines[0]) b.object_beg = min((x.object_beg for x in alines)) b.object_end = max((x.object_end for x in alines)) b.gap_length = sum((x.gap_length for x in alines)) assert b.gap_length == b.object_end - b.object_beg + 1 b.component_type = 'U' if b.gap_length == 100 else 'N' gtypes = [x.gap_type for x in alines] for gtype in priorities: if gtype in gtypes: b.gap_type = gtype break # depends on [control=['if'], data=['gtype']] # depends on [control=['for'], data=['gtype']] linkages = [x.linkage for x in alines] for linkage in ('no', 'yes'): if linkage in linkages: b.linkage = linkage break # depends on [control=['if'], data=['linkage']] # 
depends on [control=['for'], data=['linkage']] alines = [b] # depends on [control=['if'], data=[]] data.extend(alines) # depends on [control=['for'], data=[]] loghistogram(sizes) if opts.header: AGP.print_header(fw, organism='Medicago truncatula', taxid=3880, source='J. Craig Venter Institute') # depends on [control=['if'], data=[]] if merge: for (ob, bb) in groupby(data, lambda x: x.object): for (i, b) in enumerate(bb): b.part_number = i + 1 print(b, file=fw) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] return merged_agpfile # depends on [control=['if'], data=[]]
def revoke(self, *capabilities_to_revoke): """Revokes zero or more capabilities from this role. :param capabilities_to_revoke: Zero or more capabilities to grant this role. For a list of capabilities, see `Capabilities <http://dev.splunk.com/view/SP-CAAAEJ6#capabilities>`_ on Splunk Developer Portal. :type capabilities_to_revoke: ``string`` or ``list`` :return: The :class:`Role`. **Example**:: service = client.connect(...) role = service.roles['somerole'] role.revoke('change_own_password', 'search') """ possible_capabilities = self.service.capabilities for capability in capabilities_to_revoke: if capability not in possible_capabilities: raise NoSuchCapability(capability) old_capabilities = self['capabilities'] new_capabilities = [] for c in old_capabilities: if c not in capabilities_to_revoke: new_capabilities.append(c) if new_capabilities == []: new_capabilities = '' # Empty lists don't get passed in the body, so we have to force an empty argument. self.post(capabilities=new_capabilities) return self
def function[revoke, parameter[self]]: constant[Revokes zero or more capabilities from this role. :param capabilities_to_revoke: Zero or more capabilities to grant this role. For a list of capabilities, see `Capabilities <http://dev.splunk.com/view/SP-CAAAEJ6#capabilities>`_ on Splunk Developer Portal. :type capabilities_to_revoke: ``string`` or ``list`` :return: The :class:`Role`. **Example**:: service = client.connect(...) role = service.roles['somerole'] role.revoke('change_own_password', 'search') ] variable[possible_capabilities] assign[=] name[self].service.capabilities for taget[name[capability]] in starred[name[capabilities_to_revoke]] begin[:] if compare[name[capability] <ast.NotIn object at 0x7da2590d7190> name[possible_capabilities]] begin[:] <ast.Raise object at 0x7da1b1981ae0> variable[old_capabilities] assign[=] call[name[self]][constant[capabilities]] variable[new_capabilities] assign[=] list[[]] for taget[name[c]] in starred[name[old_capabilities]] begin[:] if compare[name[c] <ast.NotIn object at 0x7da2590d7190> name[capabilities_to_revoke]] begin[:] call[name[new_capabilities].append, parameter[name[c]]] if compare[name[new_capabilities] equal[==] list[[]]] begin[:] variable[new_capabilities] assign[=] constant[] call[name[self].post, parameter[]] return[name[self]]
keyword[def] identifier[revoke] ( identifier[self] ,* identifier[capabilities_to_revoke] ): literal[string] identifier[possible_capabilities] = identifier[self] . identifier[service] . identifier[capabilities] keyword[for] identifier[capability] keyword[in] identifier[capabilities_to_revoke] : keyword[if] identifier[capability] keyword[not] keyword[in] identifier[possible_capabilities] : keyword[raise] identifier[NoSuchCapability] ( identifier[capability] ) identifier[old_capabilities] = identifier[self] [ literal[string] ] identifier[new_capabilities] =[] keyword[for] identifier[c] keyword[in] identifier[old_capabilities] : keyword[if] identifier[c] keyword[not] keyword[in] identifier[capabilities_to_revoke] : identifier[new_capabilities] . identifier[append] ( identifier[c] ) keyword[if] identifier[new_capabilities] ==[]: identifier[new_capabilities] = literal[string] identifier[self] . identifier[post] ( identifier[capabilities] = identifier[new_capabilities] ) keyword[return] identifier[self]
def revoke(self, *capabilities_to_revoke): """Revokes zero or more capabilities from this role. :param capabilities_to_revoke: Zero or more capabilities to grant this role. For a list of capabilities, see `Capabilities <http://dev.splunk.com/view/SP-CAAAEJ6#capabilities>`_ on Splunk Developer Portal. :type capabilities_to_revoke: ``string`` or ``list`` :return: The :class:`Role`. **Example**:: service = client.connect(...) role = service.roles['somerole'] role.revoke('change_own_password', 'search') """ possible_capabilities = self.service.capabilities for capability in capabilities_to_revoke: if capability not in possible_capabilities: raise NoSuchCapability(capability) # depends on [control=['if'], data=['capability']] # depends on [control=['for'], data=['capability']] old_capabilities = self['capabilities'] new_capabilities = [] for c in old_capabilities: if c not in capabilities_to_revoke: new_capabilities.append(c) # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=['c']] if new_capabilities == []: new_capabilities = '' # Empty lists don't get passed in the body, so we have to force an empty argument. # depends on [control=['if'], data=['new_capabilities']] self.post(capabilities=new_capabilities) return self
def _makeInstance(self, clazz, args, kwargs): '''Creates an instance of a class defined in this document. This method sets the context of the object to the current context.''' inst = clazz(self, *args, **kwargs) return inst
def function[_makeInstance, parameter[self, clazz, args, kwargs]]: constant[Creates an instance of a class defined in this document. This method sets the context of the object to the current context.] variable[inst] assign[=] call[name[clazz], parameter[name[self], <ast.Starred object at 0x7da1aff572e0>]] return[name[inst]]
keyword[def] identifier[_makeInstance] ( identifier[self] , identifier[clazz] , identifier[args] , identifier[kwargs] ): literal[string] identifier[inst] = identifier[clazz] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[inst]
def _makeInstance(self, clazz, args, kwargs): """Creates an instance of a class defined in this document. This method sets the context of the object to the current context.""" inst = clazz(self, *args, **kwargs) return inst
def validate(cls, full_config: Dict[str, Any]) -> Optional[Dict[str, Any]]: """ Validator for filters factory From filters factory this filter can be registered with arguments: - ``command`` - ``commands_prefix`` (will be passed as ``prefixes``) - ``commands_ignore_mention`` (will be passed as ``ignore_mention`` :param full_config: :return: config or empty dict """ config = {} if 'commands' in full_config: config['commands'] = full_config.pop('commands') if config and 'commands_prefix' in full_config: config['prefixes'] = full_config.pop('commands_prefix') if config and 'commands_ignore_mention' in full_config: config['ignore_mention'] = full_config.pop('commands_ignore_mention') return config
def function[validate, parameter[cls, full_config]]: constant[ Validator for filters factory From filters factory this filter can be registered with arguments: - ``command`` - ``commands_prefix`` (will be passed as ``prefixes``) - ``commands_ignore_mention`` (will be passed as ``ignore_mention`` :param full_config: :return: config or empty dict ] variable[config] assign[=] dictionary[[], []] if compare[constant[commands] in name[full_config]] begin[:] call[name[config]][constant[commands]] assign[=] call[name[full_config].pop, parameter[constant[commands]]] if <ast.BoolOp object at 0x7da1b18fad70> begin[:] call[name[config]][constant[prefixes]] assign[=] call[name[full_config].pop, parameter[constant[commands_prefix]]] if <ast.BoolOp object at 0x7da1b1846650> begin[:] call[name[config]][constant[ignore_mention]] assign[=] call[name[full_config].pop, parameter[constant[commands_ignore_mention]]] return[name[config]]
keyword[def] identifier[validate] ( identifier[cls] , identifier[full_config] : identifier[Dict] [ identifier[str] , identifier[Any] ])-> identifier[Optional] [ identifier[Dict] [ identifier[str] , identifier[Any] ]]: literal[string] identifier[config] ={} keyword[if] literal[string] keyword[in] identifier[full_config] : identifier[config] [ literal[string] ]= identifier[full_config] . identifier[pop] ( literal[string] ) keyword[if] identifier[config] keyword[and] literal[string] keyword[in] identifier[full_config] : identifier[config] [ literal[string] ]= identifier[full_config] . identifier[pop] ( literal[string] ) keyword[if] identifier[config] keyword[and] literal[string] keyword[in] identifier[full_config] : identifier[config] [ literal[string] ]= identifier[full_config] . identifier[pop] ( literal[string] ) keyword[return] identifier[config]
def validate(cls, full_config: Dict[str, Any]) -> Optional[Dict[str, Any]]: """ Validator for filters factory From filters factory this filter can be registered with arguments: - ``command`` - ``commands_prefix`` (will be passed as ``prefixes``) - ``commands_ignore_mention`` (will be passed as ``ignore_mention`` :param full_config: :return: config or empty dict """ config = {} if 'commands' in full_config: config['commands'] = full_config.pop('commands') # depends on [control=['if'], data=['full_config']] if config and 'commands_prefix' in full_config: config['prefixes'] = full_config.pop('commands_prefix') # depends on [control=['if'], data=[]] if config and 'commands_ignore_mention' in full_config: config['ignore_mention'] = full_config.pop('commands_ignore_mention') # depends on [control=['if'], data=[]] return config
def valid_usaf_id_or_raise(usaf_id): """ Check if USAF ID is valid and raise eeweather.UnrecognizedUSAFIDError if not. """ conn = metadata_db_connection_proxy.get_connection() cur = conn.cursor() cur.execute( """ select exists ( select usaf_id from isd_station_metadata where usaf_id = ? ) """, (usaf_id,), ) (exists,) = cur.fetchone() if exists: return True else: raise UnrecognizedUSAFIDError(usaf_id)
def function[valid_usaf_id_or_raise, parameter[usaf_id]]: constant[ Check if USAF ID is valid and raise eeweather.UnrecognizedUSAFIDError if not. ] variable[conn] assign[=] call[name[metadata_db_connection_proxy].get_connection, parameter[]] variable[cur] assign[=] call[name[conn].cursor, parameter[]] call[name[cur].execute, parameter[constant[ select exists ( select usaf_id from isd_station_metadata where usaf_id = ? ) ], tuple[[<ast.Name object at 0x7da1b26ade10>]]]] <ast.Tuple object at 0x7da1b26ad330> assign[=] call[name[cur].fetchone, parameter[]] if name[exists] begin[:] return[constant[True]]
keyword[def] identifier[valid_usaf_id_or_raise] ( identifier[usaf_id] ): literal[string] identifier[conn] = identifier[metadata_db_connection_proxy] . identifier[get_connection] () identifier[cur] = identifier[conn] . identifier[cursor] () identifier[cur] . identifier[execute] ( literal[string] , ( identifier[usaf_id] ,), ) ( identifier[exists] ,)= identifier[cur] . identifier[fetchone] () keyword[if] identifier[exists] : keyword[return] keyword[True] keyword[else] : keyword[raise] identifier[UnrecognizedUSAFIDError] ( identifier[usaf_id] )
def valid_usaf_id_or_raise(usaf_id): """ Check if USAF ID is valid and raise eeweather.UnrecognizedUSAFIDError if not. """ conn = metadata_db_connection_proxy.get_connection() cur = conn.cursor() cur.execute('\n select exists (\n select\n usaf_id\n from\n isd_station_metadata\n where\n usaf_id = ?\n )\n ', (usaf_id,)) (exists,) = cur.fetchone() if exists: return True # depends on [control=['if'], data=[]] else: raise UnrecognizedUSAFIDError(usaf_id)
def _generate_type_code_query(self, value): """Generate type-code queries. Notes: If the value of the type-code query exists in `TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING, then we query the specified field, along with the given value according to the mapping. See: https://github.com/inspirehep/inspire-query-parser/issues/79 Otherwise, we query both ``document_type`` and ``publication_info``. """ mapping_for_value = self.TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get(value, None) if mapping_for_value: return generate_match_query(*mapping_for_value, with_operator_and=True) else: return { 'bool': { 'minimum_should_match': 1, 'should': [ generate_match_query('document_type', value, with_operator_and=True), generate_match_query('publication_type', value, with_operator_and=True), ] } }
def function[_generate_type_code_query, parameter[self, value]]: constant[Generate type-code queries. Notes: If the value of the type-code query exists in `TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING, then we query the specified field, along with the given value according to the mapping. See: https://github.com/inspirehep/inspire-query-parser/issues/79 Otherwise, we query both ``document_type`` and ``publication_info``. ] variable[mapping_for_value] assign[=] call[name[self].TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get, parameter[name[value], constant[None]]] if name[mapping_for_value] begin[:] return[call[name[generate_match_query], parameter[<ast.Starred object at 0x7da18f00eb60>]]]
keyword[def] identifier[_generate_type_code_query] ( identifier[self] , identifier[value] ): literal[string] identifier[mapping_for_value] = identifier[self] . identifier[TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING] . identifier[get] ( identifier[value] , keyword[None] ) keyword[if] identifier[mapping_for_value] : keyword[return] identifier[generate_match_query] (* identifier[mapping_for_value] , identifier[with_operator_and] = keyword[True] ) keyword[else] : keyword[return] { literal[string] :{ literal[string] : literal[int] , literal[string] :[ identifier[generate_match_query] ( literal[string] , identifier[value] , identifier[with_operator_and] = keyword[True] ), identifier[generate_match_query] ( literal[string] , identifier[value] , identifier[with_operator_and] = keyword[True] ), ] } }
def _generate_type_code_query(self, value): """Generate type-code queries. Notes: If the value of the type-code query exists in `TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING, then we query the specified field, along with the given value according to the mapping. See: https://github.com/inspirehep/inspire-query-parser/issues/79 Otherwise, we query both ``document_type`` and ``publication_info``. """ mapping_for_value = self.TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get(value, None) if mapping_for_value: return generate_match_query(*mapping_for_value, with_operator_and=True) # depends on [control=['if'], data=[]] else: return {'bool': {'minimum_should_match': 1, 'should': [generate_match_query('document_type', value, with_operator_and=True), generate_match_query('publication_type', value, with_operator_and=True)]}}
def _preoptimize_model(self, initials, method): """ Preoptimizes the model by estimating a static model, then a quick search of good dynamic parameters Parameters ---------- initials : np.array A vector of inital values method : str One of 'MLE' or 'PML' (the optimization options) Returns ---------- Y_exp : np.array Vector of past values and predictions """ if not (self.ar==0 and self.sc == 0): toy_model = GASX(formula=self.formula, ar=0, sc=0, integ=self.integ, family=self.family, data=self.data_original) toy_model.fit(method) self.latent_variables.z_list[0].start = toy_model.latent_variables.get_z_values(transformed=False)[0] for extra_z in range(len(self.family.build_latent_variables())): self.latent_variables.z_list[self.ar+self.sc+extra_z].start = toy_model.latent_variables.get_z_values(transformed=False)[extra_z] # Random search for good AR/SC starting values random_starts = np.random.normal(0.3, 0.3, [self.ar+self.sc+len(self.X_names), 1000]) best_start = self.latent_variables.get_z_starting_values() best_lik = self.neg_loglik(self.latent_variables.get_z_starting_values()) proposal_start = best_start.copy() for start in range(random_starts.shape[1]): proposal_start[:self.ar+self.sc+len(self.X_names)] = random_starts[:,start] proposal_start[0] = proposal_start[0]*(1.0-np.sum(random_starts[:self.ar,start])) proposal_likelihood = self.neg_loglik(proposal_start) if proposal_likelihood < best_lik: best_lik = proposal_likelihood best_start = proposal_start.copy() return best_start else: return initials
def function[_preoptimize_model, parameter[self, initials, method]]: constant[ Preoptimizes the model by estimating a static model, then a quick search of good dynamic parameters Parameters ---------- initials : np.array A vector of inital values method : str One of 'MLE' or 'PML' (the optimization options) Returns ---------- Y_exp : np.array Vector of past values and predictions ] if <ast.UnaryOp object at 0x7da207f9bc10> begin[:] variable[toy_model] assign[=] call[name[GASX], parameter[]] call[name[toy_model].fit, parameter[name[method]]] call[name[self].latent_variables.z_list][constant[0]].start assign[=] call[call[name[toy_model].latent_variables.get_z_values, parameter[]]][constant[0]] for taget[name[extra_z]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[self].family.build_latent_variables, parameter[]]]]]]] begin[:] call[name[self].latent_variables.z_list][binary_operation[binary_operation[name[self].ar + name[self].sc] + name[extra_z]]].start assign[=] call[call[name[toy_model].latent_variables.get_z_values, parameter[]]][name[extra_z]] variable[random_starts] assign[=] call[name[np].random.normal, parameter[constant[0.3], constant[0.3], list[[<ast.BinOp object at 0x7da207f99690>, <ast.Constant object at 0x7da20e961e40>]]]] variable[best_start] assign[=] call[name[self].latent_variables.get_z_starting_values, parameter[]] variable[best_lik] assign[=] call[name[self].neg_loglik, parameter[call[name[self].latent_variables.get_z_starting_values, parameter[]]]] variable[proposal_start] assign[=] call[name[best_start].copy, parameter[]] for taget[name[start]] in starred[call[name[range], parameter[call[name[random_starts].shape][constant[1]]]]] begin[:] call[name[proposal_start]][<ast.Slice object at 0x7da20e963760>] assign[=] call[name[random_starts]][tuple[[<ast.Slice object at 0x7da20e961a50>, <ast.Name object at 0x7da20e960610>]]] call[name[proposal_start]][constant[0]] assign[=] 
binary_operation[call[name[proposal_start]][constant[0]] * binary_operation[constant[1.0] - call[name[np].sum, parameter[call[name[random_starts]][tuple[[<ast.Slice object at 0x7da20e963160>, <ast.Name object at 0x7da20e960880>]]]]]]] variable[proposal_likelihood] assign[=] call[name[self].neg_loglik, parameter[name[proposal_start]]] if compare[name[proposal_likelihood] less[<] name[best_lik]] begin[:] variable[best_lik] assign[=] name[proposal_likelihood] variable[best_start] assign[=] call[name[proposal_start].copy, parameter[]] return[name[best_start]]
keyword[def] identifier[_preoptimize_model] ( identifier[self] , identifier[initials] , identifier[method] ): literal[string] keyword[if] keyword[not] ( identifier[self] . identifier[ar] == literal[int] keyword[and] identifier[self] . identifier[sc] == literal[int] ): identifier[toy_model] = identifier[GASX] ( identifier[formula] = identifier[self] . identifier[formula] , identifier[ar] = literal[int] , identifier[sc] = literal[int] , identifier[integ] = identifier[self] . identifier[integ] , identifier[family] = identifier[self] . identifier[family] , identifier[data] = identifier[self] . identifier[data_original] ) identifier[toy_model] . identifier[fit] ( identifier[method] ) identifier[self] . identifier[latent_variables] . identifier[z_list] [ literal[int] ]. identifier[start] = identifier[toy_model] . identifier[latent_variables] . identifier[get_z_values] ( identifier[transformed] = keyword[False] )[ literal[int] ] keyword[for] identifier[extra_z] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[family] . identifier[build_latent_variables] ())): identifier[self] . identifier[latent_variables] . identifier[z_list] [ identifier[self] . identifier[ar] + identifier[self] . identifier[sc] + identifier[extra_z] ]. identifier[start] = identifier[toy_model] . identifier[latent_variables] . identifier[get_z_values] ( identifier[transformed] = keyword[False] )[ identifier[extra_z] ] identifier[random_starts] = identifier[np] . identifier[random] . identifier[normal] ( literal[int] , literal[int] ,[ identifier[self] . identifier[ar] + identifier[self] . identifier[sc] + identifier[len] ( identifier[self] . identifier[X_names] ), literal[int] ]) identifier[best_start] = identifier[self] . identifier[latent_variables] . identifier[get_z_starting_values] () identifier[best_lik] = identifier[self] . identifier[neg_loglik] ( identifier[self] . identifier[latent_variables] . 
identifier[get_z_starting_values] ()) identifier[proposal_start] = identifier[best_start] . identifier[copy] () keyword[for] identifier[start] keyword[in] identifier[range] ( identifier[random_starts] . identifier[shape] [ literal[int] ]): identifier[proposal_start] [: identifier[self] . identifier[ar] + identifier[self] . identifier[sc] + identifier[len] ( identifier[self] . identifier[X_names] )]= identifier[random_starts] [:, identifier[start] ] identifier[proposal_start] [ literal[int] ]= identifier[proposal_start] [ literal[int] ]*( literal[int] - identifier[np] . identifier[sum] ( identifier[random_starts] [: identifier[self] . identifier[ar] , identifier[start] ])) identifier[proposal_likelihood] = identifier[self] . identifier[neg_loglik] ( identifier[proposal_start] ) keyword[if] identifier[proposal_likelihood] < identifier[best_lik] : identifier[best_lik] = identifier[proposal_likelihood] identifier[best_start] = identifier[proposal_start] . identifier[copy] () keyword[return] identifier[best_start] keyword[else] : keyword[return] identifier[initials]
def _preoptimize_model(self, initials, method): """ Preoptimizes the model by estimating a static model, then a quick search of good dynamic parameters Parameters ---------- initials : np.array A vector of inital values method : str One of 'MLE' or 'PML' (the optimization options) Returns ---------- Y_exp : np.array Vector of past values and predictions """ if not (self.ar == 0 and self.sc == 0): toy_model = GASX(formula=self.formula, ar=0, sc=0, integ=self.integ, family=self.family, data=self.data_original) toy_model.fit(method) self.latent_variables.z_list[0].start = toy_model.latent_variables.get_z_values(transformed=False)[0] for extra_z in range(len(self.family.build_latent_variables())): self.latent_variables.z_list[self.ar + self.sc + extra_z].start = toy_model.latent_variables.get_z_values(transformed=False)[extra_z] # depends on [control=['for'], data=['extra_z']] # Random search for good AR/SC starting values random_starts = np.random.normal(0.3, 0.3, [self.ar + self.sc + len(self.X_names), 1000]) best_start = self.latent_variables.get_z_starting_values() best_lik = self.neg_loglik(self.latent_variables.get_z_starting_values()) proposal_start = best_start.copy() for start in range(random_starts.shape[1]): proposal_start[:self.ar + self.sc + len(self.X_names)] = random_starts[:, start] proposal_start[0] = proposal_start[0] * (1.0 - np.sum(random_starts[:self.ar, start])) proposal_likelihood = self.neg_loglik(proposal_start) if proposal_likelihood < best_lik: best_lik = proposal_likelihood best_start = proposal_start.copy() # depends on [control=['if'], data=['proposal_likelihood', 'best_lik']] # depends on [control=['for'], data=['start']] return best_start # depends on [control=['if'], data=[]] else: return initials
def add_speaker(self, collection_name, metadata): """Add a new speaker to this collection. :type collection_name: String :param collection_name: the name of the collection to search :type metadata: Dict :param metadata: dictionary of metadata properties and values for this speaker. Must include 'dcterms:identifier' a unique identifier for the speaker. :rtype: String :returns: the URL of the newly created speaker, or None if there was an error """ if 'dcterms:identifier' not in metadata: raise APIError(msg="No identifier in speaker metadata") if '@context' not in metadata: metadata['@context'] = CONTEXT speakers_url = "/speakers/"+collection_name+"/" resp = self.api_request(speakers_url, data=json.dumps(metadata), method="POST") if 'success' in resp: return resp['success']['URI'] else: return None
def function[add_speaker, parameter[self, collection_name, metadata]]: constant[Add a new speaker to this collection. :type collection_name: String :param collection_name: the name of the collection to search :type metadata: Dict :param metadata: dictionary of metadata properties and values for this speaker. Must include 'dcterms:identifier' a unique identifier for the speaker. :rtype: String :returns: the URL of the newly created speaker, or None if there was an error ] if compare[constant[dcterms:identifier] <ast.NotIn object at 0x7da2590d7190> name[metadata]] begin[:] <ast.Raise object at 0x7da20c6c7fd0> if compare[constant[@context] <ast.NotIn object at 0x7da2590d7190> name[metadata]] begin[:] call[name[metadata]][constant[@context]] assign[=] name[CONTEXT] variable[speakers_url] assign[=] binary_operation[binary_operation[constant[/speakers/] + name[collection_name]] + constant[/]] variable[resp] assign[=] call[name[self].api_request, parameter[name[speakers_url]]] if compare[constant[success] in name[resp]] begin[:] return[call[call[name[resp]][constant[success]]][constant[URI]]]
keyword[def] identifier[add_speaker] ( identifier[self] , identifier[collection_name] , identifier[metadata] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[metadata] : keyword[raise] identifier[APIError] ( identifier[msg] = literal[string] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[metadata] : identifier[metadata] [ literal[string] ]= identifier[CONTEXT] identifier[speakers_url] = literal[string] + identifier[collection_name] + literal[string] identifier[resp] = identifier[self] . identifier[api_request] ( identifier[speakers_url] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[metadata] ), identifier[method] = literal[string] ) keyword[if] literal[string] keyword[in] identifier[resp] : keyword[return] identifier[resp] [ literal[string] ][ literal[string] ] keyword[else] : keyword[return] keyword[None]
def add_speaker(self, collection_name, metadata): """Add a new speaker to this collection. :type collection_name: String :param collection_name: the name of the collection to search :type metadata: Dict :param metadata: dictionary of metadata properties and values for this speaker. Must include 'dcterms:identifier' a unique identifier for the speaker. :rtype: String :returns: the URL of the newly created speaker, or None if there was an error """ if 'dcterms:identifier' not in metadata: raise APIError(msg='No identifier in speaker metadata') # depends on [control=['if'], data=[]] if '@context' not in metadata: metadata['@context'] = CONTEXT # depends on [control=['if'], data=['metadata']] speakers_url = '/speakers/' + collection_name + '/' resp = self.api_request(speakers_url, data=json.dumps(metadata), method='POST') if 'success' in resp: return resp['success']['URI'] # depends on [control=['if'], data=['resp']] else: return None
def do_IHaveRequest(self, apdu): """Respond to a I-Have request.""" if _debug: WhoHasIHaveServices._debug("do_IHaveRequest %r", apdu) # check for required parameters if apdu.deviceIdentifier is None: raise MissingRequiredParameter("deviceIdentifier required") if apdu.objectIdentifier is None: raise MissingRequiredParameter("objectIdentifier required") if apdu.objectName is None: raise MissingRequiredParameter("objectName required")
def function[do_IHaveRequest, parameter[self, apdu]]: constant[Respond to a I-Have request.] if name[_debug] begin[:] call[name[WhoHasIHaveServices]._debug, parameter[constant[do_IHaveRequest %r], name[apdu]]] if compare[name[apdu].deviceIdentifier is constant[None]] begin[:] <ast.Raise object at 0x7da1b26adf60> if compare[name[apdu].objectIdentifier is constant[None]] begin[:] <ast.Raise object at 0x7da1b26adb70> if compare[name[apdu].objectName is constant[None]] begin[:] <ast.Raise object at 0x7da1b26af4c0>
keyword[def] identifier[do_IHaveRequest] ( identifier[self] , identifier[apdu] ): literal[string] keyword[if] identifier[_debug] : identifier[WhoHasIHaveServices] . identifier[_debug] ( literal[string] , identifier[apdu] ) keyword[if] identifier[apdu] . identifier[deviceIdentifier] keyword[is] keyword[None] : keyword[raise] identifier[MissingRequiredParameter] ( literal[string] ) keyword[if] identifier[apdu] . identifier[objectIdentifier] keyword[is] keyword[None] : keyword[raise] identifier[MissingRequiredParameter] ( literal[string] ) keyword[if] identifier[apdu] . identifier[objectName] keyword[is] keyword[None] : keyword[raise] identifier[MissingRequiredParameter] ( literal[string] )
def do_IHaveRequest(self, apdu): """Respond to a I-Have request.""" if _debug: WhoHasIHaveServices._debug('do_IHaveRequest %r', apdu) # depends on [control=['if'], data=[]] # check for required parameters if apdu.deviceIdentifier is None: raise MissingRequiredParameter('deviceIdentifier required') # depends on [control=['if'], data=[]] if apdu.objectIdentifier is None: raise MissingRequiredParameter('objectIdentifier required') # depends on [control=['if'], data=[]] if apdu.objectName is None: raise MissingRequiredParameter('objectName required') # depends on [control=['if'], data=[]]
def get_layer_groups(self): """ Return layers grouped """ g1 = list(self.model[:self.group_cut_layers[0]]) g2 = list(self.model[self.group_cut_layers[0]:self.group_cut_layers[1]]) g3 = list(self.model[self.group_cut_layers[1]:]) return [g1, g2, g3]
def function[get_layer_groups, parameter[self]]: constant[ Return layers grouped ] variable[g1] assign[=] call[name[list], parameter[call[name[self].model][<ast.Slice object at 0x7da1b1519630>]]] variable[g2] assign[=] call[name[list], parameter[call[name[self].model][<ast.Slice object at 0x7da1b16bfa00>]]] variable[g3] assign[=] call[name[list], parameter[call[name[self].model][<ast.Slice object at 0x7da1b16be440>]]] return[list[[<ast.Name object at 0x7da1b16bc850>, <ast.Name object at 0x7da1b16bdc60>, <ast.Name object at 0x7da1b16bec20>]]]
keyword[def] identifier[get_layer_groups] ( identifier[self] ): literal[string] identifier[g1] = identifier[list] ( identifier[self] . identifier[model] [: identifier[self] . identifier[group_cut_layers] [ literal[int] ]]) identifier[g2] = identifier[list] ( identifier[self] . identifier[model] [ identifier[self] . identifier[group_cut_layers] [ literal[int] ]: identifier[self] . identifier[group_cut_layers] [ literal[int] ]]) identifier[g3] = identifier[list] ( identifier[self] . identifier[model] [ identifier[self] . identifier[group_cut_layers] [ literal[int] ]:]) keyword[return] [ identifier[g1] , identifier[g2] , identifier[g3] ]
def get_layer_groups(self): """ Return layers grouped """ g1 = list(self.model[:self.group_cut_layers[0]]) g2 = list(self.model[self.group_cut_layers[0]:self.group_cut_layers[1]]) g3 = list(self.model[self.group_cut_layers[1]:]) return [g1, g2, g3]
def mean(self): """ Mean of all the values in the SArray, or mean image. Returns None on an empty SArray. Raises an exception if called on an SArray with non-numeric type or non-Image type. Returns ------- out : float | turicreate.Image Mean of all values in SArray, or image holding per-pixel mean across the input SArray. """ with cython_context(): if self.dtype == _Image: from .. import extensions return extensions.generate_mean(self) else: return self.__proxy__.mean()
def function[mean, parameter[self]]: constant[ Mean of all the values in the SArray, or mean image. Returns None on an empty SArray. Raises an exception if called on an SArray with non-numeric type or non-Image type. Returns ------- out : float | turicreate.Image Mean of all values in SArray, or image holding per-pixel mean across the input SArray. ] with call[name[cython_context], parameter[]] begin[:] if compare[name[self].dtype equal[==] name[_Image]] begin[:] from relative_module[None] import module[extensions] return[call[name[extensions].generate_mean, parameter[name[self]]]]
keyword[def] identifier[mean] ( identifier[self] ): literal[string] keyword[with] identifier[cython_context] (): keyword[if] identifier[self] . identifier[dtype] == identifier[_Image] : keyword[from] .. keyword[import] identifier[extensions] keyword[return] identifier[extensions] . identifier[generate_mean] ( identifier[self] ) keyword[else] : keyword[return] identifier[self] . identifier[__proxy__] . identifier[mean] ()
def mean(self): """ Mean of all the values in the SArray, or mean image. Returns None on an empty SArray. Raises an exception if called on an SArray with non-numeric type or non-Image type. Returns ------- out : float | turicreate.Image Mean of all values in SArray, or image holding per-pixel mean across the input SArray. """ with cython_context(): if self.dtype == _Image: from .. import extensions return extensions.generate_mean(self) # depends on [control=['if'], data=[]] else: return self.__proxy__.mean() # depends on [control=['with'], data=[]]
def close(self): """Close the connection""" if self.pinger: self.pinger.cancel() self.pinger = None if getattr(self, 'protocol', None): self.protocol.close()
def function[close, parameter[self]]: constant[Close the connection] if name[self].pinger begin[:] call[name[self].pinger.cancel, parameter[]] name[self].pinger assign[=] constant[None] if call[name[getattr], parameter[name[self], constant[protocol], constant[None]]] begin[:] call[name[self].protocol.close, parameter[]]
keyword[def] identifier[close] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[pinger] : identifier[self] . identifier[pinger] . identifier[cancel] () identifier[self] . identifier[pinger] = keyword[None] keyword[if] identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ): identifier[self] . identifier[protocol] . identifier[close] ()
def close(self): """Close the connection""" if self.pinger: self.pinger.cancel() self.pinger = None # depends on [control=['if'], data=[]] if getattr(self, 'protocol', None): self.protocol.close() # depends on [control=['if'], data=[]]
def keepalive(self): """启动保持在线的进程 """ if self.heart_thread.is_alive(): self.heart_active = True else: self.heart_thread.start()
def function[keepalive, parameter[self]]: constant[启动保持在线的进程 ] if call[name[self].heart_thread.is_alive, parameter[]] begin[:] name[self].heart_active assign[=] constant[True]
keyword[def] identifier[keepalive] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[heart_thread] . identifier[is_alive] (): identifier[self] . identifier[heart_active] = keyword[True] keyword[else] : identifier[self] . identifier[heart_thread] . identifier[start] ()
def keepalive(self): """启动保持在线的进程 """ if self.heart_thread.is_alive(): self.heart_active = True # depends on [control=['if'], data=[]] else: self.heart_thread.start()
def _colorize(output): """ Return `output` colorized with Pygments, if available. """ if not pygments: return output # Available styles # ['monokai', 'manni', 'rrt', 'perldoc', 'borland', 'colorful', 'default', # 'murphy', 'vs', 'trac', 'tango', 'fruity', 'autumn', 'bw', 'emacs', # 'vim', 'pastie', 'friendly', 'native'] return pygments.highlight(output, pygments.lexers.PythonLexer(), pygments.formatters.Terminal256Formatter(style='monokai'))
def function[_colorize, parameter[output]]: constant[ Return `output` colorized with Pygments, if available. ] if <ast.UnaryOp object at 0x7da18dc05600> begin[:] return[name[output]] return[call[name[pygments].highlight, parameter[name[output], call[name[pygments].lexers.PythonLexer, parameter[]], call[name[pygments].formatters.Terminal256Formatter, parameter[]]]]]
keyword[def] identifier[_colorize] ( identifier[output] ): literal[string] keyword[if] keyword[not] identifier[pygments] : keyword[return] identifier[output] keyword[return] identifier[pygments] . identifier[highlight] ( identifier[output] , identifier[pygments] . identifier[lexers] . identifier[PythonLexer] (), identifier[pygments] . identifier[formatters] . identifier[Terminal256Formatter] ( identifier[style] = literal[string] ))
def _colorize(output): """ Return `output` colorized with Pygments, if available. """ if not pygments: return output # depends on [control=['if'], data=[]] # Available styles # ['monokai', 'manni', 'rrt', 'perldoc', 'borland', 'colorful', 'default', # 'murphy', 'vs', 'trac', 'tango', 'fruity', 'autumn', 'bw', 'emacs', # 'vim', 'pastie', 'friendly', 'native'] return pygments.highlight(output, pygments.lexers.PythonLexer(), pygments.formatters.Terminal256Formatter(style='monokai'))
def parse_bookmark_file (file): """Parse file object. Return iterator for bookmarks of the form (url, name). Bookmarks are not sorted. """ for url, name in parse_bookmark_json(json.load(file)): yield url, name
def function[parse_bookmark_file, parameter[file]]: constant[Parse file object. Return iterator for bookmarks of the form (url, name). Bookmarks are not sorted. ] for taget[tuple[[<ast.Name object at 0x7da18fe910f0>, <ast.Name object at 0x7da18fe92aa0>]]] in starred[call[name[parse_bookmark_json], parameter[call[name[json].load, parameter[name[file]]]]]] begin[:] <ast.Yield object at 0x7da18fe90640>
keyword[def] identifier[parse_bookmark_file] ( identifier[file] ): literal[string] keyword[for] identifier[url] , identifier[name] keyword[in] identifier[parse_bookmark_json] ( identifier[json] . identifier[load] ( identifier[file] )): keyword[yield] identifier[url] , identifier[name]
def parse_bookmark_file(file): """Parse file object. Return iterator for bookmarks of the form (url, name). Bookmarks are not sorted. """ for (url, name) in parse_bookmark_json(json.load(file)): yield (url, name) # depends on [control=['for'], data=[]]
def createuser(self, email, name='', password=''): """ Return a bugzilla User for the given username :arg email: The email address to use in bugzilla :kwarg name: Real name to associate with the account :kwarg password: Password to set for the bugzilla account :raises XMLRPC Fault: Code 501 if the username already exists Code 500 if the email address isn't valid Code 502 if the password is too short Code 503 if the password is too long :return: User record for the username """ self._proxy.User.create(email, name, password) return self.getuser(email)
def function[createuser, parameter[self, email, name, password]]: constant[ Return a bugzilla User for the given username :arg email: The email address to use in bugzilla :kwarg name: Real name to associate with the account :kwarg password: Password to set for the bugzilla account :raises XMLRPC Fault: Code 501 if the username already exists Code 500 if the email address isn't valid Code 502 if the password is too short Code 503 if the password is too long :return: User record for the username ] call[name[self]._proxy.User.create, parameter[name[email], name[name], name[password]]] return[call[name[self].getuser, parameter[name[email]]]]
keyword[def] identifier[createuser] ( identifier[self] , identifier[email] , identifier[name] = literal[string] , identifier[password] = literal[string] ): literal[string] identifier[self] . identifier[_proxy] . identifier[User] . identifier[create] ( identifier[email] , identifier[name] , identifier[password] ) keyword[return] identifier[self] . identifier[getuser] ( identifier[email] )
def createuser(self, email, name='', password=''): """ Return a bugzilla User for the given username :arg email: The email address to use in bugzilla :kwarg name: Real name to associate with the account :kwarg password: Password to set for the bugzilla account :raises XMLRPC Fault: Code 501 if the username already exists Code 500 if the email address isn't valid Code 502 if the password is too short Code 503 if the password is too long :return: User record for the username """ self._proxy.User.create(email, name, password) return self.getuser(email)
def memset(self, allocation, value, size): """set the memory in allocation to the value in value :param allocation: A GPU memory allocation unit :type allocation: pycuda.driver.DeviceAllocation :param value: The value to set the memory to :type value: a single 8-bit unsigned int :param size: The size of to the allocation unit in bytes :type size: int """ drv.memset_d8(allocation, value, size)
def function[memset, parameter[self, allocation, value, size]]: constant[set the memory in allocation to the value in value :param allocation: A GPU memory allocation unit :type allocation: pycuda.driver.DeviceAllocation :param value: The value to set the memory to :type value: a single 8-bit unsigned int :param size: The size of to the allocation unit in bytes :type size: int ] call[name[drv].memset_d8, parameter[name[allocation], name[value], name[size]]]
keyword[def] identifier[memset] ( identifier[self] , identifier[allocation] , identifier[value] , identifier[size] ): literal[string] identifier[drv] . identifier[memset_d8] ( identifier[allocation] , identifier[value] , identifier[size] )
def memset(self, allocation, value, size): """set the memory in allocation to the value in value :param allocation: A GPU memory allocation unit :type allocation: pycuda.driver.DeviceAllocation :param value: The value to set the memory to :type value: a single 8-bit unsigned int :param size: The size of to the allocation unit in bytes :type size: int """ drv.memset_d8(allocation, value, size)
def PortageFactory(name, NAME, DOMAIN, BaseClass=autoportage.AutoPortage): """ Create a new dynamic portage project. Auto-Generated projects can only be used for compilie-time experiments, because there simply is no run-time test defined for it. Therefore, we implement the run symbol as a noop (with minor logging). This way we avoid the default implementation for run() that all projects inherit. Args: name: Name of the dynamic class. NAME: NAME property of the dynamic class. DOMAIN: DOMAIN property of the dynamic class. BaseClass: Base class to use for the dynamic class. Returns: A new class with NAME,DOMAIN properties set, unable to perform run-time tests. Examples: >>> from benchbuild.projects.gentoo.portage_gen import PortageFactory >>> from benchbuild.experiments.empty import Empty >>> c = PortageFactory("test", "NAME", "DOMAIN") >>> c <class '__main__.test'> >>> i = c(Empty()) >>> i.NAME 'NAME' >>> i.DOMAIN 'DOMAIN' """ def run_not_supported(self, *args, **kwargs): """Dynamic projects don't support a run() test.""" del args, kwargs # Unused LOG.warning( "Runtime testing not supported on auto-generated projects.") return newclass = type( name, (BaseClass, ), { "NAME": NAME, "DOMAIN": DOMAIN, "SRC_FILE": "none", "VERSION": BaseClass.VERSION, "GROUP": "auto-gentoo", "run": run_not_supported, "__module__": "__main__" }) return newclass
def function[PortageFactory, parameter[name, NAME, DOMAIN, BaseClass]]: constant[ Create a new dynamic portage project. Auto-Generated projects can only be used for compilie-time experiments, because there simply is no run-time test defined for it. Therefore, we implement the run symbol as a noop (with minor logging). This way we avoid the default implementation for run() that all projects inherit. Args: name: Name of the dynamic class. NAME: NAME property of the dynamic class. DOMAIN: DOMAIN property of the dynamic class. BaseClass: Base class to use for the dynamic class. Returns: A new class with NAME,DOMAIN properties set, unable to perform run-time tests. Examples: >>> from benchbuild.projects.gentoo.portage_gen import PortageFactory >>> from benchbuild.experiments.empty import Empty >>> c = PortageFactory("test", "NAME", "DOMAIN") >>> c <class '__main__.test'> >>> i = c(Empty()) >>> i.NAME 'NAME' >>> i.DOMAIN 'DOMAIN' ] def function[run_not_supported, parameter[self]]: constant[Dynamic projects don't support a run() test.] <ast.Delete object at 0x7da2041daf80> call[name[LOG].warning, parameter[constant[Runtime testing not supported on auto-generated projects.]]] return[None] variable[newclass] assign[=] call[name[type], parameter[name[name], tuple[[<ast.Name object at 0x7da2041d8070>]], dictionary[[<ast.Constant object at 0x7da2041dace0>, <ast.Constant object at 0x7da2041d8340>, <ast.Constant object at 0x7da2041d8190>, <ast.Constant object at 0x7da2041dba60>, <ast.Constant object at 0x7da2041db490>, <ast.Constant object at 0x7da2041d82e0>, <ast.Constant object at 0x7da18c4cc190>], [<ast.Name object at 0x7da18f09cb80>, <ast.Name object at 0x7da18f09d780>, <ast.Constant object at 0x7da18f09c340>, <ast.Attribute object at 0x7da18f09c6a0>, <ast.Constant object at 0x7da18f09cf40>, <ast.Name object at 0x7da18f09fb20>, <ast.Constant object at 0x7da18f09da80>]]]] return[name[newclass]]
keyword[def] identifier[PortageFactory] ( identifier[name] , identifier[NAME] , identifier[DOMAIN] , identifier[BaseClass] = identifier[autoportage] . identifier[AutoPortage] ): literal[string] keyword[def] identifier[run_not_supported] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[del] identifier[args] , identifier[kwargs] identifier[LOG] . identifier[warning] ( literal[string] ) keyword[return] identifier[newclass] = identifier[type] ( identifier[name] ,( identifier[BaseClass] ,),{ literal[string] : identifier[NAME] , literal[string] : identifier[DOMAIN] , literal[string] : literal[string] , literal[string] : identifier[BaseClass] . identifier[VERSION] , literal[string] : literal[string] , literal[string] : identifier[run_not_supported] , literal[string] : literal[string] }) keyword[return] identifier[newclass]
def PortageFactory(name, NAME, DOMAIN, BaseClass=autoportage.AutoPortage): """ Create a new dynamic portage project. Auto-Generated projects can only be used for compilie-time experiments, because there simply is no run-time test defined for it. Therefore, we implement the run symbol as a noop (with minor logging). This way we avoid the default implementation for run() that all projects inherit. Args: name: Name of the dynamic class. NAME: NAME property of the dynamic class. DOMAIN: DOMAIN property of the dynamic class. BaseClass: Base class to use for the dynamic class. Returns: A new class with NAME,DOMAIN properties set, unable to perform run-time tests. Examples: >>> from benchbuild.projects.gentoo.portage_gen import PortageFactory >>> from benchbuild.experiments.empty import Empty >>> c = PortageFactory("test", "NAME", "DOMAIN") >>> c <class '__main__.test'> >>> i = c(Empty()) >>> i.NAME 'NAME' >>> i.DOMAIN 'DOMAIN' """ def run_not_supported(self, *args, **kwargs): """Dynamic projects don't support a run() test.""" del args, kwargs # Unused LOG.warning('Runtime testing not supported on auto-generated projects.') return newclass = type(name, (BaseClass,), {'NAME': NAME, 'DOMAIN': DOMAIN, 'SRC_FILE': 'none', 'VERSION': BaseClass.VERSION, 'GROUP': 'auto-gentoo', 'run': run_not_supported, '__module__': '__main__'}) return newclass
def sitetree_tree(parser, token):
    """Parses sitetree tag parameters.

    Two notation types are possible:

        1. Two arguments:
           {% sitetree_tree from "mytree" %}
           Used to render tree for "mytree" site tree.

        2. Four arguments:
           {% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
           Used to render tree for "mytree" site tree using specific
           template "sitetree/mytree.html"

    """
    tag_tokens = token.split_contents()
    # detect_clause() strips the optional `template "..."` clause from
    # tag_tokens, so the length check below must come after it.
    use_template = detect_clause(parser, 'template', tag_tokens)

    if len(tag_tokens) not in (3, 5):
        raise template.TemplateSyntaxError(
            '%r tag requires two arguments. E.g. {%% sitetree_tree from "mytree" %%}.' % tag_tokens[0])

    tree_alias = parser.compile_filter(tag_tokens[2])
    return sitetree_treeNode(tree_alias, use_template)
def function[sitetree_tree, parameter[parser, token]]: constant[Parses sitetree tag parameters. Two notation types are possible: 1. Two arguments: {% sitetree_tree from "mytree" %} Used to render tree for "mytree" site tree. 2. Four arguments: {% sitetree_tree from "mytree" template "sitetree/mytree.html" %} Used to render tree for "mytree" site tree using specific template "sitetree/mytree.html" ] variable[tokens] assign[=] call[name[token].split_contents, parameter[]] variable[use_template] assign[=] call[name[detect_clause], parameter[name[parser], constant[template], name[tokens]]] variable[tokens_num] assign[=] call[name[len], parameter[name[tokens]]] if compare[name[tokens_num] in tuple[[<ast.Constant object at 0x7da2041da080>, <ast.Constant object at 0x7da2041dba00>]]] begin[:] variable[tree_alias] assign[=] call[name[parser].compile_filter, parameter[call[name[tokens]][constant[2]]]] return[call[name[sitetree_treeNode], parameter[name[tree_alias], name[use_template]]]]
keyword[def] identifier[sitetree_tree] ( identifier[parser] , identifier[token] ): literal[string] identifier[tokens] = identifier[token] . identifier[split_contents] () identifier[use_template] = identifier[detect_clause] ( identifier[parser] , literal[string] , identifier[tokens] ) identifier[tokens_num] = identifier[len] ( identifier[tokens] ) keyword[if] identifier[tokens_num] keyword[in] ( literal[int] , literal[int] ): identifier[tree_alias] = identifier[parser] . identifier[compile_filter] ( identifier[tokens] [ literal[int] ]) keyword[return] identifier[sitetree_treeNode] ( identifier[tree_alias] , identifier[use_template] ) keyword[else] : keyword[raise] identifier[template] . identifier[TemplateSyntaxError] ( literal[string] % identifier[tokens] [ literal[int] ])
def sitetree_tree(parser, token): """Parses sitetree tag parameters. Two notation types are possible: 1. Two arguments: {% sitetree_tree from "mytree" %} Used to render tree for "mytree" site tree. 2. Four arguments: {% sitetree_tree from "mytree" template "sitetree/mytree.html" %} Used to render tree for "mytree" site tree using specific template "sitetree/mytree.html" """ tokens = token.split_contents() use_template = detect_clause(parser, 'template', tokens) tokens_num = len(tokens) if tokens_num in (3, 5): tree_alias = parser.compile_filter(tokens[2]) return sitetree_treeNode(tree_alias, use_template) # depends on [control=['if'], data=[]] else: raise template.TemplateSyntaxError('%r tag requires two arguments. E.g. {%% sitetree_tree from "mytree" %%}.' % tokens[0])
def replace_python_tag(wheelname, new_tag):
    # type: (str, str) -> str
    """Return *wheelname* with its Python tag swapped for *new_tag*.

    Wheel file names follow ``{dist}-{version}(-{build})?-{py}-{abi}-{plat}.whl``
    (PEP 427), so the Python tag is always the third-from-last dash field.
    """
    fields = wheelname.split('-')
    fields[-3] = new_tag
    return '-'.join(fields)
def function[replace_python_tag, parameter[wheelname, new_tag]]: constant[Replace the Python tag in a wheel file name with a new value. ] variable[parts] assign[=] call[name[wheelname].split, parameter[constant[-]]] call[name[parts]][<ast.UnaryOp object at 0x7da2044c3190>] assign[=] name[new_tag] return[call[constant[-].join, parameter[name[parts]]]]
keyword[def] identifier[replace_python_tag] ( identifier[wheelname] , identifier[new_tag] ): literal[string] identifier[parts] = identifier[wheelname] . identifier[split] ( literal[string] ) identifier[parts] [- literal[int] ]= identifier[new_tag] keyword[return] literal[string] . identifier[join] ( identifier[parts] )
def replace_python_tag(wheelname, new_tag): # type: (str, str) -> str 'Replace the Python tag in a wheel file name with a new value.\n ' parts = wheelname.split('-') parts[-3] = new_tag return '-'.join(parts)
def modify_pattern(self, pattern, group):
    """Prefix named groups in *pattern* with this object's name, then wrap
    the whole pattern in a new named group called *group*."""
    # Rewrite each `?P<x>` found by group_regex into `?P<{self.name}_x>`
    # so group names stay unique when patterns are combined.
    replacement = r'?P<{}_\1>'.format(self.name)
    renamed = group_regex.sub(replacement, pattern)
    return r'(?P<{}>{})'.format(group, renamed)
def function[modify_pattern, parameter[self, pattern, group]]: constant[Rename groups in regex pattern and enclose it in named group] variable[pattern] assign[=] call[name[group_regex].sub, parameter[call[constant[?P<{}_\1>].format, parameter[name[self].name]], name[pattern]]] return[call[constant[(?P<{}>{})].format, parameter[name[group], name[pattern]]]]
keyword[def] identifier[modify_pattern] ( identifier[self] , identifier[pattern] , identifier[group] ): literal[string] identifier[pattern] = identifier[group_regex] . identifier[sub] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] ), identifier[pattern] ) keyword[return] literal[string] . identifier[format] ( identifier[group] , identifier[pattern] )
def modify_pattern(self, pattern, group): """Rename groups in regex pattern and enclose it in named group""" pattern = group_regex.sub('?P<{}_\\1>'.format(self.name), pattern) return '(?P<{}>{})'.format(group, pattern)
def listdir(self, path):
    '''
    Yield every entry in *path*, skipping dotfiles
    (names beginning with '.').
    '''
    for entry in os.listdir(path):
        if entry.startswith('.'):
            continue
        yield entry
def function[listdir, parameter[self, path]]: constant[ Return a list of all non dotfiles in a given directory. ] for taget[name[f]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:] if <ast.UnaryOp object at 0x7da1b22573a0> begin[:] <ast.Yield object at 0x7da1b2257280>
keyword[def] identifier[listdir] ( identifier[self] , identifier[path] ): literal[string] keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ): keyword[if] keyword[not] identifier[f] . identifier[startswith] ( literal[string] ): keyword[yield] identifier[f]
def listdir(self, path): """ Return a list of all non dotfiles in a given directory. """ for f in os.listdir(path): if not f.startswith('.'): yield f # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
def curriculum2schedule(curriculum, first_day, compress=False, time_table=None):
    """Convert a curriculum (day x period grid) into a concrete, sorted class schedule.

    When ``compress`` is true, same-day entries for the same course are merged
    into a single entry spanning the combined time range.

    :param curriculum: the timetable: ``curriculum[day][period]`` is a list of
        course dicts (keys: '课程名称' = course name, '课程地点' = location,
        '上课周数' = list of week numbers) or None/empty for free slots
    :param first_day: the Monday of week 1, e.g. ``datetime.datetime(2016, 8, 29)``
    :param compress: merge consecutive same-day entries of one course into one
    :param time_table: class times for each day: an 11 x 2 matrix of
        ``(start timedelta, end timedelta)`` pairs; defaults to the standard
        bell schedule below
    :return: ``[(week, start datetime, end datetime, course str), ...]``
    """
    schedule = []
    # Default bell schedule: 11 periods per day, offsets from midnight.
    time_table = time_table or (
        (timedelta(hours=8), timedelta(hours=8, minutes=50)),
        (timedelta(hours=9), timedelta(hours=9, minutes=50)),
        (timedelta(hours=10, minutes=10), timedelta(hours=11)),
        (timedelta(hours=11, minutes=10), timedelta(hours=12)),
        (timedelta(hours=14), timedelta(hours=14, minutes=50)),
        (timedelta(hours=15), timedelta(hours=15, minutes=50)),
        (timedelta(hours=16), timedelta(hours=16, minutes=50)),
        (timedelta(hours=17), timedelta(hours=17, minutes=50)),
        (timedelta(hours=19), timedelta(hours=19, minutes=50)),
        (timedelta(hours=19, minutes=50), timedelta(hours=20, minutes=40)),
        (timedelta(hours=20, minutes=40), timedelta(hours=21, minutes=30))
    )
    for i, d in enumerate(curriculum):        # i: weekday offset from Monday
        for j, cs in enumerate(d):            # j: period index into time_table
            for c in cs or []:                # tolerate None / empty slots
                course = '{name}[{place}]'.format(name=c['课程名称'], place=c['课程地点'])
                for week in c['上课周数']:
                    day = first_day + timedelta(weeks=week - 1, days=i)
                    start, end = time_table[j]
                    schedule.append((week, day + start, day + end, course))
    schedule.sort()

    if not compress or not schedule:
        # The emptiness guard fixes an IndexError the old code raised when
        # compress=True was requested on an empty schedule.
        return schedule

    new_schedule = [schedule[0]]
    for sch in schedule[1:]:
        last = new_schedule[-1]
        # Same day and same course: extend the previous entry's end time.
        if last[1].date() == sch[1].date() and last[3] == sch[3]:
            # (week, start, end, course) — keep week/start, take new end.
            new_schedule[-1] = (last[0], last[1], sch[2], last[3])
        else:
            new_schedule.append(sch)
    return new_schedule
def function[curriculum2schedule, parameter[curriculum, first_day, compress, time_table]]: constant[ 将课程表转换为上课时间表, 如果 compress=False 结果是未排序的, 否则为压缩并排序后的上课时间表 :param curriculum: 课表 :param first_day: 第一周周一, 如 datetime.datetime(2016, 8, 29) :param compress: 压缩连续的课时为一个 :param time_table: 每天上课的时间表, 形如 ``((start timedelta, end timedelta), ...)`` 的 11 × 2 的矩阵 :return: [(datetime.datetime, str) ...] ] variable[schedule] assign[=] list[[]] variable[time_table] assign[=] <ast.BoolOp object at 0x7da18ede4370> for taget[tuple[[<ast.Name object at 0x7da18ede4340>, <ast.Name object at 0x7da18ede4730>]]] in starred[call[name[enumerate], parameter[name[curriculum]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18ede7010>, <ast.Name object at 0x7da18ede6c50>]]] in starred[call[name[enumerate], parameter[name[d]]]] begin[:] for taget[name[c]] in starred[<ast.BoolOp object at 0x7da18ede4a00>] begin[:] variable[course] assign[=] call[constant[{name}[{place}]].format, parameter[]] for taget[name[week]] in starred[call[name[c]][constant[上课周数]]] begin[:] variable[day] assign[=] binary_operation[name[first_day] + call[name[timedelta], parameter[]]] <ast.Tuple object at 0x7da18ede40d0> assign[=] call[name[time_table]][name[j]] variable[item] assign[=] tuple[[<ast.Name object at 0x7da18ede4610>, <ast.BinOp object at 0x7da18ede75e0>, <ast.BinOp object at 0x7da18ede50f0>, <ast.Name object at 0x7da18ede6d40>]] call[name[schedule].append, parameter[name[item]]] call[name[schedule].sort, parameter[]] if name[compress] begin[:] variable[new_schedule] assign[=] list[[<ast.Subscript object at 0x7da18ede5420>]] for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[schedule]]]]]] begin[:] variable[sch] assign[=] call[name[schedule]][name[i]] if <ast.BoolOp object at 0x7da18ede5db0> begin[:] variable[old_item] assign[=] call[name[new_schedule].pop, parameter[]] variable[new_item] assign[=] tuple[[<ast.Subscript object at 0x7da20c7caaa0>, 
<ast.Subscript object at 0x7da20c7cb610>, <ast.Subscript object at 0x7da20c7c8820>, <ast.Subscript object at 0x7da20c7c80d0>]] call[name[new_schedule].append, parameter[name[new_item]]] return[name[new_schedule]] return[name[schedule]]
keyword[def] identifier[curriculum2schedule] ( identifier[curriculum] , identifier[first_day] , identifier[compress] = keyword[False] , identifier[time_table] = keyword[None] ): literal[string] identifier[schedule] =[] identifier[time_table] = identifier[time_table] keyword[or] ( ( identifier[timedelta] ( identifier[hours] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )), ( identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] 
= literal[int] ), identifier[timedelta] ( identifier[hours] = literal[int] , identifier[minutes] = literal[int] )) ) keyword[for] identifier[i] , identifier[d] keyword[in] identifier[enumerate] ( identifier[curriculum] ): keyword[for] identifier[j] , identifier[cs] keyword[in] identifier[enumerate] ( identifier[d] ): keyword[for] identifier[c] keyword[in] identifier[cs] keyword[or] []: identifier[course] = literal[string] . identifier[format] ( identifier[name] = identifier[c] [ literal[string] ], identifier[place] = identifier[c] [ literal[string] ]) keyword[for] identifier[week] keyword[in] identifier[c] [ literal[string] ]: identifier[day] = identifier[first_day] + identifier[timedelta] ( identifier[weeks] = identifier[week] - literal[int] , identifier[days] = identifier[i] ) identifier[start] , identifier[end] = identifier[time_table] [ identifier[j] ] identifier[item] =( identifier[week] , identifier[day] + identifier[start] , identifier[day] + identifier[end] , identifier[course] ) identifier[schedule] . identifier[append] ( identifier[item] ) identifier[schedule] . identifier[sort] () keyword[if] identifier[compress] : identifier[new_schedule] =[ identifier[schedule] [ literal[int] ]] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[schedule] )): identifier[sch] = identifier[schedule] [ identifier[i] ] keyword[if] identifier[new_schedule] [- literal[int] ][ literal[int] ]. identifier[date] ()== identifier[sch] [ literal[int] ]. identifier[date] () keyword[and] identifier[new_schedule] [- literal[int] ][ literal[int] ]== identifier[sch] [ literal[int] ]: identifier[old_item] = identifier[new_schedule] . identifier[pop] () identifier[new_item] =( identifier[old_item] [ literal[int] ], identifier[old_item] [ literal[int] ], identifier[sch] [ literal[int] ], identifier[old_item] [ literal[int] ]) keyword[else] : identifier[new_item] = identifier[sch] identifier[new_schedule] . 
identifier[append] ( identifier[new_item] ) keyword[return] identifier[new_schedule] keyword[return] identifier[schedule]
def curriculum2schedule(curriculum, first_day, compress=False, time_table=None): """ 将课程表转换为上课时间表, 如果 compress=False 结果是未排序的, 否则为压缩并排序后的上课时间表 :param curriculum: 课表 :param first_day: 第一周周一, 如 datetime.datetime(2016, 8, 29) :param compress: 压缩连续的课时为一个 :param time_table: 每天上课的时间表, 形如 ``((start timedelta, end timedelta), ...)`` 的 11 × 2 的矩阵 :return: [(datetime.datetime, str) ...] """ schedule = [] time_table = time_table or ((timedelta(hours=8), timedelta(hours=8, minutes=50)), (timedelta(hours=9), timedelta(hours=9, minutes=50)), (timedelta(hours=10, minutes=10), timedelta(hours=11)), (timedelta(hours=11, minutes=10), timedelta(hours=12)), (timedelta(hours=14), timedelta(hours=14, minutes=50)), (timedelta(hours=15), timedelta(hours=15, minutes=50)), (timedelta(hours=16), timedelta(hours=16, minutes=50)), (timedelta(hours=17), timedelta(hours=17, minutes=50)), (timedelta(hours=19), timedelta(hours=19, minutes=50)), (timedelta(hours=19, minutes=50), timedelta(hours=20, minutes=40)), (timedelta(hours=20, minutes=40), timedelta(hours=21, minutes=30))) for (i, d) in enumerate(curriculum): for (j, cs) in enumerate(d): for c in cs or []: course = '{name}[{place}]'.format(name=c['课程名称'], place=c['课程地点']) for week in c['上课周数']: day = first_day + timedelta(weeks=week - 1, days=i) (start, end) = time_table[j] item = (week, day + start, day + end, course) schedule.append(item) # depends on [control=['for'], data=['week']] # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] schedule.sort() if compress: new_schedule = [schedule[0]] for i in range(1, len(schedule)): sch = schedule[i] # 同一天的连续课程 if new_schedule[-1][1].date() == sch[1].date() and new_schedule[-1][3] == sch[3]: # 更新结束时间 old_item = new_schedule.pop() # week, start, end, course new_item = (old_item[0], old_item[1], sch[2], old_item[3]) # depends on [control=['if'], data=[]] else: new_item = sch new_schedule.append(new_item) # depends on 
[control=['for'], data=['i']] return new_schedule # depends on [control=['if'], data=[]] return schedule
def createGUI( self ):
    '''Create the graphical user interface.

    Builds the Tk widget tree (instructions label, a 640x480 input canvas,
    a command-entry row, and observation/reward labels) and wires up mouse
    and keyboard event handlers on this object.
    '''
    our_font = "Helvetica 16 bold"
    small_font = "Helvetica 9 bold"
    self.root_frame = Frame(self.root)
    # The instruction text depends on the mission's action space.
    if self.action_space == 'continuous':
        desc = "Running continuous-action mission.\nUse the mouse to turn, WASD to move."
    else:
        desc = "Running discrete-action mission.\nUse the arrow keys to turn and move."
    Label(self.root_frame, text=desc,font = our_font,wraplength=640).pack(padx=5, pady=5)
    # 640x480 canvas that receives all mouse/keyboard input for the mission.
    self.canvas = Canvas(self.root_frame, borderwidth=0, highlightthickness=0, width=640, height=480, bg="gray" )
    self.canvas.bind('<Motion>',self.onMouseMoveInCanvas)
    self.canvas.bind('<Button-1>',self.onLeftMouseDownInCanvas)
    self.canvas.bind('<ButtonRelease-1>',self.onLeftMouseUpInCanvas)
    # Tk numbers the right mouse button differently per platform.
    if sys.platform == 'darwin':
        right_mouse_button = '2' # on MacOSX, the right button is 'Button-2'
    else:
        right_mouse_button = '3' # on Windows and Linux the right button is 'Button-3'
    self.canvas.bind('<Button-'+right_mouse_button+'>',self.onRightMouseDownInCanvas)
    self.canvas.bind('<ButtonRelease-'+right_mouse_button+'>',self.onRightMouseUpInCanvas)
    self.canvas.bind('<KeyPress>',self.onKeyPressInCanvas)
    self.canvas.bind('<KeyRelease>',self.onKeyReleaseInCanvas)
    self.canvas.pack(padx=5, pady=5)
    # Row for typing chat/slash commands, sent via Enter key or the button.
    self.entry_frame = Frame(self.root_frame)
    Label(self.entry_frame, text="Type '/' to enter command:",font = small_font).pack(padx=5, pady=5, side=LEFT)
    self.command_entry = Entry(self.entry_frame,font = small_font)
    self.command_entry.bind('<Key>',self.onKeyInCommandEntry)
    self.command_entry.pack(padx=5, pady=5, side=LEFT)
    Button(self.entry_frame, text='Send', command=self.onSendCommand,font = small_font).pack(padx=5, pady=5, side=LEFT)
    self.entry_frame.pack()
    # Status labels updated elsewhere as observations/rewards arrive.
    self.observation = Label(self.root_frame, text='observations will appear here', wraplength=640, font = small_font)
    self.observation.pack()
    self.reward = Label(self.root_frame, text='rewards will appear here', wraplength=640, font = small_font)
    self.reward.pack()
    self.root_frame.pack()
    # Mouse-event state used by the motion handlers to compute deltas.
    self.mouse_event = self.prev_mouse_event = None
def function[createGUI, parameter[self]]: constant[Create the graphical user interface.] variable[our_font] assign[=] constant[Helvetica 16 bold] variable[small_font] assign[=] constant[Helvetica 9 bold] name[self].root_frame assign[=] call[name[Frame], parameter[name[self].root]] if compare[name[self].action_space equal[==] constant[continuous]] begin[:] variable[desc] assign[=] constant[Running continuous-action mission. Use the mouse to turn, WASD to move.] call[call[name[Label], parameter[name[self].root_frame]].pack, parameter[]] name[self].canvas assign[=] call[name[Canvas], parameter[name[self].root_frame]] call[name[self].canvas.bind, parameter[constant[<Motion>], name[self].onMouseMoveInCanvas]] call[name[self].canvas.bind, parameter[constant[<Button-1>], name[self].onLeftMouseDownInCanvas]] call[name[self].canvas.bind, parameter[constant[<ButtonRelease-1>], name[self].onLeftMouseUpInCanvas]] if compare[name[sys].platform equal[==] constant[darwin]] begin[:] variable[right_mouse_button] assign[=] constant[2] call[name[self].canvas.bind, parameter[binary_operation[binary_operation[constant[<Button-] + name[right_mouse_button]] + constant[>]], name[self].onRightMouseDownInCanvas]] call[name[self].canvas.bind, parameter[binary_operation[binary_operation[constant[<ButtonRelease-] + name[right_mouse_button]] + constant[>]], name[self].onRightMouseUpInCanvas]] call[name[self].canvas.bind, parameter[constant[<KeyPress>], name[self].onKeyPressInCanvas]] call[name[self].canvas.bind, parameter[constant[<KeyRelease>], name[self].onKeyReleaseInCanvas]] call[name[self].canvas.pack, parameter[]] name[self].entry_frame assign[=] call[name[Frame], parameter[name[self].root_frame]] call[call[name[Label], parameter[name[self].entry_frame]].pack, parameter[]] name[self].command_entry assign[=] call[name[Entry], parameter[name[self].entry_frame]] call[name[self].command_entry.bind, parameter[constant[<Key>], name[self].onKeyInCommandEntry]] call[name[self].command_entry.pack, 
parameter[]] call[call[name[Button], parameter[name[self].entry_frame]].pack, parameter[]] call[name[self].entry_frame.pack, parameter[]] name[self].observation assign[=] call[name[Label], parameter[name[self].root_frame]] call[name[self].observation.pack, parameter[]] name[self].reward assign[=] call[name[Label], parameter[name[self].root_frame]] call[name[self].reward.pack, parameter[]] call[name[self].root_frame.pack, parameter[]] name[self].mouse_event assign[=] constant[None]
keyword[def] identifier[createGUI] ( identifier[self] ): literal[string] identifier[our_font] = literal[string] identifier[small_font] = literal[string] identifier[self] . identifier[root_frame] = identifier[Frame] ( identifier[self] . identifier[root] ) keyword[if] identifier[self] . identifier[action_space] == literal[string] : identifier[desc] = literal[string] keyword[else] : identifier[desc] = literal[string] identifier[Label] ( identifier[self] . identifier[root_frame] , identifier[text] = identifier[desc] , identifier[font] = identifier[our_font] , identifier[wraplength] = literal[int] ). identifier[pack] ( identifier[padx] = literal[int] , identifier[pady] = literal[int] ) identifier[self] . identifier[canvas] = identifier[Canvas] ( identifier[self] . identifier[root_frame] , identifier[borderwidth] = literal[int] , identifier[highlightthickness] = literal[int] , identifier[width] = literal[int] , identifier[height] = literal[int] , identifier[bg] = literal[string] ) identifier[self] . identifier[canvas] . identifier[bind] ( literal[string] , identifier[self] . identifier[onMouseMoveInCanvas] ) identifier[self] . identifier[canvas] . identifier[bind] ( literal[string] , identifier[self] . identifier[onLeftMouseDownInCanvas] ) identifier[self] . identifier[canvas] . identifier[bind] ( literal[string] , identifier[self] . identifier[onLeftMouseUpInCanvas] ) keyword[if] identifier[sys] . identifier[platform] == literal[string] : identifier[right_mouse_button] = literal[string] keyword[else] : identifier[right_mouse_button] = literal[string] identifier[self] . identifier[canvas] . identifier[bind] ( literal[string] + identifier[right_mouse_button] + literal[string] , identifier[self] . identifier[onRightMouseDownInCanvas] ) identifier[self] . identifier[canvas] . identifier[bind] ( literal[string] + identifier[right_mouse_button] + literal[string] , identifier[self] . identifier[onRightMouseUpInCanvas] ) identifier[self] . identifier[canvas] . 
identifier[bind] ( literal[string] , identifier[self] . identifier[onKeyPressInCanvas] ) identifier[self] . identifier[canvas] . identifier[bind] ( literal[string] , identifier[self] . identifier[onKeyReleaseInCanvas] ) identifier[self] . identifier[canvas] . identifier[pack] ( identifier[padx] = literal[int] , identifier[pady] = literal[int] ) identifier[self] . identifier[entry_frame] = identifier[Frame] ( identifier[self] . identifier[root_frame] ) identifier[Label] ( identifier[self] . identifier[entry_frame] , identifier[text] = literal[string] , identifier[font] = identifier[small_font] ). identifier[pack] ( identifier[padx] = literal[int] , identifier[pady] = literal[int] , identifier[side] = identifier[LEFT] ) identifier[self] . identifier[command_entry] = identifier[Entry] ( identifier[self] . identifier[entry_frame] , identifier[font] = identifier[small_font] ) identifier[self] . identifier[command_entry] . identifier[bind] ( literal[string] , identifier[self] . identifier[onKeyInCommandEntry] ) identifier[self] . identifier[command_entry] . identifier[pack] ( identifier[padx] = literal[int] , identifier[pady] = literal[int] , identifier[side] = identifier[LEFT] ) identifier[Button] ( identifier[self] . identifier[entry_frame] , identifier[text] = literal[string] , identifier[command] = identifier[self] . identifier[onSendCommand] , identifier[font] = identifier[small_font] ). identifier[pack] ( identifier[padx] = literal[int] , identifier[pady] = literal[int] , identifier[side] = identifier[LEFT] ) identifier[self] . identifier[entry_frame] . identifier[pack] () identifier[self] . identifier[observation] = identifier[Label] ( identifier[self] . identifier[root_frame] , identifier[text] = literal[string] , identifier[wraplength] = literal[int] , identifier[font] = identifier[small_font] ) identifier[self] . identifier[observation] . identifier[pack] () identifier[self] . identifier[reward] = identifier[Label] ( identifier[self] . 
identifier[root_frame] , identifier[text] = literal[string] , identifier[wraplength] = literal[int] , identifier[font] = identifier[small_font] ) identifier[self] . identifier[reward] . identifier[pack] () identifier[self] . identifier[root_frame] . identifier[pack] () identifier[self] . identifier[mouse_event] = identifier[self] . identifier[prev_mouse_event] = keyword[None]
def createGUI(self): """Create the graphical user interface.""" our_font = 'Helvetica 16 bold' small_font = 'Helvetica 9 bold' self.root_frame = Frame(self.root) if self.action_space == 'continuous': desc = 'Running continuous-action mission.\nUse the mouse to turn, WASD to move.' # depends on [control=['if'], data=[]] else: desc = 'Running discrete-action mission.\nUse the arrow keys to turn and move.' Label(self.root_frame, text=desc, font=our_font, wraplength=640).pack(padx=5, pady=5) self.canvas = Canvas(self.root_frame, borderwidth=0, highlightthickness=0, width=640, height=480, bg='gray') self.canvas.bind('<Motion>', self.onMouseMoveInCanvas) self.canvas.bind('<Button-1>', self.onLeftMouseDownInCanvas) self.canvas.bind('<ButtonRelease-1>', self.onLeftMouseUpInCanvas) if sys.platform == 'darwin': right_mouse_button = '2' # on MacOSX, the right button is 'Button-2' # depends on [control=['if'], data=[]] else: right_mouse_button = '3' # on Windows and Linux the right button is 'Button-3' self.canvas.bind('<Button-' + right_mouse_button + '>', self.onRightMouseDownInCanvas) self.canvas.bind('<ButtonRelease-' + right_mouse_button + '>', self.onRightMouseUpInCanvas) self.canvas.bind('<KeyPress>', self.onKeyPressInCanvas) self.canvas.bind('<KeyRelease>', self.onKeyReleaseInCanvas) self.canvas.pack(padx=5, pady=5) self.entry_frame = Frame(self.root_frame) Label(self.entry_frame, text="Type '/' to enter command:", font=small_font).pack(padx=5, pady=5, side=LEFT) self.command_entry = Entry(self.entry_frame, font=small_font) self.command_entry.bind('<Key>', self.onKeyInCommandEntry) self.command_entry.pack(padx=5, pady=5, side=LEFT) Button(self.entry_frame, text='Send', command=self.onSendCommand, font=small_font).pack(padx=5, pady=5, side=LEFT) self.entry_frame.pack() self.observation = Label(self.root_frame, text='observations will appear here', wraplength=640, font=small_font) self.observation.pack() self.reward = Label(self.root_frame, text='rewards will appear 
here', wraplength=640, font=small_font) self.reward.pack() self.root_frame.pack() self.mouse_event = self.prev_mouse_event = None
def _repr_png_(self):
    """IPython rich-display hook: return the image encoded as PNG bytes."""
    # Leptonica allocates the output buffer and reports its size through
    # these two out-parameters.
    png_data = ffi.new('l_uint8 **')
    png_size = ffi.new('size_t *')
    result = lept.pixWriteMemPng(png_data, png_size, self._cdata, 0)
    if result != 0:
        raise LeptonicaIOError("pixWriteMemPng")
    as_chars = ffi.cast('char *', png_data[0])
    # Slice the buffer to copy it out into an owned Python bytes object.
    return ffi.buffer(as_chars, png_size[0])[:]
def function[_repr_png_, parameter[self]]: constant[iPython display hook returns png version of image ] variable[data] assign[=] call[name[ffi].new, parameter[constant[l_uint8 **]]] variable[size] assign[=] call[name[ffi].new, parameter[constant[size_t *]]] variable[err] assign[=] call[name[lept].pixWriteMemPng, parameter[name[data], name[size], name[self]._cdata, constant[0]]] if compare[name[err] not_equal[!=] constant[0]] begin[:] <ast.Raise object at 0x7da1b1c5a6e0> variable[char_data] assign[=] call[name[ffi].cast, parameter[constant[char *], call[name[data]][constant[0]]]] return[call[call[name[ffi].buffer, parameter[name[char_data], call[name[size]][constant[0]]]]][<ast.Slice object at 0x7da1b1bc2f50>]]
keyword[def] identifier[_repr_png_] ( identifier[self] ): literal[string] identifier[data] = identifier[ffi] . identifier[new] ( literal[string] ) identifier[size] = identifier[ffi] . identifier[new] ( literal[string] ) identifier[err] = identifier[lept] . identifier[pixWriteMemPng] ( identifier[data] , identifier[size] , identifier[self] . identifier[_cdata] , literal[int] ) keyword[if] identifier[err] != literal[int] : keyword[raise] identifier[LeptonicaIOError] ( literal[string] ) identifier[char_data] = identifier[ffi] . identifier[cast] ( literal[string] , identifier[data] [ literal[int] ]) keyword[return] identifier[ffi] . identifier[buffer] ( identifier[char_data] , identifier[size] [ literal[int] ])[:]
def _repr_png_(self): """iPython display hook returns png version of image """ data = ffi.new('l_uint8 **') size = ffi.new('size_t *') err = lept.pixWriteMemPng(data, size, self._cdata, 0) if err != 0: raise LeptonicaIOError('pixWriteMemPng') # depends on [control=['if'], data=[]] char_data = ffi.cast('char *', data[0]) return ffi.buffer(char_data, size[0])[:]
def marketNewsDF(count=10, token='', version=''): '''News about market https://iexcloud.io/docs/api/#news Continuous Args: count (int): limit number of results token (string); Access token version (string); API version Returns: DataFrame: result ''' df = pd.DataFrame(marketNews(count, token, version)) _toDatetime(df) _reindex(df, 'datetime') return df
def function[marketNewsDF, parameter[count, token, version]]: constant[News about market https://iexcloud.io/docs/api/#news Continuous Args: count (int): limit number of results token (string); Access token version (string); API version Returns: DataFrame: result ] variable[df] assign[=] call[name[pd].DataFrame, parameter[call[name[marketNews], parameter[name[count], name[token], name[version]]]]] call[name[_toDatetime], parameter[name[df]]] call[name[_reindex], parameter[name[df], constant[datetime]]] return[name[df]]
keyword[def] identifier[marketNewsDF] ( identifier[count] = literal[int] , identifier[token] = literal[string] , identifier[version] = literal[string] ): literal[string] identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[marketNews] ( identifier[count] , identifier[token] , identifier[version] )) identifier[_toDatetime] ( identifier[df] ) identifier[_reindex] ( identifier[df] , literal[string] ) keyword[return] identifier[df]
def marketNewsDF(count=10, token='', version=''): """News about market https://iexcloud.io/docs/api/#news Continuous Args: count (int): limit number of results token (string); Access token version (string); API version Returns: DataFrame: result """ df = pd.DataFrame(marketNews(count, token, version)) _toDatetime(df) _reindex(df, 'datetime') return df
def get_all_synIdx(self): """ Auxilliary function to set up class attributes containing synapse locations given as LFPy.Cell compartment indices This function takes no inputs. Parameters ---------- None Returns ------- synIdx : dict `output[cellindex][populationindex][layerindex]` numpy.ndarray of compartment indices. See also -------- Population.get_synidx, Population.fetchSynIdxCell """ tic = time() #containers for synapse idxs existing on this rank synIdx = {} #ok then, we will draw random numbers across ranks, which have to #be unique per cell. Now, we simply record the random state, #change the seed per cell, and put the original state back below. randomstate = np.random.get_state() for cellindex in self.RANK_CELLINDICES: #set the random seed on for each cellindex np.random.seed(self.POPULATIONSEED + cellindex) #find synapse locations for cell in parallel synIdx[cellindex] = self.get_synidx(cellindex) #reset the random number generator np.random.set_state(randomstate) if RANK == 0: print('found synapse locations in %.2f seconds' % (time()-tic)) #print the number of synapses per layer from which presynapse population if self.verbose: for cellindex in self.RANK_CELLINDICES: for i, synidx in enumerate(synIdx[cellindex]): print('to:\t%s\tcell:\t%i\tfrom:\t%s:' % (self.y, cellindex, self.X[i]),) idxcount = 0 for idx in synidx: idxcount += idx.size print('\t%i' % idx.size,) print('\ttotal %i' % idxcount) return synIdx
def function[get_all_synIdx, parameter[self]]: constant[ Auxilliary function to set up class attributes containing synapse locations given as LFPy.Cell compartment indices This function takes no inputs. Parameters ---------- None Returns ------- synIdx : dict `output[cellindex][populationindex][layerindex]` numpy.ndarray of compartment indices. See also -------- Population.get_synidx, Population.fetchSynIdxCell ] variable[tic] assign[=] call[name[time], parameter[]] variable[synIdx] assign[=] dictionary[[], []] variable[randomstate] assign[=] call[name[np].random.get_state, parameter[]] for taget[name[cellindex]] in starred[name[self].RANK_CELLINDICES] begin[:] call[name[np].random.seed, parameter[binary_operation[name[self].POPULATIONSEED + name[cellindex]]]] call[name[synIdx]][name[cellindex]] assign[=] call[name[self].get_synidx, parameter[name[cellindex]]] call[name[np].random.set_state, parameter[name[randomstate]]] if compare[name[RANK] equal[==] constant[0]] begin[:] call[name[print], parameter[binary_operation[constant[found synapse locations in %.2f seconds] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[time], parameter[]] - name[tic]]]]] if name[self].verbose begin[:] for taget[name[cellindex]] in starred[name[self].RANK_CELLINDICES] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b0baba60>, <ast.Name object at 0x7da1b0baab90>]]] in starred[call[name[enumerate], parameter[call[name[synIdx]][name[cellindex]]]]] begin[:] call[name[print], parameter[binary_operation[constant[to: %s cell: %i from: %s:] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0ba8f70>, <ast.Name object at 0x7da1b0bab2e0>, <ast.Subscript object at 0x7da1b0baa740>]]]]] variable[idxcount] assign[=] constant[0] for taget[name[idx]] in starred[name[synidx]] begin[:] <ast.AugAssign object at 0x7da1b0baaad0> call[name[print], parameter[binary_operation[constant[ %i] <ast.Mod object at 0x7da2590d6920> name[idx].size]]] call[name[print], 
parameter[binary_operation[constant[ total %i] <ast.Mod object at 0x7da2590d6920> name[idxcount]]]] return[name[synIdx]]
keyword[def] identifier[get_all_synIdx] ( identifier[self] ): literal[string] identifier[tic] = identifier[time] () identifier[synIdx] ={} identifier[randomstate] = identifier[np] . identifier[random] . identifier[get_state] () keyword[for] identifier[cellindex] keyword[in] identifier[self] . identifier[RANK_CELLINDICES] : identifier[np] . identifier[random] . identifier[seed] ( identifier[self] . identifier[POPULATIONSEED] + identifier[cellindex] ) identifier[synIdx] [ identifier[cellindex] ]= identifier[self] . identifier[get_synidx] ( identifier[cellindex] ) identifier[np] . identifier[random] . identifier[set_state] ( identifier[randomstate] ) keyword[if] identifier[RANK] == literal[int] : identifier[print] ( literal[string] %( identifier[time] ()- identifier[tic] )) keyword[if] identifier[self] . identifier[verbose] : keyword[for] identifier[cellindex] keyword[in] identifier[self] . identifier[RANK_CELLINDICES] : keyword[for] identifier[i] , identifier[synidx] keyword[in] identifier[enumerate] ( identifier[synIdx] [ identifier[cellindex] ]): identifier[print] ( literal[string] %( identifier[self] . identifier[y] , identifier[cellindex] , identifier[self] . identifier[X] [ identifier[i] ]),) identifier[idxcount] = literal[int] keyword[for] identifier[idx] keyword[in] identifier[synidx] : identifier[idxcount] += identifier[idx] . identifier[size] identifier[print] ( literal[string] % identifier[idx] . identifier[size] ,) identifier[print] ( literal[string] % identifier[idxcount] ) keyword[return] identifier[synIdx]
def get_all_synIdx(self): """ Auxilliary function to set up class attributes containing synapse locations given as LFPy.Cell compartment indices This function takes no inputs. Parameters ---------- None Returns ------- synIdx : dict `output[cellindex][populationindex][layerindex]` numpy.ndarray of compartment indices. See also -------- Population.get_synidx, Population.fetchSynIdxCell """ tic = time() #containers for synapse idxs existing on this rank synIdx = {} #ok then, we will draw random numbers across ranks, which have to #be unique per cell. Now, we simply record the random state, #change the seed per cell, and put the original state back below. randomstate = np.random.get_state() for cellindex in self.RANK_CELLINDICES: #set the random seed on for each cellindex np.random.seed(self.POPULATIONSEED + cellindex) #find synapse locations for cell in parallel synIdx[cellindex] = self.get_synidx(cellindex) # depends on [control=['for'], data=['cellindex']] #reset the random number generator np.random.set_state(randomstate) if RANK == 0: print('found synapse locations in %.2f seconds' % (time() - tic)) # depends on [control=['if'], data=[]] #print the number of synapses per layer from which presynapse population if self.verbose: for cellindex in self.RANK_CELLINDICES: for (i, synidx) in enumerate(synIdx[cellindex]): print('to:\t%s\tcell:\t%i\tfrom:\t%s:' % (self.y, cellindex, self.X[i])) idxcount = 0 for idx in synidx: idxcount += idx.size print('\t%i' % idx.size) # depends on [control=['for'], data=['idx']] print('\ttotal %i' % idxcount) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['cellindex']] # depends on [control=['if'], data=[]] return synIdx
def resolve(self, geoid, id_only=False): ''' Resolve a GeoZone given a GeoID. The start date is resolved from the given GeoID, ie. it find there is a zone valid a the geoid validity, resolve the `latest` alias or use `latest` when no validity is given. If `id_only` is True, the result will be the resolved GeoID instead of the resolved zone. ''' level, code, validity = geoids.parse(geoid) qs = self(level=level, code=code) if id_only: qs = qs.only('id') if validity == 'latest': result = qs.latest() else: result = qs.valid_at(validity).first() return result.id if id_only and result else result
def function[resolve, parameter[self, geoid, id_only]]: constant[ Resolve a GeoZone given a GeoID. The start date is resolved from the given GeoID, ie. it find there is a zone valid a the geoid validity, resolve the `latest` alias or use `latest` when no validity is given. If `id_only` is True, the result will be the resolved GeoID instead of the resolved zone. ] <ast.Tuple object at 0x7da1b1120ca0> assign[=] call[name[geoids].parse, parameter[name[geoid]]] variable[qs] assign[=] call[name[self], parameter[]] if name[id_only] begin[:] variable[qs] assign[=] call[name[qs].only, parameter[constant[id]]] if compare[name[validity] equal[==] constant[latest]] begin[:] variable[result] assign[=] call[name[qs].latest, parameter[]] return[<ast.IfExp object at 0x7da1b1120550>]
keyword[def] identifier[resolve] ( identifier[self] , identifier[geoid] , identifier[id_only] = keyword[False] ): literal[string] identifier[level] , identifier[code] , identifier[validity] = identifier[geoids] . identifier[parse] ( identifier[geoid] ) identifier[qs] = identifier[self] ( identifier[level] = identifier[level] , identifier[code] = identifier[code] ) keyword[if] identifier[id_only] : identifier[qs] = identifier[qs] . identifier[only] ( literal[string] ) keyword[if] identifier[validity] == literal[string] : identifier[result] = identifier[qs] . identifier[latest] () keyword[else] : identifier[result] = identifier[qs] . identifier[valid_at] ( identifier[validity] ). identifier[first] () keyword[return] identifier[result] . identifier[id] keyword[if] identifier[id_only] keyword[and] identifier[result] keyword[else] identifier[result]
def resolve(self, geoid, id_only=False): """ Resolve a GeoZone given a GeoID. The start date is resolved from the given GeoID, ie. it find there is a zone valid a the geoid validity, resolve the `latest` alias or use `latest` when no validity is given. If `id_only` is True, the result will be the resolved GeoID instead of the resolved zone. """ (level, code, validity) = geoids.parse(geoid) qs = self(level=level, code=code) if id_only: qs = qs.only('id') # depends on [control=['if'], data=[]] if validity == 'latest': result = qs.latest() # depends on [control=['if'], data=[]] else: result = qs.valid_at(validity).first() return result.id if id_only and result else result
def post_events_service(request): """Respond to inbound webhook JSON HTTP POST from Webex Teams.""" # Get the POST data sent from Webex Teams json_data = request.json log.info("\n") log.info("WEBHOOK POST RECEIVED:") log.info(json_data) log.info("\n") # Create a Webhook object from the JSON data webhook_obj = Webhook(json_data) # Get the room details room = api.rooms.get(webhook_obj.data.roomId) # Get the message details message = api.messages.get(webhook_obj.data.id) # Get the sender's details person = api.people.get(message.personId) log.info("NEW MESSAGE IN ROOM '{}'".format(room.title)) log.info("FROM '{}'".format(person.displayName)) log.info("MESSAGE '{}'\n".format(message.text)) # This is a VERY IMPORTANT loop prevention control step. # If you respond to all messages... You will respond to the messages # that the bot posts and thereby create a loop condition. me = api.people.me() if message.personId == me.id: # Message was sent by me (bot); do not respond. return {'Message': 'OK'} else: # Message was sent by someone else; parse message and respond. if "/CAT" in message.text: log.info("FOUND '/CAT'") # Get a cat fact catfact = get_catfact() log.info("SENDING CAT FACT'{}'".format(catfact)) # Post the fact to the room where the request was received api.messages.create(room.id, text=catfact) return {'Message': 'OK'}
def function[post_events_service, parameter[request]]: constant[Respond to inbound webhook JSON HTTP POST from Webex Teams.] variable[json_data] assign[=] name[request].json call[name[log].info, parameter[constant[ ]]] call[name[log].info, parameter[constant[WEBHOOK POST RECEIVED:]]] call[name[log].info, parameter[name[json_data]]] call[name[log].info, parameter[constant[ ]]] variable[webhook_obj] assign[=] call[name[Webhook], parameter[name[json_data]]] variable[room] assign[=] call[name[api].rooms.get, parameter[name[webhook_obj].data.roomId]] variable[message] assign[=] call[name[api].messages.get, parameter[name[webhook_obj].data.id]] variable[person] assign[=] call[name[api].people.get, parameter[name[message].personId]] call[name[log].info, parameter[call[constant[NEW MESSAGE IN ROOM '{}'].format, parameter[name[room].title]]]] call[name[log].info, parameter[call[constant[FROM '{}'].format, parameter[name[person].displayName]]]] call[name[log].info, parameter[call[constant[MESSAGE '{}' ].format, parameter[name[message].text]]]] variable[me] assign[=] call[name[api].people.me, parameter[]] if compare[name[message].personId equal[==] name[me].id] begin[:] return[dictionary[[<ast.Constant object at 0x7da2041d9000>], [<ast.Constant object at 0x7da2041db280>]]]
keyword[def] identifier[post_events_service] ( identifier[request] ): literal[string] identifier[json_data] = identifier[request] . identifier[json] identifier[log] . identifier[info] ( literal[string] ) identifier[log] . identifier[info] ( literal[string] ) identifier[log] . identifier[info] ( identifier[json_data] ) identifier[log] . identifier[info] ( literal[string] ) identifier[webhook_obj] = identifier[Webhook] ( identifier[json_data] ) identifier[room] = identifier[api] . identifier[rooms] . identifier[get] ( identifier[webhook_obj] . identifier[data] . identifier[roomId] ) identifier[message] = identifier[api] . identifier[messages] . identifier[get] ( identifier[webhook_obj] . identifier[data] . identifier[id] ) identifier[person] = identifier[api] . identifier[people] . identifier[get] ( identifier[message] . identifier[personId] ) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[room] . identifier[title] )) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[person] . identifier[displayName] )) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[message] . identifier[text] )) identifier[me] = identifier[api] . identifier[people] . identifier[me] () keyword[if] identifier[message] . identifier[personId] == identifier[me] . identifier[id] : keyword[return] { literal[string] : literal[string] } keyword[else] : keyword[if] literal[string] keyword[in] identifier[message] . identifier[text] : identifier[log] . identifier[info] ( literal[string] ) identifier[catfact] = identifier[get_catfact] () identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[catfact] )) identifier[api] . identifier[messages] . identifier[create] ( identifier[room] . identifier[id] , identifier[text] = identifier[catfact] ) keyword[return] { literal[string] : literal[string] }
def post_events_service(request): """Respond to inbound webhook JSON HTTP POST from Webex Teams.""" # Get the POST data sent from Webex Teams json_data = request.json log.info('\n') log.info('WEBHOOK POST RECEIVED:') log.info(json_data) log.info('\n') # Create a Webhook object from the JSON data webhook_obj = Webhook(json_data) # Get the room details room = api.rooms.get(webhook_obj.data.roomId) # Get the message details message = api.messages.get(webhook_obj.data.id) # Get the sender's details person = api.people.get(message.personId) log.info("NEW MESSAGE IN ROOM '{}'".format(room.title)) log.info("FROM '{}'".format(person.displayName)) log.info("MESSAGE '{}'\n".format(message.text)) # This is a VERY IMPORTANT loop prevention control step. # If you respond to all messages... You will respond to the messages # that the bot posts and thereby create a loop condition. me = api.people.me() if message.personId == me.id: # Message was sent by me (bot); do not respond. return {'Message': 'OK'} # depends on [control=['if'], data=[]] else: # Message was sent by someone else; parse message and respond. if '/CAT' in message.text: log.info("FOUND '/CAT'") # Get a cat fact catfact = get_catfact() log.info("SENDING CAT FACT'{}'".format(catfact)) # Post the fact to the room where the request was received api.messages.create(room.id, text=catfact) # depends on [control=['if'], data=[]] return {'Message': 'OK'}
def _build_editable_options(req): """ This method generates a dictionary of the query string parameters contained in a given editable URL. """ regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)") matched = regexp.findall(req) if matched: ret = dict() for option in matched: (name, value) = option if name in ret: raise Exception("%s option already defined" % name) ret[name] = value return ret return None
def function[_build_editable_options, parameter[req]]: constant[ This method generates a dictionary of the query string parameters contained in a given editable URL. ] variable[regexp] assign[=] call[name[re].compile, parameter[constant[[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)]]] variable[matched] assign[=] call[name[regexp].findall, parameter[name[req]]] if name[matched] begin[:] variable[ret] assign[=] call[name[dict], parameter[]] for taget[name[option]] in starred[name[matched]] begin[:] <ast.Tuple object at 0x7da204621480> assign[=] name[option] if compare[name[name] in name[ret]] begin[:] <ast.Raise object at 0x7da204622680> call[name[ret]][name[name]] assign[=] name[value] return[name[ret]] return[constant[None]]
keyword[def] identifier[_build_editable_options] ( identifier[req] ): literal[string] identifier[regexp] = identifier[re] . identifier[compile] ( literal[string] ) identifier[matched] = identifier[regexp] . identifier[findall] ( identifier[req] ) keyword[if] identifier[matched] : identifier[ret] = identifier[dict] () keyword[for] identifier[option] keyword[in] identifier[matched] : ( identifier[name] , identifier[value] )= identifier[option] keyword[if] identifier[name] keyword[in] identifier[ret] : keyword[raise] identifier[Exception] ( literal[string] % identifier[name] ) identifier[ret] [ identifier[name] ]= identifier[value] keyword[return] identifier[ret] keyword[return] keyword[None]
def _build_editable_options(req): """ This method generates a dictionary of the query string parameters contained in a given editable URL. """ regexp = re.compile('[\\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)') matched = regexp.findall(req) if matched: ret = dict() for option in matched: (name, value) = option if name in ret: raise Exception('%s option already defined' % name) # depends on [control=['if'], data=['name']] ret[name] = value # depends on [control=['for'], data=['option']] return ret # depends on [control=['if'], data=[]] return None
def _deserialize(self, value, environment=None): """A collection traverses over something to deserialize its value. :param value: a ``dict`` wich contains mapped values """ if not isinstance(value, MappingABC): raise exc.Invalid(self) # traverse items and match against validated struct mapping = self._create_deserialize_type(value, environment) invalids = [] for name, item in self: # deserialize each item try: mapping[name] = item.deserialize( value.get(name, values.Undefined), environment ) except exc.IgnoreValue: # just ignore this value pass except exc.Invalid as ex: # append this to the list of invalids, so we can return a complete overview of errors invalids.append(ex) if invalids: # on invalids this item is also ``Invalid`` raise exc.InvalidChildren(self, invalids) return mapping
def function[_deserialize, parameter[self, value, environment]]: constant[A collection traverses over something to deserialize its value. :param value: a ``dict`` wich contains mapped values ] if <ast.UnaryOp object at 0x7da1b13523e0> begin[:] <ast.Raise object at 0x7da1b1351060> variable[mapping] assign[=] call[name[self]._create_deserialize_type, parameter[name[value], name[environment]]] variable[invalids] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b1351d50>, <ast.Name object at 0x7da1b13537c0>]]] in starred[name[self]] begin[:] <ast.Try object at 0x7da1b13519f0> if name[invalids] begin[:] <ast.Raise object at 0x7da1b1351900> return[name[mapping]]
keyword[def] identifier[_deserialize] ( identifier[self] , identifier[value] , identifier[environment] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[MappingABC] ): keyword[raise] identifier[exc] . identifier[Invalid] ( identifier[self] ) identifier[mapping] = identifier[self] . identifier[_create_deserialize_type] ( identifier[value] , identifier[environment] ) identifier[invalids] =[] keyword[for] identifier[name] , identifier[item] keyword[in] identifier[self] : keyword[try] : identifier[mapping] [ identifier[name] ]= identifier[item] . identifier[deserialize] ( identifier[value] . identifier[get] ( identifier[name] , identifier[values] . identifier[Undefined] ), identifier[environment] ) keyword[except] identifier[exc] . identifier[IgnoreValue] : keyword[pass] keyword[except] identifier[exc] . identifier[Invalid] keyword[as] identifier[ex] : identifier[invalids] . identifier[append] ( identifier[ex] ) keyword[if] identifier[invalids] : keyword[raise] identifier[exc] . identifier[InvalidChildren] ( identifier[self] , identifier[invalids] ) keyword[return] identifier[mapping]
def _deserialize(self, value, environment=None): """A collection traverses over something to deserialize its value. :param value: a ``dict`` wich contains mapped values """ if not isinstance(value, MappingABC): raise exc.Invalid(self) # depends on [control=['if'], data=[]] # traverse items and match against validated struct mapping = self._create_deserialize_type(value, environment) invalids = [] for (name, item) in self: # deserialize each item try: mapping[name] = item.deserialize(value.get(name, values.Undefined), environment) # depends on [control=['try'], data=[]] except exc.IgnoreValue: # just ignore this value pass # depends on [control=['except'], data=[]] except exc.Invalid as ex: # append this to the list of invalids, so we can return a complete overview of errors invalids.append(ex) # depends on [control=['except'], data=['ex']] # depends on [control=['for'], data=[]] if invalids: # on invalids this item is also ``Invalid`` raise exc.InvalidChildren(self, invalids) # depends on [control=['if'], data=[]] return mapping
def workflow_all_aggregate(graph: BELGraph, key: Optional[str] = None, tag: Optional[str] = None, default_score: Optional[float] = None, runs: Optional[int] = None, aggregator: Optional[Callable[[Iterable[float]], float]] = None, ): """Run the heat diffusion workflow to get average score for every possible candidate mechanism. 1. Get all biological processes 2. Get candidate mechanism induced two level back from each biological process 3. Heat diffusion workflow on each candidate mechanism for multiple runs 4. Report average scores for each candidate mechanism :param graph: A BEL graph :param key: The key in the node data dictionary representing the experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`. :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score' :param default_score: The initial score for all nodes. This number can go up or down. :param runs: The number of times to run the heat diffusion workflow. Defaults to 100. :param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`. Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max` :return: A dictionary of {node: upstream causal subgraph} """ results = {} bioprocess_nodes = list(get_nodes_by_function(graph, BIOPROCESS)) for bioprocess_node in tqdm(bioprocess_nodes): subgraph = generate_mechanism(graph, bioprocess_node, key=key) try: results[bioprocess_node] = workflow_aggregate( graph=subgraph, node=bioprocess_node, key=key, tag=tag, default_score=default_score, runs=runs, aggregator=aggregator ) except Exception: log.exception('could not run on %', bioprocess_node) return results
def function[workflow_all_aggregate, parameter[graph, key, tag, default_score, runs, aggregator]]: constant[Run the heat diffusion workflow to get average score for every possible candidate mechanism. 1. Get all biological processes 2. Get candidate mechanism induced two level back from each biological process 3. Heat diffusion workflow on each candidate mechanism for multiple runs 4. Report average scores for each candidate mechanism :param graph: A BEL graph :param key: The key in the node data dictionary representing the experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`. :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score' :param default_score: The initial score for all nodes. This number can go up or down. :param runs: The number of times to run the heat diffusion workflow. Defaults to 100. :param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`. Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max` :return: A dictionary of {node: upstream causal subgraph} ] variable[results] assign[=] dictionary[[], []] variable[bioprocess_nodes] assign[=] call[name[list], parameter[call[name[get_nodes_by_function], parameter[name[graph], name[BIOPROCESS]]]]] for taget[name[bioprocess_node]] in starred[call[name[tqdm], parameter[name[bioprocess_nodes]]]] begin[:] variable[subgraph] assign[=] call[name[generate_mechanism], parameter[name[graph], name[bioprocess_node]]] <ast.Try object at 0x7da20c7cb640> return[name[results]]
keyword[def] identifier[workflow_all_aggregate] ( identifier[graph] : identifier[BELGraph] , identifier[key] : identifier[Optional] [ identifier[str] ]= keyword[None] , identifier[tag] : identifier[Optional] [ identifier[str] ]= keyword[None] , identifier[default_score] : identifier[Optional] [ identifier[float] ]= keyword[None] , identifier[runs] : identifier[Optional] [ identifier[int] ]= keyword[None] , identifier[aggregator] : identifier[Optional] [ identifier[Callable] [[ identifier[Iterable] [ identifier[float] ]], identifier[float] ]]= keyword[None] , ): literal[string] identifier[results] ={} identifier[bioprocess_nodes] = identifier[list] ( identifier[get_nodes_by_function] ( identifier[graph] , identifier[BIOPROCESS] )) keyword[for] identifier[bioprocess_node] keyword[in] identifier[tqdm] ( identifier[bioprocess_nodes] ): identifier[subgraph] = identifier[generate_mechanism] ( identifier[graph] , identifier[bioprocess_node] , identifier[key] = identifier[key] ) keyword[try] : identifier[results] [ identifier[bioprocess_node] ]= identifier[workflow_aggregate] ( identifier[graph] = identifier[subgraph] , identifier[node] = identifier[bioprocess_node] , identifier[key] = identifier[key] , identifier[tag] = identifier[tag] , identifier[default_score] = identifier[default_score] , identifier[runs] = identifier[runs] , identifier[aggregator] = identifier[aggregator] ) keyword[except] identifier[Exception] : identifier[log] . identifier[exception] ( literal[string] , identifier[bioprocess_node] ) keyword[return] identifier[results]
def workflow_all_aggregate(graph: BELGraph, key: Optional[str]=None, tag: Optional[str]=None, default_score: Optional[float]=None, runs: Optional[int]=None, aggregator: Optional[Callable[[Iterable[float]], float]]=None): """Run the heat diffusion workflow to get average score for every possible candidate mechanism. 1. Get all biological processes 2. Get candidate mechanism induced two level back from each biological process 3. Heat diffusion workflow on each candidate mechanism for multiple runs 4. Report average scores for each candidate mechanism :param graph: A BEL graph :param key: The key in the node data dictionary representing the experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`. :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score' :param default_score: The initial score for all nodes. This number can go up or down. :param runs: The number of times to run the heat diffusion workflow. Defaults to 100. :param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`. Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max` :return: A dictionary of {node: upstream causal subgraph} """ results = {} bioprocess_nodes = list(get_nodes_by_function(graph, BIOPROCESS)) for bioprocess_node in tqdm(bioprocess_nodes): subgraph = generate_mechanism(graph, bioprocess_node, key=key) try: results[bioprocess_node] = workflow_aggregate(graph=subgraph, node=bioprocess_node, key=key, tag=tag, default_score=default_score, runs=runs, aggregator=aggregator) # depends on [control=['try'], data=[]] except Exception: log.exception('could not run on %', bioprocess_node) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['bioprocess_node']] return results
def from_preset(preset): """ Initialise a CutOffDictNN according to a preset set of cut-offs. Args: preset (str): A preset name. The list of supported presets are: - "vesta_2019": The distance cut-offs used by the VESTA visualisation program. Returns: A CutOffDictNN using the preset cut-off dictionary. """ if preset == 'vesta_2019': cut_offs = loadfn(os.path.join(_directory, 'vesta_cutoffs.yaml')) return CutOffDictNN(cut_off_dict=cut_offs) else: raise ValueError("Unrecognised preset: {}".format(preset))
def function[from_preset, parameter[preset]]: constant[ Initialise a CutOffDictNN according to a preset set of cut-offs. Args: preset (str): A preset name. The list of supported presets are: - "vesta_2019": The distance cut-offs used by the VESTA visualisation program. Returns: A CutOffDictNN using the preset cut-off dictionary. ] if compare[name[preset] equal[==] constant[vesta_2019]] begin[:] variable[cut_offs] assign[=] call[name[loadfn], parameter[call[name[os].path.join, parameter[name[_directory], constant[vesta_cutoffs.yaml]]]]] return[call[name[CutOffDictNN], parameter[]]]
keyword[def] identifier[from_preset] ( identifier[preset] ): literal[string] keyword[if] identifier[preset] == literal[string] : identifier[cut_offs] = identifier[loadfn] ( identifier[os] . identifier[path] . identifier[join] ( identifier[_directory] , literal[string] )) keyword[return] identifier[CutOffDictNN] ( identifier[cut_off_dict] = identifier[cut_offs] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[preset] ))
def from_preset(preset): """ Initialise a CutOffDictNN according to a preset set of cut-offs. Args: preset (str): A preset name. The list of supported presets are: - "vesta_2019": The distance cut-offs used by the VESTA visualisation program. Returns: A CutOffDictNN using the preset cut-off dictionary. """ if preset == 'vesta_2019': cut_offs = loadfn(os.path.join(_directory, 'vesta_cutoffs.yaml')) return CutOffDictNN(cut_off_dict=cut_offs) # depends on [control=['if'], data=[]] else: raise ValueError('Unrecognised preset: {}'.format(preset))
def board_name(default):
    """Returns the boards name (if available)."""
    name = default
    try:
        import board
        # A board.py without a ``name`` attribute is not treated as an
        # error either; fall back to the supplied default.
        name = getattr(board, 'name', default)
    except ImportError:
        # No board.py file on the pyboard - not an error.
        pass
    except BaseException as err:
        # board.py exists but failed while executing; report and fall back.
        print('Error encountered executing board.py')
        import sys
        sys.print_exception(err)
    return repr(name)
def function[board_name, parameter[default]]: constant[Returns the boards name (if available).] <ast.Try object at 0x7da2049639a0> return[call[name[repr], parameter[name[name]]]]
keyword[def] identifier[board_name] ( identifier[default] ): literal[string] keyword[try] : keyword[import] identifier[board] keyword[try] : identifier[name] = identifier[board] . identifier[name] keyword[except] identifier[AttributeError] : identifier[name] = identifier[default] keyword[except] identifier[ImportError] : identifier[name] = identifier[default] keyword[except] identifier[BaseException] keyword[as] identifier[err] : identifier[print] ( literal[string] ) keyword[import] identifier[sys] identifier[sys] . identifier[print_exception] ( identifier[err] ) identifier[name] = identifier[default] keyword[return] identifier[repr] ( identifier[name] )
def board_name(default): """Returns the boards name (if available).""" try: import board try: name = board.name # depends on [control=['try'], data=[]] except AttributeError: # There was a board.py file, but it didn't have an name attribute # We also ignore this as an error name = default # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]] except ImportError: # No board.py file on the pyboard - not an error name = default # depends on [control=['except'], data=[]] except BaseException as err: print('Error encountered executing board.py') import sys sys.print_exception(err) name = default # depends on [control=['except'], data=['err']] return repr(name)
def assistance(self, column=None, value=None, **kwargs):
    """
    Provides the Catalog of Federal Domestic Assistance (CFDA) codes and names.
    """
    # Delegate to the generic resolver using the CFDA program table code.
    table = 'GIC_ASST_PGM'
    return self._resolve_call(table, column, value, **kwargs)
def function[assistance, parameter[self, column, value]]: constant[ Provides the Catalog of Federal Domestic Assistance (CFDA) codes and names. ] return[call[name[self]._resolve_call, parameter[constant[GIC_ASST_PGM], name[column], name[value]]]]
keyword[def] identifier[assistance] ( identifier[self] , identifier[column] = keyword[None] , identifier[value] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[_resolve_call] ( literal[string] , identifier[column] , identifier[value] ,** identifier[kwargs] )
def assistance(self, column=None, value=None, **kwargs): """ Provides the Catalog of Federal Domestic Assistance (CFDA) codes and names. """ return self._resolve_call('GIC_ASST_PGM', column, value, **kwargs)
def format(self, record):
    """Format the message into JSON expected by fluentd.

    :type record: :class:`~logging.LogRecord`
    :param record: the log record

    :rtype: str
    :returns: A JSON string formatted for GKE fluentd.
    """
    # Let the parent handler build the plain-text message first, then wrap
    # it in the Stackdriver-structured JSON envelope.
    formatted = super(ContainerEngineHandler, self).format(record)
    return format_stackdriver_json(record, formatted)
def function[format, parameter[self, record]]: constant[Format the message into JSON expected by fluentd. :type record: :class:`~logging.LogRecord` :param record: the log record :rtype: str :returns: A JSON string formatted for GKE fluentd. ] variable[message] assign[=] call[call[name[super], parameter[name[ContainerEngineHandler], name[self]]].format, parameter[name[record]]] return[call[name[format_stackdriver_json], parameter[name[record], name[message]]]]
keyword[def] identifier[format] ( identifier[self] , identifier[record] ): literal[string] identifier[message] = identifier[super] ( identifier[ContainerEngineHandler] , identifier[self] ). identifier[format] ( identifier[record] ) keyword[return] identifier[format_stackdriver_json] ( identifier[record] , identifier[message] )
def format(self, record): """Format the message into JSON expected by fluentd. :type record: :class:`~logging.LogRecord` :param record: the log record :rtype: str :returns: A JSON string formatted for GKE fluentd. """ message = super(ContainerEngineHandler, self).format(record) return format_stackdriver_json(record, message)
def extract_body(mail, types=None, field_key='copiousoutput'):
    """Returns a string view of a Message.

    If the `types` argument is set then any encoding types there will be used
    as the prefered encoding to extract. If `types` is None then
    :ref:`prefer_plaintext <prefer-plaintext>` will be consulted; if it is True
    then text/plain parts will be returned, if it is false then text/html will
    be returned if present or text/plain if there are no text/html parts.

    :param mail: the mail to use
    :type mail: :class:`email.Message`
    :param types: mime content types to use for body string
    :type types: list[str]
    :returns: The combined text of any parts to be used
    :rtype: str
    """
    # NOTE(review): ``field_key`` is never read in this function body —
    # presumably a leftover from an earlier mailcap-based implementation;
    # confirm with callers before removing it.
    preferred = 'text/plain' if settings.get(
        'prefer_plaintext') else 'text/html'
    has_preferred = False

    # see if the mail has our preferred type
    if types is None:
        has_preferred = list(typed_subpart_iterator(
            mail, *preferred.split('/')))

    body_parts = []
    for part in mail.walk():
        # skip non-leaf nodes in the mail tree
        if part.is_multipart():
            continue
        ctype = part.get_content_type()
        # an explicit ``types`` whitelist overrides the preferred-type logic
        if types is not None:
            if ctype not in types:
                continue
        cd = part.get('Content-Disposition', '')
        # parts already marked as attachments never contribute to the body
        if cd.startswith('attachment'):
            continue
        # if the mail has our preferred type, we only keep this type
        # note that if types != None, has_preferred always stays False
        if has_preferred and ctype != preferred:
            continue

        if ctype == 'text/plain':
            # decode any Content-Transfer-Encoding before sanitizing
            body_parts.append(string_sanitize(remove_cte(part, as_string=True)))
        else:
            rendered_payload = render_part(part)
            if rendered_payload:  # handler had output
                body_parts.append(string_sanitize(rendered_payload))
            # mark as attachment
            elif cd:
                part.replace_header('Content-Disposition', 'attachment; ' + cd)
            else:
                part.add_header('Content-Disposition', 'attachment;')
    return u'\n\n'.join(body_parts)
def function[extract_body, parameter[mail, types, field_key]]: constant[Returns a string view of a Message. If the `types` argument is set then any encoding types there will be used as the prefered encoding to extract. If `types` is None then :ref:`prefer_plaintext <prefer-plaintext>` will be consulted; if it is True then text/plain parts will be returned, if it is false then text/html will be returned if present or text/plain if there are no text/html parts. :param mail: the mail to use :type mail: :class:`email.Message` :param types: mime content types to use for body string :type types: list[str] :returns: The combined text of any parts to be used :rtype: str ] variable[preferred] assign[=] <ast.IfExp object at 0x7da1b07f5a50> variable[has_preferred] assign[=] constant[False] if compare[name[types] is constant[None]] begin[:] variable[has_preferred] assign[=] call[name[list], parameter[call[name[typed_subpart_iterator], parameter[name[mail], <ast.Starred object at 0x7da1b07f7a00>]]]] variable[body_parts] assign[=] list[[]] for taget[name[part]] in starred[call[name[mail].walk, parameter[]]] begin[:] if call[name[part].is_multipart, parameter[]] begin[:] continue variable[ctype] assign[=] call[name[part].get_content_type, parameter[]] if compare[name[types] is_not constant[None]] begin[:] if compare[name[ctype] <ast.NotIn object at 0x7da2590d7190> name[types]] begin[:] continue variable[cd] assign[=] call[name[part].get, parameter[constant[Content-Disposition], constant[]]] if call[name[cd].startswith, parameter[constant[attachment]]] begin[:] continue if <ast.BoolOp object at 0x7da1b07ba410> begin[:] continue if compare[name[ctype] equal[==] constant[text/plain]] begin[:] call[name[body_parts].append, parameter[call[name[string_sanitize], parameter[call[name[remove_cte], parameter[name[part]]]]]]] return[call[constant[ ].join, parameter[name[body_parts]]]]
keyword[def] identifier[extract_body] ( identifier[mail] , identifier[types] = keyword[None] , identifier[field_key] = literal[string] ): literal[string] identifier[preferred] = literal[string] keyword[if] identifier[settings] . identifier[get] ( literal[string] ) keyword[else] literal[string] identifier[has_preferred] = keyword[False] keyword[if] identifier[types] keyword[is] keyword[None] : identifier[has_preferred] = identifier[list] ( identifier[typed_subpart_iterator] ( identifier[mail] ,* identifier[preferred] . identifier[split] ( literal[string] ))) identifier[body_parts] =[] keyword[for] identifier[part] keyword[in] identifier[mail] . identifier[walk] (): keyword[if] identifier[part] . identifier[is_multipart] (): keyword[continue] identifier[ctype] = identifier[part] . identifier[get_content_type] () keyword[if] identifier[types] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[ctype] keyword[not] keyword[in] identifier[types] : keyword[continue] identifier[cd] = identifier[part] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[cd] . identifier[startswith] ( literal[string] ): keyword[continue] keyword[if] identifier[has_preferred] keyword[and] identifier[ctype] != identifier[preferred] : keyword[continue] keyword[if] identifier[ctype] == literal[string] : identifier[body_parts] . identifier[append] ( identifier[string_sanitize] ( identifier[remove_cte] ( identifier[part] , identifier[as_string] = keyword[True] ))) keyword[else] : identifier[rendered_payload] = identifier[render_part] ( identifier[part] ) keyword[if] identifier[rendered_payload] : identifier[body_parts] . identifier[append] ( identifier[string_sanitize] ( identifier[rendered_payload] )) keyword[elif] identifier[cd] : identifier[part] . identifier[replace_header] ( literal[string] , literal[string] + identifier[cd] ) keyword[else] : identifier[part] . identifier[add_header] ( literal[string] , literal[string] ) keyword[return] literal[string] . 
identifier[join] ( identifier[body_parts] )
def extract_body(mail, types=None, field_key='copiousoutput'): """Returns a string view of a Message. If the `types` argument is set then any encoding types there will be used as the prefered encoding to extract. If `types` is None then :ref:`prefer_plaintext <prefer-plaintext>` will be consulted; if it is True then text/plain parts will be returned, if it is false then text/html will be returned if present or text/plain if there are no text/html parts. :param mail: the mail to use :type mail: :class:`email.Message` :param types: mime content types to use for body string :type types: list[str] :returns: The combined text of any parts to be used :rtype: str """ preferred = 'text/plain' if settings.get('prefer_plaintext') else 'text/html' has_preferred = False # see if the mail has our preferred type if types is None: has_preferred = list(typed_subpart_iterator(mail, *preferred.split('/'))) # depends on [control=['if'], data=[]] body_parts = [] for part in mail.walk(): # skip non-leaf nodes in the mail tree if part.is_multipart(): continue # depends on [control=['if'], data=[]] ctype = part.get_content_type() if types is not None: if ctype not in types: continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['types']] cd = part.get('Content-Disposition', '') if cd.startswith('attachment'): continue # depends on [control=['if'], data=[]] # if the mail has our preferred type, we only keep this type # note that if types != None, has_preferred always stays False if has_preferred and ctype != preferred: continue # depends on [control=['if'], data=[]] if ctype == 'text/plain': body_parts.append(string_sanitize(remove_cte(part, as_string=True))) # depends on [control=['if'], data=[]] else: rendered_payload = render_part(part) if rendered_payload: # handler had output body_parts.append(string_sanitize(rendered_payload)) # depends on [control=['if'], data=[]] # mark as attachment elif cd: part.replace_header('Content-Disposition', 'attachment; ' + 
cd) # depends on [control=['if'], data=[]] else: part.add_header('Content-Disposition', 'attachment;') # depends on [control=['for'], data=['part']] return u'\n\n'.join(body_parts)
def compute(self, inputVector, learn, activeArray):
    """
    This is the primary public method of the SpatialPooler class. This
    function takes a input vector and outputs the indices of the active
    columns. If 'learn' is set to True, this method also updates the
    permanences of the columns.

    @param inputVector: A numpy array of 0's and 1's that comprises the input
        to the spatial pooler. The array will be treated as a one dimensional
        array, therefore the dimensions of the array do not have to match the
        exact dimensions specified in the class constructor. In fact, even a
        list would suffice. The number of input bits in the vector must,
        however, match the number of bits specified by the call to the
        constructor. Therefore there must be a '0' or '1' in the array for
        every input bit.
    @param learn: A boolean value indicating whether learning should be
        performed. Learning entails updating the permanence values of the
        synapses, and hence modifying the 'state' of the model. Setting
        learning to 'off' freezes the SP and has many uses. For example, you
        might want to feed in various inputs and examine the resulting SDR's.
    @param activeArray: An array whose size is equal to the number of columns.
        Before the function returns this array will be populated with 1's at
        the indices of the active columns, and 0's everywhere else.
    """
    if not isinstance(inputVector, numpy.ndarray):
        raise TypeError("Input vector must be a numpy array, not %s" %
                        str(type(inputVector)))

    if inputVector.size != self._numInputs:
        # BUG FIX: the original passed the arguments in (got, expected) order,
        # so the message reported the values backwards.
        raise ValueError(
            "Input vector dimensions don't match. Expecting %s but got %s" %
            (self._numInputs, inputVector.size))

    self._updateBookeepingVars(learn)
    inputVector = numpy.array(inputVector, dtype=realDType)
    # BUG FIX: ndarray.reshape returns a new view and does NOT modify the
    # array in place; the original discarded the result, so multi-dimensional
    # input was never actually flattened. Rebind so the documented "treated
    # as one dimensional" behavior holds.
    inputVector = inputVector.reshape(-1)
    self._overlaps = self._calculateOverlap(inputVector)
    # self._overlaps[self.deadCols] = 0

    # Apply boosting when learning is on
    if learn:
        self._boostedOverlaps = self._boostFactors * self._overlaps
    else:
        self._boostedOverlaps = self._overlaps

    # Apply inhibition to determine the winning columns
    activeColumns = self._inhibitColumns(self._boostedOverlaps)

    if learn:
        self._adaptSynapses(inputVector, activeColumns)
        self._updateDutyCycles(self._overlaps, activeColumns)
        self._bumpUpWeakColumns()
        self._updateTargetActivityDensity()
        self._updateBoostFactors()
        if self._isUpdateRound():
            self._updateInhibitionRadius()
            self._updateMinDutyCycles()
            # self.growRandomSynapses()

    # Publish the winners: 1 at each active column index, 0 elsewhere.
    activeArray.fill(0)
    activeArray[activeColumns] = 1
def function[compute, parameter[self, inputVector, learn, activeArray]]: constant[ This is the primary public method of the SpatialPooler class. This function takes a input vector and outputs the indices of the active columns. If 'learn' is set to True, this method also updates the permanences of the columns. @param inputVector: A numpy array of 0's and 1's that comprises the input to the spatial pooler. The array will be treated as a one dimensional array, therefore the dimensions of the array do not have to match the exact dimensions specified in the class constructor. In fact, even a list would suffice. The number of input bits in the vector must, however, match the number of bits specified by the call to the constructor. Therefore there must be a '0' or '1' in the array for every input bit. @param learn: A boolean value indicating whether learning should be performed. Learning entails updating the permanence values of the synapses, and hence modifying the 'state' of the model. Setting learning to 'off' freezes the SP and has many uses. For example, you might want to feed in various inputs and examine the resulting SDR's. @param activeArray: An array whose size is equal to the number of columns. Before the function returns this array will be populated with 1's at the indices of the active columns, and 0's everywhere else. 
] if <ast.UnaryOp object at 0x7da1b0846440> begin[:] <ast.Raise object at 0x7da1b0847310> if compare[name[inputVector].size not_equal[!=] name[self]._numInputs] begin[:] <ast.Raise object at 0x7da1b08469e0> call[name[self]._updateBookeepingVars, parameter[name[learn]]] variable[inputVector] assign[=] call[name[numpy].array, parameter[name[inputVector]]] call[name[inputVector].reshape, parameter[<ast.UnaryOp object at 0x7da1b0844c40>]] name[self]._overlaps assign[=] call[name[self]._calculateOverlap, parameter[name[inputVector]]] if name[learn] begin[:] name[self]._boostedOverlaps assign[=] binary_operation[name[self]._boostFactors * name[self]._overlaps] variable[activeColumns] assign[=] call[name[self]._inhibitColumns, parameter[name[self]._boostedOverlaps]] if name[learn] begin[:] call[name[self]._adaptSynapses, parameter[name[inputVector], name[activeColumns]]] call[name[self]._updateDutyCycles, parameter[name[self]._overlaps, name[activeColumns]]] call[name[self]._bumpUpWeakColumns, parameter[]] call[name[self]._updateTargetActivityDensity, parameter[]] call[name[self]._updateBoostFactors, parameter[]] if call[name[self]._isUpdateRound, parameter[]] begin[:] call[name[self]._updateInhibitionRadius, parameter[]] call[name[self]._updateMinDutyCycles, parameter[]] call[name[activeArray].fill, parameter[constant[0]]] call[name[activeArray]][name[activeColumns]] assign[=] constant[1]
keyword[def] identifier[compute] ( identifier[self] , identifier[inputVector] , identifier[learn] , identifier[activeArray] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[inputVector] , identifier[numpy] . identifier[ndarray] ): keyword[raise] identifier[TypeError] ( literal[string] % identifier[str] ( identifier[type] ( identifier[inputVector] ))) keyword[if] identifier[inputVector] . identifier[size] != identifier[self] . identifier[_numInputs] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[inputVector] . identifier[size] , identifier[self] . identifier[_numInputs] )) identifier[self] . identifier[_updateBookeepingVars] ( identifier[learn] ) identifier[inputVector] = identifier[numpy] . identifier[array] ( identifier[inputVector] , identifier[dtype] = identifier[realDType] ) identifier[inputVector] . identifier[reshape] (- literal[int] ) identifier[self] . identifier[_overlaps] = identifier[self] . identifier[_calculateOverlap] ( identifier[inputVector] ) keyword[if] identifier[learn] : identifier[self] . identifier[_boostedOverlaps] = identifier[self] . identifier[_boostFactors] * identifier[self] . identifier[_overlaps] keyword[else] : identifier[self] . identifier[_boostedOverlaps] = identifier[self] . identifier[_overlaps] identifier[activeColumns] = identifier[self] . identifier[_inhibitColumns] ( identifier[self] . identifier[_boostedOverlaps] ) keyword[if] identifier[learn] : identifier[self] . identifier[_adaptSynapses] ( identifier[inputVector] , identifier[activeColumns] ) identifier[self] . identifier[_updateDutyCycles] ( identifier[self] . identifier[_overlaps] , identifier[activeColumns] ) identifier[self] . identifier[_bumpUpWeakColumns] () identifier[self] . identifier[_updateTargetActivityDensity] () identifier[self] . identifier[_updateBoostFactors] () keyword[if] identifier[self] . identifier[_isUpdateRound] (): identifier[self] . identifier[_updateInhibitionRadius] () identifier[self] . 
identifier[_updateMinDutyCycles] () identifier[activeArray] . identifier[fill] ( literal[int] ) identifier[activeArray] [ identifier[activeColumns] ]= literal[int]
def compute(self, inputVector, learn, activeArray): """ This is the primary public method of the SpatialPooler class. This function takes a input vector and outputs the indices of the active columns. If 'learn' is set to True, this method also updates the permanences of the columns. @param inputVector: A numpy array of 0's and 1's that comprises the input to the spatial pooler. The array will be treated as a one dimensional array, therefore the dimensions of the array do not have to match the exact dimensions specified in the class constructor. In fact, even a list would suffice. The number of input bits in the vector must, however, match the number of bits specified by the call to the constructor. Therefore there must be a '0' or '1' in the array for every input bit. @param learn: A boolean value indicating whether learning should be performed. Learning entails updating the permanence values of the synapses, and hence modifying the 'state' of the model. Setting learning to 'off' freezes the SP and has many uses. For example, you might want to feed in various inputs and examine the resulting SDR's. @param activeArray: An array whose size is equal to the number of columns. Before the function returns this array will be populated with 1's at the indices of the active columns, and 0's everywhere else. """ if not isinstance(inputVector, numpy.ndarray): raise TypeError('Input vector must be a numpy array, not %s' % str(type(inputVector))) # depends on [control=['if'], data=[]] if inputVector.size != self._numInputs: raise ValueError("Input vector dimensions don't match. 
Expecting %s but got %s" % (inputVector.size, self._numInputs)) # depends on [control=['if'], data=[]] self._updateBookeepingVars(learn) inputVector = numpy.array(inputVector, dtype=realDType) inputVector.reshape(-1) self._overlaps = self._calculateOverlap(inputVector) # self._overlaps[self.deadCols] = 0 # Apply boosting when learning is on if learn: self._boostedOverlaps = self._boostFactors * self._overlaps # depends on [control=['if'], data=[]] else: self._boostedOverlaps = self._overlaps # Apply inhibition to determine the winning columns activeColumns = self._inhibitColumns(self._boostedOverlaps) if learn: self._adaptSynapses(inputVector, activeColumns) self._updateDutyCycles(self._overlaps, activeColumns) self._bumpUpWeakColumns() self._updateTargetActivityDensity() self._updateBoostFactors() if self._isUpdateRound(): self._updateInhibitionRadius() self._updateMinDutyCycles() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # self.growRandomSynapses() activeArray.fill(0) activeArray[activeColumns] = 1
async def playback(dev: Device, cmd, target, value):
    """Get and set playback settings, e.g. repeat and shuffle.."""
    # When both a target and a value are given, apply the setting first.
    if target and value:
        dev.set_playback_settings(target, value)

    if cmd == "support":
        click.echo("Supported playback functions:")
        for func in await dev.get_supported_playback_functions("storage:usb1"):
            print(func)
        return
    if cmd == "settings":
        print_settings(await dev.get_playback_settings())
        # click.echo("Playback functions:")
        # funcs = await dev.get_available_playback_functions()
        # print(funcs)
        return
    # Default: report what is currently playing.
    click.echo("Currently playing: %s" % await dev.get_play_info())
<ast.AsyncFunctionDef object at 0x7da18bcc9930>
keyword[async] keyword[def] identifier[playback] ( identifier[dev] : identifier[Device] , identifier[cmd] , identifier[target] , identifier[value] ): literal[string] keyword[if] identifier[target] keyword[and] identifier[value] : identifier[dev] . identifier[set_playback_settings] ( identifier[target] , identifier[value] ) keyword[if] identifier[cmd] == literal[string] : identifier[click] . identifier[echo] ( literal[string] ) identifier[supported] = keyword[await] identifier[dev] . identifier[get_supported_playback_functions] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[supported] : identifier[print] ( identifier[i] ) keyword[elif] identifier[cmd] == literal[string] : identifier[print_settings] ( keyword[await] identifier[dev] . identifier[get_playback_settings] ()) keyword[else] : identifier[click] . identifier[echo] ( literal[string] % keyword[await] identifier[dev] . identifier[get_play_info] ())
async def playback(dev: Device, cmd, target, value): """Get and set playback settings, e.g. repeat and shuffle..""" if target and value: dev.set_playback_settings(target, value) # depends on [control=['if'], data=[]] if cmd == 'support': click.echo('Supported playback functions:') supported = await dev.get_supported_playback_functions('storage:usb1') for i in supported: print(i) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] elif cmd == 'settings': print_settings(await dev.get_playback_settings()) # depends on [control=['if'], data=[]] else: # click.echo("Playback functions:") # funcs = await dev.get_available_playback_functions() # print(funcs) click.echo('Currently playing: %s' % await dev.get_play_info())
def default(self, obj):
    """Converts an ndarray into a dictionary for efficient serialization.

    The dict has three keys:
    - dtype : The datatype of the array as a string.
    - shape : The shape of the array as a tuple.
    - __ndarray__ : The data of the array as a list.

    Parameters
    ----------
    obj : :obj:`numpy.ndarray`
        The ndarray to encode.

    Returns
    -------
    :obj:`dict`
        The dictionary serialization of obj.

    Raises
    ------
    TypeError
        If obj isn't an ndarray.
    """
    if isinstance(obj, np.ndarray):
        return dict(__ndarray__=obj.tolist(),
                    dtype=str(obj.dtype),
                    shape=obj.shape)
    # Let the base class default method raise the TypeError.
    # BUG FIX: the original wrote ``_json.JSONEncoder(self, obj)``, which
    # tries to *construct* an encoder with two positional args and only
    # raised TypeError by accident; call the base ``default`` instead.
    return _json.JSONEncoder.default(self, obj)
def function[default, parameter[self, obj]]: constant[Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray. ] if call[name[isinstance], parameter[name[obj], name[np].ndarray]] begin[:] return[call[name[dict], parameter[]]] return[call[name[_json].JSONEncoder, parameter[name[self], name[obj]]]]
keyword[def] identifier[default] ( identifier[self] , identifier[obj] ): literal[string] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[np] . identifier[ndarray] ): keyword[return] identifier[dict] ( identifier[__ndarray__] = identifier[obj] . identifier[tolist] (), identifier[dtype] = identifier[str] ( identifier[obj] . identifier[dtype] ), identifier[shape] = identifier[obj] . identifier[shape] ) keyword[return] identifier[_json] . identifier[JSONEncoder] ( identifier[self] , identifier[obj] )
def default(self, obj): """Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray. """ if isinstance(obj, np.ndarray): return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape) # depends on [control=['if'], data=[]] # Let the base class default method raise the TypeError return _json.JSONEncoder(self, obj)
def _login(session):
    """Login to Fedex Delivery Manager."""
    # Prime cookies by visiting the login page before posting credentials.
    session.get(LOGIN_REFERER)
    payload = {
        'user': session.auth.username,
        'pwd': session.auth.password,
    }
    headers = {
        'Referer': LOGIN_REFERER,
        'X-Requested-With': 'XMLHttpRequest',
    }
    resp = session.post(LOGIN_URL, payload, headers=headers)
    if resp.status_code != 200:
        raise FedexError('could not login')
    data = resp.json()
    if not data['successful']:
        # Surface the first server-reported error message.
        raise FedexError(data['errorList'][0]['error']['message'])
    # Persist the authenticated session cookies for reuse.
    _save_cookies(session.cookies, session.auth.cookie_path)
def function[_login, parameter[session]]: constant[Login to Fedex Delivery Manager.] call[name[session].get, parameter[name[LOGIN_REFERER]]] variable[resp] assign[=] call[name[session].post, parameter[name[LOGIN_URL], dictionary[[<ast.Constant object at 0x7da18f812f20>, <ast.Constant object at 0x7da18f810cd0>], [<ast.Attribute object at 0x7da18f8130a0>, <ast.Attribute object at 0x7da18f8102b0>]]]] if compare[name[resp].status_code not_equal[!=] constant[200]] begin[:] <ast.Raise object at 0x7da18f811900> variable[data] assign[=] call[name[resp].json, parameter[]] if <ast.UnaryOp object at 0x7da18f811ab0> begin[:] <ast.Raise object at 0x7da18f813700> call[name[_save_cookies], parameter[name[session].cookies, name[session].auth.cookie_path]]
keyword[def] identifier[_login] ( identifier[session] ): literal[string] identifier[session] . identifier[get] ( identifier[LOGIN_REFERER] ) identifier[resp] = identifier[session] . identifier[post] ( identifier[LOGIN_URL] ,{ literal[string] : identifier[session] . identifier[auth] . identifier[username] , literal[string] : identifier[session] . identifier[auth] . identifier[password] }, identifier[headers] ={ literal[string] : identifier[LOGIN_REFERER] , literal[string] : literal[string] }) keyword[if] identifier[resp] . identifier[status_code] != literal[int] : keyword[raise] identifier[FedexError] ( literal[string] ) identifier[data] = identifier[resp] . identifier[json] () keyword[if] keyword[not] identifier[data] [ literal[string] ]: keyword[raise] identifier[FedexError] ( identifier[data] [ literal[string] ][ literal[int] ][ literal[string] ][ literal[string] ]) identifier[_save_cookies] ( identifier[session] . identifier[cookies] , identifier[session] . identifier[auth] . identifier[cookie_path] )
def _login(session): """Login to Fedex Delivery Manager.""" session.get(LOGIN_REFERER) resp = session.post(LOGIN_URL, {'user': session.auth.username, 'pwd': session.auth.password}, headers={'Referer': LOGIN_REFERER, 'X-Requested-With': 'XMLHttpRequest'}) if resp.status_code != 200: raise FedexError('could not login') # depends on [control=['if'], data=[]] data = resp.json() if not data['successful']: raise FedexError(data['errorList'][0]['error']['message']) # depends on [control=['if'], data=[]] _save_cookies(session.cookies, session.auth.cookie_path)
def set_prior_probs(self, statements):
    """Sets the prior belief probabilities for a list of INDRA Statements.

    The Statements are assumed to be de-duplicated. In other words,
    each Statement in the list passed to this function is assumed to have
    a list of Evidence objects that support it. The prior probability of
    each Statement is calculated based on the number of Evidences it has
    and their sources.

    Parameters
    ----------
    statements : list[indra.statements.Statement]
        A list of INDRA Statements whose belief scores are to
        be calculated. Each Statement object's belief attribute is updated
        by this function.
    """
    # Validate the scorer can handle these statements before mutating them.
    self.scorer.check_prior_probs(statements)
    for stmt in statements:
        stmt.belief = self.scorer.score_statement(stmt)
def children(args): """ %prog children gff_file Get the children that have the same parent. """ p = OptionParser(children.__doc__) p.add_option("--parents", default="gene", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args g = make_index(gff_file) parents = set(opts.parents.split(',')) for feat in get_parents(gff_file, parents): cc = [c.id for c in g.children(feat.id, 1)] if len(cc) <= 1: continue print("\t".join(str(x) for x in \ (feat.id, feat.start, feat.stop, "|".join(cc))))
def function[children, parameter[args]]: constant[ %prog children gff_file Get the children that have the same parent. ] variable[p] assign[=] call[name[OptionParser], parameter[name[children].__doc__]] call[name[p].add_option, parameter[constant[--parents]]] <ast.Tuple object at 0x7da1b077dae0> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b077c310>]] <ast.Tuple object at 0x7da1b077c550> assign[=] name[args] variable[g] assign[=] call[name[make_index], parameter[name[gff_file]]] variable[parents] assign[=] call[name[set], parameter[call[name[opts].parents.split, parameter[constant[,]]]]] for taget[name[feat]] in starred[call[name[get_parents], parameter[name[gff_file], name[parents]]]] begin[:] variable[cc] assign[=] <ast.ListComp object at 0x7da1b077ce50> if compare[call[name[len], parameter[name[cc]]] less_or_equal[<=] constant[1]] begin[:] continue call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b077c1f0>]]]]
keyword[def] identifier[children] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[children] . identifier[__doc__] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] , identifier[help] = literal[string] literal[string] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[gff_file] ,= identifier[args] identifier[g] = identifier[make_index] ( identifier[gff_file] ) identifier[parents] = identifier[set] ( identifier[opts] . identifier[parents] . identifier[split] ( literal[string] )) keyword[for] identifier[feat] keyword[in] identifier[get_parents] ( identifier[gff_file] , identifier[parents] ): identifier[cc] =[ identifier[c] . identifier[id] keyword[for] identifier[c] keyword[in] identifier[g] . identifier[children] ( identifier[feat] . identifier[id] , literal[int] )] keyword[if] identifier[len] ( identifier[cc] )<= literal[int] : keyword[continue] identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[feat] . identifier[id] , identifier[feat] . identifier[start] , identifier[feat] . identifier[stop] , literal[string] . identifier[join] ( identifier[cc] ))))
def children(args): """ %prog children gff_file Get the children that have the same parent. """ p = OptionParser(children.__doc__) p.add_option('--parents', default='gene', help="list of features to extract, use comma to separate (e.g.'gene,mRNA') [default: %default]") (opts, args) = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] (gff_file,) = args g = make_index(gff_file) parents = set(opts.parents.split(',')) for feat in get_parents(gff_file, parents): cc = [c.id for c in g.children(feat.id, 1)] if len(cc) <= 1: continue # depends on [control=['if'], data=[]] print('\t'.join((str(x) for x in (feat.id, feat.start, feat.stop, '|'.join(cc))))) # depends on [control=['for'], data=['feat']]
def _from_dict(cls, _dict): """Initialize a TableReturn object from a json dictionary.""" args = {} if 'document' in _dict: args['document'] = DocInfo._from_dict(_dict.get('document')) if 'model_id' in _dict: args['model_id'] = _dict.get('model_id') if 'model_version' in _dict: args['model_version'] = _dict.get('model_version') if 'tables' in _dict: args['tables'] = [ Tables._from_dict(x) for x in (_dict.get('tables')) ] return cls(**args)
def function[_from_dict, parameter[cls, _dict]]: constant[Initialize a TableReturn object from a json dictionary.] variable[args] assign[=] dictionary[[], []] if compare[constant[document] in name[_dict]] begin[:] call[name[args]][constant[document]] assign[=] call[name[DocInfo]._from_dict, parameter[call[name[_dict].get, parameter[constant[document]]]]] if compare[constant[model_id] in name[_dict]] begin[:] call[name[args]][constant[model_id]] assign[=] call[name[_dict].get, parameter[constant[model_id]]] if compare[constant[model_version] in name[_dict]] begin[:] call[name[args]][constant[model_version]] assign[=] call[name[_dict].get, parameter[constant[model_version]]] if compare[constant[tables] in name[_dict]] begin[:] call[name[args]][constant[tables]] assign[=] <ast.ListComp object at 0x7da18f723910> return[call[name[cls], parameter[]]]
keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ): literal[string] identifier[args] ={} keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]= identifier[DocInfo] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] )) keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]=[ identifier[Tables] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] )) ] keyword[return] identifier[cls] (** identifier[args] )
def _from_dict(cls, _dict): """Initialize a TableReturn object from a json dictionary.""" args = {} if 'document' in _dict: args['document'] = DocInfo._from_dict(_dict.get('document')) # depends on [control=['if'], data=['_dict']] if 'model_id' in _dict: args['model_id'] = _dict.get('model_id') # depends on [control=['if'], data=['_dict']] if 'model_version' in _dict: args['model_version'] = _dict.get('model_version') # depends on [control=['if'], data=['_dict']] if 'tables' in _dict: args['tables'] = [Tables._from_dict(x) for x in _dict.get('tables')] # depends on [control=['if'], data=['_dict']] return cls(**args)
def loop_stopped(self): """ Terminate socket connection because of stopping loop :return: None """ transport = self.transport() if self.server_mode() is True: transport.close_server_socket(self.config()) else: transport.close_client_socket(self.config())
def function[loop_stopped, parameter[self]]: constant[ Terminate socket connection because of stopping loop :return: None ] variable[transport] assign[=] call[name[self].transport, parameter[]] if compare[call[name[self].server_mode, parameter[]] is constant[True]] begin[:] call[name[transport].close_server_socket, parameter[call[name[self].config, parameter[]]]]
keyword[def] identifier[loop_stopped] ( identifier[self] ): literal[string] identifier[transport] = identifier[self] . identifier[transport] () keyword[if] identifier[self] . identifier[server_mode] () keyword[is] keyword[True] : identifier[transport] . identifier[close_server_socket] ( identifier[self] . identifier[config] ()) keyword[else] : identifier[transport] . identifier[close_client_socket] ( identifier[self] . identifier[config] ())
def loop_stopped(self): """ Terminate socket connection because of stopping loop :return: None """ transport = self.transport() if self.server_mode() is True: transport.close_server_socket(self.config()) # depends on [control=['if'], data=[]] else: transport.close_client_socket(self.config())
def _group_by_cnv_method(batches): """Group into batches samples with identical CNV/SV approaches. Allows sharing of background samples across multiple batches, using all normals from tumor/normal pairs with the same prep method for background. """ CnvGroup = collections.namedtuple("CnvGroup", "items, work_dir, access_file, region_file") out = [] groups = collections.defaultdict(list) for batch, items in batches.items(): for data in items: work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", "bins", batch)) cnv_file = get_base_cnv_regions(data, work_dir, "transcripts100", include_gene_names=False) if cnv_file: break assert cnv_file, ("Did not find coverage regions for batch %s: %s" % (batch, " ".join([dd.get_sample_name(d) for d in items]))) groups[(cnv_file, dd.get_prep_method(data))].append((items, data, work_dir)) for (cnv_file, _), cur_group in groups.items(): group_items = reduce(operator.add, [xs[0] for xs in cur_group]) access_file = tz.get_in(["config", "algorithm", "callable_regions"], cur_group[0][1]) out.append(CnvGroup(group_items, cur_group[0][2], access_file, cnv_file)) return out
def function[_group_by_cnv_method, parameter[batches]]: constant[Group into batches samples with identical CNV/SV approaches. Allows sharing of background samples across multiple batches, using all normals from tumor/normal pairs with the same prep method for background. ] variable[CnvGroup] assign[=] call[name[collections].namedtuple, parameter[constant[CnvGroup], constant[items, work_dir, access_file, region_file]]] variable[out] assign[=] list[[]] variable[groups] assign[=] call[name[collections].defaultdict, parameter[name[list]]] for taget[tuple[[<ast.Name object at 0x7da1b18a1f90>, <ast.Name object at 0x7da1b18a0160>]]] in starred[call[name[batches].items, parameter[]]] begin[:] for taget[name[data]] in starred[name[items]] begin[:] variable[work_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[name[dd].get_work_dir, parameter[name[data]]], constant[structural], constant[bins], name[batch]]]]] variable[cnv_file] assign[=] call[name[get_base_cnv_regions], parameter[name[data], name[work_dir], constant[transcripts100]]] if name[cnv_file] begin[:] break assert[name[cnv_file]] call[call[name[groups]][tuple[[<ast.Name object at 0x7da1b18a3100>, <ast.Call object at 0x7da1b18a2fb0>]]].append, parameter[tuple[[<ast.Name object at 0x7da1b18a2a10>, <ast.Name object at 0x7da1b18a2bf0>, <ast.Name object at 0x7da1b18a26e0>]]]] for taget[tuple[[<ast.Tuple object at 0x7da1b18a0910>, <ast.Name object at 0x7da1b18a1660>]]] in starred[call[name[groups].items, parameter[]]] begin[:] variable[group_items] assign[=] call[name[reduce], parameter[name[operator].add, <ast.ListComp object at 0x7da1b18a2e30>]] variable[access_file] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b18a3280>, <ast.Constant object at 0x7da1b18a21a0>, <ast.Constant object at 0x7da1b18a08e0>]], call[call[name[cur_group]][constant[0]]][constant[1]]]] call[name[out].append, parameter[call[name[CnvGroup], parameter[name[group_items], 
call[call[name[cur_group]][constant[0]]][constant[2]], name[access_file], name[cnv_file]]]]] return[name[out]]
keyword[def] identifier[_group_by_cnv_method] ( identifier[batches] ): literal[string] identifier[CnvGroup] = identifier[collections] . identifier[namedtuple] ( literal[string] , literal[string] ) identifier[out] =[] identifier[groups] = identifier[collections] . identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[batch] , identifier[items] keyword[in] identifier[batches] . identifier[items] (): keyword[for] identifier[data] keyword[in] identifier[items] : identifier[work_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dd] . identifier[get_work_dir] ( identifier[data] ), literal[string] , literal[string] , identifier[batch] )) identifier[cnv_file] = identifier[get_base_cnv_regions] ( identifier[data] , identifier[work_dir] , literal[string] , identifier[include_gene_names] = keyword[False] ) keyword[if] identifier[cnv_file] : keyword[break] keyword[assert] identifier[cnv_file] ,( literal[string] % ( identifier[batch] , literal[string] . identifier[join] ([ identifier[dd] . identifier[get_sample_name] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[items] ]))) identifier[groups] [( identifier[cnv_file] , identifier[dd] . identifier[get_prep_method] ( identifier[data] ))]. identifier[append] (( identifier[items] , identifier[data] , identifier[work_dir] )) keyword[for] ( identifier[cnv_file] , identifier[_] ), identifier[cur_group] keyword[in] identifier[groups] . identifier[items] (): identifier[group_items] = identifier[reduce] ( identifier[operator] . identifier[add] ,[ identifier[xs] [ literal[int] ] keyword[for] identifier[xs] keyword[in] identifier[cur_group] ]) identifier[access_file] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[cur_group] [ literal[int] ][ literal[int] ]) identifier[out] . 
identifier[append] ( identifier[CnvGroup] ( identifier[group_items] , identifier[cur_group] [ literal[int] ][ literal[int] ], identifier[access_file] , identifier[cnv_file] )) keyword[return] identifier[out]
def _group_by_cnv_method(batches): """Group into batches samples with identical CNV/SV approaches. Allows sharing of background samples across multiple batches, using all normals from tumor/normal pairs with the same prep method for background. """ CnvGroup = collections.namedtuple('CnvGroup', 'items, work_dir, access_file, region_file') out = [] groups = collections.defaultdict(list) for (batch, items) in batches.items(): for data in items: work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), 'structural', 'bins', batch)) cnv_file = get_base_cnv_regions(data, work_dir, 'transcripts100', include_gene_names=False) if cnv_file: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data']] assert cnv_file, 'Did not find coverage regions for batch %s: %s' % (batch, ' '.join([dd.get_sample_name(d) for d in items])) groups[cnv_file, dd.get_prep_method(data)].append((items, data, work_dir)) # depends on [control=['for'], data=[]] for ((cnv_file, _), cur_group) in groups.items(): group_items = reduce(operator.add, [xs[0] for xs in cur_group]) access_file = tz.get_in(['config', 'algorithm', 'callable_regions'], cur_group[0][1]) out.append(CnvGroup(group_items, cur_group[0][2], access_file, cnv_file)) # depends on [control=['for'], data=[]] return out
def check_hints(self, ds): ''' Checks for potentially mislabeled metadata and makes suggestions for how to correct :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' ret_val = [] ret_val.extend(self._check_hint_bounds(ds)) return ret_val
def function[check_hints, parameter[self, ds]]: constant[ Checks for potentially mislabeled metadata and makes suggestions for how to correct :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ] variable[ret_val] assign[=] list[[]] call[name[ret_val].extend, parameter[call[name[self]._check_hint_bounds, parameter[name[ds]]]]] return[name[ret_val]]
keyword[def] identifier[check_hints] ( identifier[self] , identifier[ds] ): literal[string] identifier[ret_val] =[] identifier[ret_val] . identifier[extend] ( identifier[self] . identifier[_check_hint_bounds] ( identifier[ds] )) keyword[return] identifier[ret_val]
def check_hints(self, ds): """ Checks for potentially mislabeled metadata and makes suggestions for how to correct :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results """ ret_val = [] ret_val.extend(self._check_hint_bounds(ds)) return ret_val
def replace_env_vars(conf): """Fill `conf` with environment variables, where appropriate. Any value of the from $VAR will be replaced with the environment variable VAR. If there are sub dictionaries, this function will recurse. This will preserve the original dictionary, and return a copy. """ d = deepcopy(conf) for key, value in d.items(): if type(value) == dict: d[key] = replace_env_vars(value) elif type(value) == str: if value[0] == '$': var_name = value[1:] d[key] = os.environ[var_name] return d
def function[replace_env_vars, parameter[conf]]: constant[Fill `conf` with environment variables, where appropriate. Any value of the from $VAR will be replaced with the environment variable VAR. If there are sub dictionaries, this function will recurse. This will preserve the original dictionary, and return a copy. ] variable[d] assign[=] call[name[deepcopy], parameter[name[conf]]] for taget[tuple[[<ast.Name object at 0x7da1b09482b0>, <ast.Name object at 0x7da1b0948af0>]]] in starred[call[name[d].items, parameter[]]] begin[:] if compare[call[name[type], parameter[name[value]]] equal[==] name[dict]] begin[:] call[name[d]][name[key]] assign[=] call[name[replace_env_vars], parameter[name[value]]] return[name[d]]
keyword[def] identifier[replace_env_vars] ( identifier[conf] ): literal[string] identifier[d] = identifier[deepcopy] ( identifier[conf] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[d] . identifier[items] (): keyword[if] identifier[type] ( identifier[value] )== identifier[dict] : identifier[d] [ identifier[key] ]= identifier[replace_env_vars] ( identifier[value] ) keyword[elif] identifier[type] ( identifier[value] )== identifier[str] : keyword[if] identifier[value] [ literal[int] ]== literal[string] : identifier[var_name] = identifier[value] [ literal[int] :] identifier[d] [ identifier[key] ]= identifier[os] . identifier[environ] [ identifier[var_name] ] keyword[return] identifier[d]
def replace_env_vars(conf): """Fill `conf` with environment variables, where appropriate. Any value of the from $VAR will be replaced with the environment variable VAR. If there are sub dictionaries, this function will recurse. This will preserve the original dictionary, and return a copy. """ d = deepcopy(conf) for (key, value) in d.items(): if type(value) == dict: d[key] = replace_env_vars(value) # depends on [control=['if'], data=[]] elif type(value) == str: if value[0] == '$': var_name = value[1:] d[key] = os.environ[var_name] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return d
def sample_background_model( hdf5, branch_key, tom, seed, filter_idx, min_mag, npd, hdd, upper_seismogenic_depth, lower_seismogenic_depth, msr=WC1994(), aspect=1.5, trt=DEFAULT_TRT): """ Generates a rupture set from a sample of the background model :param branch_key: Key to indicate the branch for selecting the background model :param tom: Temporal occurrence model as instance of :class: openquake.hazardlib.tom.TOM :param seed: Random seed to use in the call to tom.sample_number_of_occurrences :param filter_idx: Sites for consideration (can be None!) :param float min_mag: Minimim magnitude for consideration of background sources :param npd: Nodal plane distribution as instance of :class: openquake.hazardlib.pmf.PMF :param hdd: Hypocentral depth distribution as instance of :class: openquake.hazardlib.pmf.PMF :param float aspect: Aspect ratio :param float upper_seismogenic_depth: Upper seismogenic depth (km) :param float lower_seismogenic_depth: Lower seismogenic depth (km) :param msr: Magnitude scaling relation :param float integration_distance: Maximum distance from rupture to site for consideration """ bg_magnitudes = hdf5["/".join(["Grid", branch_key, "Magnitude"])].value # Select magnitudes above the minimum magnitudes mag_idx = bg_magnitudes >= min_mag mags = bg_magnitudes[mag_idx] rates = hdf5["/".join(["Grid", branch_key, "RateArray"])][filter_idx, :] rates = rates[:, mag_idx] valid_locs = hdf5["Grid/Locations"][filter_idx, :] # Sample remaining rates sampler = tom.sample_number_of_occurrences(rates, seed) background_ruptures = [] background_n_occ = [] for i, mag in enumerate(mags): rate_idx = numpy.where(sampler[:, i])[0] rate_cnt = sampler[rate_idx, i] occurrence = rates[rate_idx, i] locations = valid_locs[rate_idx, :] ruptures = generate_background_ruptures( tom, locations, occurrence, mag, npd, hdd, upper_seismogenic_depth, lower_seismogenic_depth, msr, aspect, trt) background_ruptures.extend(ruptures) background_n_occ.extend(rate_cnt.tolist()) return 
background_ruptures, background_n_occ
def function[sample_background_model, parameter[hdf5, branch_key, tom, seed, filter_idx, min_mag, npd, hdd, upper_seismogenic_depth, lower_seismogenic_depth, msr, aspect, trt]]: constant[ Generates a rupture set from a sample of the background model :param branch_key: Key to indicate the branch for selecting the background model :param tom: Temporal occurrence model as instance of :class: openquake.hazardlib.tom.TOM :param seed: Random seed to use in the call to tom.sample_number_of_occurrences :param filter_idx: Sites for consideration (can be None!) :param float min_mag: Minimim magnitude for consideration of background sources :param npd: Nodal plane distribution as instance of :class: openquake.hazardlib.pmf.PMF :param hdd: Hypocentral depth distribution as instance of :class: openquake.hazardlib.pmf.PMF :param float aspect: Aspect ratio :param float upper_seismogenic_depth: Upper seismogenic depth (km) :param float lower_seismogenic_depth: Lower seismogenic depth (km) :param msr: Magnitude scaling relation :param float integration_distance: Maximum distance from rupture to site for consideration ] variable[bg_magnitudes] assign[=] call[name[hdf5]][call[constant[/].join, parameter[list[[<ast.Constant object at 0x7da18eb56d70>, <ast.Name object at 0x7da18eb543a0>, <ast.Constant object at 0x7da18eb57190>]]]]].value variable[mag_idx] assign[=] compare[name[bg_magnitudes] greater_or_equal[>=] name[min_mag]] variable[mags] assign[=] call[name[bg_magnitudes]][name[mag_idx]] variable[rates] assign[=] call[call[name[hdf5]][call[constant[/].join, parameter[list[[<ast.Constant object at 0x7da18eb56410>, <ast.Name object at 0x7da18eb57640>, <ast.Constant object at 0x7da18eb566e0>]]]]]][tuple[[<ast.Name object at 0x7da18eb56890>, <ast.Slice object at 0x7da18eb550c0>]]] variable[rates] assign[=] call[name[rates]][tuple[[<ast.Slice object at 0x7da18eb56c20>, <ast.Name object at 0x7da18eb55c60>]]] variable[valid_locs] assign[=] 
call[call[name[hdf5]][constant[Grid/Locations]]][tuple[[<ast.Name object at 0x7da18eb567d0>, <ast.Slice object at 0x7da18eb577c0>]]] variable[sampler] assign[=] call[name[tom].sample_number_of_occurrences, parameter[name[rates], name[seed]]] variable[background_ruptures] assign[=] list[[]] variable[background_n_occ] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18eb55fc0>, <ast.Name object at 0x7da18eb57ac0>]]] in starred[call[name[enumerate], parameter[name[mags]]]] begin[:] variable[rate_idx] assign[=] call[call[name[numpy].where, parameter[call[name[sampler]][tuple[[<ast.Slice object at 0x7da18eb55f90>, <ast.Name object at 0x7da18eb54ee0>]]]]]][constant[0]] variable[rate_cnt] assign[=] call[name[sampler]][tuple[[<ast.Name object at 0x7da207f9bfd0>, <ast.Name object at 0x7da207f98250>]]] variable[occurrence] assign[=] call[name[rates]][tuple[[<ast.Name object at 0x7da207f9b220>, <ast.Name object at 0x7da207f98a00>]]] variable[locations] assign[=] call[name[valid_locs]][tuple[[<ast.Name object at 0x7da207f9a050>, <ast.Slice object at 0x7da207f9a980>]]] variable[ruptures] assign[=] call[name[generate_background_ruptures], parameter[name[tom], name[locations], name[occurrence], name[mag], name[npd], name[hdd], name[upper_seismogenic_depth], name[lower_seismogenic_depth], name[msr], name[aspect], name[trt]]] call[name[background_ruptures].extend, parameter[name[ruptures]]] call[name[background_n_occ].extend, parameter[call[name[rate_cnt].tolist, parameter[]]]] return[tuple[[<ast.Name object at 0x7da207f98a90>, <ast.Name object at 0x7da207f9af80>]]]
keyword[def] identifier[sample_background_model] ( identifier[hdf5] , identifier[branch_key] , identifier[tom] , identifier[seed] , identifier[filter_idx] , identifier[min_mag] , identifier[npd] , identifier[hdd] , identifier[upper_seismogenic_depth] , identifier[lower_seismogenic_depth] , identifier[msr] = identifier[WC1994] (), identifier[aspect] = literal[int] , identifier[trt] = identifier[DEFAULT_TRT] ): literal[string] identifier[bg_magnitudes] = identifier[hdf5] [ literal[string] . identifier[join] ([ literal[string] , identifier[branch_key] , literal[string] ])]. identifier[value] identifier[mag_idx] = identifier[bg_magnitudes] >= identifier[min_mag] identifier[mags] = identifier[bg_magnitudes] [ identifier[mag_idx] ] identifier[rates] = identifier[hdf5] [ literal[string] . identifier[join] ([ literal[string] , identifier[branch_key] , literal[string] ])][ identifier[filter_idx] ,:] identifier[rates] = identifier[rates] [:, identifier[mag_idx] ] identifier[valid_locs] = identifier[hdf5] [ literal[string] ][ identifier[filter_idx] ,:] identifier[sampler] = identifier[tom] . identifier[sample_number_of_occurrences] ( identifier[rates] , identifier[seed] ) identifier[background_ruptures] =[] identifier[background_n_occ] =[] keyword[for] identifier[i] , identifier[mag] keyword[in] identifier[enumerate] ( identifier[mags] ): identifier[rate_idx] = identifier[numpy] . 
identifier[where] ( identifier[sampler] [:, identifier[i] ])[ literal[int] ] identifier[rate_cnt] = identifier[sampler] [ identifier[rate_idx] , identifier[i] ] identifier[occurrence] = identifier[rates] [ identifier[rate_idx] , identifier[i] ] identifier[locations] = identifier[valid_locs] [ identifier[rate_idx] ,:] identifier[ruptures] = identifier[generate_background_ruptures] ( identifier[tom] , identifier[locations] , identifier[occurrence] , identifier[mag] , identifier[npd] , identifier[hdd] , identifier[upper_seismogenic_depth] , identifier[lower_seismogenic_depth] , identifier[msr] , identifier[aspect] , identifier[trt] ) identifier[background_ruptures] . identifier[extend] ( identifier[ruptures] ) identifier[background_n_occ] . identifier[extend] ( identifier[rate_cnt] . identifier[tolist] ()) keyword[return] identifier[background_ruptures] , identifier[background_n_occ]
def sample_background_model(hdf5, branch_key, tom, seed, filter_idx, min_mag, npd, hdd, upper_seismogenic_depth, lower_seismogenic_depth, msr=WC1994(), aspect=1.5, trt=DEFAULT_TRT): """ Generates a rupture set from a sample of the background model :param branch_key: Key to indicate the branch for selecting the background model :param tom: Temporal occurrence model as instance of :class: openquake.hazardlib.tom.TOM :param seed: Random seed to use in the call to tom.sample_number_of_occurrences :param filter_idx: Sites for consideration (can be None!) :param float min_mag: Minimim magnitude for consideration of background sources :param npd: Nodal plane distribution as instance of :class: openquake.hazardlib.pmf.PMF :param hdd: Hypocentral depth distribution as instance of :class: openquake.hazardlib.pmf.PMF :param float aspect: Aspect ratio :param float upper_seismogenic_depth: Upper seismogenic depth (km) :param float lower_seismogenic_depth: Lower seismogenic depth (km) :param msr: Magnitude scaling relation :param float integration_distance: Maximum distance from rupture to site for consideration """ bg_magnitudes = hdf5['/'.join(['Grid', branch_key, 'Magnitude'])].value # Select magnitudes above the minimum magnitudes mag_idx = bg_magnitudes >= min_mag mags = bg_magnitudes[mag_idx] rates = hdf5['/'.join(['Grid', branch_key, 'RateArray'])][filter_idx, :] rates = rates[:, mag_idx] valid_locs = hdf5['Grid/Locations'][filter_idx, :] # Sample remaining rates sampler = tom.sample_number_of_occurrences(rates, seed) background_ruptures = [] background_n_occ = [] for (i, mag) in enumerate(mags): rate_idx = numpy.where(sampler[:, i])[0] rate_cnt = sampler[rate_idx, i] occurrence = rates[rate_idx, i] locations = valid_locs[rate_idx, :] ruptures = generate_background_ruptures(tom, locations, occurrence, mag, npd, hdd, upper_seismogenic_depth, lower_seismogenic_depth, msr, aspect, trt) background_ruptures.extend(ruptures) background_n_occ.extend(rate_cnt.tolist()) # depends 
on [control=['for'], data=[]] return (background_ruptures, background_n_occ)
def client_to_screen(self, x, y): """ Translates window client coordinates to screen coordinates. @note: This is a simplified interface to some of the functionality of the L{win32.Point} class. @see: {win32.Point.client_to_screen} @type x: int @param x: Horizontal coordinate. @type y: int @param y: Vertical coordinate. @rtype: tuple( int, int ) @return: Translated coordinates in a tuple (x, y). @raise WindowsError: An error occured while processing this request. """ return tuple( win32.ClientToScreen( self.get_handle(), (x, y) ) )
def function[client_to_screen, parameter[self, x, y]]: constant[ Translates window client coordinates to screen coordinates. @note: This is a simplified interface to some of the functionality of the L{win32.Point} class. @see: {win32.Point.client_to_screen} @type x: int @param x: Horizontal coordinate. @type y: int @param y: Vertical coordinate. @rtype: tuple( int, int ) @return: Translated coordinates in a tuple (x, y). @raise WindowsError: An error occured while processing this request. ] return[call[name[tuple], parameter[call[name[win32].ClientToScreen, parameter[call[name[self].get_handle, parameter[]], tuple[[<ast.Name object at 0x7da20c6ab3a0>, <ast.Name object at 0x7da20c6aa200>]]]]]]]
keyword[def] identifier[client_to_screen] ( identifier[self] , identifier[x] , identifier[y] ): literal[string] keyword[return] identifier[tuple] ( identifier[win32] . identifier[ClientToScreen] ( identifier[self] . identifier[get_handle] (),( identifier[x] , identifier[y] )))
def client_to_screen(self, x, y): """ Translates window client coordinates to screen coordinates. @note: This is a simplified interface to some of the functionality of the L{win32.Point} class. @see: {win32.Point.client_to_screen} @type x: int @param x: Horizontal coordinate. @type y: int @param y: Vertical coordinate. @rtype: tuple( int, int ) @return: Translated coordinates in a tuple (x, y). @raise WindowsError: An error occured while processing this request. """ return tuple(win32.ClientToScreen(self.get_handle(), (x, y)))