text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def participate(self): """Finish reading and send text""" try: while True: left = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "left_button")) ) right = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "right_button")) ) random.choice((left, right)).click() time.sleep(1.0) except TimeoutException: return False
[ "def", "participate", "(", "self", ")", ":", "try", ":", "while", "True", ":", "left", "=", "WebDriverWait", "(", "self", ".", "driver", ",", "10", ")", ".", "until", "(", "EC", ".", "element_to_be_clickable", "(", "(", "By", ".", "ID", ",", "\"left_...
35.6
19.733333
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. :param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret
[ "def", "esxcli", "(", "host", ",", "user", ",", "pwd", ",", "cmd", ",", "protocol", "=", "None", ",", "port", "=", "None", ",", "esxi_host", "=", "None", ",", "credstore", "=", "None", ")", ":", "esx_cmd", "=", "salt", ".", "utils", ".", "path", ...
42.618182
25.890909
def _file_path(self, uid): """Create and return full file path for DayOne entry""" file_name = '%s.doentry' % (uid) return os.path.join(self.dayone_journal_path, file_name)
[ "def", "_file_path", "(", "self", ",", "uid", ")", ":", "file_name", "=", "'%s.doentry'", "%", "(", "uid", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "dayone_journal_path", ",", "file_name", ")" ]
48.25
9.5
def main(argv=None): """Entry point for Rewind. Parses input and calls run() for the real work. Parameters: argv -- sys.argv arguments. Can be set for testing purposes. returns -- the proposed exit code for the program. """ parser = argparse.ArgumentParser( description='Event storage and event proxy.', usage='%(prog)s <configfile>' ) parser.add_argument('--exit-codeword', metavar="MSG", dest="exit_message", default=None, help="An incoming message that makes" " Rewind quit. Used for testing.") parser.add_argument('configfile') args = argv if argv is not None else sys.argv[1:] args = parser.parse_args(args) config = configparser.SafeConfigParser() with open(args.configfile) as f: config.readfp(f) exitcode = run(config, args.exit_message) return exitcode
[ "def", "main", "(", "argv", "=", "None", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Event storage and event proxy.'", ",", "usage", "=", "'%(prog)s <configfile>'", ")", "parser", ".", "add_argument", "(", "'--exit-codewo...
30.965517
21.137931
def rc_channels_scaled_send(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi, force_mavlink1=False): ''' The scaled values of the RC channels received. (-100%) -10000, (0%) 0, (100%) 10000. Channels that are inactive should be set to UINT16_MAX. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t) chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t) ''' return self.send(self.rc_channels_scaled_encode(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi), force_mavlink1=force_mavlink1)
[ "def", "rc_channels_scaled_send", "(", "self", ",", "time_boot_ms", ",", "port", ",", "chan1_scaled", ",", "chan2_scaled", ",", "chan3_scaled", ",", "chan4_scaled", ",", "chan5_scaled", ",", "chan6_scaled", ",", "chan7_scaled", ",", "chan8_scaled", ",", "rssi", ",...
107.85
81.05
def is_group_name_exists(self, group_name): """ check if group with given name is already exists """ groups = self.m["groups"] for g in groups: if (g["group_name"] == group_name): return True return False
[ "def", "is_group_name_exists", "(", "self", ",", "group_name", ")", ":", "groups", "=", "self", ".", "m", "[", "\"groups\"", "]", "for", "g", "in", "groups", ":", "if", "(", "g", "[", "\"group_name\"", "]", "==", "group_name", ")", ":", "return", "True...
36.857143
9.428571
def sample_stats_prior_to_xarray(self): """Extract sample_stats from fit.""" dtypes = {"divergent__": bool, "n_leapfrog__": np.int64, "treedepth__": np.int64} # copy dims and coords dims = deepcopy(self.dims) if self.dims is not None else {} coords = deepcopy(self.coords) if self.coords is not None else {} sampler_params = self.sample_stats_prior for j, s_params in enumerate(sampler_params): rename_dict = {} for key in s_params: key_, *end = key.split(".") name = re.sub("__$", "", key_) name = "diverging" if name == "divergent" else name rename_dict[key] = ".".join((name, *end)) sampler_params[j][key] = s_params[key].astype(dtypes.get(key)) sampler_params[j] = sampler_params[j].rename(columns=rename_dict) data = _unpack_dataframes(sampler_params) return dict_to_dataset(data, coords=coords, dims=dims)
[ "def", "sample_stats_prior_to_xarray", "(", "self", ")", ":", "dtypes", "=", "{", "\"divergent__\"", ":", "bool", ",", "\"n_leapfrog__\"", ":", "np", ".", "int64", ",", "\"treedepth__\"", ":", "np", ".", "int64", "}", "# copy dims and coords", "dims", "=", "de...
49.1
20
def move_partition(self, partition, broker_destination): """Move partition to destination broker and adjust replicas.""" self.remove_partition(partition) broker_destination.add_partition(partition)
[ "def", "move_partition", "(", "self", ",", "partition", ",", "broker_destination", ")", ":", "self", ".", "remove_partition", "(", "partition", ")", "broker_destination", ".", "add_partition", "(", "partition", ")" ]
54.5
6.75
def refine_pi_cation_laro(self, all_picat, stacks): """Just important for constellations with histidine involved. If the histidine ring is positioned in stacking position to an aromatic ring in the ligand, there is in most cases stacking and pi-cation interaction reported as histidine also carries a positive charge in the ring. For such cases, only report stacking. """ i_set = [] for picat in all_picat: exclude = False for stack in stacks: if whichrestype(stack.proteinring.atoms[0]) == 'HIS' and picat.ring.obj == stack.ligandring.obj: exclude = True if not exclude: i_set.append(picat) return i_set
[ "def", "refine_pi_cation_laro", "(", "self", ",", "all_picat", ",", "stacks", ")", ":", "i_set", "=", "[", "]", "for", "picat", "in", "all_picat", ":", "exclude", "=", "False", "for", "stack", "in", "stacks", ":", "if", "whichrestype", "(", "stack", ".",...
52.5
22.785714
def patch_python_logging_handlers(): ''' Patch the python logging handlers with out mixed-in classes ''' logging.StreamHandler = StreamHandler logging.FileHandler = FileHandler logging.handlers.SysLogHandler = SysLogHandler logging.handlers.WatchedFileHandler = WatchedFileHandler logging.handlers.RotatingFileHandler = RotatingFileHandler if sys.version_info >= (3, 2): logging.handlers.QueueHandler = QueueHandler
[ "def", "patch_python_logging_handlers", "(", ")", ":", "logging", ".", "StreamHandler", "=", "StreamHandler", "logging", ".", "FileHandler", "=", "FileHandler", "logging", ".", "handlers", ".", "SysLogHandler", "=", "SysLogHandler", "logging", ".", "handlers", ".", ...
40.818182
15.181818
def _find_node_by_indices(self, point): """"Find the GSNode that is refered to by the given indices. See GSNode::_indices() """ path_index, node_index = point path = self.paths[int(path_index)] node = path.nodes[int(node_index)] return node
[ "def", "_find_node_by_indices", "(", "self", ",", "point", ")", ":", "path_index", ",", "node_index", "=", "point", "path", "=", "self", ".", "paths", "[", "int", "(", "path_index", ")", "]", "node", "=", "path", ".", "nodes", "[", "int", "(", "node_in...
32.111111
8.666667
def scramble_string(self, length): """Return random string""" return fake.text(length) if length > 5 else ''.join([fake.random_letter() for n in range(0, length)])
[ "def", "scramble_string", "(", "self", ",", "length", ")", ":", "return", "fake", ".", "text", "(", "length", ")", "if", "length", ">", "5", "else", "''", ".", "join", "(", "[", "fake", ".", "random_letter", "(", ")", "for", "n", "in", "range", "("...
59
25
def add_header_callback(self, cb, port, channel, port_mask=0xFF, channel_mask=0xFF): """ Add a callback for a specific port/header callback with the possibility to add a mask for channel and port for multiple hits for same callback. """ self.cb.append(_CallbackContainer(port, port_mask, channel, channel_mask, cb))
[ "def", "add_header_callback", "(", "self", ",", "cb", ",", "port", ",", "channel", ",", "port_mask", "=", "0xFF", ",", "channel_mask", "=", "0xFF", ")", ":", "self", ".", "cb", ".", "append", "(", "_CallbackContainer", "(", "port", ",", "port_mask", ",",...
47.222222
15.666667
def eval(self, data, data_store, *, exclude=None): """ Return a new object in which callable parameters have been evaluated. Native types are not touched and simply returned, while callable methods are executed and their return value is returned. Args: data (MultiTaskData): The data object that has been passed from the predecessor task. data_store (DataStore): The persistent data store object that allows the task to store data for access across the current workflow run. exclude (list): List of key names as strings that should be excluded from the evaluation. Returns: TaskParameters: A new TaskParameters object with the callable parameters replaced by their return value. """ exclude = [] if exclude is None else exclude result = {} for key, value in self.items(): if key in exclude: continue if value is not None and callable(value): result[key] = value(data, data_store) else: result[key] = value return TaskParameters(result)
[ "def", "eval", "(", "self", ",", "data", ",", "data_store", ",", "*", ",", "exclude", "=", "None", ")", ":", "exclude", "=", "[", "]", "if", "exclude", "is", "None", "else", "exclude", "result", "=", "{", "}", "for", "key", ",", "value", "in", "s...
41.419355
22.322581
def setup( hosts, default_keyspace, consistency=ConsistencyLevel.ONE, lazy_connect=False, retry_connect=False, **kwargs): """ Records the hosts and connects to one of them :param hosts: list of hosts, see http://datastax.github.io/python-driver/api/cassandra/cluster.html :type hosts: list :param default_keyspace: The default keyspace to use :type default_keyspace: str :param consistency: The global consistency level :type consistency: int :param lazy_connect: True if should not connect until first use :type lazy_connect: bool :param retry_connect: bool :param retry_connect: True if we should retry to connect even if there was a connection failure initially """ global cluster, session, default_consistency_level, lazy_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") if not default_keyspace: raise UndefinedKeyspaceException() from cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency if lazy_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) return cluster = Cluster(hosts, **kwargs) try: session = cluster.connect() except NoHostAvailable: if retry_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) raise session.row_factory = dict_factory
[ "def", "setup", "(", "hosts", ",", "default_keyspace", ",", "consistency", "=", "ConsistencyLevel", ".", "ONE", ",", "lazy_connect", "=", "False", ",", "retry_connect", "=", "False", ",", "*", "*", "kwargs", ")", ":", "global", "cluster", ",", "session", "...
35.188679
18.54717
def sphergal_to_rectgal(l,b,d,vr,pmll,pmbb,degree=False): """ NAME: sphergal_to_rectgal PURPOSE: transform phase-space coordinates in spherical Galactic coordinates to rectangular Galactic coordinates (can take vector inputs) INPUT: l - Galactic longitude (rad) b - Galactic lattitude (rad) d - distance (kpc) vr - line-of-sight velocity (km/s) pmll - proper motion in the Galactic longitude direction (mu_l*cos(b) ) (mas/yr) pmbb - proper motion in the Galactic lattitude (mas/yr) degree - (bool) if True, l and b are in degrees OUTPUT: (X,Y,Z,vx,vy,vz) in (kpc,kpc,kpc,km/s,km/s,km/s) HISTORY: 2009-10-25 - Written - Bovy (NYU) """ XYZ= lbd_to_XYZ(l,b,d,degree=degree) vxvyvz= vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,XYZ=False,degree=degree) if sc.array(l).shape == (): return sc.array([XYZ[0],XYZ[1],XYZ[2],vxvyvz[0],vxvyvz[1],vxvyvz[2]]) else: out=sc.zeros((len(l),6)) out[:,0:3]= XYZ out[:,3:6]= vxvyvz return out
[ "def", "sphergal_to_rectgal", "(", "l", ",", "b", ",", "d", ",", "vr", ",", "pmll", ",", "pmbb", ",", "degree", "=", "False", ")", ":", "XYZ", "=", "lbd_to_XYZ", "(", "l", ",", "b", ",", "d", ",", "degree", "=", "degree", ")", "vxvyvz", "=", "v...
23.909091
27.5
def generate_thumbnail(source, outname, box, delay, fit=True, options=None, converter='ffmpeg'): """Create a thumbnail image for the video source, based on ffmpeg.""" logger = logging.getLogger(__name__) tmpfile = outname + ".tmp.jpg" # dump an image of the video cmd = [converter, '-i', source, '-an', '-r', '1', '-ss', delay, '-vframes', '1', '-y', tmpfile] logger.debug('Create thumbnail for video: %s', ' '.join(cmd)) check_subprocess(cmd, source, outname) # use the generate_thumbnail function from sigal.image image.generate_thumbnail(tmpfile, outname, box, fit=fit, options=options) # remove the image os.unlink(tmpfile)
[ "def", "generate_thumbnail", "(", "source", ",", "outname", ",", "box", ",", "delay", ",", "fit", "=", "True", ",", "options", "=", "None", ",", "converter", "=", "'ffmpeg'", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "...
40.705882
18.764706
def setColor(self, vehID, color): """setColor(string, (integer, integer, integer, integer)) sets color for vehicle with the given ID. i.e. (255,0,0,0) for the color red. The fourth integer (alpha) is only used when drawing vehicles with raster images """ self._connection._beginMessage( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_COLOR, vehID, 1 + 1 + 1 + 1 + 1) self._connection._string += struct.pack("!BBBBB", tc.TYPE_COLOR, int( color[0]), int(color[1]), int(color[2]), int(color[3])) self._connection._sendExact()
[ "def", "setColor", "(", "self", ",", "vehID", ",", "color", ")", ":", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_SET_VEHICLE_VARIABLE", ",", "tc", ".", "VAR_COLOR", ",", "vehID", ",", "1", "+", "1", "+", "1", "+", "1", "+"...
53.545455
15.909091
def register_backend(name, backend, allow_overwrite=False): """Register new backend. Args: name (str): The name of backend. gateclass (type): The type object of backend allow_overwrite (bool, optional): If True, allow to overwrite the existing backend. Otherwise, raise the ValueError. Raises: ValueError: The name is duplicated with existing backend. When `allow_overwrite=True`, this error is not raised. """ if hasattr(Circuit, "run_with_" + name): if allow_overwrite: warnings.warn(f"Circuit has attribute `run_with_{name}`.") else: raise ValueError(f"Circuit has attribute `run_with_{name}`.") if not allow_overwrite: if name in BACKENDS: raise ValueError(f"Backend '{name}' is already registered as backend.") BACKENDS[name] = backend
[ "def", "register_backend", "(", "name", ",", "backend", ",", "allow_overwrite", "=", "False", ")", ":", "if", "hasattr", "(", "Circuit", ",", "\"run_with_\"", "+", "name", ")", ":", "if", "allow_overwrite", ":", "warnings", ".", "warn", "(", "f\"Circuit has ...
42.772727
21.636364
def let_variable(self, frame_id, var_name, expression_value): """ Let a frame's var with a value by building then eval a let expression with breakoints disabled. """ breakpoints_backup = IKBreakpoint.backup_breakpoints_state() IKBreakpoint.disable_all_breakpoints() let_expression = "%s=%s" % (var_name, expression_value,) eval_frame = ctypes.cast(frame_id, ctypes.py_object).value global_vars = eval_frame.f_globals local_vars = eval_frame.f_locals try: exec(let_expression, global_vars, local_vars) error_message="" except Exception as e: t, result = sys.exc_info()[:2] if isinstance(t, str): result_type = t else: result_type = str(t.__name__) error_message = "%s: %s" % (result_type, result,) IKBreakpoint.restore_breakpoints_state(breakpoints_backup) _logger.e_debug("let_variable(%s) => %s", let_expression, error_message or 'succeed') return error_message
[ "def", "let_variable", "(", "self", ",", "frame_id", ",", "var_name", ",", "expression_value", ")", ":", "breakpoints_backup", "=", "IKBreakpoint", ".", "backup_breakpoints_state", "(", ")", "IKBreakpoint", ".", "disable_all_breakpoints", "(", ")", "let_expression", ...
38.137931
15.931034
def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False, start=None, stop=None, step=None): """ Provides access to an HDF5 table, sorted by an indexed column, e.g.:: >>> import petl as etl >>> import tables >>> # set up a new hdf5 table to demonstrate with ... h5file = tables.open_file('example.h5', mode='w', title='Test file') >>> h5file.create_group('/', 'testgroup', 'Test Group') /testgroup (Group) 'Test Group' children := [] >>> class FooBar(tables.IsDescription): ... foo = tables.Int32Col(pos=0) ... bar = tables.StringCol(6, pos=2) ... >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, 'Test Table') >>> # load some data into the table ... table1 = (('foo', 'bar'), ... (3, b'asdfgh'), ... (2, b'qwerty'), ... (1, b'zxcvbn')) >>> for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... >>> h5table.cols.foo.create_csindex() # CS index is required 0 >>> h5file.flush() >>> h5file.close() >>> # ... # access the data, sorted by the indexed column ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable', ... sortby='foo') >>> table2 +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'zxcvbn' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'asdfgh' | +-----+-----------+ """ assert sortby is not None, 'no column specified to sort by' return HDF5SortedView(source, where=where, name=name, sortby=sortby, checkCSI=checkCSI, start=start, stop=stop, step=step)
[ "def", "fromhdf5sorted", "(", "source", ",", "where", "=", "None", ",", "name", "=", "None", ",", "sortby", "=", "None", ",", "checkCSI", "=", "False", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ")", ":", "assert...
37.980769
17.211538
def boolean_input(self, question, default=None): ''' Method for yes/no boolean inputs ''' result = input("%s: " % question) if not result and default is not None: return default while len(result) < 1 or result[0].lower() not in "yn": result = input("Please answer yes or no: ") return result[0].lower() == "y"
[ "def", "boolean_input", "(", "self", ",", "question", ",", "default", "=", "None", ")", ":", "result", "=", "input", "(", "\"%s: \"", "%", "question", ")", "if", "not", "result", "and", "default", "is", "not", "None", ":", "return", "default", "while", ...
38.9
12.9
def nt_db_search(self, files, base, unpack, euk_check, search_method, maximum_range, threads, evalue): ''' Nucleotide database search pipeline - pipeline where reads are searched as nucleotides, and hits are identified using nhmmer searches Parameters ---------- files : obj graftm_output_paths object. base : str The name of the input file, stripped of all suffixes, and paths. Used for creating file names with 'files' object. unpack : obj UnpackRawReads object, returns string command that will output sequences to stdout when called on command line (use: unpack.command_line()) euk_check : bool True indicates the sample will be checked for eukaryotic reads, False indicates not. search_method : str The method for searching e.g. 'hmmsearch' or 'diamond' maximum_range : int Maximum range that a gene can extend within a contig. Any hits that extend beyond this length cannot be linked. max_range is defined as 1.5 X the average length of all full length genes used in the search database. This is defined in the CONTENTS.json file within a gpkg. threads : str Number of threads for hmmer to use evalue : str Evalue cutoff for hmmer to use Returns ------- String path to amino acid fasta file of reads that hit ''' # Define outputs hmmsearch_output_table = files.hmmsearch_output_path(base) hit_reads_fasta = files.fa_output_path(base) return \ self.search_and_extract_nucleotides_matching_nucleotide_database(\ unpack, euk_check, search_method, maximum_range, threads, evalue, hmmsearch_output_table, hit_reads_fasta)
[ "def", "nt_db_search", "(", "self", ",", "files", ",", "base", ",", "unpack", ",", "euk_check", ",", "search_method", ",", "maximum_range", ",", "threads", ",", "evalue", ")", ":", "# Define outputs", "hmmsearch_output_table", "=", "files", ".", "hmmsearch_outpu...
46
23.92
def create_from_textgrid(self,word_list): """ Fills the ParsedResponse object with a list of TextGrid.Word objects originally from a .TextGrid file. :param list word_list: List of TextGrid.Word objects corresponding to words/tokens in the subject response. Modifies: - self.timing_included: TextGrid files include timing information - self.unit_list: fills it with Unit objects derived from the word_list argument. If the type is 'SEMANTIC', the words in these units are automatically lemmatized and made into compound words where appropriate. """ self.timing_included = True for i, entry in enumerate(word_list): self.unit_list.append(Unit(entry, format="TextGrid", type=self.type, index_in_timed_response=i)) # combine compound words, remove pluralizations, etc if self.type == "SEMANTIC": self.lemmatize() self.tokenize()
[ "def", "create_from_textgrid", "(", "self", ",", "word_list", ")", ":", "self", ".", "timing_included", "=", "True", "for", "i", ",", "entry", "in", "enumerate", "(", "word_list", ")", ":", "self", ".", "unit_list", ".", "append", "(", "Unit", "(", "entr...
49.571429
24.380952
def iter_records_for(self, package_name): """ Iterate records for a specific package. """ entry_points = self.packages.get(package_name, NotImplemented) if entry_points is NotImplemented: logger.debug( "package '%s' has not declared any entry points for the '%s' " "registry for artifact construction", package_name, self.registry_name, ) return iter([]) logger.debug( "package '%s' has declared %d entry points for the '%s' " "registry for artifact construction", package_name, len(entry_points), self.registry_name, ) return iter(entry_points.values())
[ "def", "iter_records_for", "(", "self", ",", "package_name", ")", ":", "entry_points", "=", "self", ".", "packages", ".", "get", "(", "package_name", ",", "NotImplemented", ")", "if", "entry_points", "is", "NotImplemented", ":", "logger", ".", "debug", "(", ...
36.05
17.45
def start_external_instances(self, late_start=False): """Launch external instances that are load correctly :param late_start: If late_start, don't look for last_init_try :type late_start: bool :return: None """ for instance in [i for i in self.instances if i.is_external]: # But maybe the init failed a bit, so bypass this ones from now if not self.try_instance_init(instance, late_start=late_start): logger.warning("The module '%s' failed to init, I will try to restart it later", instance.name) self.set_to_restart(instance) continue # ok, init succeed logger.info("Starting external module %s", instance.name) instance.start()
[ "def", "start_external_instances", "(", "self", ",", "late_start", "=", "False", ")", ":", "for", "instance", "in", "[", "i", "for", "i", "in", "self", ".", "instances", "if", "i", ".", "is_external", "]", ":", "# But maybe the init failed a bit, so bypass this ...
44.5
21.333333
def set_log_level(self, level): """Set the logging level. Parameters ---------- level : logging level constant The value to set the logging level to. """ self._log_level = level if self._python_logger: try: level = self.PYTHON_LEVEL.get(level) except ValueError as err: raise FailReply("Unknown logging level '%s'" % (level)) self._python_logger.setLevel(level)
[ "def", "set_log_level", "(", "self", ",", "level", ")", ":", "self", ".", "_log_level", "=", "level", "if", "self", ".", "_python_logger", ":", "try", ":", "level", "=", "self", ".", "PYTHON_LEVEL", ".", "get", "(", "level", ")", "except", "ValueError", ...
30.25
15
def post_helper(form_tag=True, edit_mode=False): """ Post's form layout helper """ helper = FormHelper() helper.form_action = '.' helper.attrs = {'data_abide': ''} helper.form_tag = form_tag fieldsets = [ Row( Column( 'text', css_class='small-12' ), ), ] # Threadwatch option is not in edit form if not edit_mode: fieldsets.append( Row( Column( 'threadwatch', css_class='small-12' ), ), ) fieldsets = fieldsets+[ ButtonHolderPanel( Submit('submit', _('Submit')), css_class='text-right', ), ] helper.layout = Layout(*fieldsets) return helper
[ "def", "post_helper", "(", "form_tag", "=", "True", ",", "edit_mode", "=", "False", ")", ":", "helper", "=", "FormHelper", "(", ")", "helper", ".", "form_action", "=", "'.'", "helper", ".", "attrs", "=", "{", "'data_abide'", ":", "''", "}", "helper", "...
20.794872
18.230769
def get_mysql_connection(host, user, port, password, database, ssl={}): """ MySQL connection """ return pymysql.connect(host=host, user=user, port=port, password=password, db=database, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor, client_flag=pymysql.constants.CLIENT.MULTI_STATEMENTS, ssl=ssl )
[ "def", "get_mysql_connection", "(", "host", ",", "user", ",", "port", ",", "password", ",", "database", ",", "ssl", "=", "{", "}", ")", ":", "return", "pymysql", ".", "connect", "(", "host", "=", "host", ",", "user", "=", "user", ",", "port", "=", ...
42.153846
13.538462
def execute(self, command, blocking=True, exec_create_kwargs=None, exec_start_kwargs=None): """ Execute a command in this container -- the container needs to be running. If the command fails, a ConuException is thrown. This is a blocking call by default and writes output of the command to logger using the INFO level -- this behavior can be changed if you set the argument `blocking` to `False`. If not blocking, you should consume the returned iterator in order to see logs or know when the command finished: :: for line in container.execute(["ping", "-c", "4", "8.8.8.8"], blocking=False): print(line) print("command finished") :param command: list of str, command to execute in the container :param blocking: bool, if True blocks until the command finishes :param exec_create_kwargs: dict, params to pass to exec_create() :param exec_start_kwargs: dict, params to pass to exec_start() :return: iterator if non-blocking or list of bytes if blocking """ logger.info("running command %s", command) exec_create_kwargs = exec_create_kwargs or {} exec_start_kwargs = exec_start_kwargs or {} exec_start_kwargs["stream"] = True # we want stream no matter what exec_i = self.d.exec_create(self.get_id(), command, **exec_create_kwargs) output = self.d.exec_start(exec_i, **exec_start_kwargs) if blocking: response = [] for line in output: response.append(line) logger.info("%s", line.decode("utf-8").strip("\n\r")) e_inspect = self.d.exec_inspect(exec_i) exit_code = e_inspect["ExitCode"] if exit_code: logger.error("command failed") logger.info("exec metadata: %s", e_inspect) raise ConuException("failed to execute command %s, exit code %s" % ( command, exit_code)) return response # TODO: for interactive use cases we need to provide API so users can do exec_inspect return output
[ "def", "execute", "(", "self", ",", "command", ",", "blocking", "=", "True", ",", "exec_create_kwargs", "=", "None", ",", "exec_start_kwargs", "=", "None", ")", ":", "logger", ".", "info", "(", "\"running command %s\"", ",", "command", ")", "exec_create_kwargs...
44.979167
25.604167
def _parse_vertex_tuple(s): """Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...).""" vt = [0, 0, 0] for i, c in enumerate(s.split('/')): if c: vt[i] = int(c) return tuple(vt)
[ "def", "_parse_vertex_tuple", "(", "s", ")", ":", "vt", "=", "[", "0", ",", "0", ",", "0", "]", "for", "i", ",", "c", "in", "enumerate", "(", "s", ".", "split", "(", "'/'", ")", ")", ":", "if", "c", ":", "vt", "[", "i", "]", "=", "int", "...
29.428571
16
def clear_request(name=None): ''' .. versionadded:: 2017.7.3 Clear out the state execution request without executing it CLI Example: .. code-block:: bash salt '*' state.clear_request ''' notify_path = os.path.join(__opts__['cachedir'], 'req_state.p') serial = salt.payload.Serial(__opts__) if not os.path.isfile(notify_path): return True if not name: try: os.remove(notify_path) except (IOError, OSError): pass else: req = check_request() if name in req: req.pop(name) else: return False with salt.utils.files.set_umask(0o077): try: if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path)) with salt.utils.files.fopen(notify_path, 'w+b') as fp_: serial.dump(req, fp_) except (IOError, OSError): log.error( 'Unable to write state request file %s. Check permission.', notify_path ) return True
[ "def", "clear_request", "(", "name", "=", "None", ")", ":", "notify_path", "=", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'cachedir'", "]", ",", "'req_state.p'", ")", "serial", "=", "salt", ".", "payload", ".", "Serial", "(", "__opts__", "...
29.55
20.3
def scramble(expnums, ccd, version='p', dry_run=False):
    """Swap the EXPNUM/MJD-OBS headers between a triplet of exposures and
    store the scrambled frames as version 's' images.

    NOTE(review): ``order = [0, 2, 1]`` assumes exactly three exposures
    (the last two are swapped); other lengths would raise IndexError --
    confirm callers always pass triplets.
    """
    mjds = []
    fobjs = []
    for expnum in expnums:
        filename = storage.get_image(expnum, ccd=ccd, version=version)
        fobjs.append(fits.open(filename))
        # Pull out values to replace in headers.. must pull them
        # as otherwise we get pointers...
        mjds.append(fobjs[-1][0].header['MJD-OBS'])

    order = [0, 2, 1]
    for idx in range(len(fobjs)):
        logging.info("Flipping %d to %d" % (fobjs[idx][0].header['EXPNUM'],
                                            expnums[order[idx]]))
        # Re-label this frame as the exposure at the scrambled position.
        fobjs[idx][0].header['EXPNUM'] = expnums[order[idx]]
        fobjs[idx][0].header['MJD-OBS'] = mjds[order[idx]]
        uri = storage.get_uri(expnums[order[idx]],
                              ccd=ccd,
                              version='s',
                              ext='fits')
        fname = os.path.basename(uri)
        # Remove any stale local copy before writing.
        if os.access(fname, os.F_OK):
            os.unlink(fname)
        fobjs[idx].writeto(fname)
        if dry_run:
            # Keep the local file but skip the upload.
            continue
        storage.copy(fname, uri)

    return
[ "def", "scramble", "(", "expnums", ",", "ccd", ",", "version", "=", "'p'", ",", "dry_run", "=", "False", ")", ":", "mjds", "=", "[", "]", "fobjs", "=", "[", "]", "for", "expnum", "in", "expnums", ":", "filename", "=", "storage", ".", "get_image", "...
36.677419
16.516129
def peek_string(self, lpBaseAddress, fUnicode = False, dwMaxSize = 0x1000):
    """
    Tries to read an ASCII or Unicode string
    from the address space of the process.

    @see: L{read_string}

    @type  lpBaseAddress: int
    @param lpBaseAddress: Memory address to begin reading.

    @type  fUnicode: bool
    @param fUnicode: C{True} is the string is expected to be Unicode,
        C{False} if it's expected to be ANSI.

    @type  dwMaxSize: int
    @param dwMaxSize: Maximum allowed string length to read, in bytes.

    @rtype:  str, compat.unicode
    @return: String read from the process memory space.
        It B{doesn't} include the terminating null character.
        Returns an empty string on failure.
    """

    # Validate the parameters.
    if not lpBaseAddress or dwMaxSize == 0:
        if fUnicode:
            return u''
        return ''
    # (The old ``if not dwMaxSize: dwMaxSize = 0x1000`` branch was
    # unreachable -- dwMaxSize == 0 already returned above -- and has
    # been removed.)

    # Read the string.
    szString = self.peek(lpBaseAddress, dwMaxSize)

    # If the string is Unicode...
    if fUnicode:

        # Decode the string.
        szString = compat.unicode(szString, 'U16', 'replace')
        terminator = szString.find(u'\0')

    # If the string is ANSI...
    else:
        terminator = szString.find('\0')

    # Truncate at the first null char only if one was found.
    # Previously a missing terminator made find() return -1 and the
    # slice ``szString[:-1]`` silently dropped the last character.
    if terminator >= 0:
        szString = szString[:terminator]

    # Return the decoded string.
    return szString
[ "def", "peek_string", "(", "self", ",", "lpBaseAddress", ",", "fUnicode", "=", "False", ",", "dwMaxSize", "=", "0x1000", ")", ":", "# Validate the parameters.", "if", "not", "lpBaseAddress", "or", "dwMaxSize", "==", "0", ":", "if", "fUnicode", ":", "return", ...
32.894737
20.789474
def _set_zoning(self, v, load=False):
    """
    Setter method for zoning, mapped from YANG variable /zoning (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_zoning is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_zoning() directly.
    """
    # NOTE(review): auto-generated YANG binding setter -- presumably
    # produced by pyangbind; do not hand-edit the YANGDynClass arguments.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce/validate the incoming value into the YANG container type.
        t = YANGDynClass(v,base=zoning.zoning, is_container='container', presence=False, yang_name="zoning", rest_name="zoning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Zoning commands', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface the generated type description so callers know what
        # shape of value is acceptable.
        raise ValueError({
            'error-string': """zoning must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=zoning.zoning, is_container='container', presence=False, yang_name="zoning", rest_name="zoning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Zoning commands', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='container', is_config=True)""",
        })

    self.__zoning = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_zoning", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
74.909091
34.409091
def plantloopfieldlists(data):
    """Return the field-name list for every PLANTLOOP object in ``data``.

    :param data: parsed IDF data; ``data.dt`` maps upper-cased object keys
        to the list of objects of that type.
    :returns: one field-name list per PLANTLOOP object.  Each inner list is
        now an independent copy -- the original ``[[...]] * n`` produced
        *n* references to a single shared list, so mutating one entry
        silently mutated them all.
    """
    objkey = 'plantloop'.upper()
    numobjects = len(data.dt[objkey])
    fieldnames = [
        'Name',
        'Plant Side Inlet Node Name',
        'Plant Side Outlet Node Name',
        'Plant Side Branch List Name',
        'Demand Side Inlet Node Name',
        'Demand Side Outlet Node Name',
        'Demand Side Branch List Name']
    # list(...) per iteration -> each sublist is a distinct object.
    return [list(fieldnames) for _ in range(numobjects)]
[ "def", "plantloopfieldlists", "(", "data", ")", ":", "objkey", "=", "'plantloop'", ".", "upper", "(", ")", "numobjects", "=", "len", "(", "data", ".", "dt", "[", "objkey", "]", ")", "return", "[", "[", "'Name'", ",", "'Plant Side Inlet Node Name'", ",", ...
34.166667
8
def _extend_spikes(spike_ids, spike_clusters):
    """Return all spikes belonging to the clusters containing the specified
    spikes."""
    # Clusters touched by the requested spikes.
    touched_clusters = _unique(spike_clusters[spike_ids])
    # Every spike that lives in one of those clusters.
    cluster_members = _spikes_in_clusters(spike_clusters, touched_clusters)
    # Keep only the spikes that were not explicitly requested.
    return np.setdiff1d(cluster_members, spike_ids, assume_unique=True)
[ "def", "_extend_spikes", "(", "spike_ids", ",", "spike_clusters", ")", ":", "# We find the spikes belonging to modified clusters.", "# What are the old clusters that are modified by the assignment?", "old_spike_clusters", "=", "spike_clusters", "[", "spike_ids", "]", "unique_clusters...
53.384615
14.384615
def find_profile(self, bitarray, eep_rorg, rorg_func, rorg_type, direction=None, command=None):
    ''' Find profile and data description, matching RORG, FUNC and TYPE '''
    # NOTE(review): ``bitarray`` is unused here; kept for interface
    # compatibility with callers.
    if not self.init_ok:
        self.logger.warn('EEP.xml not loaded!')
        return None

    if eep_rorg not in self.telegrams.keys():
        self.logger.warn('Cannot find rorg in EEP!')
        return None
    rorg_entry = self.telegrams[eep_rorg]

    if rorg_func not in rorg_entry.keys():
        self.logger.warn('Cannot find func in EEP!')
        return None
    func_entry = rorg_entry[rorg_func]

    if rorg_type not in func_entry.keys():
        self.logger.warn('Cannot find type in EEP!')
        return None

    profile = func_entry[rorg_type]

    if command:
        # multiple commands can be defined, with the command id always in
        # same location (per RORG-FUNC-TYPE).
        eep_command = profile.find('command', recursive=False)
        # If commands are not set in EEP, or command is None,
        # get the first data as a "best guess".
        if not eep_command:
            return profile.find('data', recursive=False)
        # If eep_command is defined, so should be data.command
        return profile.find('data', {'command': str(command)}, recursive=False)

    # extract data description; the direction tag is optional
    if direction is None:
        return profile.find('data', recursive=False)
    return profile.find('data', {'direction': direction}, recursive=False)
[ "def", "find_profile", "(", "self", ",", "bitarray", ",", "eep_rorg", ",", "rorg_func", ",", "rorg_type", ",", "direction", "=", "None", ",", "command", "=", "None", ")", ":", "if", "not", "self", ".", "init_ok", ":", "self", ".", "logger", ".", "warn"...
42.972222
25.638889
def assert_instance_deleted(self, model_class, **kwargs):
    """
    Checks if the model instance was deleted from the database.

    For example::

        >>> with self.assert_instance_deleted(Article, slug='lorem-ipsum'):
        ...     Article.objects.get(slug='lorem-ipsum').delete()

    :param model_class: the model whose instance is expected to disappear
    :param kwargs: lookup arguments identifying the instance
    """
    # Returns a context manager built from the existence assertions;
    # presumably it asserts the instance exists on enter and is gone on
    # exit -- see _InstanceContext for the exact protocol.
    return _InstanceContext(
        self.assert_instance_exists, self.assert_instance_does_not_exist,
        model_class, **kwargs
    )
[ "def", "assert_instance_deleted", "(", "self", ",", "model_class", ",", "*", "*", "kwargs", ")", ":", "return", "_InstanceContext", "(", "self", ".", "assert_instance_exists", ",", "self", ".", "assert_instance_does_not_exist", ",", "model_class", ",", "*", "*", ...
31.933333
19
def destroy(self, force=False):
    """UnmanagedLXC Destructor.

    It requires force to be true in order to work.  Otherwise it throws
    an error.
    """
    # Guard clause: refuse to destroy unless explicitly forced.
    if not force:
        raise UnmanagedLXCError('Destroying an unmanaged LXC might not '
                                'work. To continue please call this method with force=True')
    super(UnmanagedLXC, self).destroy()
[ "def", "destroy", "(", "self", ",", "force", "=", "False", ")", ":", "if", "force", ":", "super", "(", "UnmanagedLXC", ",", "self", ")", ".", "destroy", "(", ")", "else", ":", "raise", "UnmanagedLXCError", "(", "'Destroying an unmanaged LXC might not '", "'w...
36.090909
21.909091
def pad(data_to_pad, block_size, style='pkcs7'):
    """Apply standard padding.

    :Parameters:
      data_to_pad : byte string
        The data that needs to be padded.
      block_size : integer
        The block boundary to use for padding. The output length is
        guaranteed to be a multiple of ``block_size``.
      style : string
        Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or
        *'x923'*.

    :Return:
      The original data with the appropriate padding added at the end.
    """
    padding_len = block_size - len(data_to_pad) % block_size
    if style == 'pkcs7':
        # Every pad byte carries the pad length.
        suffix = bchr(padding_len) * padding_len
    elif style == 'x923':
        # Zero fill; only the last byte carries the pad length.
        suffix = bchr(0) * (padding_len - 1) + bchr(padding_len)
    elif style == 'iso7816':
        # 0x80 marker followed by zero fill.
        suffix = bchr(128) + bchr(0) * (padding_len - 1)
    else:
        raise ValueError("Unknown padding style")
    return data_to_pad + suffix
[ "def", "pad", "(", "data_to_pad", ",", "block_size", ",", "style", "=", "'pkcs7'", ")", ":", "padding_len", "=", "block_size", "-", "len", "(", "data_to_pad", ")", "%", "block_size", "if", "style", "==", "'pkcs7'", ":", "padding", "=", "bchr", "(", "padd...
35.52
17.8
def _getApplication(self):
    """Get the base application UIElement.

    If the UIElement is a child of the application, it will try to get
    the AXParent until it reaches the top application level element.
    """
    element = self
    while True:
        try:
            element = element.AXParent
        except _a11y.ErrorUnsupported:
            # No parent above this element: we reached the application.
            break
    return element
[ "def", "_getApplication", "(", "self", ")", ":", "app", "=", "self", "while", "True", ":", "try", ":", "app", "=", "app", ".", "AXParent", "except", "_a11y", ".", "ErrorUnsupported", ":", "break", "return", "app" ]
28.857143
17.928571
def list_pools(self, retrieve_all=True, **_params): """Fetches a list of all load balancer pools for a project.""" # Pass filters in "params" argument to do_request return self.list('pools', self.pools_path, retrieve_all, **_params)
[ "def", "list_pools", "(", "self", ",", "retrieve_all", "=", "True", ",", "*", "*", "_params", ")", ":", "# Pass filters in \"params\" argument to do_request", "return", "self", ".", "list", "(", "'pools'", ",", "self", ".", "pools_path", ",", "retrieve_all", ","...
55.4
11.4
def read_label_list(path):
    """
    Reads labels from an Audacity label file
    and returns them wrapped in a
    :py:class:`audiomate.annotations.LabelList`.

    Args:
        path (str): Path to the Audacity label file

    Returns:
        audiomate.annotations.LabelList: Label list containing the labels
    """
    ll = annotations.LabelList()

    # Each record is indexed as (start, end, value) -> Label(value,
    # start=..., end=...).
    for record in read_label_file(path):
        ll.add(annotations.Label(record[2], start=record[0], end=record[1]))

    return ll
[ "def", "read_label_list", "(", "path", ")", ":", "ll", "=", "annotations", ".", "LabelList", "(", ")", "for", "record", "in", "read_label_file", "(", "path", ")", ":", "ll", ".", "add", "(", "annotations", ".", "Label", "(", "record", "[", "2", "]", ...
27.529412
22.941176
def get_contig_names(fasta_file):
    """
    Gets contig names from a fasta file using SeqIO.

    :param fasta_file: Full path to uncompressed, fasta-formatted file
    :return: List of contig names.
    """
    # Comprehension replaces the manual append loop; behavior unchanged.
    return [contig.id for contig in SeqIO.parse(fasta_file, 'fasta')]
[ "def", "get_contig_names", "(", "fasta_file", ")", ":", "contig_names", "=", "list", "(", ")", "for", "contig", "in", "SeqIO", ".", "parse", "(", "fasta_file", ",", "'fasta'", ")", ":", "contig_names", ".", "append", "(", "contig", ".", "id", ")", "retur...
34
10
def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0):
    """Get a list of drama series

    @param str sort     pick how results should be sorted, should be one
                            of META.SORT_*
    @param int limit    limit number of series to return, there doesn't
                            seem to be an upper bound
    @param int offset   list series starting from this offset, for pagination
    @return list<crunchyroll.models.Series>
    """
    # Delegate to the generic series listing, fixed to the drama media type.
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_DRAMA,
        filter=sort,
        limit=limit,
        offset=offset)
    return result
[ "def", "list_drama_series", "(", "self", ",", "sort", "=", "META", ".", "SORT_ALPHA", ",", "limit", "=", "META", ".", "MAX_SERIES", ",", "offset", "=", "0", ")", ":", "result", "=", "self", ".", "_android_api", ".", "list_series", "(", "media_type", "=",...
43.9375
19.0625
def user_has_super_roles():
    """Return whether the current user belongs to superuser roles
    """
    member = api.get_current_user()
    super_roles = ("LabManager", "Manager")
    # any() short-circuits and, unlike the previous len(filter(...)),
    # also works on Python 3 where filter() returns a lazy iterator
    # that has no len().
    return any(role in super_roles for role in member.getRoles())
[ "def", "user_has_super_roles", "(", ")", ":", "member", "=", "api", ".", "get_current_user", "(", ")", "super_roles", "=", "[", "\"LabManager\"", ",", "\"Manager\"", "]", "diff", "=", "filter", "(", "lambda", "role", ":", "role", "in", "super_roles", ",", ...
38
9.571429
def dispatch_line(self, frame):
    """Handle line action and return the next line callback."""
    # Delegate to the underlying pdb implementation first.
    callback = TerminalPdb.dispatch_line(self, frame)
    # If the ipdb session ended, don't return a callback for the next line
    if self.stoplineno == -1:
        return None
    return callback
[ "def", "dispatch_line", "(", "self", ",", "frame", ")", ":", "callback", "=", "TerminalPdb", ".", "dispatch_line", "(", "self", ",", "frame", ")", "# If the ipdb session ended, don't return a callback for the next line", "if", "self", ".", "stoplineno", "==", "-", "...
34.666667
20.555556
def ensure_valid_environment_config(module_name, config):
    """Exit if config is invalid."""
    # Early return on the happy path: a namespace is all we require.
    if config.get('namespace'):
        return
    LOGGER.fatal("staticsite: module %s's environment configuration is "
                 "missing a namespace definition!",
                 module_name)
    sys.exit(1)
[ "def", "ensure_valid_environment_config", "(", "module_name", ",", "config", ")", ":", "if", "not", "config", ".", "get", "(", "'namespace'", ")", ":", "LOGGER", ".", "fatal", "(", "\"staticsite: module %s's environment configuration is \"", "\"missing a namespace definit...
44.428571
14.428571
def add_data_point(self, x, y):
    """Adds a data point to the series.

    :param x: The numerical x value to be added.
    :param y: The numerical y value to be added."""
    if not is_numeric(x):
        raise TypeError("x value must be numeric, not '%s'" % str(x))
    if not is_numeric(y):
        raise TypeError("y value must be numeric, not '%s'" % str(y))
    previous_last_x = self._data[-1][0]
    self._data.append((x, y))
    # Appending out of order: re-sort so the series stays ordered by x.
    if x < previous_last_x:
        self._data = sorted(self._data, key=lambda point: point[0])
[ "def", "add_data_point", "(", "self", ",", "x", ",", "y", ")", ":", "if", "not", "is_numeric", "(", "x", ")", ":", "raise", "TypeError", "(", "\"x value must be numeric, not '%s'\"", "%", "str", "(", "x", ")", ")", "if", "not", "is_numeric", "(", "y", ...
39.5
16.5
def parse_uploaded_image(field):
    '''Parse an uploaded image and save into a db.ImageField()'''
    args = image_parser.parse_args()
    image = args['file']
    # Reject any mimetype outside the image whitelist.
    if image.mimetype not in IMAGES_MIMETYPES:
        api.abort(400, 'Unsupported image format')
    bbox = args.get('bbox', None)
    if bbox:
        # bbox arrives as a comma-separated coordinate string; go through
        # float() first so values like "12.0" parse cleanly into ints.
        bbox = [int(float(c)) for c in bbox.split(',')]
    field.save(image, bbox=bbox)
[ "def", "parse_uploaded_image", "(", "field", ")", ":", "args", "=", "image_parser", ".", "parse_args", "(", ")", "image", "=", "args", "[", "'file'", "]", "if", "image", ".", "mimetype", "not", "in", "IMAGES_MIMETYPES", ":", "api", ".", "abort", "(", "40...
35
15.181818
def write_property(fh, key, value):
    """
    Write a single property to the file in Java properties format.

    :param fh: a writable file-like object
    :param key: the key to write
    :param value: the value to write
    """
    # The COMMENT sentinel routes the value to a comment line instead of
    # a key=value pair.
    if key is COMMENT:
        write_comment(fh, value)
        return
    _require_string(key, 'keys')
    _require_string(value, 'values')
    # Escaped key, '=' separator, escaped value, newline (all bytes).
    fh.write(_escape_key(key))
    fh.write(b'=')
    fh.write(_escape_value(value))
    fh.write(b'\n')
[ "def", "write_property", "(", "fh", ",", "key", ",", "value", ")", ":", "if", "key", "is", "COMMENT", ":", "write_comment", "(", "fh", ",", "value", ")", "return", "_require_string", "(", "key", ",", "'keys'", ")", "_require_string", "(", "value", ",", ...
22.947368
16.315789
def determine_device(kal_out):
    """Extract and return device from scan results.

    :param kal_out: raw kalibrate output text
    :return: the device description (text after ``"Using device "``) from
        the last matching line, or None when no line matches.

    The previous ``while device == ""`` wrapper always executed exactly
    once (device ended up either non-empty or None after the scan), so it
    has been removed; the last-match-wins / None-on-no-match semantics
    are preserved.
    """
    device = None
    for line in kal_out.splitlines():
        if "Using device " in line:
            device = str(line.split(' ', 2)[-1])
    return device
[ "def", "determine_device", "(", "kal_out", ")", ":", "device", "=", "\"\"", "while", "device", "==", "\"\"", ":", "for", "line", "in", "kal_out", ".", "splitlines", "(", ")", ":", "if", "\"Using device \"", "in", "line", ":", "device", "=", "str", "(", ...
32
12
def repair(source, validate_archive=False):
    """Use auditwheel (https://github.com/pypa/auditwheel) to attempt
    and repair all wheels in a wagon.

    The repair process will:

    1. Extract the wagon and its metadata
    2. Repair all wheels
    3. Update the metadata with the new wheel names and platform
    4. Repack the wagon

    :param source: path or url of the wagon archive to repair
    :param validate_archive: when True, run validate() on the result
    :return: path of the repaired archive
    """
    _assert_auditwheel_exists()
    logger.info('Repairing: %s', source)
    # 1. Extract the wagon and read its metadata.
    processed_source = get_source(source)
    metadata = _get_metadata(processed_source)

    # 2. Repair the wheels; this returns the updated metadata.
    new_metadata = _repair_wheels(processed_source, metadata)
    archive_name = _set_archive_name(
        new_metadata['package_name'],
        new_metadata['package_version'],
        new_metadata['supported_python_versions'],
        new_metadata['supported_platform'],
        new_metadata['build_tag'])
    # 3. Regenerate the metadata file with the new wheel names/platform.
    _generate_metadata_file(
        processed_source,
        archive_name,
        new_metadata['supported_platform'],
        new_metadata['supported_python_versions'],
        new_metadata['package_name'],
        new_metadata['package_version'],
        new_metadata['build_tag'],
        new_metadata['package_source'],
        new_metadata['wheels'])

    # 4. Repack everything into a new wagon archive in the cwd.
    archive_path = os.path.join(os.getcwd(), archive_name)
    _create_wagon_archive(processed_source, archive_path)
    if validate_archive:
        validate(archive_path)
    logger.info('Wagon created successfully at: %s', archive_path)
    return archive_path
[ "def", "repair", "(", "source", ",", "validate_archive", "=", "False", ")", ":", "_assert_auditwheel_exists", "(", ")", "logger", ".", "info", "(", "'Repairing: %s'", ",", "source", ")", "processed_source", "=", "get_source", "(", "source", ")", "metadata", "=...
33.380952
13.452381
def getSpktSpkid(cellGids=[], timeRange=None, allCells=False):
    '''return spike ids and times; with allCells=True just need to identify slice of time so can omit cellGids'''
    # NOTE(review): mutable default ``cellGids=[]`` is never mutated here,
    # so it is harmless, but it is a known Python pitfall.
    from .. import sim
    import pandas as pd
    try: # Pandas 0.24 and later
        from pandas import _lib as pandaslib
    except: # Pandas 0.23 and earlier
        from pandas import lib as pandaslib
    df = pd.DataFrame(pandaslib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(), columns=['spkt', 'spkid'])
    #df = pd.DataFrame(pd.lib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(), columns=['spkt', 'spkid'])

    # NOTE(review): ``min``/``max`` shadow the builtins within this scope.
    if timeRange:
        min, max = [int(df['spkt'].searchsorted(timeRange[i])) for i in range(2)] # binary search faster than query
    else: # timeRange None or empty list means all times
        min, max = 0, len(df)

    if len(cellGids)==0 or allCells: # get all by either using flag or giving empty list -- can get rid of the flag
        sel = df[min:max]
    else:
        sel = df[min:max].query('spkid in @cellGids')

    return sel, sel['spkt'].tolist(), sel['spkid'].tolist()
[ "def", "getSpktSpkid", "(", "cellGids", "=", "[", "]", ",", "timeRange", "=", "None", ",", "allCells", "=", "False", ")", ":", "from", ".", ".", "import", "sim", "import", "pandas", "as", "pd", "try", ":", "# Pandas 0.24 and later", "from", "pandas", "im...
54.095238
32.952381
def old(self):
    """
    The old value from the event.
    """
    ori = self.original.action
    # Text-valued changes: about/title/username all expose prev_value.
    if isinstance(ori, (
            types.ChannelAdminLogEventActionChangeAbout,
            types.ChannelAdminLogEventActionChangeTitle,
            types.ChannelAdminLogEventActionChangeUsername
    )):
        return ori.prev_value
    elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
        return ori.prev_photo
    elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
        return ori.prev_stickerset
    elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
        return ori.prev_message
    # Participant changes carry the previous participant object.
    elif isinstance(ori, (
            types.ChannelAdminLogEventActionParticipantToggleAdmin,
            types.ChannelAdminLogEventActionParticipantToggleBan
    )):
        return ori.prev_participant
    # Boolean toggles: the old value is the negation of the new one.
    elif isinstance(ori, (
            types.ChannelAdminLogEventActionToggleInvites,
            types.ChannelAdminLogEventActionTogglePreHistoryHidden,
            types.ChannelAdminLogEventActionToggleSignatures
    )):
        return not ori.new_value
    elif isinstance(ori, types.ChannelAdminLogEventActionDeleteMessage):
        return ori.message
    elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
        return ori.prev_banned_rights
    # Any other action type falls through and implicitly returns None.
[ "def", "old", "(", "self", ")", ":", "ori", "=", "self", ".", "original", ".", "action", "if", "isinstance", "(", "ori", ",", "(", "types", ".", "ChannelAdminLogEventActionChangeAbout", ",", "types", ".", "ChannelAdminLogEventActionChangeTitle", ",", "types", ...
44.25
17.9375
def get_views(self, app_id, include_standard_views=False):
    """
    Get all of the views for the specified app

    :param app_id: the app containing the views
    :param include_standard_views: defaults to false.  Set to true if you wish to include standard views.
    """
    # The API expects a lowercase string flag.  Only the literal True maps
    # to "true" (matching the original ``is True`` comparison).
    flag = "true" if include_standard_views is True else "false"
    url = '/view/app/{}/?include_standard_views={}'.format(app_id, flag)
    return self.transport.GET(url=url)
[ "def", "get_views", "(", "self", ",", "app_id", ",", "include_standard_views", "=", "False", ")", ":", "include_standard", "=", "\"true\"", "if", "include_standard_views", "is", "True", "else", "\"false\"", "return", "self", ".", "transport", ".", "GET", "(", ...
53.555556
28.888889
def fw_int_to_hex(*args):
    """Pack integers into hex string.

    Use little-endian and unsigned int format.
    """
    # '<nH' = n little-endian unsigned 16-bit values.
    layout = '<{}H'.format(len(args))
    packed = struct.pack(layout, *args)
    return binascii.hexlify(packed).decode('utf-8')
[ "def", "fw_int_to_hex", "(", "*", "args", ")", ":", "return", "binascii", ".", "hexlify", "(", "struct", ".", "pack", "(", "'<{}H'", ".", "format", "(", "len", "(", "args", ")", ")", ",", "*", "args", ")", ")", ".", "decode", "(", "'utf-8'", ")" ]
30.285714
14.571429
def prt_goids(self, goids=None, prtfmt=None, sortby=True, prt=sys.stdout):
    """Given GO IDs, print descriptive info about each GO Term."""
    # Fall back to the instance's source GO IDs / default format string.
    selected = self.go_sources if goids is None else goids
    nts = self.get_nts(selected, sortby)
    fmt = self.prt_attr['fmta'] if prtfmt is None else prtfmt
    for ntgo in nts:
        fields = ntgo._asdict()
        prt.write("{GO}\n".format(GO=fmt.format(**fields)))
    return nts
[ "def", "prt_goids", "(", "self", ",", "goids", "=", "None", ",", "prtfmt", "=", "None", ",", "sortby", "=", "True", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "if", "goids", "is", "None", ":", "goids", "=", "self", ".", "go_sources", "nts", ...
41.545455
12.727273
def get_stream(self, channel):
    """Return the stream of the given channel

    :param channel: the channel that is broadcasting.
                    Either name or models.Channel instance
    :type channel: :class:`str` | :class:`models.Channel`
    :returns: the stream or None, if the channel is offline
    :rtype: :class:`models.Stream` | None
    :raises: None
    """
    # Accept either a Channel object or a plain channel name.
    if isinstance(channel, models.Channel):
        channel = channel.name
    r = self.kraken_request('GET', 'streams/' + channel)
    return models.Stream.wrap_get_stream(r)
[ "def", "get_stream", "(", "self", ",", "channel", ")", ":", "if", "isinstance", "(", "channel", ",", "models", ".", "Channel", ")", ":", "channel", "=", "channel", ".", "name", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'streams/'", "...
39.133333
15.8
def generate_single_kcorrection_listing(
        log,
        pathToOutputDirectory,
        pathToSpectralDatabase,
        model,
        restFrameFilter,
        redshift,
        temporalResolution=4.0):
    """
    *Given a redshift generate a dictionary of k-correction polynomials for the MCS.*

    **Key Arguments:**
        - ``log`` -- logger
        - ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
        - ``pathToSpectralDatabase`` -- path to the directory containing the spectral database
        - ``model`` -- name of the object/model required
        - ``restFrameFilter`` -- the filter to generate the K-corrections against
        - ``redshift`` -- the redshift at which to generate the k-corrections for
        - ``temporalResolution`` -- temporal resolution at which to calculate the k-correcions

    **Return:**
        - None
    """
    # NOTE(review): this function uses Python-2-only constructs -- the
    # ``file()`` builtin and ``dict.iteritems()`` -- so it cannot run
    # unmodified on Python 3.
    # NOTE(review): ``filterData``, ``peakMag``, ``title`` and
    # ``timesList`` are assigned but never used below.
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    import re
    import os
    import glob
    ## THIRD PARTY ##
    import yaml
    import pysynphot as syn
    ## LOCAL APPLICATION ##

    ################ >ACTION(S) ################
    # GET THE PEAK MAGNITUDE DETAILS FROM YAML FILE
    fileName = pathToOutputDirectory + "/transient_light_curves.yaml"
    stream = file(fileName, 'r')
    generatedLCs = yaml.load(stream)
    filterData = generatedLCs[model]
    peakMag = generatedLCs[model][restFrameFilter]['Peak Magnitude']
    peakTime = generatedLCs[model][restFrameFilter]['Peak Time in Spectra']
    stream.close()

    # Collect all spectrum files for this model.
    pwd = os.getcwd()
    path = pathToSpectralDatabase + "/" + model + "/"
    title = "%s Objects" % (model,)
    os.chdir(path)
    spectrumFiles = []
    for thisFile in glob.glob("*.spec"):
        thisFile = path + thisFile
        spectrumFiles.append(thisFile)
    os.chdir(pwd)

    ################ > VARIABLE SETTINGS ######
    # Epoch encoded in the file name, e.g. 't+012.34' or 't-001.50'.
    reTime = re.compile(r't((\+|\-)\d{3}\.\d{2})')

    ################ >ACTION(S) ################
    # CREATE THE REQUIRED DIRECTORIES
    filters = ["g", "i", "r", "z"]
    for thisFilter in filters:
        strRed = "%0.3f" % (redshift,)
        try:
            log.debug("attempting to create directories")
            dataDir = pathToOutputDirectory + \
                "/k_corrections/%s/%s" % (model, thisFilter)
            os.makedirs(dataDir)
        except Exception as e:
            log.debug(
                "could not create directories - failed with this error: %s " % (str(e),))
        try:
            log.debug("attempting to clear the k-correction yaml file")
            fileName = dataDir + "/z" + \
                str(strRed).replace(".", "pt") + ".yaml"
            stream = file(fileName, 'w')
            stream.close()
        except Exception as e:
            log.critical(
                "could not clear the k-correction yaml file - failed with this error: %s " % (str(e),))
            return -1

    # Map each spectrum file to its epoch so we can walk them in order.
    timesList = []
    fileDictionary = {}
    for thisFile in spectrumFiles:
        thisTime = float(reTime.search(thisFile).group(1))
        fileDictionary[thisTime] = thisFile

    import collections
    ofileDictionary = collections.OrderedDict(sorted(fileDictionary.items()))

    # Skip spectra closer together than temporalResolution days.
    nextTime = -999999999.9
    for thisTime, thisFile in ofileDictionary.iteritems():
        log.debug('thisTime: %(thisTime)s, thisFile: %(thisFile)s' % locals())
        if thisTime < nextTime:
            log.debug('skipping the file: %(thisFile)s' % locals())
            continue
        else:
            nextTime = thisTime + temporalResolution

        # Shift the epoch to days relative to peak.
        thisTime -= peakTime
        wavelengthArray, fluxArray = extract_spectra_from_file(
            log, thisFile)
        spRest = syn.ArraySpectrum(
            wave=wavelengthArray, flux=fluxArray, waveunits='angstrom', fluxunits='flam')

        try:
            log.debug("attempting to determine the rest %s-magnitude" %
                      (restFrameFilter,))
            if restFrameFilter in ["g", "r", "i", "z"]:
                obsmode = "sdss,%s" % (restFrameFilter,)
            else:
                obsmode = restFrameFilter
            gRest = calcphot(
                log,
                wavelengthArray=wavelengthArray,
                fluxArray=fluxArray,
                obsmode=obsmode
            )
        except Exception as e:
            if "Integrated flux is <= 0" in str(e):
                log.warning(
                    "could not determine the rest-magnitude using calcphot - filter, model, time, file %s, %s, %s, %s - failed with this error: %s " % (restFrameFilter, model, thisTime, thisFile, str(e),))
                continue
            elif "Spectrum and bandpass do not fully overlap" in str(e):
                log.warning(
                    "could not determine the rest-magnitude using calcphot - filter, model, time, file %s, %s, %s, %s - failed with this error: %s " % (restFrameFilter, model, thisTime, thisFile, str(e),))
                continue
            else:
                log.warning(
                    "could not determine the rest-magnitude using calcphot - filter, model, time, file %s, %s, %s, %s - failed with this error: %s " % (restFrameFilter, model, thisTime, thisFile, str(e),))
                pass

        # Compute and record the K-correction for every observed filter.
        for thisFilter in filters:
            strRed = "%0.3f" % (redshift,)
            spRest.convert('photnu')
            spObs = spRest.redshift(redshift)
            spObs.convert('flam')
            spRest.convert('flam')
            dataDir = pathToOutputDirectory + \
                "/k_corrections/%s/%s" % (model, thisFilter)
            try:
                log.debug(
                    "attempting to open the yaml file to append k-correction data")
                fileName = dataDir + "/z" + \
                    str(strRed).replace(".", "pt") + ".yaml"
                stream = file(fileName, 'a')
            except Exception as e:
                log.critical(
                    "could not open the yaml file to append k-correction data - failed with this error: %s " % (str(e),))
                return -1
            try:
                log.debug(
                    "attempting to determine the magnitude of the object using calcphot - redshift, filter, model %s, %s, %s" % (strRed, thisFilter, model))
                if thisFilter in ["g", "r", "i", "z"]:
                    obsmode = "sdss,%s" % (thisFilter,)
                else:
                    obsmode = thisFilter
                filterObs = calcphot(
                    log,
                    wavelengthArray=spObs.wave,
                    fluxArray=spObs.flux,
                    obsmode=obsmode
                )
            except Exception as e:
                if "Integrated flux is <= 0" in str(e):
                    log.warning(
                        "could not determine the magnitude of the object using calcphot - redshift, filter, model %s, %s, %s - failed with this error: %s " % (strRed, thisFilter, model, str(e),))
                    continue
                elif "Spectrum and bandpass do not fully overlap" in str(e):
                    log.warning(
                        "could not determine the magnitude of the object using calcphot - redshift, filter, model %s, %s, %s - failed with this error: %s " % (strRed, thisFilter, model, str(e),))
                    continue
                elif "disjoint" in str(e):
                    log.warning(
                        "could not determine the magnitude of the object using calcphot - redshift, filter, model %s, %s, %s - failed with this error: %s " % (strRed, thisFilter, model, str(e),))
                    continue
                else:
                    log.warning(
                        "could not determine the magnitude of the object using calcphot - redshift, filter, model %s, %s, %s - failed with this error: %s " % (strRed, thisFilter, model, str(e),))
                    pass
            # K-correction = rest-frame magnitude minus observed magnitude.
            kCor = gRest - filterObs
            kcName = 'K_%s%s' % (restFrameFilter, thisFilter,)
            thisKcor = {}
            thisKcor["Rest Frame Days"] = thisTime
            thisKcor[kcName] = kCor
            yamlList = [thisKcor]
            yaml.dump(yamlList, stream, default_flow_style=False)
            stream.close()

    return
[ "def", "generate_single_kcorrection_listing", "(", "log", ",", "pathToOutputDirectory", ",", "pathToSpectralDatabase", ",", "model", ",", "restFrameFilter", ",", "redshift", ",", "temporalResolution", "=", "4.0", ")", ":", "################ > IMPORTS ################", "## ...
42.463054
22.817734
def _upload_part(api, session, url, upload, part_number, part, retry_count,
                 timeout):
    """
    Used by the worker to upload a part to the storage service.

    :param api: Api instance.
    :param session: Storage service session.
    :param url: Part url.
    :param upload: Upload identifier.
    :param part_number: Part number.
    :param part: Part data.
    :param retry_count: Number of times to retry.
    :param timeout: Timeout for storage session.
    """
    # Ask the API where this part should be uploaded.
    part_url = retry(retry_count)(_get_part_url)(api, url, upload, part_number)
    # Push the bytes to the storage service and capture the ETag.
    e_tag = retry(retry_count)(_submit_part)(session, part_url, part, timeout)
    # Confirm the uploaded part back to the API.
    retry(retry_count)(_report_part)(api, url, upload, part_number, e_tag)
[ "def", "_upload_part", "(", "api", ",", "session", ",", "url", ",", "upload", ",", "part_number", ",", "part", ",", "retry_count", ",", "timeout", ")", ":", "part_url", "=", "retry", "(", "retry_count", ")", "(", "_get_part_url", ")", "(", "api", ",", ...
33.090909
15.454545
def main(): """Function to remove temperature effect from field data """ options = handle_options() # read in observed and synthetic data elecs, d_obs = readin_volt(options.d_obs) elecs, d_est = readin_volt(options.d_est) elecs, d_estTC = readin_volt(options.d_estTC) # calculate corrected data volt_corr = calc_correction(d_obs, d_est, d_estTC, ) # save data save_volt(elecs, volt_corr, options.output, )
[ "def", "main", "(", ")", ":", "options", "=", "handle_options", "(", ")", "# read in observed and synthetic data", "elecs", ",", "d_obs", "=", "readin_volt", "(", "options", ".", "d_obs", ")", "elecs", ",", "d_est", "=", "readin_volt", "(", "options", ".", "...
30
11.421053
def revoke_group_permission( self, group_name, source_group_name, source_group_owner_id): """ This is a convenience function that wraps the "authorize group" functionality of the C{authorize_security_group} method. For an explanation of the parameters, see C{revoke_security_group}. """ d = self.revoke_security_group( group_name, source_group_name=source_group_name, source_group_owner_id=source_group_owner_id) return d
[ "def", "revoke_group_permission", "(", "self", ",", "group_name", ",", "source_group_name", ",", "source_group_owner_id", ")", ":", "d", "=", "self", ".", "revoke_security_group", "(", "group_name", ",", "source_group_name", "=", "source_group_name", ",", "source_grou...
39.230769
18.153846
def sha1(self): """ :return: The SHA1 hash of the DER-encoded bytes of this public key info """ if self._sha1 is None: self._sha1 = hashlib.sha1(byte_cls(self['public_key'])).digest() return self._sha1
[ "def", "sha1", "(", "self", ")", ":", "if", "self", ".", "_sha1", "is", "None", ":", "self", ".", "_sha1", "=", "hashlib", ".", "sha1", "(", "byte_cls", "(", "self", "[", "'public_key'", "]", ")", ")", ".", "digest", "(", ")", "return", "self", "...
28.666667
20.444444
def use_plenary_sequence_rule_enabler_rule_view(self): """Pass through to provider SequenceRuleEnablerRuleLookupSession.use_plenary_sequence_rule_enabler_rule_view""" self._object_views['sequence_rule_enabler_rule'] = PLENARY # self._get_provider_session('sequence_rule_enabler_rule_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_plenary_sequence_rule_enabler_rule_view() except AttributeError: pass
[ "def", "use_plenary_sequence_rule_enabler_rule_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'sequence_rule_enabler_rule'", "]", "=", "PLENARY", "# self._get_provider_session('sequence_rule_enabler_rule_lookup_session') # To make sure the session is tracked", "for",...
61.111111
23.444444
def _check_array_parms(is_array, array_size, value, element_kind, element_name): # pylint: disable=unused-argument # The array_size argument is unused. """ Check whether array-related parameters are ok. """ # The following case has been disabled because it cannot happen given # how this check function is used: # if array_size and is_array is False: # raise ValueError( # _format("The array_size parameter of {0} {1!A} is {2!A} but the " # "is_array parameter is False.", # element_kind, element_name, array_size)) if value is not None: value_is_array = isinstance(value, (list, tuple)) if not is_array and value_is_array: raise ValueError( _format("The is_array parameter of {0} {1!A} is False but " "value {2!A} is an array.", element_kind, element_name, value)) if is_array and not value_is_array: raise ValueError( _format("The is_array parameter of {0} {1!A} is True but " "value {2!A} is not an array.", element_kind, element_name, value))
[ "def", "_check_array_parms", "(", "is_array", ",", "array_size", ",", "value", ",", "element_kind", ",", "element_name", ")", ":", "# pylint: disable=unused-argument", "# The array_size argument is unused.", "# The following case has been disabled because it cannot happen given", "...
43.428571
15.642857
def create(handler, item_document): """Create a new item from a JSON document""" data = {'operation': 'create', 'item': json.load(item_document)} handler.invoke(data)
[ "def", "create", "(", "handler", ",", "item_document", ")", ":", "data", "=", "{", "'operation'", ":", "'create'", ",", "'item'", ":", "json", ".", "load", "(", "item_document", ")", "}", "handler", ".", "invoke", "(", "data", ")" ]
37.2
6.4
def Operation(self, x, y): """Whether x is fully contained in y.""" if x in y: return True # x might be an iterable # first we need to skip strings or we'll do silly things # pylint: disable=consider-merging-isinstance if isinstance(x, py2to3.STRING_TYPES) or isinstance(x, bytes): return False try: for value in x: if value not in y: return False return True except TypeError: return False
[ "def", "Operation", "(", "self", ",", "x", ",", "y", ")", ":", "if", "x", "in", "y", ":", "return", "True", "# x might be an iterable", "# first we need to skip strings or we'll do silly things", "# pylint: disable=consider-merging-isinstance", "if", "isinstance", "(", ...
25.277778
21.055556
def from_xdr(cls, xdr): """Create an :class:`Asset` object from its base64 encoded XDR representation. :param bytes xdr: The base64 encoded XDR Asset object. :return: A new :class:`Asset` object from its encoded XDR representation. """ xdr_decoded = base64.b64decode(xdr) asset = Xdr.StellarXDRUnpacker(xdr_decoded) asset_xdr_object = asset.unpack_Asset() asset = Asset.from_xdr_object(asset_xdr_object) return asset
[ "def", "from_xdr", "(", "cls", ",", "xdr", ")", ":", "xdr_decoded", "=", "base64", ".", "b64decode", "(", "xdr", ")", "asset", "=", "Xdr", ".", "StellarXDRUnpacker", "(", "xdr_decoded", ")", "asset_xdr_object", "=", "asset", ".", "unpack_Asset", "(", ")", ...
33.133333
18
def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5): """Non-differentiable Sørensen–Dice coefficient for comparing the similarity of two batch of data, usually be used for binary image segmentation i.e. labels are binary. The coefficient between 0 to 1, 1 if totally match. Parameters ----------- output : tensor A distribution with shape: [batch_size, ....], (any dimensions). target : tensor The target distribution, format the same with `output`. threshold : float The threshold value to be true. axis : tuple of integer All dimensions are reduced, default ``(1,2,3)``. smooth : float This small value will be added to the numerator and denominator, see ``dice_coe``. References ----------- - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__ """ output = tf.cast(output > threshold, dtype=tf.float32) target = tf.cast(target > threshold, dtype=tf.float32) inse = tf.reduce_sum(tf.multiply(output, target), axis=axis) l = tf.reduce_sum(output, axis=axis) r = tf.reduce_sum(target, axis=axis) # old axis=[0,1,2,3] # hard_dice = 2 * (inse) / (l + r) # epsilon = 1e-5 # hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon) # new haodong hard_dice = (2. * inse + smooth) / (l + r + smooth) ## hard_dice = tf.reduce_mean(hard_dice, name='hard_dice') return hard_dice
[ "def", "dice_hard_coe", "(", "output", ",", "target", ",", "threshold", "=", "0.5", ",", "axis", "=", "(", "1", ",", "2", ",", "3", ")", ",", "smooth", "=", "1e-5", ")", ":", "output", "=", "tf", ".", "cast", "(", "output", ">", "threshold", ",",...
38.756757
22.027027
def _parse_endofnames(client, command, actor, args): """Parse an ENDOFNAMES and dispatch a NAMES event for the channel.""" args = args.split(" :", 1)[0] # Strip off human-readable message _, _, channel = args.rpartition(' ') channel = client.server.get_channel(channel) or channel.lower() client.dispatch_event('MEMBERS', channel)
[ "def", "_parse_endofnames", "(", "client", ",", "command", ",", "actor", ",", "args", ")", ":", "args", "=", "args", ".", "split", "(", "\" :\"", ",", "1", ")", "[", "0", "]", "# Strip off human-readable message", "_", ",", "_", ",", "channel", "=", "a...
57.5
12
def GetReportDownloadHeaders(self, **kwargs): """Returns a dictionary of headers for a report download request. Note that the given keyword arguments will override any settings configured from the googleads.yaml file. Args: **kwargs: Optional keyword arguments. Keyword Arguments: client_customer_id: A string containing a client_customer_id intended to override the default value set by the AdWordsClient. include_zero_impressions: A boolean indicating whether the report should show rows with zero impressions. skip_report_header: A boolean indicating whether to include a header row containing the report name and date range. If false or not specified, report output will include the header row. skip_column_header: A boolean indicating whether to include column names in reports. If false or not specified, report output will include the column names. skip_report_summary: A boolean indicating whether to include a summary row containing the report totals. If false or not specified, report output will include the summary row. use_raw_enum_values: A boolean indicating whether to return enum field values as enums instead of display values. Returns: A dictionary containing the headers configured for downloading a report. Raises: GoogleAdsValueError: If one or more of the report header keyword arguments is invalid. 
""" headers = self._adwords_client.oauth2_client.CreateHttpHeader() headers.update({ 'Content-type': self._CONTENT_TYPE, 'developerToken': str(self._adwords_client.developer_token), 'clientCustomerId': str(kwargs.get( 'client_customer_id', self._adwords_client.client_customer_id)), 'User-Agent': ''.join([ self._adwords_client.user_agent, googleads.common.GenerateLibSig(self._PRODUCT_SIG), ',gzip']) }) headers.update(self.custom_http_headers) updated_kwargs = dict(self._adwords_client.report_download_headers) updated_kwargs.update(kwargs) for kw in updated_kwargs: try: headers[_REPORT_HEADER_KWARGS[kw]] = str(updated_kwargs[kw]) except KeyError: raise googleads.errors.GoogleAdsValueError( 'The provided keyword "%s" is invalid. Accepted keywords are: %s' % (kw, _REPORT_HEADER_KWARGS.keys())) return headers
[ "def", "GetReportDownloadHeaders", "(", "self", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "self", ".", "_adwords_client", ".", "oauth2_client", ".", "CreateHttpHeader", "(", ")", "headers", ".", "update", "(", "{", "'Content-type'", ":", "self", "."...
41.948276
24.155172
def get_current_word_and_position(self, completion=False): """Return current word, i.e. word at cursor position, and the start position""" cursor = self.textCursor() if cursor.hasSelection(): # Removes the selection and moves the cursor to the left side # of the selection: this is required to be able to properly # select the whole word under cursor (otherwise, the same word is # not selected when the cursor is at the right side of it): cursor.setPosition(min([cursor.selectionStart(), cursor.selectionEnd()])) else: # Checks if the first character to the right is a white space # and if not, moves the cursor one word to the left (otherwise, # if the character to the left do not match the "word regexp" # (see below), the word to the left of the cursor won't be # selected), but only if the first character to the left is not a # white space too. def is_space(move): curs = self.textCursor() curs.movePosition(move, QTextCursor.KeepAnchor) return not to_text_string(curs.selectedText()).strip() if not completion: if is_space(QTextCursor.NextCharacter): if is_space(QTextCursor.PreviousCharacter): return cursor.movePosition(QTextCursor.WordLeft) else: def is_special_character(move): curs = self.textCursor() curs.movePosition(move, QTextCursor.KeepAnchor) text_cursor = to_text_string(curs.selectedText()).strip() return len(re.findall(r'([^\d\W]\w*)', text_cursor, re.UNICODE)) == 0 if is_space(QTextCursor.PreviousCharacter): return if (is_special_character(QTextCursor.NextCharacter)): cursor.movePosition(QTextCursor.WordLeft) cursor.select(QTextCursor.WordUnderCursor) text = to_text_string(cursor.selectedText()) # find a valid python variable name match = re.findall(r'([^\d\W]\w*)', text, re.UNICODE) if match: return match[0], cursor.selectionStart()
[ "def", "get_current_word_and_position", "(", "self", ",", "completion", "=", "False", ")", ":", "cursor", "=", "self", ".", "textCursor", "(", ")", "if", "cursor", ".", "hasSelection", "(", ")", ":", "# Removes the selection and moves the cursor to the left side\r", ...
52.304348
20.869565
def get_access_token(self, code, state=None): ''' In callback url: http://host/callback?code=123&state=xyz use code and state to get an access token. ''' kw = dict(client_id=self._client_id, client_secret=self._client_secret, code=code) if self._redirect_uri: kw['redirect_uri'] = self._redirect_uri if state: kw['state'] = state opener = build_opener(HTTPSHandler) request = Request('https://github.com/login/oauth/access_token', data=_encode_params(kw)) request.get_method = _METHOD_MAP['POST'] request.add_header('Accept', 'application/json') try: response = opener.open(request, timeout=TIMEOUT) r = _parse_json(response.read()) if 'error' in r: raise ApiAuthError(str(r.error)) return str(r.access_token) except HTTPError as e: raise ApiAuthError('HTTPError when get access token')
[ "def", "get_access_token", "(", "self", ",", "code", ",", "state", "=", "None", ")", ":", "kw", "=", "dict", "(", "client_id", "=", "self", ".", "_client_id", ",", "client_secret", "=", "self", ".", "_client_secret", ",", "code", "=", "code", ")", "if"...
42.130435
18.826087
def check_signature(signature, key, data): """Compute the HMAC signature and test against a given hash.""" if isinstance(key, type(u'')): key = key.encode() digest = 'sha1=' + hmac.new(key, data, hashlib.sha1).hexdigest() # Covert everything to byte sequences if isinstance(digest, type(u'')): digest = digest.encode() if isinstance(signature, type(u'')): signature = signature.encode() return werkzeug.security.safe_str_cmp(digest, signature)
[ "def", "check_signature", "(", "signature", ",", "key", ",", "data", ")", ":", "if", "isinstance", "(", "key", ",", "type", "(", "u''", ")", ")", ":", "key", "=", "key", ".", "encode", "(", ")", "digest", "=", "'sha1='", "+", "hmac", ".", "new", ...
34.642857
14.571429
def findparent(self, inputtemplates): """Find the most suitable parent, that is: the first matching unique/multi inputtemplate""" for inputtemplate in inputtemplates: if self.unique == inputtemplate.unique: return inputtemplate.id return None
[ "def", "findparent", "(", "self", ",", "inputtemplates", ")", ":", "for", "inputtemplate", "in", "inputtemplates", ":", "if", "self", ".", "unique", "==", "inputtemplate", ".", "unique", ":", "return", "inputtemplate", ".", "id", "return", "None" ]
48.166667
6.666667
def write_pkg_file(self, file): """Write the PKG-INFO format data to a file object. """ version = self.get_metadata_version() if six.PY2: def write_field(key, value): file.write("%s: %s\n" % (key, self._encode_field(value))) else: def write_field(key, value): file.write("%s: %s\n" % (key, value)) write_field('Metadata-Version', str(version)) write_field('Name', self.get_name()) write_field('Version', self.get_version()) write_field('Summary', self.get_description()) write_field('Home-page', self.get_url()) if version < StrictVersion('1.2'): write_field('Author', self.get_contact()) write_field('Author-email', self.get_contact_email()) else: optional_fields = ( ('Author', 'author'), ('Author-email', 'author_email'), ('Maintainer', 'maintainer'), ('Maintainer-email', 'maintainer_email'), ) for field, attr in optional_fields: attr_val = getattr(self, attr) if attr_val is not None: write_field(field, attr_val) write_field('License', self.get_license()) if self.download_url: write_field('Download-URL', self.download_url) for project_url in self.project_urls.items(): write_field('Project-URL', '%s, %s' % project_url) long_desc = rfc822_escape(self.get_long_description()) write_field('Description', long_desc) keywords = ','.join(self.get_keywords()) if keywords: write_field('Keywords', keywords) if version >= StrictVersion('1.2'): for platform in self.get_platforms(): write_field('Platform', platform) else: self._write_list(file, 'Platform', self.get_platforms()) self._write_list(file, 'Classifier', self.get_classifiers()) # PEP 314 self._write_list(file, 'Requires', self.get_requires()) self._write_list(file, 'Provides', self.get_provides()) self._write_list(file, 'Obsoletes', self.get_obsoletes()) # Setuptools specific for PEP 345 if hasattr(self, 'python_requires'): write_field('Requires-Python', self.python_requires) # PEP 566 if self.long_description_content_type: write_field( 'Description-Content-Type', self.long_description_content_type ) if self.provides_extras: 
for extra in self.provides_extras: write_field('Provides-Extra', extra)
[ "def", "write_pkg_file", "(", "self", ",", "file", ")", ":", "version", "=", "self", ".", "get_metadata_version", "(", ")", "if", "six", ".", "PY2", ":", "def", "write_field", "(", "key", ",", "value", ")", ":", "file", ".", "write", "(", "\"%s: %s\\n\...
32.72973
16.567568
def dry_lapse(pressure, temperature, ref_pressure=None): r"""Calculate the temperature at a level assuming only dry processes. This function lifts a parcel starting at `temperature`, conserving potential temperature. The starting pressure can be given by `ref_pressure`. Parameters ---------- pressure : `pint.Quantity` The atmospheric pressure level(s) of interest temperature : `pint.Quantity` The starting temperature ref_pressure : `pint.Quantity`, optional The reference pressure. If not given, it defaults to the first element of the pressure array. Returns ------- `pint.Quantity` The resulting parcel temperature at levels given by `pressure` See Also -------- moist_lapse : Calculate parcel temperature assuming liquid saturation processes parcel_profile : Calculate complete parcel profile potential_temperature """ if ref_pressure is None: ref_pressure = pressure[0] return temperature * (pressure / ref_pressure)**mpconsts.kappa
[ "def", "dry_lapse", "(", "pressure", ",", "temperature", ",", "ref_pressure", "=", "None", ")", ":", "if", "ref_pressure", "is", "None", ":", "ref_pressure", "=", "pressure", "[", "0", "]", "return", "temperature", "*", "(", "pressure", "/", "ref_pressure", ...
32.96875
22.65625
def require_auth(request: Request, exceptions: bool=True) -> User: """ Returns authenticated User. :param request: HttpRequest :param exceptions: Raise (NotAuthenticated) exception. Default is True. :return: User """ if not request.user or not request.user.is_authenticated: if exceptions: raise NotAuthenticated() return None return request.user
[ "def", "require_auth", "(", "request", ":", "Request", ",", "exceptions", ":", "bool", "=", "True", ")", "->", "User", ":", "if", "not", "request", ".", "user", "or", "not", "request", ".", "user", ".", "is_authenticated", ":", "if", "exceptions", ":", ...
32.916667
15.25
def write_output(output_report, sample_name, multi_positions, genus, percent_contam, contam_stddev, total_gene_length, database_download_date, snp_cutoff=3, cgmlst=None): """ Function that writes the output generated by ConFindr to a report file. Appends to a file that already exists, or creates the file if it doesn't already exist. :param output_report: Path to CSV output report file. Should have headers SampleName,Genus,NumContamSNVs, ContamStatus,PercentContam, and PercentContamStandardDeviation, in that order. :param sample_name: string - name of sample :param multi_positions: integer - number of positions that were found to have more than one base present. :param genus: string - The genus of your sample :param percent_contam: float - Estimated percentage contamination :param contam_stddev: float - Standard deviation of percentage contamination :param total_gene_length: integer - number of bases examined to make a contamination call. :param cgmlst: If None, means that rMLST database was used, so use rMLST snp cutoff. Otherwise, some sort of cgMLST database was used, so use a different cutoff. """ # If the report file hasn't been created, make it, with appropriate header. if not os.path.isfile(output_report): with open(os.path.join(output_report), 'w') as f: f.write('Sample,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamStandardDeviation,BasesExamined,DatabaseDownloadDate\n') if multi_positions >= snp_cutoff or len(genus.split(':')) > 1: contaminated = True else: contaminated = False with open(output_report, 'a+') as f: f.write('{samplename},{genus},{numcontamsnvs},' '{contamstatus},{percent_contam},{contam_stddev},' '{gene_length},{database_download_date}\n'.format(samplename=sample_name, genus=genus, numcontamsnvs=multi_positions, contamstatus=contaminated, percent_contam=percent_contam, contam_stddev=contam_stddev, gene_length=total_gene_length, database_download_date=database_download_date))
[ "def", "write_output", "(", "output_report", ",", "sample_name", ",", "multi_positions", ",", "genus", ",", "percent_contam", ",", "contam_stddev", ",", "total_gene_length", ",", "database_download_date", ",", "snp_cutoff", "=", "3", ",", "cgmlst", "=", "None", ")...
70.583333
37.75
def _at_function(self, calculator, rule, scope, block): """ Implements @mixin and @function """ if not block.argument: raise SyntaxError("%s requires a function name (%s)" % (block.directive, rule.file_and_line)) funct, argspec_node = self._get_funct_def(rule, calculator, block.argument) defaults = {} new_params = [] for var_name, default in argspec_node.iter_def_argspec(): new_params.append(var_name) if default is not None: defaults[var_name] = default # TODO a function or mixin is re-parsed every time it's called; there's # no AST for anything but expressions :( mixin = [rule.source_file, block.lineno, block.unparsed_contents, rule.namespace, argspec_node, rule.source_file] if block.directive == '@function': def _call(mixin): def __call(namespace, *args, **kwargs): source_file = mixin[0] lineno = mixin[1] m_codestr = mixin[2] pristine_callee_namespace = mixin[3] callee_namespace = pristine_callee_namespace.derive() # TODO CallOp converts Sass names to Python names, so we # have to convert them back to Sass names. 
would be nice # to avoid this back-and-forth somehow kwargs = OrderedDict( (normalize_var('$' + key), value) for (key, value) in kwargs.items()) self._populate_namespace_from_call( "Function {0}".format(funct), callee_namespace, mixin, args, kwargs) _rule = SassRule( source_file=source_file, lineno=lineno, unparsed_contents=m_codestr, namespace=callee_namespace, # rule import_key=rule.import_key, legacy_compiler_options=rule.legacy_compiler_options, options=rule.options, properties=rule.properties, extends_selectors=rule.extends_selectors, ancestry=rule.ancestry, nested=rule.nested, ) # TODO supposed to throw an error if there's a slurpy arg # but keywords() is never called on it try: self.manage_children(_rule, scope) except SassReturn as e: return e.retval else: return Null() __call._pyscss_needs_namespace = True return __call _mixin = _call(mixin) _mixin.mixin = mixin mixin = _mixin if block.directive == '@mixin': add = rule.namespace.set_mixin elif block.directive == '@function': add = rule.namespace.set_function # Register the mixin for every possible arity it takes if argspec_node.slurp or argspec_node.inject: add(funct, None, mixin) else: while len(new_params): add(funct, len(new_params), mixin) param = new_params.pop() if param not in defaults: break if not new_params: add(funct, 0, mixin)
[ "def", "_at_function", "(", "self", ",", "calculator", ",", "rule", ",", "scope", ",", "block", ")", ":", "if", "not", "block", ".", "argument", ":", "raise", "SyntaxError", "(", "\"%s requires a function name (%s)\"", "%", "(", "block", ".", "directive", ",...
41.188235
17.023529
def _ls_sites(path): """ List only sites in the domain_sites() to ensure we co-exist with other projects """ with cd(path): sites = run('ls').split('\n') doms = [d.name for d in domain_sites()] dom_sites = [] for s in sites: ds = s.split('-')[0] ds = ds.replace('_','.') if ds in doms and s not in dom_sites: dom_sites.append(s) return dom_sites
[ "def", "_ls_sites", "(", "path", ")", ":", "with", "cd", "(", "path", ")", ":", "sites", "=", "run", "(", "'ls'", ")", ".", "split", "(", "'\\n'", ")", "doms", "=", "[", "d", ".", "name", "for", "d", "in", "domain_sites", "(", ")", "]", "dom_si...
31.214286
12.642857
def CreateSession( cls, artifact_filter_names=None, command_line_arguments=None, debug_mode=False, filter_file_path=None, preferred_encoding='utf-8', preferred_time_zone=None, preferred_year=None): """Creates a session attribute container. Args: artifact_filter_names (Optional[list[str]]): names of artifact definitions that are used for filtering file system and Windows Registry key paths. command_line_arguments (Optional[str]): the command line arguments. debug_mode (bool): True if debug mode was enabled. filter_file_path (Optional[str]): path to a file with find specifications. preferred_encoding (Optional[str]): preferred encoding. preferred_time_zone (Optional[str]): preferred time zone. preferred_year (Optional[int]): preferred year. Returns: Session: session attribute container. """ session = sessions.Session() session.artifact_filters = artifact_filter_names session.command_line_arguments = command_line_arguments session.debug_mode = debug_mode session.filter_file = filter_file_path session.preferred_encoding = preferred_encoding session.preferred_time_zone = preferred_time_zone session.preferred_year = preferred_year return session
[ "def", "CreateSession", "(", "cls", ",", "artifact_filter_names", "=", "None", ",", "command_line_arguments", "=", "None", ",", "debug_mode", "=", "False", ",", "filter_file_path", "=", "None", ",", "preferred_encoding", "=", "'utf-8'", ",", "preferred_time_zone", ...
40.935484
20.935484
def add_mapping(self, data, name=None): """ Add a new mapping """ from .mappings import DocumentObjectField from .mappings import NestedObject from .mappings import ObjectField if isinstance(data, (DocumentObjectField, ObjectField, NestedObject)): self.mappings[data.name] = data.as_dict() return if name: self.mappings[name] = data return if isinstance(data, dict): self.mappings.update(data) elif isinstance(data, list): for d in data: if isinstance(d, dict): self.mappings.update(d) elif isinstance(d, DocumentObjectField): self.mappings[d.name] = d.as_dict() else: name, data = d self.add_mapping(data, name)
[ "def", "add_mapping", "(", "self", ",", "data", ",", "name", "=", "None", ")", ":", "from", ".", "mappings", "import", "DocumentObjectField", "from", ".", "mappings", "import", "NestedObject", "from", ".", "mappings", "import", "ObjectField", "if", "isinstance...
35.115385
12.346154
def query_pre_approvals(self, initial_date, final_date, page=None, max_results=None): """ query pre-approvals by date range """ last_page = False results = [] while last_page is False: search_result = self._consume_query_pre_approvals( initial_date, final_date, page, max_results) results.extend(search_result.pre_approvals) if search_result.current_page is None or \ search_result.total_pages is None or \ search_result.current_page == search_result.total_pages: last_page = True else: page = search_result.current_page + 1 return results
[ "def", "query_pre_approvals", "(", "self", ",", "initial_date", ",", "final_date", ",", "page", "=", "None", ",", "max_results", "=", "None", ")", ":", "last_page", "=", "False", "results", "=", "[", "]", "while", "last_page", "is", "False", ":", "search_r...
42.235294
17.117647
def master_open(): """master_open() -> (master_fd, slave_name) Open a pty master and return the fd, and the filename of the slave end. Deprecated, use openpty() instead.""" try: master_fd, slave_fd = os.openpty() except (AttributeError, OSError): pass else: slave_name = os.ttyname(slave_fd) os.close(slave_fd) return master_fd, slave_name return _open_terminal()
[ "def", "master_open", "(", ")", ":", "try", ":", "master_fd", ",", "slave_fd", "=", "os", ".", "openpty", "(", ")", "except", "(", "AttributeError", ",", "OSError", ")", ":", "pass", "else", ":", "slave_name", "=", "os", ".", "ttyname", "(", "slave_fd"...
27.933333
17.666667
def post(self, request, bot_id, format=None): """ Add a new chat state --- serializer: KikChatStateSerializer responseMessages: - code: 401 message: Not authenticated - code: 400 message: Not valid request """ return super(KikChatStateList, self).post(request, bot_id, format)
[ "def", "post", "(", "self", ",", "request", ",", "bot_id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "KikChatStateList", ",", "self", ")", ".", "post", "(", "request", ",", "bot_id", ",", "format", ")" ]
31.083333
10.916667
def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response
[ "def", "destroy", "(", "name", ",", "call", "=", "None", ")", ":", "if", "call", "==", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The destroy action must be called with -d, --destroy, '", "'-a or --action.'", ")", "__utils__", "[", "'cloud.fire_event'", ...
25.7
21.85
def list_cluster_custom_object(self, group, version, plural, **kwargs):  # noqa: E501
    """list_cluster_custom_object  # noqa: E501

    list or watch cluster scoped custom objects.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_cluster_custom_object(group, version, plural, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str group: The custom resource's group name (required)
    :param str version: The custom resource's version (required)
    :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap to the response data only; the *_with_http_info variant
    # itself returns either the data (sync) or the request thread (async),
    # so a single delegating return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.list_cluster_custom_object_with_http_info(group, version, plural, **kwargs)  # noqa: E501
[ "def", "list_cluster_custom_object", "(", "self", ",", "group", ",", "version", ",", "plural", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")"...
77.5
49.857143
def load(*fps, missing=Missing.silent):
    """
    Read a `.Configuration` instance from file-like objects.

    :param fps: file-like objects (supporting ``.read()``)
    :param missing: policy to be used when a configured key is missing, either
        as a `.Missing` instance or a default value
    :return: a `.Configuration` instance providing values from *fps*
    :rtype: `.Configuration`
    """
    # Parse every source in the order given before handing them over.
    documents = [yaml.safe_load(source.read()) for source in fps]
    return Configuration(*documents, missing=missing)
[ "def", "load", "(", "*", "fps", ",", "missing", "=", "Missing", ".", "silent", ")", ":", "return", "Configuration", "(", "*", "(", "yaml", ".", "safe_load", "(", "fp", ".", "read", "(", ")", ")", "for", "fp", "in", "fps", ")", ",", "missing", "="...
43.727273
19.363636
def _to_java_object_rdd(self):
    """Return a JavaRDD of Object by unpickling

    It will convert each Python object into Java object by Pyrolite, whenever
    the RDD is serialized in batch or not.
    """
    # Force pickled serialization first, then hand the Java RDD to SerDeUtil.
    jrdd = self._pickled()._jrdd
    return self.ctx._jvm.SerDeUtil.pythonToJava(jrdd, True)
[ "def", "_to_java_object_rdd", "(", "self", ")", ":", "rdd", "=", "self", ".", "_pickled", "(", ")", "return", "self", ".", "ctx", ".", "_jvm", ".", "SerDeUtil", ".", "pythonToJava", "(", "rdd", ".", "_jrdd", ",", "True", ")" ]
39.625
17
def classes(self, set_uri_or_id=None, nestedhierarchy=False):
    """Returns a dictionary of classes for the specified (sub)set (if None, default, the main set is selected)

    :param set_uri_or_id: either a full set URI (http/https) or a set id
        resolved via ``self.get_set_uri``; ``None`` selects the main set
    :param nestedhierarchy: if True, child classes are moved under their
        parent's ``'subclasses'`` dict instead of appearing at the top level
    :return: dict mapping class id -> classinfo dict with keys
        ``uri``, ``id``, ``label`` and optionally ``parentclass``, ``seqnr``,
        ``subclasses``
    """
    # A full URI is used as-is; anything else is treated as an id to resolve.
    if set_uri_or_id and set_uri_or_id.startswith(('http://','https://')):
        set_uri = set_uri_or_id
    else:
        set_uri = self.get_set_uri(set_uri_or_id)
    assert set_uri is not None
    classes= {}
    # Maps class URI -> class id; only needed to resolve parent links below.
    uri2idmap = {}
    # NOTE(review): assumes the rdf/skos/fsd prefixes are bound on self.graph
    # — confirm against the graph's namespace setup.
    for row in self.graph.query("SELECT ?classuri ?classid ?classlabel ?parentclass ?seqnr WHERE { ?classuri rdf:type skos:Concept ; skos:notation ?classid. <" + str(set_uri) + "> skos:member ?classuri . OPTIONAL { ?classuri skos:prefLabel ?classlabel } OPTIONAL { ?classuri skos:narrower ?parentclass } OPTIONAL { ?classuri fsd:sequenceNumber ?seqnr } }"):
        classinfo = {'uri': str(row.classuri), 'id': str(row.classid),'label': str(row.classlabel) if row.classlabel else "" }
        if nestedhierarchy:
            uri2idmap[str(row.classuri)] = str(row.classid)
        if row.parentclass:
            classinfo['parentclass'] = str(row.parentclass) #uri
        if row.seqnr:
            classinfo['seqnr'] = int(row.seqnr)
        classes[str(row.classid)] = classinfo
    if nestedhierarchy:
        #build hierarchy: re-home every child under its parent's 'subclasses',
        #then delete the now-duplicated top-level entries.
        removekeys = []
        for classid, classinfo in classes.items():
            if 'parentclass' in classinfo:
                removekeys.append(classid)
                parentclassid = uri2idmap[classinfo['parentclass']]
                if 'subclasses' not in classes[parentclassid]:
                    classes[parentclassid]['subclasses'] = {}
                classes[parentclassid]['subclasses'][classid] = classinfo
        for key in removekeys:
            del classes[key]
    return classes
[ "def", "classes", "(", "self", ",", "set_uri_or_id", "=", "None", ",", "nestedhierarchy", "=", "False", ")", ":", "if", "set_uri_or_id", "and", "set_uri_or_id", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ")", ")", ":", "set_uri", "=", "se...
54.088235
29.470588
def cake(return_X_y=True):
    """cake dataset

    Parameters
    ----------
    return_X_y : bool, if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
        OR
    Pandas DataFrame

    Notes
    -----
    X contains the category of recipe used transformed to an integer,
    the catergory of replicate, and the temperatue.

    y contains the angle at which the cake broke.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/lme4/cake.html
    """
    # y is real
    # recommend LinearGAM
    frame = pd.read_csv(PATH + '/cake.csv', index_col=0)
    if not return_X_y:
        return frame

    X = frame[['recipe', 'replicate', 'temperature']].values
    # NOTE(review): integer-codes the second *raw* column via np.unique —
    # confirm that column really is 'recipe' as the docstring states.
    X[:,0] = np.unique(frame.values[:,1], return_inverse=True)[1]
    # shift replicate to be zero-based
    X[:,1] -= 1
    y = frame['angle'].values
    return _clean_X_y(X, y)
[ "def", "cake", "(", "return_X_y", "=", "True", ")", ":", "# y is real", "# recommend LinearGAM", "cake", "=", "pd", ".", "read_csv", "(", "PATH", "+", "'/cake.csv'", ",", "index_col", "=", "0", ")", "if", "return_X_y", ":", "X", "=", "cake", "[", "[", ...
25.771429
22.285714
def available(name):
    '''
    .. versionadded:: 2014.7.0

    Returns ``True`` if the specified service is available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.available sshd
    '''
    # A service is "available" when its rc.d script exists and is executable.
    script = '/etc/rc.d/{0}'.format(name)
    if not os.path.isfile(script):
        return False
    return os.access(script, os.X_OK)
[ "def", "available", "(", "name", ")", ":", "path", "=", "'/etc/rc.d/{0}'", ".", "format", "(", "name", ")", "return", "os", ".", "path", ".", "isfile", "(", "path", ")", "and", "os", ".", "access", "(", "path", ",", "os", ".", "X_OK", ")" ]
22.2
25.4
def from_yaml(self, node):
    '''
    Implements a !from_yaml constructor with the following syntax:
        !from_yaml filename key

    Arguments:
        filename:   Filename of external YAML document from which to load,
                    relative to the current YAML file.
        key:        Key from external YAML document to return,
                    using a dot-separated syntax for nested keys.

    Examples:
        !from_yaml external.yml pop
        !from_yaml external.yml foo.bar.pop
        !from_yaml "another file.yml" "foo bar.snap crackle.pop"
    '''

    # The node's scalar value carries both arguments; honor quoting.
    raw = self.construct_scalar(node)
    try:
        args = shlex.split(raw)
    except UnicodeEncodeError:
        raise yaml.YAMLError('Non-ASCII arguments to !from_yaml are unsupported')

    if len(args) != 2:
        raise yaml.YAMLError('Two arguments expected to !from_yaml')
    filename, key = args

    # filename is resolved relative to the current YAML document's root.
    with open(os.path.join(self._root, filename), 'r') as handle:
        # Reuse this loader class so nested !from_yaml keeps working.
        document = yaml.load(handle, self.__class__)

    # Walk the dot-separated key path into the loaded document.
    value = document
    try:
        for part in key.split('.'):
            value = value[part]
    except KeyError:
        raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
    return value
[ "def", "from_yaml", "(", "self", ",", "node", ")", ":", "# Load the content from the node, as a scalar", "content", "=", "self", ".", "construct_scalar", "(", "node", ")", "# Split on unquoted spaces", "try", ":", "parts", "=", "shlex", ".", "split", "(", "content...
33.644444
21.644444
def count(fn, coll):
    """Return the count of True values returned by the predicate function
    applied to the collection

    Note: only results that are exactly ``True`` are counted; truthy
    values such as ``1`` are not.

    :param fn: a predicate function
    :param coll: a collection
    :returns: an integer

    >>> count(lambda x: x % 2 == 0, [11, 22, 31, 24, 15])
    2
    """
    return sum(1 for item in coll if fn(item) is True)
[ "def", "count", "(", "fn", ",", "coll", ")", ":", "return", "len", "(", "[", "x", "for", "x", "in", "coll", "if", "fn", "(", "x", ")", "is", "True", "]", ")" ]
25.307692
20
def github_token(token_path=None, token=None):
    """Return a github oauth token as a string.

    If `token` is defined, it has precedence. If `token` and `token_path`
    are both `None`, `~/.sq_github_token` is used as a fallback.

    Parameters
    ----------
    token_path : str, optional
        Path to the token file. The default token is used otherwise.
    token: str, optional
        Literal token string. If specified, this value is used instead of
        reading from the token_path file.

    Returns
    -------
    token : `string`
        Hopefully, a valid github oauth token.

    Raises
    ------
    EnvironmentError
        If no literal token was given and the token file does not exist.
    """
    # A literal token short-circuits all file handling.
    if token is not None:
        return token

    if token_path is None:
        # Try the default token
        token_path = '~/.sq_github_token'
    token_path = os.path.expandvars(os.path.expanduser(token_path))

    if not os.path.isfile(token_path):
        print("You don't have a token in {0} ".format(token_path))
        print("Have you run github-auth?")
        raise EnvironmentError("No token in %s" % token_path)

    # Token files hold the token on their first line.
    with open(token_path, 'r') as fdo:
        return fdo.readline().strip()
[ "def", "github_token", "(", "token_path", "=", "None", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "if", "token_path", "is", "None", ":", "# Try the default token", "token_path", "=", "'~/.sq_github_token'", "token_path", "=", "os", ...
32.941176
19.529412
def _parse_record(self, record_type):
    """Parse a record.

    Consumes tokens for one BibTeX-style entry body and stores the result
    in ``self.records`` under the entry's citation key, normalizing field
    names, page ranges, and author/editor name lists along the way.

    :param record_type: the entry type (e.g. article), stored lowercased
    """
    # Entry bodies may be delimited by braces or parentheses.
    if self._next_token() in ['{', '(']:
        # First token inside the delimiter is the citation key.
        key = self._next_token()
        self.records[key] = {
            u'id': key,
            u'type': record_type.lower()
        }
        # Fields only follow if a comma separates them from the key.
        if self._next_token() == ',':
            while True:
                field = self._parse_field()
                if field:
                    k, v = field[0], field[1]
                    # Normalize aliased field names to their canonical form.
                    if k in self.keynorms:
                        k = self.keynorms[k]
                    if k == 'pages':
                        # Canonicalize page ranges: strip spaces, '--' -> '-'.
                        v = v.replace(' ', '').replace('--', '-')
                    if k == 'author' or k == 'editor':
                        v = self.parse_names(v)
                    # Recapitalizing the title generally causes more problems than it solves
                    # elif k == 'title':
                    #     v = latex_to_unicode(v, capitalize='title')
                    else:
                        # Everything else just gets LaTeX escapes decoded.
                        v = latex_to_unicode(v)
                    self.records[key][k] = v
                # A trailing comma means more fields follow; anything else
                # (closing delimiter, EOF) ends the record.
                if self._token != ',':
                    break
[ "def", "_parse_record", "(", "self", ",", "record_type", ")", ":", "if", "self", ".", "_next_token", "(", ")", "in", "[", "'{'", ",", "'('", "]", ":", "key", "=", "self", ".", "_next_token", "(", ")", "self", ".", "records", "[", "key", "]", "=", ...
43.703704
11.518519
def _process_targeting_reagents(self, reagent_type, limit=None):
    """
    This method processes the gene targeting knockdown reagents,
    such as morpholinos, talens, and crisprs.
    We create triples for the reagents and pass the data into a hash map
    for use in the pheno_enviro method.

    Morpholinos work similar to RNAi.
    TALENs are artificial restriction enzymes
        that can be used for genome editing in situ.
    CRISPRs are knockdown reagents, working similar to RNAi
        but at the transcriptional level instead of mRNA level.

    You can read more about TALEN and CRISPR techniques in review
    [Gaj et al]
    http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5

    TODO add sequences

    Triples created:
    <reagent_id> is a gene_targeting_reagent
    <reagent_id> rdfs:label <reagent_symbol>
    <reagent_id> has type <reagent_so_id>
    <reagent_id> has comment <note>

    <publication_id> is an individual
    <publication_id> mentions <morpholino_id>
    :param reagent_type: should be one of: morph, talen, crispr
    :param limit: optional cap on the number of rows processed
    :return: None

    """
    LOG.info("Processing Gene Targeting Reagents")
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    line_counter = 0
    model = Model(graph)
    geno = Genotype(graph)

    if reagent_type not in ['morph', 'talen', 'crispr']:
        LOG.error("You didn't specify the right kind of file type.")
        return

    raw = '/'.join((self.rawdir, self.files[reagent_type]['file']))
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            if reagent_type in ['morph', 'crispr']:
                try:
                    (gene_num, gene_so_id, gene_symbol, reagent_num,
                     reagent_so_id, reagent_symbol, reagent_sequence,
                     publication, note) = row
                except ValueError:
                    # Catch lines without publication or note
                    (gene_num, gene_so_id, gene_symbol, reagent_num,
                     reagent_so_id, reagent_symbol, reagent_sequence,
                     publication) = row
                    # BUGFIX: without this, 'note' was either undefined
                    # (NameError on the first short row) or silently
                    # reused a stale value from a previous iteration.
                    note = ''
            elif reagent_type == 'talen':
                (gene_num, gene_so_id, gene_symbol, reagent_num,
                 reagent_so_id, reagent_symbol, reagent_sequence,
                 reagent_sequence2, publication, note) = row
            else:
                # should not get here
                return

            reagent_id = 'ZFIN:' + reagent_num.strip()
            gene_id = 'ZFIN:' + gene_num.strip()

            self.id_label_map[reagent_id] = reagent_symbol

            # In test mode, only keep rows tied to the whitelisted
            # reagents or genes.
            if self.test_mode and (
                    reagent_num not in self.test_ids['morpholino'] and
                    gene_num not in self.test_ids['gene']):
                continue

            geno.addGeneTargetingReagent(
                reagent_id, reagent_symbol, reagent_so_id, gene_id)
            # The reagent targeted gene is added
            # in the pheno_environment processing function.

            # Add publication
            # note that the publications can be comma-delimited,
            # like: ZDB-PUB-100719-4,ZDB-PUB-130703-22
            if publication != '':
                pubs = re.split(r',', publication.strip())
                for pub in pubs:
                    pub_id = 'ZFIN:' + pub.strip()
                    ref = Reference(graph, pub_id)
                    ref.addRefToGraph()
                    graph.addTriple(
                        pub_id, self.globaltt['mentions'], reagent_id)

            # Add comment?
            if note != '':
                model.addComment(reagent_id, note)

            # use the variant hash for reagents to list the affected genes
            if reagent_id not in self.variant_loci_genes:
                self.variant_loci_genes[reagent_id] = [gene_id]
            else:
                if gene_id not in self.variant_loci_genes[reagent_id]:
                    self.variant_loci_genes[reagent_id] += [gene_id]

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    LOG.info("Done with Reagent type %s", reagent_type)
    return
[ "def", "_process_targeting_reagents", "(", "self", ",", "reagent_type", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing Gene Targeting Reagents\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "e...
40.964286
21.892857