Dataset schema (four columns; each record below lists its values in this order):

    text           string   lengths 89 to 104k
    code_tokens    list
    avg_line_len   float64  7.91 to 980
    score          float64  0 to 630
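For orientation, a minimal sketch of how records with this schema might be loaded and inspected. The JSON-lines file name is a hypothetical stand-in; the dump itself does not say how the records are stored.

    import json

    # "code_dataset.jsonl" is an assumed name, not part of the dump.
    with open("code_dataset.jsonl") as f:
        for line in f:
            record = json.loads(line)
            # The four fields named in the schema above.
            print(record["avg_line_len"], record["score"])
            print(record["text"].splitlines()[0])  # first line of the snippet
            print(record["code_tokens"][:8])       # leading lexer tokens
            break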
def get_gicon(self, icon_id: str) -> "Gio.Icon":
    """Lookup Gio.Icon from udiskie-internal id."""
    return Gio.ThemedIcon.new_from_names(self._icon_names[icon_id])
[ "def", "get_gicon", "(", "self", ",", "icon_id", ":", "str", ")", "->", "\"Gio.Icon\"", ":", "return", "Gio", ".", "ThemedIcon", ".", "new_from_names", "(", "self", ".", "_icon_names", "[", "icon_id", "]", ")" ]
58
13
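The two bare numbers closing each record are avg_line_len and score, in that order. The former appears consistent with the mean character length of the snippet's lines; a small sketch of one way such a value could be computed (the exact formula used to build the dataset is not stated, so this is an assumption):

    def avg_line_len(code: str) -> float:
        # Mean length, in characters, of the non-empty lines of a snippet.
        lines = [ln for ln in code.splitlines() if ln.strip()]
        return sum(len(ln) for ln in lines) / len(lines) if lines else 0.0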
def execute(self, driver_command, params=None):
    """
    Sends a command to be executed by a command.CommandExecutor.

    :Args:
     - driver_command: The name of the command to execute as a string.
     - params: A dictionary of named parameters to send with the command.

    :Returns:
      The command's JSON response loaded into a dictionary object.
    """
    if self.session_id is not None:
        if not params:
            params = {'sessionId': self.session_id}
        elif 'sessionId' not in params:
            params['sessionId'] = self.session_id

    params = self._wrap_value(params)
    response = self.command_executor.execute(driver_command, params)
    if response:
        self.error_handler.check_response(response)
        response['value'] = self._unwrap_value(
            response.get('value', None))
        return response
    # If the server doesn't send a response, assume the command was
    # a success
    return {'success': 0, 'value': None, 'sessionId': self.session_id}
[ "def", "execute", "(", "self", ",", "driver_command", ",", "params", "=", "None", ")", ":", "if", "self", ".", "session_id", "is", "not", "None", ":", "if", "not", "params", ":", "params", "=", "{", "'sessionId'", ":", "self", ".", "session_id", "}", ...
39.962963
19.740741
def ReadUserNotifications(self, username, state=None, timerange=None,
                          cursor=None):
    """Reads notifications scheduled for a user within a given timerange."""
    query = ("SELECT UNIX_TIMESTAMP(timestamp), "
             " notification_state, notification "
             "FROM user_notification "
             "WHERE username_hash = %s ")
    args = [mysql_utils.Hash(username)]

    if state is not None:
        query += "AND notification_state = %s "
        args.append(int(state))

    if timerange is not None:
        time_from, time_to = timerange  # pylint: disable=unpacking-non-sequence

        if time_from is not None:
            query += "AND timestamp >= FROM_UNIXTIME(%s) "
            args.append(mysql_utils.RDFDatetimeToTimestamp(time_from))

        if time_to is not None:
            query += "AND timestamp <= FROM_UNIXTIME(%s) "
            args.append(mysql_utils.RDFDatetimeToTimestamp(time_to))

    query += "ORDER BY timestamp DESC "

    ret = []
    cursor.execute(query, args)
    for timestamp, state, notification_ser in cursor.fetchall():
        n = rdf_objects.UserNotification.FromSerializedString(notification_ser)
        n.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
        n.state = state
        ret.append(n)
    return ret
[ "def", "ReadUserNotifications", "(", "self", ",", "username", ",", "state", "=", "None", ",", "timerange", "=", "None", ",", "cursor", "=", "None", ")", ":", "query", "=", "(", "\"SELECT UNIX_TIMESTAMP(timestamp), \"", "\" notification_state, notification \"", ...
33.375
19.325
def _check_file_syntax(filename, temp_dir, override_lang=None, enforce=True):
    """
    Checks that the code in FILENAME parses, attempting to autodetect
    the language if necessary. Raises IOError if the file cannot be
    read. Raises DXSyntaxError if there is a problem and "enforce" is True.
    """
    def check_python(filename):
        # Generate a semi-recognizable name to write the pyc to. Of
        # course it's possible that different files being scanned could
        # have the same basename, so this path won't be unique, but the
        # checks don't run concurrently so this shouldn't cause any
        # problems.
        pyc_path = os.path.join(temp_dir, os.path.basename(filename) + ".pyc")
        try:
            if USING_PYTHON2:
                filename = filename.encode(sys.getfilesystemencoding())
            py_compile.compile(filename, cfile=pyc_path, doraise=True)
        finally:
            try:
                os.unlink(pyc_path)
            except OSError:
                pass

    def check_bash(filename):
        if platform.system() == 'Windows':
            logging.warn(
                'Skipping bash syntax check due to unavailability of bash on Windows.')
        else:
            subprocess.check_output(["/bin/bash", "-n", filename], stderr=subprocess.STDOUT)

    if override_lang == 'python2.7':
        checker_fn = check_python
    elif override_lang == 'bash':
        checker_fn = check_bash
    elif filename.endswith('.py'):
        checker_fn = check_python
    elif filename.endswith('.sh'):
        checker_fn = check_bash
    else:
        # Ignore other kinds of files.
        return

    # Do a test read of the file to catch errors like the file not
    # existing or not being readable.
    open(filename)

    try:
        checker_fn(filename)
    except subprocess.CalledProcessError as e:
        print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
        for line in e.output.strip("\n").split("\n"):
            print(" " + line.rstrip("\n"), file=sys.stderr)
        if enforce:
            raise DXSyntaxError(filename + " has a syntax error")
    except py_compile.PyCompileError as e:
        print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
        print(" " + e.msg.strip(), file=sys.stderr)
        if enforce:
            raise DXSyntaxError(e.msg.strip())
[ "def", "_check_file_syntax", "(", "filename", ",", "temp_dir", ",", "override_lang", "=", "None", ",", "enforce", "=", "True", ")", ":", "def", "check_python", "(", "filename", ")", ":", "# Generate a semi-recognizable name to write the pyc to. Of", "# course it's possi...
38.557377
20.721311
def plotProgenitor(self,d1='x',d2='z',*args,**kwargs):
    """
    NAME:
       plotProgenitor
    PURPOSE:
       plot the progenitor orbit
    INPUT:
       d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
       d2= plot this on the Y axis (same list as for d1)
       scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
       bovy_plot.bovy_plot args and kwargs
    OUTPUT:
       plot to output device
    HISTORY:
       2013-12-09 - Written - Bovy (IAS)
    """
    tts= self._progenitor._orb.t[self._progenitor._orb.t \
                                     < self._trackts[self._nTrackChunks-1]]
    obs= [self._R0,0.,self._Zsun]
    obs.extend(self._vsun)
    phys= kwargs.pop('scaleToPhysical',False)
    tx= self._parse_progenitor_dim(d1,tts,ro=self._ro,vo=self._vo,
                                   obs=obs,phys=phys)
    ty= self._parse_progenitor_dim(d2,tts,ro=self._ro,vo=self._vo,
                                   obs=obs,phys=phys)
    bovy_plot.bovy_plot(tx,ty,*args,
                        xlabel=_labelDict[d1.lower()],
                        ylabel=_labelDict[d2.lower()],
                        **kwargs)
    return None
[ "def", "plotProgenitor", "(", "self", ",", "d1", "=", "'x'", ",", "d2", "=", "'z'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tts", "=", "self", ".", "_progenitor", ".", "_orb", ".", "t", "[", "self", ".", "_progenitor", ".", "_orb", ...
30.976744
25.348837
def build_data_access(host, port, database_name, collection_name):
    """
    Create data access gateway.

    :param host: The database server to connect to.
    :type host: str
    :param port: Database port.
    :type port: int
    :param database_name: Database name.
    :type database_name: str
    :param collection_name: Name of the collection with Sacred runs.
    :type collection_name: str
    """
    return PyMongoDataAccess("mongodb://%s:%d" % (host, port),
                             database_name, collection_name)
[ "def", "build_data_access", "(", "host", ",", "port", ",", "database_name", ",", "collection_name", ")", ":", "return", "PyMongoDataAccess", "(", "\"mongodb://%s:%d\"", "%", "(", "host", ",", "port", ")", ",", "database_name", ",", "collection_name", ")" ]
38.066667
15
def get_user_roles(self, user):
    """Get roles associated with the given user.

    Args:
        user (string): User name.

    Returns:
        (list): List of roles that user has.

    Raises:
        requests.HTTPError on failure.
    """
    return self.service.get_user_roles(
        user,
        self.url_prefix, self.auth, self.session, self.session_send_opts)
[ "def", "get_user_roles", "(", "self", ",", "user", ")", ":", "return", "self", ".", "service", ".", "get_user_roles", "(", "user", ",", "self", ".", "url_prefix", ",", "self", ".", "auth", ",", "self", ".", "session", ",", "self", ".", "session_send_opts...
27.928571
18.857143
def get_trainer(self, id_, respect_privacy=True, detail=True):
    """Returns the Trainer object for the ID"""
    parameters = {}
    if respect_privacy is False:
        parameters['statistics'] = 'force'
    if detail is False:
        parameters['detail'] = 'low'
    r = requests.get(api_url+'trainers/'+str(id_)+'/', headers=self.headers) if respect_privacy is True else requests.get(api_url+'trainers/'+str(id_)+'/', params=parameters, headers=self.headers)
    print(request_status(r))
    r.raise_for_status()
    return Trainer(r.json())
[ "def", "get_trainer", "(", "self", ",", "id_", ",", "respect_privacy", "=", "True", ",", "detail", "=", "True", ")", ":", "parameters", "=", "{", "}", "if", "respect_privacy", "is", "False", ":", "parameters", "[", "'statistics'", "]", "=", "'force'", "i...
39.692308
27.769231
def new_linsolver(name,prop):
    """
    Creates a linear solver.

    Parameters
    ----------
    name : string
    prop : string

    Returns
    -------
    solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`
    """
    if name == 'mumps':
        return LinSolverMUMPS(prop)
    elif name == 'superlu':
        return LinSolverSUPERLU(prop)
    elif name == 'umfpack':
        return LinSolverUMFPACK(prop)
    elif name == 'default':
        try:
            return new_linsolver('mumps',prop)
        except ImportError:
            return new_linsolver('superlu',prop)
    else:
        raise ValueError('invalid linear solver name')
[ "def", "new_linsolver", "(", "name", ",", "prop", ")", ":", "if", "name", "==", "'mumps'", ":", "return", "LinSolverMUMPS", "(", "prop", ")", "elif", "name", "==", "'superlu'", ":", "return", "LinSolverSUPERLU", "(", "prop", ")", "elif", "name", "==", "'...
23.888889
18.185185
def write(self, buf):
    """Write bytes to the stream."""
    underflow = self._audio_stream.write(buf)
    if underflow:
        logging.warning('SoundDeviceStream write underflow (size: %d)',
                        len(buf))
    return len(buf)
[ "def", "write", "(", "self", ",", "buf", ")", ":", "underflow", "=", "self", ".", "_audio_stream", ".", "write", "(", "buf", ")", "if", "underflow", ":", "logging", ".", "warning", "(", "'SoundDeviceStream write underflow (size: %d)'", ",", "len", "(", "buf"...
38
14.571429
def time_col_laminar(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
                     DiamTarget, DiamTube, DIM_FRACTAL, RatioHeightDiameter):
    """Calculate single collision time for laminar flow mediated collisions.

    Calculated as a function of floc size.
    """
    return (((1/6) * ((6/np.pi)**(1/3))
             * frac_vol_floc_initial(ConcAl, ConcClay, coag, material) ** (-2/3)
             * (pc.viscosity_kinematic(Temp).magnitude / EnergyDis) ** (1 / 2)
             * (DiamTarget / material.Diameter) ** (2*DIM_FRACTAL/3 - 2)
             )  # End of the numerator
            / (gamma_coag(ConcClay, ConcAl, coag, material,
                          DiamTube, RatioHeightDiameter)
               )  # End of the denominator
            )
[ "def", "time_col_laminar", "(", "EnergyDis", ",", "Temp", ",", "ConcAl", ",", "ConcClay", ",", "coag", ",", "material", ",", "DiamTarget", ",", "DiamTube", ",", "DIM_FRACTAL", ",", "RatioHeightDiameter", ")", ":", "return", "(", "(", "(", "1", "/", "6", ...
50
19.133333
def list_dcm_datain(datain):
    ''' List all DICOM file paths in the datain dictionary of input data.
    '''
    if not isinstance(datain, dict):
        raise ValueError('The input is not a dictionary!')

    dcmlst = []
    # list of mu-map DICOM files
    if 'mumapDCM' in datain:
        dcmump = os.listdir(datain['mumapDCM'])
        # accept only *.dcm extensions
        dcmump = [os.path.join(datain['mumapDCM'],d) for d in dcmump if d.endswith(dcmext)]
        dcmlst += dcmump

    if 'T1DCM' in datain:
        dcmt1 = os.listdir(datain['T1DCM'])
        # accept only *.dcm extensions
        dcmt1 = [os.path.join(datain['T1DCM'],d) for d in dcmt1 if d.endswith(dcmext)]
        dcmlst += dcmt1

    if 'T2DCM' in datain:
        dcmt2 = os.listdir(datain['T2DCM'])
        # accept only *.dcm extensions
        dcmt2 = [os.path.join(datain['T2DCM'],d) for d in dcmt2 if d.endswith(dcmext)]
        dcmlst += dcmt2

    if 'UTE1' in datain:
        dcmute1 = os.listdir(datain['UTE1'])
        # accept only *.dcm extensions
        dcmute1 = [os.path.join(datain['UTE1'],d) for d in dcmute1 if d.endswith(dcmext)]
        dcmlst += dcmute1

    if 'UTE2' in datain:
        dcmute2 = os.listdir(datain['UTE2'])
        # accept only *.dcm extensions
        dcmute2 = [os.path.join(datain['UTE2'],d) for d in dcmute2 if d.endswith(dcmext)]
        dcmlst += dcmute2

    #-list-mode data dcm
    if 'lm_dcm' in datain:
        dcmlst += [datain['lm_dcm']]
    if 'lm_ima' in datain:
        dcmlst += [datain['lm_ima']]

    #-norm
    if 'nrm_dcm' in datain:
        dcmlst += [datain['nrm_dcm']]
    if 'nrm_ima' in datain:
        dcmlst += [datain['nrm_ima']]

    return dcmlst
[ "def", "list_dcm_datain", "(", "datain", ")", ":", "if", "not", "isinstance", "(", "datain", ",", "dict", ")", ":", "raise", "ValueError", "(", "'The input is not a dictionary!'", ")", "dcmlst", "=", "[", "]", "# list of mu-map DICOM files", "if", "'mumapDCM'", ...
30.555556
21.037037
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion

    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the command in the load
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_

    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
[ "def", "__verify_minion_publish", "(", "self", ",", "clear_load", ")", ":", "# Verify that the load is valid", "if", "'peer'", "not", "in", "self", ".", "opts", ":", "return", "False", "if", "not", "isinstance", "(", "self", ".", "opts", "[", "'peer'", "]", ...
39.882353
17.372549
def execute_from_command_line(argv=None):
    """
    Currently the only entrypoint (manage.py, demosys-admin)
    """
    if not argv:
        argv = sys.argv

    # prog_name = argv[0]
    system_commands = find_commands(system_command_dir())
    project_commands = find_commands(project_command_dir())
    project_package = project_package_name()
    command = argv[1] if len(argv) > 1 else None

    # Are we running a core command?
    if command in system_commands:
        cmd = load_command_class('demosys', command)
        cmd.run_from_argv(argv)
    elif command in project_commands:
        cmd = load_command_class(project_package, command)
        cmd.run_from_argv(argv)
    else:
        print("Available commands:")
        for name in system_commands:
            print(" - {}".format(name))
        for name in project_commands:
            print(" - {}".format(name))
[ "def", "execute_from_command_line", "(", "argv", "=", "None", ")", ":", "if", "not", "argv", ":", "argv", "=", "sys", ".", "argv", "# prog_name = argv[0]", "system_commands", "=", "find_commands", "(", "system_command_dir", "(", ")", ")", "project_commands", "="...
30.785714
13.928571
def _wrap_jinja_filter(self, function):
    """Propagate exceptions as undefined values filter."""
    def wrapper(*args, **kwargs):
        """Filter wrapper."""
        try:
            return function(*args, **kwargs)
        except Exception:  # pylint: disable=broad-except
            return NestedUndefined()

    # Copy over Jinja filter decoration attributes.
    for attribute in dir(function):
        if attribute.endswith('filter'):
            setattr(wrapper, attribute, getattr(function, attribute))

    return wrapper
[ "def", "_wrap_jinja_filter", "(", "self", ",", "function", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Filter wrapper.\"\"\"", "try", ":", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ...
37.933333
13.866667
def _read_conf_file(path):
    '''
    Read in a config file from a given path and process it into a dictionary
    '''
    log.debug('Reading configuration from %s', path)
    with salt.utils.files.fopen(path, 'r') as conf_file:
        try:
            conf_opts = salt.utils.yaml.safe_load(conf_file) or {}
        except salt.utils.yaml.YAMLError as err:
            message = 'Error parsing configuration file: {0} - {1}'.format(path, err)
            log.error(message)
            raise salt.exceptions.SaltConfigurationError(message)

        # only interpret documents as a valid conf, not things like strings,
        # which might have been caused by invalid yaml syntax
        if not isinstance(conf_opts, dict):
            message = 'Error parsing configuration file: {0} - conf ' \
                      'should be a document, not {1}.'.format(path, type(conf_opts))
            log.error(message)
            raise salt.exceptions.SaltConfigurationError(message)

        # allow using numeric ids: convert int to string
        if 'id' in conf_opts:
            if not isinstance(conf_opts['id'], six.string_types):
                conf_opts['id'] = six.text_type(conf_opts['id'])
            else:
                conf_opts['id'] = salt.utils.data.decode(conf_opts['id'])

        return conf_opts
[ "def", "_read_conf_file", "(", "path", ")", ":", "log", ".", "debug", "(", "'Reading configuration from %s'", ",", "path", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "'r'", ")", "as", "conf_file", ":", "try", ":", ...
46
24.428571
def execute(helper, config, args):
    """
    Lists environments
    """
    versions = helper.get_versions()
    out("Deployed versions:")
    for version in versions:
        out(version)
[ "def", "execute", "(", "helper", ",", "config", ",", "args", ")", ":", "versions", "=", "helper", ".", "get_versions", "(", ")", "out", "(", "\"Deployed versions:\"", ")", "for", "version", "in", "versions", ":", "out", "(", "version", ")" ]
22.875
8.875
def _get_appoptics(options):
    '''
    Return an appoptics connection object.
    '''
    conn = appoptics_metrics.connect(
        options.get('api_token'),
        sanitizer=appoptics_metrics.sanitize_metric_name,
        hostname=options.get('api_url'))
    log.info("Connected to appoptics.")
    return conn
[ "def", "_get_appoptics", "(", "options", ")", ":", "conn", "=", "appoptics_metrics", ".", "connect", "(", "options", ".", "get", "(", "'api_token'", ")", ",", "sanitizer", "=", "appoptics_metrics", ".", "sanitize_metric_name", ",", "hostname", "=", "options", ...
30.5
13.3
def connect(self, dest_pair):
    """
    Connects to the specified destination through a proxy.
    Uses the same API as socket's connect().
    To select the proxy server, use set_proxy().

    dest_pair - 2-tuple of (IP/hostname, port).
    """
    if len(dest_pair) != 2 or dest_pair[0].startswith("["):
        # Probably IPv6, not supported -- raise an error, and hope
        # Happy Eyeballs (RFC6555) makes sure at least the IPv4
        # connection works...
        raise socket.error("PySocks doesn't support IPv6: %s"
                           % str(dest_pair))

    dest_addr, dest_port = dest_pair

    if self.type == socket.SOCK_DGRAM:
        if not self._proxyconn:
            self.bind(("", 0))
        dest_addr = socket.gethostbyname(dest_addr)

        # If the host address is INADDR_ANY or similar, reset the peer
        # address so that packets are received from any peer
        if dest_addr == "0.0.0.0" and not dest_port:
            self.proxy_peername = None
        else:
            self.proxy_peername = (dest_addr, dest_port)
        return

    proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy

    # Do a minimal input check first
    if (not isinstance(dest_pair, (list, tuple))
            or len(dest_pair) != 2
            or not dest_addr
            or not isinstance(dest_port, int)):
        raise GeneralProxyError("Invalid destination-connection (host, port) pair")

    # We set the timeout here so that we don't hang in connection or during
    # negotiation.
    super(socksocket, self).settimeout(self._timeout)

    if proxy_type is None:
        # Treat like regular socket object
        self.proxy_peername = dest_pair
        super(socksocket, self).settimeout(self._timeout)
        super(socksocket, self).connect((dest_addr, dest_port))
        return

    proxy_addr = self._proxy_addr()

    try:
        # Initial connection to proxy server.
        super(socksocket, self).connect(proxy_addr)

    except socket.error as error:
        # Error while connecting to proxy
        self.close()
        proxy_addr, proxy_port = proxy_addr
        proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
        printable_type = PRINTABLE_PROXY_TYPES[proxy_type]

        msg = "Error connecting to {0} proxy {1}".format(printable_type,
                                                         proxy_server)
        log.debug("%s due to: %s", msg, error)
        raise ProxyConnectionError(msg, error)

    else:
        # Connected to proxy server, now negotiate
        try:
            # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
            negotiate = self._proxy_negotiators[proxy_type]
            negotiate(self, dest_addr, dest_port)
        except socket.error as error:
            # Wrap socket errors
            self.close()
            raise GeneralProxyError("Socket error", error)
        except ProxyError:
            # Protocol error while negotiating with proxy
            self.close()
            raise
[ "def", "connect", "(", "self", ",", "dest_pair", ")", ":", "if", "len", "(", "dest_pair", ")", "!=", "2", "or", "dest_pair", "[", "0", "]", ".", "startswith", "(", "\"[\"", ")", ":", "# Probably IPv6, not supported -- raise an error, and hope", "# Happy Eyeballs...
38.634146
20.073171
def trajectory(self, ini, end, delta, mass_coo, age_in_sec=False,
               online=False):
    """
    create a trajectory out of a stellar model

    Parameters
    ----------
    ini : integer
        Initial model, inital cycle number.
    end : integer
        Final model, final cycle number.
    delta : integer
        Sparsity factor of the frames.
    mass_coo : float
        Mass coordinate for the traj.
    age_in_sec : boolean, optional
        Set to True if age in se file is in seconds (like in MESA).
        The default is False.

    Returns
    --------
    float
        radius_at_mass_coo, density_at_mass_coo,
        temperature_at_mass_coo, age_all

    Notes
    -----
    plus writes a file with the trajectory information to be used with ppn.

    Warning: remove the old trajectory, if you have any for the same
    mass coordinate. You are appending data, not overwriting.

    Update: this method works for output types with indexes going from
    the outside in (MESA) or the other way around. Also the requested
    quantities are linearly interpolated in the mass shell.

    online : boolean, optional
        are you working online in the ipython notebook? If so, you
        will be given an HTML link to download the file.
    """
    filename='traj_'+str(mass_coo)+'.dat'
    f = open(filename,'a')

    radius_at_mass_coo=[]
    density_at_mass_coo=[]
    temperature_at_mass_coo=[]

    masses=self.se.get(list(range(ini,end+1,delta)),'mass')
    temps=self.se.get(list(range(ini,end+1,delta)),'temperature')
    rhos=self.se.get(list(range(ini,end+1,delta)),'rho')
    radii=self.se.get(list(range(ini,end+1,delta)),'radius')
    ages=self.se.get(list(range(ini,end+1,delta)),'age')
    cycs=list(range(ini,end+1,delta))

    age_all=[]
    for i in range(len(ages)):
        age=ages[i]
        if age_in_sec:
            age /= constants.one_year
        mass=masses[i]
        temperature=temps[i]
        rho=rhos[i]
        radius=radii[i]
        my_things=[temperature,rho,radius]
        if mass[0]>mass[len(mass)-1]:
            zone_above=where(mass>mass_coo)[0][-1]
            zone_below=zone_above+1
        else:
            zone_above=where(mass>mass_coo)[0][0]
            zone_below=zone_above-1
        if mass[zone_below]>mass[zone_above]:
            sys.exit("ERROR: finding of zone index confused")
        all_things_interplt=[]
        for thing in my_things:
            thing_interplt=thing[zone_below]+(mass_coo-mass[zone_below])* \
                (thing[zone_above]-thing[zone_below])/(mass[zone_above]-mass[zone_below])
            all_things_interplt.append(thing_interplt)
        this_temperature,this_rho,this_radius=all_things_interplt
        string = str(cycs[i])+' '+str(age)+' '+str(this_temperature)+' '+str(this_rho)
        f.write(string+"\n")
        radius_at_mass_coo.append(this_radius)
        density_at_mass_coo.append(this_rho)
        temperature_at_mass_coo.append(this_temperature)
        age_all.append(age)
    f.close()
    if online:
        return FileLink(filename)
    return radius_at_mass_coo, density_at_mass_coo, temperature_at_mass_coo, age_all
[ "def", "trajectory", "(", "self", ",", "ini", ",", "end", ",", "delta", ",", "mass_coo", ",", "age_in_sec", "=", "False", ",", "online", "=", "False", ")", ":", "filename", "=", "'traj_'", "+", "str", "(", "mass_coo", ")", "+", "'.dat'", "f", "=", ...
36.891304
19.391304
def process_features_online(self, corpus, input_features, output_path, chunk_size=1):
    """
    Process all features of the given corpus and save the processed features in a feature-container.
    The features are processed in **online** mode, chunk by chunk.

    Args:
        corpus (Corpus): The corpus to process the utterances from.
        input_features (FeatureContainer): The feature-container to process the frames from.
        output_path (str): A path to save the feature-container to.
        chunk_size (int): Number of frames to process per chunk.

    Returns:
        FeatureContainer: The feature-container containing the processed features.
    """
    feat_container = containers.FeatureContainer(output_path)
    feat_container.open()

    input_features.open()

    for utterance in corpus.utterances.values():
        sampling_rate = input_features.sampling_rate
        frames = input_features.get(utterance.idx, mem_map=True)

        current_frame = 0

        while current_frame < frames.shape[0]:
            last = current_frame + chunk_size > frames.shape[0]
            to_frame = current_frame + chunk_size
            chunk = frames[current_frame:to_frame]

            processed = self.process_frames(chunk, sampling_rate, current_frame,
                                            last=last, utterance=utterance, corpus=corpus)

            if processed is not None:
                feat_container.append(utterance.idx, processed)

            current_frame += chunk_size

    tf_frame_size, tf_hop_size = self.frame_transform(input_features.frame_size,
                                                      input_features.hop_size)
    feat_container.frame_size = tf_frame_size
    feat_container.hop_size = tf_hop_size
    feat_container.sampling_rate = input_features.sampling_rate

    feat_container.close()

    return feat_container
[ "def", "process_features_online", "(", "self", ",", "corpus", ",", "input_features", ",", "output_path", ",", "chunk_size", "=", "1", ")", ":", "feat_container", "=", "containers", ".", "FeatureContainer", "(", "output_path", ")", "feat_container", ".", "open", ...
40.680851
28.510638
def make_cookie(name, load, seed, expire=0, domain="", path="", timestamp=""):
    """
    Create and return a cookie

    :param name: Cookie name
    :param load: Cookie load
    :param seed: A seed for the HMAC function
    :param expire: Number of minutes before this cookie goes stale
    :param domain: The domain of the cookie
    :param path: The path specification for the cookie
    :return: A tuple to be added to headers
    """
    cookie = SimpleCookie()
    if not timestamp:
        timestamp = str(int(time.mktime(time.gmtime())))
    signature = cookie_signature(seed, load, timestamp)
    cookie[name] = "|".join([load, timestamp, signature])
    if path:
        cookie[name]["path"] = path
    if domain:
        cookie[name]["domain"] = domain
    if expire:
        cookie[name]["expires"] = _expiration(expire,
                                              "%a, %d-%b-%Y %H:%M:%S GMT")

    return tuple(cookie.output().split(": ", 1))
[ "def", "make_cookie", "(", "name", ",", "load", ",", "seed", ",", "expire", "=", "0", ",", "domain", "=", "\"\"", ",", "path", "=", "\"\"", ",", "timestamp", "=", "\"\"", ")", ":", "cookie", "=", "SimpleCookie", "(", ")", "if", "not", "timestamp", ...
35.148148
15.518519
def i2b(self, pkt, x):
    """Convert internal value to internal value"""
    if type(x) is str:
        x = bytes([ ord(i) for i in x ])
    return x
[ "def", "i2b", "(", "self", ",", "pkt", ",", "x", ")", ":", "if", "type", "(", "x", ")", "is", "str", ":", "x", "=", "bytes", "(", "[", "ord", "(", "i", ")", "for", "i", "in", "x", "]", ")", "return", "x" ]
32
11.6
def submit_statistics(self):
    """ Upload the database to the FTP server. Only submit new information contained in the partial database.
        Merge the partial database back into master after a successful upload.
    """
    if not self._hq.get('api_key', False) or not self._enabled:
        return

    for r in ('uuid', 'application_name', 'application_version'):
        if not getattr(self, r, False):
            return False

    self['__submissions__'] += 1

    try:
        # To ensure the usage tracker does not interfere with script functionality, catch all exceptions so any
        # errors always exit nicely.
        tableinfo = self.get_table_info()

        # Get the last row from each table
        json_data = database_to_json(self.dbcon_master, tableinfo)
        json_data.update(database_to_json(self.dbcon_part, tableinfo))

        payload = {'API Key': self._hq['api_key'],
                   'User Identifier': self.uuid,
                   'Application Name': self.application_name,
                   'Application Version': self.application_version,
                   'Data': json_data
                   }

        # For tables with data that has not yet been writen to the database (ie inital values),
        # add them manually to the payload
        for name, info in tableinfo.iteritems():
            if name not in payload['Data']:
                table = self[name]
                if table is None:
                    continue
                if isinstance(table, State):
                    data = 'No State' if table._state == NO_STATE else table._state
                else:
                    data = table.count
                tableinfo[name]['data'] = data
                payload['Data'][name] = tableinfo[name]

        try:
            response = requests.post(self._hq['host'] + '/usagestats/upload',
                                     data=json.dumps(payload),
                                     timeout=self.HQ_DEFAULT_TIMEOUT)
        except Exception as e:
            logging.error(e)
            response = False

        if response and response.status_code == 200:
            success = True
            logger.debug('Submission to %s successful.' % self._hq['host'])
        else:
            success = False

        # If we have a partial database, merge it into the local master and create a new partial
        if self.dbcon_part and success:
            merge_databases(self.dbcon_master, self.dbcon_part)

            # Clear the partial database now that the stats have been uploaded
            for table in get_table_list(self.dbcon_part):
                clear_table(self.dbcon_part, table)

        return success

    except Exception as e:
        logger.error(e)
        self['__submissions__'].delete_last()
        self.stop_watcher()
        return False
[ "def", "submit_statistics", "(", "self", ")", ":", "if", "not", "self", ".", "_hq", ".", "get", "(", "'api_key'", ",", "False", ")", "or", "not", "self", ".", "_enabled", ":", "return", "for", "r", "in", "(", "'uuid'", ",", "'application_name'", ",", ...
43.623188
21.623188
def run(self, args):
    """Parse command line arguments, and run rflint"""
    self.args = self.parse_and_process_args(args)

    if self.args.version:
        print(__version__)
        return 0

    if self.args.rulefile:
        for filename in self.args.rulefile:
            self._load_rule_file(filename)

    if self.args.list:
        self.list_rules()
        return 0

    if self.args.describe:
        self._describe_rules(self.args.args)
        return 0

    self.counts = { ERROR: 0, WARNING: 0, "other": 0}

    for filename in self.args.args:
        if not (os.path.exists(filename)):
            sys.stderr.write("rflint: %s: No such file or directory\n" % filename)
            continue
        if os.path.isdir(filename):
            self._process_folder(filename)
        else:
            self._process_file(filename)

    if self.counts[ERROR] > 0:
        return self.counts[ERROR] if self.counts[ERROR] < 254 else 255
    return 0
[ "def", "run", "(", "self", ",", "args", ")", ":", "self", ".", "args", "=", "self", ".", "parse_and_process_args", "(", "args", ")", "if", "self", ".", "args", ".", "version", ":", "print", "(", "__version__", ")", "return", "0", "if", "self", ".", ...
29.444444
19.222222
def view_component_planes(self, dimensions=None, figsize=None,
                          colormap=cm.Spectral_r, colorbar=False,
                          bestmatches=False, bestmatchcolors=None,
                          labels=None, zoom=None, filename=None):
    """Observe the component planes in the codebook of the SOM.

    :param dimensions: Optional parameter to specify along which dimension
                       or dimensions should the plotting happen. By
                       default, each dimension is plotted in a sequence of
                       plots.
    :type dimension: int or list of int.
    :param figsize: Optional parameter to specify the size of the figure.
    :type figsize: (int, int)
    :param colormap: Optional parameter to specify the color map to be
                     used.
    :type colormap: matplotlib.colors.Colormap
    :param colorbar: Optional parameter to include a colormap as legend.
    :type colorbar: bool.
    :param bestmatches: Optional parameter to plot best matching units.
    :type bestmatches: bool.
    :param bestmatchcolors: Optional parameter to specify the color of each
                            best matching unit.
    :type bestmatchcolors: list of int.
    :param labels: Optional parameter to specify the label of each point.
    :type labels: list of str.
    :param zoom: Optional parameter to zoom into a region on the map. The
                 first two coordinates of the tuple are the row limits, the
                 second tuple contains the column limits.
    :type zoom: ((int, int), (int, int))
    :param filename: If specified, the plot will not be shown but saved to
                     this file.
    :type filename: str.
    """
    if self.codebook is None:
        raise Exception("The codebook is not available. Either train a map"
                        " or load a codebook from a file")
    if dimensions is None:
        dimensions = range(self.n_dim)
    for i in dimensions:
        plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
                                colorbar, bestmatches, bestmatchcolors,
                                labels, zoom, filename)
    return plt
[ "def", "view_component_planes", "(", "self", ",", "dimensions", "=", "None", ",", "figsize", "=", "None", ",", "colormap", "=", "cm", ".", "Spectral_r", ",", "colorbar", "=", "False", ",", "bestmatches", "=", "False", ",", "bestmatchcolors", "=", "None", "...
53.906977
21.255814
def get(self, identity):
    """
    Constructs a EntityContext

    :param identity: Unique identity of the Entity

    :returns: twilio.rest.authy.v1.service.entity.EntityContext
    :rtype: twilio.rest.authy.v1.service.entity.EntityContext
    """
    return EntityContext(self._version, service_sid=self._solution['service_sid'], identity=identity, )
[ "def", "get", "(", "self", ",", "identity", ")", ":", "return", "EntityContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "identity", "=", "identity", ",", ")" ]
37.3
23.5
def switch_client(self):
    """
    Switch client to this session.

    Raises
    ------
    :exc:`exc.LibTmuxException`
    """
    proc = self.cmd('switch-client', '-t%s' % self.id)

    if proc.stderr:
        raise exc.LibTmuxException(proc.stderr)
[ "def", "switch_client", "(", "self", ")", ":", "proc", "=", "self", ".", "cmd", "(", "'switch-client'", ",", "'-t%s'", "%", "self", ".", "id", ")", "if", "proc", ".", "stderr", ":", "raise", "exc", ".", "LibTmuxException", "(", "proc", ".", "stderr", ...
21.461538
18.538462
def new_image_like(self, data):
    """
    Create a new ANTsImage with the same header information, but with
    a new image array.

    Arguments
    ---------
    data : ndarray or py::capsule
        New array or pointer for the image.
        It must have the same shape as the current image data.

    Returns
    -------
    ANTsImage
    """
    if not isinstance(data, np.ndarray):
        raise ValueError('data must be a numpy array')
    if not self.has_components:
        if data.shape != self.shape:
            raise ValueError('given array shape (%s) and image array shape (%s) do not match' % (data.shape, self.shape))
    else:
        if (data.shape[-1] != self.components) or (data.shape[:-1] != self.shape):
            raise ValueError('given array shape (%s) and image array shape (%s) do not match' % (data.shape[1:], self.shape))

    return iio2.from_numpy(data, origin=self.origin,
                           spacing=self.spacing, direction=self.direction,
                           has_components=self.has_components)
[ "def", "new_image_like", "(", "self", ",", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "raise", "ValueError", "(", "'data must be a numpy array'", ")", "if", "not", "self", ".", "has_components", ":", "if"...
38.928571
23.428571
def validate_data_privacy(request, data, wrapper_kw=None):
    """ Validate :data: contains only data allowed by privacy settings.

    :param request: Pyramid Request instance
    :param data: Dict containing request/response data which should be
        validated
    """
    from nefertari import wrappers
    if wrapper_kw is None:
        wrapper_kw = {}

    wrapper = wrappers.apply_privacy(request)
    allowed_fields = wrapper(result=data, **wrapper_kw).keys()
    data = data.copy()
    data.pop('_type', None)
    not_allowed_fields = set(data.keys()) - set(allowed_fields)

    if not_allowed_fields:
        raise wrappers.ValidationError(', '.join(not_allowed_fields))
[ "def", "validate_data_privacy", "(", "request", ",", "data", ",", "wrapper_kw", "=", "None", ")", ":", "from", "nefertari", "import", "wrappers", "if", "wrapper_kw", "is", "None", ":", "wrapper_kw", "=", "{", "}", "wrapper", "=", "wrappers", ".", "apply_priv...
34.947368
18.736842
def get(self, name=None):
    """
    Gets a list of all recipes, which are registered by the current plugin.
    If a name is provided, only the requested recipe is returned or None.

    :param: name: Name of the recipe
    """
    return self.__app.recipes.get(name, self._plugin)
[ "def", "get", "(", "self", ",", "name", "=", "None", ")", ":", "return", "self", ".", "__app", ".", "recipes", ".", "get", "(", "name", ",", "self", ".", "_plugin", ")" ]
37.5
18.5
def squad(R_in, t_in, t_out):
    """Spherical "quadrangular" interpolation of rotors with a cubic spline

    This is the best way to interpolate rotations. It uses the analog of a
    cubic spline, except that the interpolant is confined to the rotor
    manifold in a natural way. Alternative methods involving interpolation of
    other coordinates on the rotation group or normalization of interpolated
    values give bad results. The results from this method are as natural as
    any, and are continuous in first and second derivatives.

    The input `R_in` rotors are assumed to be reasonably continuous (no sign
    flips), and the input `t` arrays are assumed to be sorted. No checking is
    done for either case, and you may get silently bad results if these
    conditions are violated.

    This function simplifies the calling, compared to `squad_evaluate` (which
    takes a set of four quaternions forming the edges of the "quadrangle",
    and the normalized time `tau`) and `squad_vectorized` (which takes the
    same arguments, but in array form, and efficiently loops over them).

    Parameters
    ----------
    R_in: array of quaternions
        A time-series of rotors (unit quaternions) to be interpolated
    t_in: array of float
        The times corresponding to R_in
    t_out: array of float
        The times to which R_in should be interpolated
    """
    if R_in.size == 0 or t_out.size == 0:
        return np.array((), dtype=np.quaternion)

    # This list contains an index for each `t_out` such that
    #   t_in[i-1] <= t_out < t_in[i]
    # Note that `side='right'` is much faster in my tests
    # i_in_for_out = t_in.searchsorted(t_out, side='left')
    # np.clip(i_in_for_out, 0, len(t_in) - 1, out=i_in_for_out)
    i_in_for_out = t_in.searchsorted(t_out, side='right')-1

    # Now, for each index `i` in `i_in`, we need to compute the
    # interpolation "coefficients" (`A_i`, `B_ip1`).
    #
    # I previously tested an explicit version of the loops below, comparing
    # `stride_tricks.as_strided` with explicit implementation via `roll` (as
    # seen here). I found that the `roll` was significantly more efficient
    # for simple calculations, though the difference is probably totally
    # washed out here. In any case, it might be useful to test again.
    #
    A = R_in * np.exp((- np.log((~R_in) * np.roll(R_in, -1))
                       + np.log((~np.roll(R_in, 1)) * R_in) * ((np.roll(t_in, -1) - t_in) / (t_in - np.roll(t_in, 1)))
                       ) * 0.25)
    B = np.roll(R_in, -1) * np.exp((np.log((~np.roll(R_in, -1)) * np.roll(R_in, -2))
                                    * ((np.roll(t_in, -1) - t_in) / (np.roll(t_in, -2) - np.roll(t_in, -1)))
                                    - np.log((~R_in) * np.roll(R_in, -1))) * -0.25)

    # Correct the first and last A time steps, and last two B time steps.
    # We extend R_in with the following wrap-around values:
    #   R_in[0-1] = R_in[0]*(~R_in[1])*R_in[0]
    #   R_in[n+0] = R_in[-1] * (~R_in[-2]) * R_in[-1]
    #   R_in[n+1] = R_in[0] * (~R_in[-1]) * R_in[0]
    #             = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
    #             = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
    #
    #   A[i] = R_in[i] * np.exp((- np.log((~R_in[i]) * R_in[i+1])
    #                            + np.log((~R_in[i-1]) * R_in[i]) * ((t_in[i+1] - t_in[i]) / (t_in[i] - t_in[i-1]))
    #                            ) * 0.25)
    #   A[0] = R_in[0] * np.exp((- np.log((~R_in[0]) * R_in[1]) + np.log((~R_in[0])*R_in[1]*(~R_in[0])) * R_in[0]) * 0.25)
    #        = R_in[0]
    A[0] = R_in[0]
    #   A[-1] = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0])
    #                              + np.log((~R_in[-2]) * R_in[-1]) * ((t_in[n+0] - t_in[-1]) / (t_in[-1] - t_in[-2]))
    #                              ) * 0.25)
    #         = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
    #         = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                              + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
    #         = R_in[-1] * np.exp((- np.log((~R_in[-2]) * R_in[-1]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
    #         = R_in[-1]
    A[-1] = R_in[-1]
    #   B[i] = R_in[i+1] * np.exp((np.log((~R_in[i+1]) * R_in[i+2]) * ((t_in[i+1] - t_in[i]) / (t_in[i+2] - t_in[i+1]))
    #                              - np.log((~R_in[i]) * R_in[i+1])) * -0.25)
    #   B[-2] = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) * ((t_in[-1] - t_in[-2]) / (t_in[0] - t_in[-1]))
    #                              - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #         = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #         = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                              - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #         = R_in[-1] * np.exp((np.log((~R_in[-2]) * R_in[-1]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #         = R_in[-1]
    B[-2] = R_in[-1]
    #   B[-1] = R_in[0]
    #   B[-1] = R_in[0] * np.exp((np.log((~R_in[0]) * R_in[1]) - np.log((~R_in[-1]) * R_in[0])) * -0.25)
    #         = R_in[-1] * (~R_in[-2]) * R_in[-1]
    #           * np.exp((np.log((~(R_in[-1] * (~R_in[-2]) * R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                     - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
    #         = R_in[-1] * (~R_in[-2]) * R_in[-1]
    #           * np.exp((np.log(((~R_in[-1]) * R_in[-2] * (~R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                     - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
    #           * np.exp((np.log((~R_in[-2]) * R_in[-1])
    #                     - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    B[-1] = R_in[-1] * (~R_in[-2]) * R_in[-1]

    # Use the coefficients at the corresponding t_out indices to
    # compute the squad interpolant
    # R_ip1 = np.array(np.roll(R_in, -1)[i_in_for_out])
    # R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
    R_ip1 = np.roll(R_in, -1)
    R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
    R_ip1 = np.array(R_ip1[i_in_for_out])

    t_inp1 = np.roll(t_in, -1)
    t_inp1[-1] = t_in[-1] + (t_in[-1] - t_in[-2])
    tau = (t_out - t_in[i_in_for_out]) / ((t_inp1 - t_in)[i_in_for_out])
    # tau = (t_out - t_in[i_in_for_out]) / ((np.roll(t_in, -1) - t_in)[i_in_for_out])

    R_out = np.squad_vectorized(tau, R_in[i_in_for_out], A[i_in_for_out], B[i_in_for_out], R_ip1)

    return R_out
[ "def", "squad", "(", "R_in", ",", "t_in", ",", "t_out", ")", ":", "if", "R_in", ".", "size", "==", "0", "or", "t_out", ".", "size", "==", "0", ":", "return", "np", ".", "array", "(", "(", ")", ",", "dtype", "=", "np", ".", "quaternion", ")", ...
55.760684
29.863248
def get_objective_banks(self):
    """Pass through to provider ObjectiveBankLookupSession.get_objective_banks"""
    # Implemented from kitosid template for -
    # osid.resource.BinLookupSession.get_bins_template
    catalogs = self._get_provider_session('objective_bank_lookup_session').get_objective_banks()
    cat_list = []
    for cat in catalogs:
        cat_list.append(ObjectiveBank(self._provider_manager, cat, self._runtime, self._proxy))
    return ObjectiveBankList(cat_list)
[ "def", "get_objective_banks", "(", "self", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.BinLookupSession.get_bins_template", "catalogs", "=", "self", ".", "_get_provider_session", "(", "'objective_bank_lookup_session'", ")", ".", "get_objective_banks", ...
56.888889
21
def _createBitpattern(functioncode, value):
    """Create the bit pattern that is used for writing single bits.

    This is basically a storage of numerical constants.

    Args:
        * functioncode (int): can be 5 or 15
        * value (int): can be 0 or 1

    Returns:
        The bit pattern (string).

    Raises:
        TypeError, ValueError
    """
    _checkFunctioncode(functioncode, [5, 15])
    _checkInt(value, minvalue=0, maxvalue=1, description='inputvalue')

    if functioncode == 5:
        if value == 0:
            return '\x00\x00'
        else:
            return '\xff\x00'
    elif functioncode == 15:
        if value == 0:
            return '\x00'
        else:
            return '\x01'
[ "def", "_createBitpattern", "(", "functioncode", ",", "value", ")", ":", "_checkFunctioncode", "(", "functioncode", ",", "[", "5", ",", "15", "]", ")", "_checkInt", "(", "value", ",", "minvalue", "=", "0", ",", "maxvalue", "=", "1", ",", "description", "...
23.066667
20.533333
def load_recipe(self):
    '''load will return a loaded in singularity recipe. The idea
       is that these sections can then be parsed into a Dockerfile,
       or printed back into their original form.

       Returns
       =======
       config: a parsed recipe Singularity recipe
    '''
    # Comments between sections, add to top of file
    lines = self.lines.copy()
    comments = []

    # Start with a fresh config!
    self.config = dict()

    section = None
    name = None

    while len(lines) > 0:

        # Clean up white trailing/leading space
        line = lines.pop(0)
        stripped = line.strip()

        # Bootstrap Line
        if re.search('(b|B)(o|O){2}(t|T)(s|S)(t|T)(r|R)(a|A)(p|P)', line):
            self._load_bootstrap(stripped)

        # From Line
        if re.search('(f|F)(r|R)(O|o)(m|M)', stripped):
            self._load_from(stripped)

        # Comment
        if stripped.startswith("#"):
            comments.append(stripped)
            continue

        # Section
        elif stripped.startswith('%'):
            section = self._add_section(stripped)
            bot.debug("Adding section title %s" %section)

        # If we have a section, and are adding it
        elif section is not None:
            lines = [line] + lines
            self._load_section(lines=lines, section=section)

    self.config['comments'] = comments
[ "def", "load_recipe", "(", "self", ")", ":", "# Comments between sections, add to top of file", "lines", "=", "self", ".", "lines", ".", "copy", "(", ")", "comments", "=", "[", "]", "# Start with a fresh config!", "self", ".", "config", "=", "dict", "(", ")", ...
29.960784
19.764706
def p_function_call(p):
    """
    FunctionCall : FuncQName FormalArguments
    """
    # FIXME: This production also matches NodeType() or
    # processing-instruction("foo"), which are technically NodeTest
    qname = p[1]
    p[0] = ast.FunctionCall(qname[0], qname[1], p[2])
[ "def", "p_function_call", "(", "p", ")", ":", "# FIXME: This production also matches NodeType() or", "# processing-instruction(\"foo\"), which are technically NodeTest", "qname", "=", "p", "[", "1", "]", "p", "[", "0", "]", "=", "ast", ".", "FunctionCall", "(", "qname",...
34
12.5
def get_required_columns(self):
    """Lists names of columns that have required fields."""
    required_columns = []
    if self.formset_class:
        empty_form = self.get_formset().empty_form
        for column in self.columns.values():
            field = empty_form.fields.get(column.name)
            if field and field.required:
                required_columns.append(column.name)
    return required_columns
[ "def", "get_required_columns", "(", "self", ")", ":", "required_columns", "=", "[", "]", "if", "self", ".", "formset_class", ":", "empty_form", "=", "self", ".", "get_formset", "(", ")", ".", "empty_form", "for", "column", "in", "self", ".", "columns", "."...
44.4
9.9
def clear_xcom_data(self, session=None):
    """
    Clears all XCom data from the database for the task instance
    """
    session.query(XCom).filter(
        XCom.dag_id == self.dag_id,
        XCom.task_id == self.task_id,
        XCom.execution_date == self.execution_date
    ).delete()
    session.commit()
[ "def", "clear_xcom_data", "(", "self", ",", "session", "=", "None", ")", ":", "session", ".", "query", "(", "XCom", ")", ".", "filter", "(", "XCom", ".", "dag_id", "==", "self", ".", "dag_id", ",", "XCom", ".", "task_id", "==", "self", ".", "task_id"...
34.1
8.7
def get_all_attributes(klass_or_instance):
    """Get all attribute members (attribute, property style method).
    """
    pairs = list()
    for attr, value in inspect.getmembers(
            klass_or_instance, lambda x: not inspect.isroutine(x)):
        if not (attr.startswith("__") or attr.endswith("__")):
            pairs.append((attr, value))
    return pairs
[ "def", "get_all_attributes", "(", "klass_or_instance", ")", ":", "pairs", "=", "list", "(", ")", "for", "attr", ",", "value", "in", "inspect", ".", "getmembers", "(", "klass_or_instance", ",", "lambda", "x", ":", "not", "inspect", ".", "isroutine", "(", "x...
40.111111
11.111111
def from_html(html, url=None, download_date=None):
    """
    Extracts relevant information from an HTML page given as a string.
    This function does not invoke scrapy but only uses the article extractor.
    If you have the original URL make sure to provide it as this helps
    NewsPlease to extract the publishing date and title.
    :param html:
    :param url:
    :return:
    """
    extractor = article_extractor.Extractor(
        ['newspaper_extractor', 'readability_extractor', 'date_extractor', 'lang_detect_extractor'])

    title_encoded = ''.encode()
    if not url:
        url = ''

    # if an url was given, we can use that as the filename
    filename = urllib.parse.quote_plus(url) + '.json'

    item = NewscrawlerItem()
    item['spider_response'] = DotMap()
    item['spider_response'].body = html
    item['url'] = url
    item['source_domain'] = urllib.parse.urlparse(url).hostname.encode() if url != '' else ''.encode()
    item['html_title'] = title_encoded
    item['rss_title'] = title_encoded
    item['local_path'] = None
    item['filename'] = filename
    item['download_date'] = download_date
    item['modified_date'] = None
    item = extractor.extract(item)

    tmp_article = ExtractedInformationStorage.extract_relevant_info(item)
    final_article = ExtractedInformationStorage.convert_to_class(tmp_article)
    return final_article
[ "def", "from_html", "(", "html", ",", "url", "=", "None", ",", "download_date", "=", "None", ")", ":", "extractor", "=", "article_extractor", ".", "Extractor", "(", "[", "'newspaper_extractor'", ",", "'readability_extractor'", ",", "'date_extractor'", ",", "'lan...
41.657143
21.828571
def identify_vcs(repo_url, guess=False, repo_aliases=REPO_ALIASES):
    """
    Determines the type of repo that `repo_url` represents.

    :param repo_url: Repo URL of unknown type.
    :returns: VCS type (git, hg, etc) or raises UnknownVCS exception.
    """
    repo_url = unicode(repo_url)

    # Do basic alias check
    vcs = identify_vcs_vs_alias(repo_url, guess=guess, repo_aliases=repo_aliases)
    if vcs:
        return vcs

    # remove prefix and try again
    no_prefix = ''.join(repo_url.split("//")[1:])
    vcs = identify_vcs_vs_alias(no_prefix, guess=guess, repo_aliases=repo_aliases)
    if vcs:
        return vcs

    if guess:
        if "bitbucket" in repo_url:
            return "hg"

    raise UnknownVCS
[ "def", "identify_vcs", "(", "repo_url", ",", "guess", "=", "False", ",", "repo_aliases", "=", "REPO_ALIASES", ")", ":", "repo_url", "=", "unicode", "(", "repo_url", ")", "# Do basic alias check", "vcs", "=", "identify_vcs_vs_alias", "(", "repo_url", ",", "guess"...
29.458333
22.208333
def get_config(self):
    """
    Load user configuration or return default when not found.

    :rtype: :class:`Configuration`
    """
    if not self._config:
        namespace = {}
        if os.path.exists(self.config_path):
            execfile(self.config_path, namespace)
        self._config = namespace.get('config') or Configuration()
    return self._config
[ "def", "get_config", "(", "self", ")", ":", "if", "not", "self", ".", "_config", ":", "namespace", "=", "{", "}", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "config_path", ")", ":", "execfile", "(", "self", ".", "config_path", ",", "...
30.538462
16.538462
def _add_page(self, text):
    """
    Helper function for PDFText, to have the document add a page,
    and retry adding a large block of text that would otherwise
    have been to long for the page.
    """
    save_cursor = self.parent.document.page.cursor.copy()
    save_cursor.x_reset()
    save_cursor.y_reset()
    self.parent.document.add_page()
    self.parent.document.set_cursor(save_cursor)
    self.parent.document.add_text(text)
[ "def", "_add_page", "(", "self", ",", "text", ")", ":", "save_cursor", "=", "self", ".", "parent", ".", "document", ".", "page", ".", "cursor", ".", "copy", "(", ")", "save_cursor", ".", "x_reset", "(", ")", "save_cursor", ".", "y_reset", "(", ")", "...
38.461538
13.384615
def participate(self, identity, experiment_name, variant):
    """
    Set the variant for a specific user and mark a participation for the
    experiment.

    Participation will *only* be marked for visitors who have been verified
    as humans (to avoid skewing reports with requests from bots and web
    crawlers).
    """
    self.set_variant(identity, experiment_name, variant)
    if self.is_verified_human(identity):
        self.mark_participant(experiment_name, variant)
[ "def", "participate", "(", "self", ",", "identity", ",", "experiment_name", ",", "variant", ")", ":", "self", ".", "set_variant", "(", "identity", ",", "experiment_name", ",", "variant", ")", "if", "self", ".", "is_verified_human", "(", "identity", ")", ":",...
42.5
21.166667
def connect_to_region(region_name, **kw_params):
    """
    Given a valid region name, return a
    :class:`boto.sns.connection.SNSConnection`.

    :type: str
    :param region_name: The name of the region to connect to.

    :rtype: :class:`boto.sns.connection.SNSConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
             name is given
    """
    for region in regions():
        if region.name == region_name:
            return region.connect(**kw_params)
    return None
[ "def", "connect_to_region", "(", "region_name", ",", "*", "*", "kw_params", ")", ":", "for", "region", "in", "regions", "(", ")", ":", "if", "region", ".", "name", "==", "region_name", ":", "return", "region", ".", "connect", "(", "*", "*", "kw_params", ...
32.625
16.125
def aggregate(self, instance, owner):
    """Given an instance and a class, aggregate together some panglers.

    Walks every class in the MRO of the `owner` class, including `owner`,
    collecting panglers exposed as `self.attr_name`. The resulting pangler
    will be bound to the provided `instance`.
    """
    try:
        p = self.pangler_factory.from_store(instance, self.id)
    except KeyError:
        pass
    else:
        return p
    p = self.pangler_factory(self.id)
    mro = inspect.getmro(owner)
    others = []
    for cls in mro:
        sub_p = getattr(cls, self.attr_name, None)
        if sub_p is None:
            continue
        others.append(sub_p)
    return p.combine(*others).stored_bind(instance)
[ "def", "aggregate", "(", "self", ",", "instance", ",", "owner", ")", ":", "try", ":", "p", "=", "self", ".", "pangler_factory", ".", "from_store", "(", "instance", ",", "self", ".", "id", ")", "except", "KeyError", ":", "pass", "else", ":", "return", ...
32.916667
19
def get_field_type(info):
    """A field python type"""
    type_ = info.get_type()
    cls = get_field_class(type_)
    field = cls(info, type_, None)
    field.setup()
    return field.py_type
[ "def", "get_field_type", "(", "info", ")", ":", "type_", "=", "info", ".", "get_type", "(", ")", "cls", "=", "get_field_class", "(", "type_", ")", "field", "=", "cls", "(", "info", ",", "type_", ",", "None", ")", "field", ".", "setup", "(", ")", "r...
18.8
20.1
def twoslice(field, center=None, size=6.0, cmap='bone_r', vmin=0, vmax=1,
             orientation='vertical', figpad=1.09, off=0.01):
    """
    Plot two parts of the ortho view, the two sections given by
    ``orientation``.
    """
    center = center or [i//2 for i in field.shape]
    slices = []
    for i,c in enumerate(center):
        blank = [np.s_[:]]*len(center)
        blank[i] = c
        slices.append(tuple(blank))

    z,y,x = [float(i) for i in field.shape]
    w = float(x + z)
    h = float(y + z)

    def show(field, ax, slicer, transpose=False):
        tmp = field[slicer] if not transpose else field[slicer].T
        ax.imshow(
            tmp, cmap=cmap, interpolation='nearest',
            vmin=vmin, vmax=vmax
        )
        ax.set_xticks([])
        ax.set_yticks([])
        ax.grid('off')

    if orientation.startswith('v'):
        # rect = l,b,w,h
        log.info('{} {} {} {} {} {}'.format(x, y, z, w, h, x/h))
        r = x/h
        q = y/h
        f = 1 / (1 + 3*off)
        fig = pl.figure(figsize=(size*r, size*f))
        ax1 = fig.add_axes((off, f*(1-q)+2*off, f, f*q))
        ax2 = fig.add_axes((off, off, f, f*(1-q)))
        show(field, ax1, slices[0])
        show(field, ax2, slices[1])
    else:
        # rect = l,b,w,h
        r = y/w
        q = x/w
        f = 1 / (1 + 3*off)
        fig = pl.figure(figsize=(size*f, size*r))
        ax1 = fig.add_axes((off, off, f*q, f))
        ax2 = fig.add_axes((2*off+f*q, off, f*(1-q), f))
        show(field, ax1, slices[0])
        show(field, ax2, slices[2], transpose=True)

    return fig, ax1, ax2
[ "def", "twoslice", "(", "field", ",", "center", "=", "None", ",", "size", "=", "6.0", ",", "cmap", "=", "'bone_r'", ",", "vmin", "=", "0", ",", "vmax", "=", "1", ",", "orientation", "=", "'vertical'", ",", "figpad", "=", "1.09", ",", "off", "=", ...
30.666667
18.352941
def get_component(self, component_name):
    """
    Looks up a component by its name.

    Args:
        component_name: The name of the component to look up.

    Returns:
        The component for the provided name or None if there is no
        such component.
    """
    mapping = self.get_components()
    return mapping[component_name] if component_name in mapping else None
[ "def", "get_component", "(", "self", ",", "component_name", ")", ":", "mapping", "=", "self", ".", "get_components", "(", ")", "return", "mapping", "[", "component_name", "]", "if", "component_name", "in", "mapping", "else", "None" ]
36.272727
18.272727
def spawn_uwsgi(self, only=None):
    """Spawns uWSGI process(es) which will use configuration(s) from the module.

    Returns list of tuples:
    (configuration_alias, uwsgi_process_id)

    If only one configuration found current process (uwsgiconf) is replaced
    with a new one (uWSGI), otherwise a number of new detached processes is
    spawned.

    :param str|unicode only: Configuration alias to run from the module.
        If not set uWSGI will be spawned for every configuration found
        in the module.

    :rtype: list
    """
    spawned = []
    configs = self.configurations

    if len(configs) == 1:
        alias = configs[0].alias
        UwsgiRunner().spawn(self.fpath, alias, replace=True)
        spawned.append((alias, os.getpid()))

    else:
        for config in configs:  # type: Configuration
            alias = config.alias

            if only is None or alias == only:
                pid = UwsgiRunner().spawn(self.fpath, alias)
                spawned.append((alias, pid))

    return spawned
[ "def", "spawn_uwsgi", "(", "self", ",", "only", "=", "None", ")", ":", "spawned", "=", "[", "]", "configs", "=", "self", ".", "configurations", "if", "len", "(", "configs", ")", "==", "1", ":", "alias", "=", "configs", "[", "0", "]", ".", "alias", ...
33.90625
23.625
def _spectral_norm(self): """ spectral normalization """ w = self.params.get('weight').data(self.ctx) w_mat = nd.reshape(w, [w.shape[0], -1]) _u = self.u.data(self.ctx) _v = None for _ in range(POWER_ITERATION): _v = nd.L2Normalization(nd.dot(_u, w_mat)) _u = nd.L2Normalization(nd.dot(_v, w_mat.T)) sigma = nd.sum(nd.dot(_u, w_mat) * _v) if sigma == 0.: sigma = EPSILON with autograd.pause(): self.u.set_data(_u) return w / sigma
[ "def", "_spectral_norm", "(", "self", ")", ":", "w", "=", "self", ".", "params", ".", "get", "(", "'weight'", ")", ".", "data", "(", "self", ".", "ctx", ")", "w_mat", "=", "nd", ".", "reshape", "(", "w", ",", "[", "w", ".", "shape", "[", "0", ...
27.2
18.2
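For reference, the same one-step power iteration in plain NumPy; this is an illustrative sketch, not the record's MXNet code, and the POWER_ITERATION/EPSILON values are stand-ins for the module constants.
import numpy as np

POWER_ITERATION = 1   # assumed value of the module constant
EPSILON = 1e-12       # assumed value of the module constant

def spectral_norm(w):
    w_mat = w.reshape(w.shape[0], -1)              # flatten trailing dims, as nd.reshape does
    u = np.random.randn(1, w_mat.shape[0])
    u /= np.linalg.norm(u)
    for _ in range(POWER_ITERATION):
        v = u @ w_mat
        v /= np.linalg.norm(v)                     # nd.L2Normalization equivalent
        u = v @ w_mat.T
        u /= np.linalg.norm(u)
    sigma = (u @ w_mat * v).sum()                  # estimate of the largest singular value
    return w / (sigma if sigma != 0. else EPSILON)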
def create_all(cls, list_of_kwargs): """Batch method for creating a list of instances Args: list_of_kwargs(list of dicts): A list of dicts where each dict denotes the keyword args that you would pass to the create method separately Examples: >>> Customer.create_all([ ... {'name': 'Vicky', 'age': 34, 'user_id': 1}, ... {'name': 'Ron', 'age': 40, 'user_id': 1, 'gender': 'Male'}]) """ try: return cls.add_all([ cls.new(**kwargs) if kwargs is not None else None for kwargs in list_of_kwargs]) except: cls.session.rollback() raise
[ "def", "create_all", "(", "cls", ",", "list_of_kwargs", ")", ":", "try", ":", "return", "cls", ".", "add_all", "(", "[", "cls", ".", "new", "(", "*", "*", "kwargs", ")", "if", "kwargs", "is", "not", "None", "else", "None", "for", "kwargs", "in", "l...
34.8
22.15
def get_dom(self) -> str: """ Retrieves the current value of the DOM for the step """ if self.is_running: return self.dumps() if self.dom is not None: return self.dom dom = self.dumps() self.dom = dom return dom
[ "def", "get_dom", "(", "self", ")", "->", "str", ":", "if", "self", ".", "is_running", ":", "return", "self", ".", "dumps", "(", ")", "if", "self", ".", "dom", "is", "not", "None", ":", "return", "self", ".", "dom", "dom", "=", "self", ".", "dump...
22.916667
19.333333
def redirect_stderr(self, enabled=True, log_level=logging.ERROR): """ Redirect sys.stderr to file-like object. """ if enabled: if self.__stderr_wrapper: self.__stderr_wrapper.update_log_level(log_level=log_level) else: self.__stderr_wrapper = StdErrWrapper(logger=self, log_level=log_level) self.__stderr_stream = self.__stderr_wrapper else: self.__stderr_stream = _original_stderr # Assign the new stream to sys.stderr sys.stderr = self.__stderr_stream
[ "def", "redirect_stderr", "(", "self", ",", "enabled", "=", "True", ",", "log_level", "=", "logging", ".", "ERROR", ")", ":", "if", "enabled", ":", "if", "self", ".", "__stderr_wrapper", ":", "self", ".", "__stderr_wrapper", ".", "update_log_level", "(", "...
36
18.875
def _get_multiparts(response): """ From this 'multipart/parallel; boundary="874e43d27ec6d83f30f37841bdaf90c7"; charset=utf-8' get this --874e43d27ec6d83f30f37841bdaf90c7 """ boundary = None for part in response.headers.get('Content-Type', '').split(';'): if 'boundary=' in part: boundary = '--{}'.format(part.split('=', 1)[1].strip('\"')) break if not boundary: raise ParseError("Was not able to find the boundary between objects in a multipart response") if response.content is None: return [] response_string = response.content if six.PY3: # Python3 returns bytes, decode for string operations response_string = response_string.decode('latin-1') # help bad responses be more multipart compliant whole_body = response_string.strip('\r\n') no_front_boundary = whole_body.strip(boundary) # The boundary comes with some characters multi_parts = [] for part in no_front_boundary.split(boundary): multi_parts.append(part.strip('\r\n')) return multi_parts
[ "def", "_get_multiparts", "(", "response", ")", ":", "boundary", "=", "None", "for", "part", "in", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", ".", "split", "(", "';'", ")", ":", "if", "'boundary='", "in", "part", ":"...
33.714286
22.114286
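An illustrative call against a stub response object (the class and boundary value below are made up; six and ParseError must be importable as in the record's module):
class StubResponse:
    headers = {'Content-Type': 'multipart/parallel; boundary="abc123"; charset=utf-8'}
    content = b'--abc123\r\n{"id": 1}\r\n--abc123\r\n{"id": 2}'

parts = _get_multiparts(StubResponse())
# parts == ['{"id": 1}', '{"id": 2}']: each element is one part's body, CRLFs stripped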
def _pfp__add_child(self, name, child, stream=None, overwrite=False): """Add a child to the Struct field. If multiple consecutive fields are added with the same name, an implicit array will be created to store all fields of that name. :param str name: The name of the child :param pfp.fields.Field child: The field to add :param bool overwrite: Overwrite existing fields (False) :param pfp.bitwrap.BitwrappedStream stream: unused, but here for compatibility with Union._pfp__add_child :returns: The resulting field added """ if not overwrite and self._pfp__is_non_consecutive_duplicate(name, child): return self._pfp__handle_non_consecutive_duplicate(name, child) elif not overwrite and name in self._pfp__children_map: return self._pfp__handle_implicit_array(name, child) else: child._pfp__parent = self self._pfp__children.append(child) child._pfp__name = name self._pfp__children_map[name] = child return child
[ "def", "_pfp__add_child", "(", "self", ",", "name", ",", "child", ",", "stream", "=", "None", ",", "overwrite", "=", "False", ")", ":", "if", "not", "overwrite", "and", "self", ".", "_pfp__is_non_consecutive_duplicate", "(", "name", ",", "child", ")", ":",...
51.095238
20.095238
def layer(op): '''Decorator for composable network layers.''' def layer_decorated(self, *args, **kwargs): # Automatically set a name if not provided. name = kwargs.setdefault('name', self.get_unique_name(op.__name__)) # Figure out the layer inputs. if len(self.terminals) == 0: raise RuntimeError('No input variables found for layer %s.' % name) elif len(self.terminals) == 1: layer_input = self.terminals[0] else: layer_input = list(self.terminals) # Perform the operation and get the output. layer_output = op(self, layer_input, *args, **kwargs) # Add to layer LUT. self.layers[name] = layer_output # This output is now the input for the next layer. self.feed(layer_output) # Return self for chained calls. return self return layer_decorated
[ "def", "layer", "(", "op", ")", ":", "def", "layer_decorated", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Automatically set a name if not provided.", "name", "=", "kwargs", ".", "setdefault", "(", "'name'", ",", "self", ".", "get_...
38.391304
15.608696
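A self-contained sketch of the composable pattern the decorator enables; TinyNet and the scale op are invented for illustration, only the layer decorator comes from the record.
class TinyNet(object):
    def __init__(self, data):
        self.layers = {'data': data}
        self.terminals = []
        self.feed('data')

    def feed(self, *args):
        self.terminals = [self.layers[a] if isinstance(a, str) else a for a in args]
        return self

    def get_unique_name(self, prefix):
        count = sum(1 for k in self.layers if k.startswith(prefix)) + 1
        return '%s_%d' % (prefix, count)

    @layer
    def scale(self, layer_input, factor, name=None):
        return [x * factor for x in layer_input]  # stand-in for a real tensor op

net = TinyNet([1, 2, 3])
net.scale(10).scale(2)  # chained calls: each output feeds the next layer
print(net.layers)  # {'data': [1, 2, 3], 'scale_1': [10, 20, 30], 'scale_2': [20, 40, 60]}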
def upload_file(self, local_path, remote_path): """ Upload a file from the local filesystem to the remote host :type local_path: str :param local_path: path of local file to upload :type remote_path: str :param remote_path: destination path of upload on remote host """ logger.debug("{0}: uploading {1} to {0}:{2}".format(self.target_address, local_path, remote_path)) try: sftp = paramiko.SFTPClient.from_transport(self.transport()) sftp.put(local_path, remote_path) sftp.close() except SSHException as ex: logger.warn(("{0}: LiME module upload failed with exception:" "{1}".format(self.target_address, ex)))
[ "def", "upload_file", "(", "self", ",", "local_path", ",", "remote_path", ")", ":", "logger", ".", "debug", "(", "\"{0}: uploading {1} to {0}:{2}\"", ".", "format", "(", "self", ".", "target_address", ",", "local_path", ",", "remote_path", ")", ")", "try", ":"...
45.526316
20.263158
def get_all_pipelines(app=''): """Get a list of all the Pipelines in _app_. Args: app (str): Name of Spinnaker Application. Returns: requests.models.Response: Response from Gate containing Pipelines. """ url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app) response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT) assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app) pipelines = response.json() LOG.debug('Pipelines:\n%s', pipelines) return pipelines
[ "def", "get_all_pipelines", "(", "app", "=", "''", ")", ":", "url", "=", "'{host}/applications/{app}/pipelineConfigs'", ".", "format", "(", "host", "=", "API_URL", ",", "app", "=", "app", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "veri...
29.368421
26.263158
def _normalize_hparams(hparams): """Normalize a dict keyed by `HParam`s and/or raw strings. Args: hparams: A `dict` whose keys are `HParam` objects and/or strings representing hyperparameter names, and whose values are hyperparameter values. No two keys may have the same name. Returns: A `dict` whose keys are hyperparameter names (as strings) and whose values are the corresponding hyperparameter values. Raises: ValueError: If two entries in `hparams` share the same hyperparameter name. """ result = {} for (k, v) in six.iteritems(hparams): if isinstance(k, HParam): k = k.name if k in result: raise ValueError("multiple values specified for hparam %r" % (k,)) result[k] = v return result
[ "def", "_normalize_hparams", "(", "hparams", ")", ":", "result", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "hparams", ")", ":", "if", "isinstance", "(", "k", ",", "HParam", ")", ":", "k", "=", "k", ".", ...
31.083333
22.375
def reorder_nodes_before_add_or_move(self, pos, newpos, newdepth, target, siblings, oldpath=None, movebranch=False): """ Handles the reordering of nodes and branches when adding/moving nodes. :returns: A tuple containing the old path and the new path. """ if ( (pos == 'last-sibling') or (pos == 'right' and target == target.get_last_sibling()) ): # easy, the last node last = target.get_last_sibling() newpath = last._inc_path() if movebranch: self.stmts.append( self.get_sql_newpath_in_branches(oldpath, newpath)) else: # do the UPDATE dance if newpos is None: siblings = target.get_siblings() siblings = {'left': siblings.filter(path__gte=target.path), 'right': siblings.filter(path__gt=target.path), 'first-sibling': siblings}[pos] basenum = target._get_lastpos_in_path() newpos = {'first-sibling': 1, 'left': basenum, 'right': basenum + 1}[pos] newpath = self.node_cls._get_path(target.path, newdepth, newpos) # If the move is amongst siblings and is to the left and there # are siblings to the right of its new position then to be on # the safe side we temporarily dump it on the end of the list tempnewpath = None if movebranch and len(oldpath) == len(newpath): parentoldpath = self.node_cls._get_basepath( oldpath, int(len(oldpath) / self.node_cls.steplen) - 1 ) parentnewpath = self.node_cls._get_basepath( newpath, newdepth - 1) if ( parentoldpath == parentnewpath and siblings and newpath < oldpath ): last = target.get_last_sibling() basenum = last._get_lastpos_in_path() tempnewpath = self.node_cls._get_path( newpath, newdepth, basenum + 2) self.stmts.append( self.get_sql_newpath_in_branches( oldpath, tempnewpath)) # Optimisation to only move siblings which need moving # (i.e. if we've got holes, allow them to compress) movesiblings = [] priorpath = newpath for node in siblings: # If the path of the node is already greater than the path # of the previous node it doesn't need shifting if node.path > priorpath: break # It does need shifting, so add to the list movesiblings.append(node) # Calculate the path that it would be moved to, as that's # the next "priorpath" priorpath = node._inc_path() movesiblings.reverse() for node in movesiblings: # moving the siblings (and their branches) at the right of the # related position one step to the right sql, vals = self.get_sql_newpath_in_branches( node.path, node._inc_path()) self.stmts.append((sql, vals)) if movebranch: if oldpath.startswith(node.path): # if moving to a parent, update oldpath since we just # increased the path of the entire branch oldpath = vals[0] + oldpath[len(vals[0]):] if target.path.startswith(node.path): # and if we moved the target, update the object # django made for us, since the update won't do it # maybe useful in loops target.path = vals[0] + target.path[len(vals[0]):] if movebranch: # node to move if tempnewpath: self.stmts.append( self.get_sql_newpath_in_branches( tempnewpath, newpath)) else: self.stmts.append( self.get_sql_newpath_in_branches( oldpath, newpath)) return oldpath, newpath
[ "def", "reorder_nodes_before_add_or_move", "(", "self", ",", "pos", ",", "newpos", ",", "newdepth", ",", "target", ",", "siblings", ",", "oldpath", "=", "None", ",", "movebranch", "=", "False", ")", ":", "if", "(", "(", "pos", "==", "'last-sibling'", ")", ...
44.539216
18.147059
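The reordering above leans entirely on fixed-width materialized paths; this hypothetical helper mirrors what an _inc_path typically does (base-36 digits and 4 characters per depth level are assumptions, not the library's actual values).
ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
STEPLEN = 4

def inc_path(path):
    """Return the sibling path immediately to the right (last segment + 1)."""
    value = int(path[-STEPLEN:], 36) + 1
    key = ''
    while value:
        value, rem = divmod(value, 36)
        key = ALPHABET[rem] + key
    return path[:-STEPLEN] + key.rjust(STEPLEN, '0')

print(inc_path('00010009'))  # '0001000A': next sibling under parent '0001'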
def _get_base(server_certificate, **conn): """Fetch the base IAM Server Certificate.""" server_certificate['_version'] = 1 # Get the initial cert details: cert_details = get_server_certificate_api(server_certificate['ServerCertificateName'], **conn) if cert_details: server_certificate.update(cert_details['ServerCertificateMetadata']) server_certificate['CertificateBody'] = cert_details['CertificateBody'] server_certificate['CertificateChain'] = cert_details.get('CertificateChain', None) # Cast dates from a datetime to something JSON serializable. server_certificate['UploadDate'] = get_iso_string(server_certificate['UploadDate']) server_certificate['Expiration'] = get_iso_string(server_certificate['Expiration']) return server_certificate
[ "def", "_get_base", "(", "server_certificate", ",", "*", "*", "conn", ")", ":", "server_certificate", "[", "'_version'", "]", "=", "1", "# Get the initial cert details:", "cert_details", "=", "get_server_certificate_api", "(", "server_certificate", "[", "'ServerCertific...
47.411765
30.235294
def _getScalesDiag(self,termx=0): """ Internal function for parameter initialization Uses 2 term single trait model to get covar params for initialization Args: termx: non-noise term that is used for initialization """ assert self.P>1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models' assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set' assert termx<self.n_randEffs-1, 'VarianceDecomposition:: termx>=n_randEffs-1' assert self.trait_covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization' assert self.trait_covar_type[termx] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization' scales = [] res = self._getH2singleTrait(self.vd.getTerm(termx).getK()) scaleg = sp.sqrt(res['varg'].mean()) scalen = sp.sqrt(res['varn'].mean()) for term_i in range(self.n_randEffs): if term_i==termx: _scales = scaleg*self.diag[term_i] elif term_i==self.noisPos: _scales = scalen*self.diag[term_i] else: _scales = 0.*self.diag[term_i] if self.jitter[term_i]>0: _scales = sp.concatenate((_scales,sp.array([sp.sqrt(self.jitter[term_i])]))) scales.append(_scales) return sp.concatenate(scales)
[ "def", "_getScalesDiag", "(", "self", ",", "termx", "=", "0", ")", ":", "assert", "self", ".", "P", ">", "1", ",", "'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'", "assert", "self", ".", "noisPos", "is", "not", "None", ",", "'V...
55.357143
27.642857
def set_centralized_assembled_values(self, a): """Set assembled matrix values on processor 0.""" if self.myid != 0: return assert a.size == self.id.nz self._refs.update(a=a) self.id.a = self.cast_array(a)
[ "def", "set_centralized_assembled_values", "(", "self", ",", "a", ")", ":", "if", "self", ".", "myid", "!=", "0", ":", "return", "assert", "a", ".", "size", "==", "self", ".", "id", ".", "nz", "self", ".", "_refs", ".", "update", "(", "a", "=", "a"...
35.714286
8.428571
async def peers(self): """Returns the current Raft peer set Returns: Collection: addresses of peers This endpoint retrieves the Raft peers for the datacenter in which the agent is running. It returns a collection of addresses, such as:: [ "10.1.10.12:8300", "10.1.10.11:8300", "10.1.10.10:8300" ] This list of peers is strongly consistent and can be useful in determining when a given server has successfully joined the cluster. """ response = await self._api.get("/v1/status/peers") if response.status == 200: return set(response.body)
[ "async", "def", "peers", "(", "self", ")", ":", "response", "=", "await", "self", ".", "_api", ".", "get", "(", "\"/v1/status/peers\"", ")", "if", "response", ".", "status", "==", "200", ":", "return", "set", "(", "response", ".", "body", ")" ]
32.619048
21.142857
def update_group(self, group_id, group, force=False, minimal=True): """Update a group. Applies writable settings in `group` to `group_id` Note: this method can not be used to rename groups. :param str group_id: target group ID :param group: group settings :type group: :class:`marathon.models.group.MarathonGroup` :param bool force: apply even if a deployment is in progress :param bool minimal: ignore nulls and empty collections :returns: a dict containing the deployment id and version :rtype: dict """ # Changes won't take if version is set - blank it for convenience group.version = None params = {'force': force} data = group.to_json(minimal=minimal) response = self._do_request( 'PUT', '/v2/groups/{group_id}'.format(group_id=group_id), data=data, params=params) return response.json()
[ "def", "update_group", "(", "self", ",", "group_id", ",", "group", ",", "force", "=", "False", ",", "minimal", "=", "True", ")", ":", "# Changes won't take if version is set - blank it for convenience", "group", ".", "version", "=", "None", "params", "=", "{", "...
38.416667
21.625
def dump_model_data(request, app_label, model_label): """Exports data from a model. """ return dump_to_response(request, '%s.%s' % (app_label, model_label), [], '-'.join((app_label, model_label)))
[ "def", "dump_model_data", "(", "request", ",", "app_label", ",", "model_label", ")", ":", "return", "dump_to_response", "(", "request", ",", "'%s.%s'", "%", "(", "app_label", ",", "model_label", ")", ",", "[", "]", ",", "'-'", ".", "join", "(", "(", "app...
46.4
14.4
def fit(self, X, y): """ Fit the model using X, y as training data. Parameters ---------- X : {array-like, sparse matrix} of shape [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape [n_samples, n_outputs] Target values (class labels in classification, real numbers in regression) Returns ------- self : object Returns an instance of self. """ rhl = self._create_random_layer() self._genelm_regressor = GenELMRegressor(hidden_layer=rhl, regressor=self.regressor) self._genelm_regressor.fit(X, y) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "rhl", "=", "self", ".", "_create_random_layer", "(", ")", "self", ".", "_genelm_regressor", "=", "GenELMRegressor", "(", "hidden_layer", "=", "rhl", ",", "regressor", "=", "self", ".", "regressor"...
32.2
21
def remove_variable(self, variable, *, implied=True, verbose=True): """Remove variable from data. Parameters ---------- variable : int or str Variable index or name to remove. implied : boolean (optional) Toggle deletion of other variables that start with the same name. Default is True. verbose : boolean (optional) Toggle talkback. Default is True. """ if isinstance(variable, int): variable = self.variable_names[variable] # find all of the implied variables removed = [] if implied: for n in self.variable_names: if n.startswith(variable): removed.append(n) else: removed = [variable] # check that axes will not be ruined for n in removed: for a in self._axes: if n in [v.natural_name for v in a.variables]: message = "{0} is contained in axis {1}".format(n, a.expression) raise RuntimeError(message) for c in self._constants: if n in [v.natural_name for v in c.variables]: warnings.warn( "Variable being removed used in a constant", wt_exceptions.WrightToolsWarning, ) # do removal for n in removed: variable_index = wt_kit.get_index(self.variable_names, n) new = list(self.variable_names) name = new.pop(variable_index) del self[name] self.variable_names = new self._variables = None # finish if verbose: print("{0} variable(s) removed:".format(len(removed))) for n in removed: print(" {0}".format(n))
[ "def", "remove_variable", "(", "self", ",", "variable", ",", "*", ",", "implied", "=", "True", ",", "verbose", "=", "True", ")", ":", "if", "isinstance", "(", "variable", ",", "int", ")", ":", "variable", "=", "self", ".", "variable_names", "[", "varia...
37.183673
14
def convert_mask_to_pil(mask, real=True): """Convert Mask to PIL Image.""" from PIL import Image header = mask._layer._psd._record.header channel_ids = [ci.id for ci in mask._layer._record.channel_info] if real and mask._has_real(): width = mask._data.real_right - mask._data.real_left height = mask._data.real_bottom - mask._data.real_top channel = mask._layer._channels[ channel_ids.index(ChannelID.REAL_USER_LAYER_MASK) ] else: width = mask._data.right - mask._data.left height = mask._data.bottom - mask._data.top channel = mask._layer._channels[ channel_ids.index(ChannelID.USER_LAYER_MASK) ] data = channel.get_data(width, height, header.depth, header.version) return _create_channel((width, height), data, header.depth)
[ "def", "convert_mask_to_pil", "(", "mask", ",", "real", "=", "True", ")", ":", "from", "PIL", "import", "Image", "header", "=", "mask", ".", "_layer", ".", "_psd", ".", "_record", ".", "header", "channel_ids", "=", "[", "ci", ".", "id", "for", "ci", ...
43.578947
15.894737
def ilx_conv(graph, prefix, ilx_start): """ convert a set of temporary identifiers to ilx and modify the graph in place """ to_sub = set() for subject in graph.subjects(rdflib.RDF.type, rdflib.OWL.Class): if PREFIXES[prefix] in subject: to_sub.add(subject) ilx_base = 'ilx_{:0>7}' ILX_base = 'ILX:{:0>7}' # ah rdflib/owlapi, you infuriate me ilx_labels = {} replace = {} for sub in sorted(to_sub): ilx_format = ilx_base.format(ilx_start) ILX_format = ILX_base.format(ilx_start) ilx_start += 1 prefix, url, suffix = graph.namespace_manager.compute_qname(sub) curie = prefix + ':' + suffix replace[curie] = ILX_format label = [_ for _ in graph.objects(sub, rdflib.RDFS.label)][0] ilx_labels[ilx_format] = label new_sub = expand('ilx:' + ilx_format) for p, o in graph.predicate_objects(sub): graph.remove((sub, p, o)) graph.add((new_sub, p, o)) for s, p in graph.subject_predicates(sub): graph.remove((s, p, sub)) graph.add((s, p, new_sub)) return ilx_labels, replace
[ "def", "ilx_conv", "(", "graph", ",", "prefix", ",", "ilx_start", ")", ":", "to_sub", "=", "set", "(", ")", "for", "subject", "in", "graph", ".", "subjects", "(", "rdflib", ".", "RDF", ".", "type", ",", "rdflib", ".", "OWL", ".", "Class", ")", ":",...
35.34375
15.6875
def fso_makedirs(self, path, mode=None): 'overlays os.makedirs()' path = self.abs(path) cur = '/' segments = path.split('/') for idx, seg in enumerate(segments): cur = os.path.join(cur, seg) try: st = self.fso_stat(cur) except OSError: st = None if st is None: self.fso_mkdir(cur) continue if idx + 1 == len(segments): raise OSError(17, 'File exists', path) if not stat.S_ISDIR(st.st_mode): raise OSError(20, 'Not a directory', path)
[ "def", "fso_makedirs", "(", "self", ",", "path", ",", "mode", "=", "None", ")", ":", "path", "=", "self", ".", "abs", "(", "path", ")", "cur", "=", "'/'", "segments", "=", "path", ".", "split", "(", "'/'", ")", "for", "idx", ",", "seg", "in", "...
28.888889
12.888889
def sanitize_tx_data(unspents, outputs, fee, leftover, combine=True, message=None, compressed=True, absolute_fee=False, min_change=0, version='main'): """ sanitize_tx_data() fee is in satoshis per byte. """ outputs = outputs.copy() for i, output in enumerate(outputs): dest, amount, currency = output outputs[i] = (dest, currency_to_satoshi_cached(amount, currency)) if not unspents: raise ValueError('Transactions must have at least one unspent.') # Temporary storage so all outputs precede messages. messages = [] if message: message_chunks = chunk_data(message.encode('utf-8'), MESSAGE_LIMIT) for message in message_chunks: messages.append((message, 0)) # Include return address in output count. # Calculate output size as a list (including return address). output_size = [len(address_to_scriptpubkey(o[0])) + 9 for o in outputs] output_size.append(len(messages) * (MESSAGE_LIMIT + 9)) output_size.append(len(address_to_scriptpubkey(leftover)) + 9) sum_outputs = sum(out[1] for out in outputs) # Use Branch-and-Bound for coin selection: unspents[:], remaining = select_coins( sum_outputs, fee, output_size, min_change=min_change, absolute_fee=absolute_fee, consolidate=combine, unspents=unspents ) if remaining > 0: outputs.append((leftover, remaining)) # Sanity check: If spending from main-/testnet, then all output addresses must also be for main-/testnet. for output in outputs: dest, amount = output vs = get_version(dest) if vs and vs != version: raise ValueError('Cannot send to ' + vs + 'net address when ' 'spending from a ' + version + 'net address.') outputs.extend(messages) return unspents, outputs
[ "def", "sanitize_tx_data", "(", "unspents", ",", "outputs", ",", "fee", ",", "leftover", ",", "combine", "=", "True", ",", "message", "=", "None", ",", "compressed", "=", "True", ",", "absolute_fee", "=", "False", ",", "min_change", "=", "0", ",", "versi...
34.314815
23.314815
def _get_bmdl_ratio(self, models): """Return BMDL ratio in list of models.""" bmdls = [model.output["BMDL"] for model in models if model.output["BMDL"] > 0] return max(bmdls) / min(bmdls) if len(bmdls) > 0 else 0
[ "def", "_get_bmdl_ratio", "(", "self", ",", "models", ")", ":", "bmdls", "=", "[", "model", ".", "output", "[", "\"BMDL\"", "]", "for", "model", "in", "models", "if", "model", ".", "output", "[", "\"BMDL\"", "]", ">", "0", "]", "return", "max", "(", ...
46.6
23
def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x
[ "def", "_convertEntities", "(", "self", ",", "match", ")", ":", "x", "=", "match", ".", "group", "(", "1", ")", "if", "self", ".", "convertHTMLEntities", "and", "x", "in", "name2codepoint", ":", "return", "unichr", "(", "name2codepoint", "[", "x", "]", ...
38.791667
11.708333
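The record targets Python 2 (unichr); a self-contained Python 3 analogue of the same substitution looks roughly like this (regex and function names are illustrative):
import re
from html.entities import name2codepoint

def decode_entities(text):
    def convert(match):
        x = match.group(1)
        if x in name2codepoint:                    # named HTML entity
            return chr(name2codepoint[x])
        if x.startswith('#'):                      # numeric entity, decimal or hex
            return chr(int(x[2:], 16)) if x[1:2] in ('x', 'X') else chr(int(x[1:]))
        return '&%s;' % x                          # leave unknown entities escaped

    return re.sub(r'&(#?[xX]?\w{1,8});', convert, text)

print(decode_entities('caf&eacute; &#169; 5 &#x3C; 7'))  # 'café © 5 < 7'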
def spec(self): """Return a SourceSpec to describe this source""" from ambry_sources.sources import SourceSpec d = self.dict d['url'] = self.url # Will get the URL twice; once as ref and once as URL, but the ref is ignored return SourceSpec(**d)
[ "def", "spec", "(", "self", ")", ":", "from", "ambry_sources", ".", "sources", "import", "SourceSpec", "d", "=", "self", ".", "dict", "d", "[", "'url'", "]", "=", "self", ".", "url", "# Will get the URL twice; once as ref and once as URL, but the ref is ignored", ...
28.7
24.4
def _extended_gcd(self, a, b): """ Extended Euclidean algorithm to solve Bezout's identity: a*x + b*y = gcd(a, b) Finds one particular solution for x, y: s, t Returns: gcd, s, t """ s, old_s = 0, 1 t, old_t = 1, 0 r, old_r = b, a while r: quotient = old_r // r old_r, r = r, old_r - quotient * r old_s, s = s, old_s - quotient * s old_t, t = t, old_t - quotient * t return old_r, old_s, old_t
[ "def", "_extended_gcd", "(", "self", ",", "a", ",", "b", ")", ":", "s", ",", "old_s", "=", "0", ",", "1", "t", ",", "old_t", "=", "1", ",", "0", "r", ",", "old_r", "=", "b", ",", "a", "while", "r", ":", "quotient", "=", "old_r", "//", "r", ...
32.3125
10.9375
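A quick sanity check of the Bezout property (self is unused by the algorithm, so None is passed for illustration):
g, s, t = _extended_gcd(None, 240, 46)
assert g == 2 and 240 * s + 46 * t == g
print(g, s, t)  # 2 -9 47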
async def connect(self, timeout=None): """ Connects to the Lavalink player event websocket. Parameters ---------- timeout : int Time after which to timeout on attempting to connect to the Lavalink websocket, ``None`` is considered never, but the underlying code may stop trying past a certain point. Raises ------ asyncio.TimeoutError If the websocket failed to connect after the given time. """ self._is_shutdown = False combo_uri = "ws://{}:{}".format(self.host, self.rest) uri = "ws://{}:{}".format(self.host, self.port) log.debug( "Lavalink WS connecting to %s or %s with headers %s", combo_uri, uri, self.headers ) tasks = tuple({self._multi_try_connect(u) for u in (combo_uri, uri)}) for task in asyncio.as_completed(tasks, timeout=timeout): with contextlib.suppress(Exception): if await cast(Awaitable[Optional[websockets.WebSocketClientProtocol]], task): break else: raise asyncio.TimeoutError log.debug("Creating Lavalink WS listener.") self._listener_task = self.loop.create_task(self.listener()) for data in self._queue: await self.send(data) self.ready.set() self.update_state(NodeState.READY)
[ "async", "def", "connect", "(", "self", ",", "timeout", "=", "None", ")", ":", "self", ".", "_is_shutdown", "=", "False", "combo_uri", "=", "\"ws://{}:{}\"", ".", "format", "(", "self", ".", "host", ",", "self", ".", "rest", ")", "uri", "=", "\"ws://{}...
32.880952
24.642857
def apply_link_ref(offset: int, length: int, value: bytes, bytecode: bytes) -> bytes: """ Returns the new bytecode with `value` put into the location indicated by `offset` and `length`. """ try: validate_empty_bytes(offset, length, bytecode) except ValidationError: raise BytecodeLinkingError("Link references cannot be applied to bytecode") new_bytes = ( # Ignore linting error b/c conflict b/w black & flake8 bytecode[:offset] + value + bytecode[offset + length :] # noqa: E201, E203 ) return new_bytes
[ "def", "apply_link_ref", "(", "offset", ":", "int", ",", "length", ":", "int", ",", "value", ":", "bytes", ",", "bytecode", ":", "bytes", ")", "->", "bytes", ":", "try", ":", "validate_empty_bytes", "(", "offset", ",", "length", ",", "bytecode", ")", "...
35.6875
25.1875
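An illustrative splice; the zero run stands in for an unlinked placeholder and the address is made up (validate_empty_bytes, from the same module, is expected to accept an all-zero slice):
bytecode = b'\x60\x80' + b'\x00' * 20 + b'\x60\x40'   # placeholder bytes at offset 2
address = bytes.fromhex('deadbeef' * 5)               # a fabricated 20-byte address
linked = apply_link_ref(2, 20, address, bytecode)
assert linked[2:22] == address and len(linked) == len(bytecode)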
def get_document( self, name, mask=None, transaction=None, read_time=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a single document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> response = client.get_document(name) Args: name (str): The resource name of the Document to get. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If the document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads the document in a transaction. read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads the version of the document at the given time. This may not be older than 60 seconds. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.firestore_v1beta1.types.Document` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_document" not in self._inner_api_calls: self._inner_api_calls[ "get_document" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_document, default_retry=self._method_configs["GetDocument"].retry, default_timeout=self._method_configs["GetDocument"].timeout, client_info=self._client_info, ) # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. google.api_core.protobuf_helpers.check_oneof( transaction=transaction, read_time=read_time ) request = firestore_pb2.GetDocumentRequest( name=name, mask=mask, transaction=transaction, read_time=read_time ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["get_document"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "get_document", "(", "self", ",", "name", ",", "mask", "=", "None", ",", "transaction", "=", "None", ",", "read_time", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "="...
43.225806
26.989247
def reset(self, force): """Connect to the assigned bucket or create if needed. Clear all the blobs inside.""" client = self.create_client() bucket = client.lookup_bucket(self.bucket_name) if bucket is not None: if not force: self._log.error("Bucket already exists, aborting.") raise ExistingBackendError self._log.info("Bucket already exists, deleting all content.") for blob in bucket.list_blobs(): self._log.info("Deleting %s ..." % blob.name) bucket.delete_blob(blob.name) else: client.create_bucket(self.bucket_name)
[ "def", "reset", "(", "self", ",", "force", ")", ":", "client", "=", "self", ".", "create_client", "(", ")", "bucket", "=", "client", ".", "lookup_bucket", "(", "self", ".", "bucket_name", ")", "if", "bucket", "is", "not", "None", ":", "if", "not", "f...
47.071429
13.571429
def remove_non_ascii(input_string): """Remove non-ascii characters Source: http://stackoverflow.com/a/1342373 """ no_ascii = "".join(i for i in input_string if ord(i) < 128) return no_ascii
[ "def", "remove_non_ascii", "(", "input_string", ")", ":", "no_ascii", "=", "\"\"", ".", "join", "(", "i", "for", "i", "in", "input_string", "if", "ord", "(", "i", ")", "<", "128", ")", "return", "no_ascii" ]
34
9.166667
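For example:
print(remove_non_ascii('héllo wörld'))  # 'hllo wrld': characters with ord >= 128 are dropped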
def remove(self, layers): """ Remove one or more layers from the stack of masking layers. Args: layers: An int, string or list of strings and/or ints. Ints are interpreted as indices in the stack to remove; strings are interpreted as names of layers to remove. Negative ints will also work--i.e., remove(-1) will drop the last layer added. """ if not isinstance(layers, list): layers = [layers] for l in layers: if isinstance(l, string_types): if l not in self.layers: raise ValueError("There's no image/layer named '%s' in " "the masking stack!" % l) self.stack.remove(l) else: l = self.stack.pop(l) del self.layers[l] self.set_mask()
[ "def", "remove", "(", "self", ",", "layers", ")", ":", "if", "not", "isinstance", "(", "layers", ",", "list", ")", ":", "layers", "=", "[", "layers", "]", "for", "l", "in", "layers", ":", "if", "isinstance", "(", "l", ",", "string_types", ")", ":",...
41.761905
17.47619
def simplify_dicts( index, shared, simplify_dicts=SIMPLIFY_DICTS, always_compress=ALWAYS_COMPRESS_DICTS ): """Eliminate "noise" dictionary records from the index index -- overall index of objects (including metadata such as type records) shared -- parent-count mapping for records in index module/type/class dictionaries """ # things which will have their dictionaries compressed out to_delete = set() for to_simplify in iterindex(index): if to_simplify['address'] in to_delete: continue if to_simplify['type'] in simplify_dicts and not 'compressed' in to_simplify: refs = to_simplify['refs'] for ref in refs: child = index.get( ref ) if child is not None and child['type'] == 'dict': child_referrers = child['parents'][:] if len(child_referrers) == 1 or to_simplify['type'] in always_compress: to_simplify['compressed'] = True to_simplify['refs'] = child['refs'] to_simplify['size'] += child['size'] # rewrite anything *else* that was pointing to child to point to us... while to_simplify['address'] in child_referrers: child_referrers.remove( to_simplify['address'] ) if child_referrers: rewrite_refs( child_referrers, child['address'], to_simplify['address'], index, single_ref=True ) # now rewrite grandchildren to point to root obj instead of dict for grandchild in child['refs']: grandchild = index[grandchild] parent_set = grandchild['parents'] if parent_set: rewrite_references( parent_set, child, to_simplify, single_ref = True, ) assert parent_set to_delete.add( child['address'] ) for item in to_delete: del index[item] del shared[item] return index
[ "def", "simplify_dicts", "(", "index", ",", "shared", ",", "simplify_dicts", "=", "SIMPLIFY_DICTS", ",", "always_compress", "=", "ALWAYS_COMPRESS_DICTS", ")", ":", "# things which will have their dictionaries compressed out", "to_delete", "=", "set", "(", ")", "for", "t...
44.631579
19.298246
def is_unused(input, model_file=None, model_proto=None, name=None): """Returns true if input id is unused piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of bool with the same shape as input. """ return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type( input, model_file=model_file, model_proto=model_proto, name=name, piece_type=2)
[ "def", "is_unused", "(", "input", ",", "model_file", "=", "None", ",", "model_proto", "=", "None", ",", "name", "=", "None", ")", ":", "return", "_gen_sentencepiece_processor_op", ".", "sentencepiece_get_piece_type", "(", "input", ",", "model_file", "=", "model_...
38.8125
21.125
def prep_search_string(cls, search_string, match_substrings): """Prepares search string as a proper whoosh search string. :param search_string: The search string which should be prepared. :param match_substrings: ``True`` if you want to match substrings, ``False`` otherwise. """ if sys.version < '3' and not isinstance(search_string, unicode): search_string = search_string.decode('utf-8') s = search_string.strip() # we don't want stars from user s = s.replace('*', '') if len(s) < _get_config(cls)['search_string_min_len']: raise ValueError('Search string must have at least 3 characters') # replace multiple with star space star if match_substrings: s = u'*{0}*'.format(re.sub('[\s]+', '* *', s)) # TODO: some sanitization return s
[ "def", "prep_search_string", "(", "cls", ",", "search_string", ",", "match_substrings", ")", ":", "if", "sys", ".", "version", "<", "'3'", "and", "not", "isinstance", "(", "search_string", ",", "unicode", ")", ":", "search_string", "=", "search_string", ".", ...
46.894737
17.631579
def connect(self): """Connects the client object to redis. It's safe to use this method even if you are already connected. Note: this method is useless with autoconnect mode (default). Returns: a Future object with True as result if the connection was ok. """ if self.is_connected(): raise tornado.gen.Return(True) cb1 = self._read_callback cb2 = self._close_callback self.__callback_queue = collections.deque() self._reply_list = [] self.__reader = hiredis.Reader(replyError=ClientError) kwargs = self.connection_kwargs self.__connection = Connection(cb1, cb2, **kwargs) connection_status = yield self.__connection.connect() if connection_status is not True: # nothing left to do here, return raise tornado.gen.Return(False) if self.password is not None: authentication_status = yield self._call('AUTH', self.password) if authentication_status != b'OK': # incorrect password, return back the result LOG.warning("impossible to connect: bad password") self.__connection.disconnect() raise tornado.gen.Return(False) if self.db != 0: db_status = yield self._call('SELECT', self.db) if db_status != b'OK': LOG.warning("can't select db %s", self.db) raise tornado.gen.Return(False) raise tornado.gen.Return(True)
[ "def", "connect", "(", "self", ")", ":", "if", "self", ".", "is_connected", "(", ")", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "True", ")", "cb1", "=", "self", ".", "_read_callback", "cb2", "=", "self", ".", "_close_callback", "self", ...
43.171429
14.485714
def getAllSensors(self): """ Retrieve all the user's own sensors by iterating over the SensorsGet function @return (list) - Array of sensors """ j = 0 sensors = [] parameters = {'page':0, 'per_page':1000, 'owned':1} while True: parameters['page'] = j if self.SensorsGet(parameters): s = json.loads(self.getResponse())['sensors'] sensors.extend(s) else: # if any of the calls fails, we cannot be sure about the sensors in CommonSense return None if len(s) < 1000: break j += 1 return sensors
[ "def", "getAllSensors", "(", "self", ")", ":", "j", "=", "0", "sensors", "=", "[", "]", "parameters", "=", "{", "'page'", ":", "0", ",", "'per_page'", ":", "1000", ",", "'owned'", ":", "1", "}", "while", "True", ":", "parameters", "[", "'page'", "]...
30.666667
21
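The same page-until-short-page loop, reduced to a generic standalone helper (all names here are invented):
def fetch_all(fetch_page, per_page=1000):
    items, page = [], 0
    while True:
        batch = fetch_page(page=page, per_page=per_page)
        if batch is None:
            return None              # propagate a failed call, as the record does
        items.extend(batch)
        if len(batch) < per_page:
            return items             # a short page means the last page was reached
        page += 1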
def asdict(self, rawkey=False): r"""Convert Result to dict. Parameters: rawkey(bool): * True: dict key is Descriptor instance * False: dict key is str Returns: dict """ if rawkey: return dict(self.items()) else: return { str(k): v for k, v in self.items() }
[ "def", "asdict", "(", "self", ",", "rawkey", "=", "False", ")", ":", "if", "rawkey", ":", "return", "dict", "(", "self", ".", "items", "(", ")", ")", "else", ":", "return", "{", "str", "(", "k", ")", ":", "v", "for", "k", ",", "v", "in", "sel...
21.789474
18
def unsign(self, token): """ Extract the data from a signed ``token``. """ if self.max_age is None: data = self.signer.unsign(token) else: data = self.signer.unsign(token, max_age=self.max_age) return signing.b64_decode(data.encode())
[ "def", "unsign", "(", "self", ",", "token", ")", ":", "if", "self", ".", "max_age", "is", "None", ":", "data", "=", "self", ".", "signer", ".", "unsign", "(", "token", ")", "else", ":", "data", "=", "self", ".", "signer", ".", "unsign", "(", "tok...
29.8
13.8
def dragLeaveEvent(self, event): """Clears drop cursor line""" super(AbstractDragView, self).dragLeaveEvent(event) self.dragline = None self.viewport().update() event.accept()
[ "def", "dragLeaveEvent", "(", "self", ",", "event", ")", ":", "super", "(", "AbstractDragView", ",", "self", ")", ".", "dragLeaveEvent", "(", "event", ")", "self", ".", "dragline", "=", "None", "self", ".", "viewport", "(", ")", ".", "update", "(", ")"...
35
10.833333
def fill_form_field(self, field_name, field_value): """Fills given field with given value :param field_name: name of field to fill :param field_value: value with which to fill field """ self.browser.execute_script( "document.getElementsByName(\"" + str( field_name) + "\")[0].value = \"" + str(field_value) + "\"")
[ "def", "fill_form_field", "(", "self", ",", "field_name", ",", "field_value", ")", ":", "self", ".", "browser", ".", "execute_script", "(", "\"document.getElementsByName(\\\"\"", "+", "str", "(", "field_name", ")", "+", "\"\\\")[0].value = \\\"\"", "+", "str", "("...
41.666667
14.111111
def request_log_level(self, req, msg): """Query or set the current logging level. Parameters ---------- level : {'all', 'trace', 'debug', 'info', 'warn', 'error', 'fatal', \ 'off'}, optional Name of the logging level to set the device server to (the default is to leave the log level unchanged). Returns ------- success : {'ok', 'fail'} Whether the request succeeded. level : {'all', 'trace', 'debug', 'info', 'warn', 'error', 'fatal', \ 'off'} The log level after processing the request. Examples -------- :: ?log-level !log-level ok warn ?log-level info !log-level ok info """ if msg.arguments: try: self.log.set_log_level_by_name(msg.arguments[0]) except ValueError, e: raise FailReply(str(e)) return req.make_reply("ok", self.log.level_name())
[ "def", "request_log_level", "(", "self", ",", "req", ",", "msg", ")", ":", "if", "msg", ".", "arguments", ":", "try", ":", "self", ".", "log", ".", "set_log_level_by_name", "(", "msg", ".", "arguments", "[", "0", "]", ")", "except", "ValueError", ",", ...
29.057143
20.685714
def cdpop(): """ Return the last directory. Returns absolute path to new working directory. """ if len(_cdhist) >= 1: old = _cdhist.pop() # Pop from history. os.chdir(old) return old else: return pwd()
[ "def", "cdpop", "(", ")", ":", "if", "len", "(", "_cdhist", ")", ">=", "1", ":", "old", "=", "_cdhist", ".", "pop", "(", ")", "# Pop from history.", "os", ".", "chdir", "(", "old", ")", "return", "old", "else", ":", "return", "pwd", "(", ")" ]
20.666667
17
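A matching push operation for the same history stack (sketch only; _cdhist and pwd are module-level in the original):
import os

_cdhist = []

def cdpush(path):
    _cdhist.append(os.getcwd())  # remember the current directory for cdpop()
    os.chdir(path)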
def has(cmd): """Returns true if the given shell command is available. **Examples**: :: auxly.shell.has("ls") # True """ helps = ["--help", "-h", "--version"] if "nt" == os.name: helps.insert(0, "/?") fakecmd = "fakecmd" cmderr = strerr(fakecmd).replace(fakecmd, cmd) for h in helps: hcmd = "%s %s" % (cmd, h) if 0 == silent(hcmd): return True if len(listout(hcmd)) > 0: return True if strerr(hcmd) != cmderr: return True return False
[ "def", "has", "(", "cmd", ")", ":", "helps", "=", "[", "\"--help\"", ",", "\"-h\"", ",", "\"--version\"", "]", "if", "\"nt\"", "==", "os", ".", "name", ":", "helps", ".", "insert", "(", "0", ",", "\"/?\"", ")", "fakecmd", "=", "\"fakecmd\"", "cmderr"...
25.666667
14.714286
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ spaces = ' ' * indent pretty = spaces + 'class ' + self.name if self.superclasses: superclasses = ', '.join(self.superclasses) pretty += '(' + superclasses + ')' pretty += ':\n' if self.members: pretty += '\n\n'.join( c.pretty_str(indent + 2) for c in self.members ) else: pretty += spaces + ' [declaration]' return pretty
[ "def", "pretty_str", "(", "self", ",", "indent", "=", "0", ")", ":", "spaces", "=", "' '", "*", "indent", "pretty", "=", "spaces", "+", "'class '", "+", "self", ".", "name", "if", "self", ".", "superclasses", ":", "superclasses", "=", "', '", ".", "j...
33.2
13.85
def serialize_text_node(self, elt, sw, pyobj): '''Serialize without an element node. ''' self.set_prefix(elt, pyobj) return String.serialize_text_node(self, elt, sw, pyobj)
[ "def", "serialize_text_node", "(", "self", ",", "elt", ",", "sw", ",", "pyobj", ")", ":", "self", ".", "set_prefix", "(", "elt", ",", "pyobj", ")", "return", "String", ".", "serialize_text_node", "(", "self", ",", "elt", ",", "sw", ",", "pyobj", ")" ]
40
13.6
def extract_smiles(s): """Return a list of SMILES identifiers extracted from the string.""" # TODO: This still gets a lot of false positives. smiles = [] for t in s.split(): if len(t) > 2 and SMILES_RE.match(t) and not t.endswith('.') and bracket_level(t) == 0: smiles.append(t) return smiles
[ "def", "extract_smiles", "(", "s", ")", ":", "# TODO: This still gets a lot of false positives.", "smiles", "=", "[", "]", "for", "t", "in", "s", ".", "split", "(", ")", ":", "if", "len", "(", "t", ")", ">", "2", "and", "SMILES_RE", ".", "match", "(", ...
40.625
20.375
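bracket_level is not shown in the record; one plausible implementation takes net open minus close counts, so 0 means the token's brackets are balanced:
def bracket_level(text, opens='([{', closes=')]}'):
    return sum(text.count(c) for c in opens) - sum(text.count(c) for c in closes)

print(bracket_level('CC(=O)O'), bracket_level('CC(=O'))  # 0 1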
def parse_gmsh(filename, boundary_file): """ Parse a GMSH .msh file and return a dictionary containing the data necessary to create CRTomo grids """ mesh = {} fid = open(filename, 'r') line = fid.readline() while(line): if(line.startswith('$MeshFormat')): pass elif(line.startswith('$Nodes')): nodes = [] line = fid.readline() nr_nodes = np.fromstring(line, dtype=int, count=1, sep=r'\n') nr_nodes while(line): line = fid.readline() if(line.startswith('$EndNodes')): break node = np.fromstring(line, dtype=float, sep=' ') nodes.append(node) mesh['nodes'] = nodes elif(line.startswith('$Elements')): """ Create a dictionary with the element types as keys. E.g.: elements['15'] provides all elements of type 15 (Points) """ elements = {} line = fid.readline() nr_elements = np.fromstring(line, dtype=int, count=1, sep=r'\n') nr_elements while(line): line = fid.readline() if(line.startswith('$EndElements')): break element = np.fromstring(line, dtype=int, sep=' ') # el_nr = element[0] el_type = element[1] el_nr_tags = element[2] # el_tags = element[3:3 + el_nr_tags] el_nodes = element[3 + el_nr_tags:] # now decide where to put it key = str(el_type) if(key in elements.keys()): elements[key].append(el_nodes) else: elements[key] = [] elements[key].append(el_nodes) mesh['elements'] = elements line = fid.readline() fid.close() # if boundary_file is != None, then sort the lines (element type 1) # according to the element types boundaries = {} if(boundary_file is not None): # load the original boundary lines # it is possible that GMSH added additional nodes on these lines, and # that is why we need to find all mesh lines that lie on these original # lines. bids = np.loadtxt(boundary_file) for btype in ('12', '11'): # select all original boundaries with this type a = np.where(bids[:, 4] == int(btype))[0] boundaries[btype] = [] # for each of those lines, find all lines of the mesh that belong # here for orig_line in bids[a, :]: # print('Find all lines lying on the line: ') found_one_line = False # print(orig_line) # construct line equation # x1 == x2 ? # split into coordinates ox1 = orig_line[0] ox2 = orig_line[2] oy1 = orig_line[1] oy2 = orig_line[3] if(orig_line[0] == orig_line[2]): # special case: we only need to find all lines with x1 == # x2 == x1_orig and y_min >= y_orig_min and y_max <= # y_orig_max for line in elements['1']: if(btype == '11'): if(line[0] == 48 and line[1] == 150): pass # print('Find all lines lying on the line: ') # print('This is the line') # it doesn't matter any more to be able to assign x -> # y values. Thus we can sort the y values and just # check # if the new line lies in between the original one oy1, oy2 = np.sort([orig_line[1], orig_line[3]]) x1, x2 = np.sort( [ mesh['nodes'][line[0] - 1][1], mesh['nodes'][line[1] - 1][1] ] ) y1, y2 = np.sort( [ mesh['nodes'][line[0] - 1][2], mesh['nodes'][line[1] - 1][2] ] ) if np.isclose(x1, x2) and np.isclose(x2, ox1): if(y1 >= oy1 and y2 <= oy2): found_one_line = True boundaries[btype].append(line) else: # print('checking with full line equation') # no vertical line # we need the full check using the line equation slope = (orig_line[1] - orig_line[3]) / ( orig_line[0] - orig_line[2]) intersect = orig_line[1] - (slope * orig_line[0]) # print('Slope', slope, ' Intercept ', intersect) for line in elements['1']: x1 = mesh['nodes'][line[0] - 1][1] y1 = mesh['nodes'][line[0] - 1][2] x2 = mesh['nodes'][line[1] - 1][1] y2 = mesh['nodes'][line[1] - 1][2] # print(x1, x2, y1, y1) check = False # check if x coordinates of the test line fit in the # original line if(ox1 < ox2): if(x1 < x2): if((np.isclose(x1, ox1) or x1 > ox1) and (np.isclose(x2, ox2) or x2 < ox2)): check = True else: if((np.isclose(x2, ox1) or x2 >= ox1) and (np.isclose(x1, ox2) or x1 <= ox2)): check = True else: if(x1 < x2): if((np.isclose(x1, ox2) or x1 >= ox2) and (np.isclose(x2, ox1) or x2 <= ox1)): check = True else: if((np.isclose(x2, ox2) or x2 >= ox2) and (np.isclose(x1, ox1) or x1 <= ox1)): check = True # print('boundary check:', check) if(check): # the line lies within the x-range of the orig line ytest1 = slope * x1 + intersect ytest2 = slope * x2 + intersect if(np.around(ytest1 - y1, 5) == 0 and np.around(ytest2 - y2, 5) == 0): boundaries[btype].append(line) # found = True found_one_line = True # print('found it new', line) # add a weak check: we need to find at least one line in the # mesh corresponding to this boundary line: if not found_one_line: raise Exception('no mesh line found for this boundary') print('Total number of boundaries of this type:', len(boundaries[btype])) mesh['boundaries'] = boundaries return mesh
[ "def", "parse_gmsh", "(", "filename", ",", "boundary_file", ")", ":", "mesh", "=", "{", "}", "fid", "=", "open", "(", "filename", ",", "'r'", ")", "line", "=", "fid", ".", "readline", "(", ")", "while", "(", "line", ")", ":", "if", "(", "line", "...
42.505618
17.314607
def _configure_device(commands, **kwargs): ''' Helper function to send configuration commands to the device over a proxy minion or native minion using NX-API or SSH. ''' if salt.utils.platform.is_proxy(): return __proxy__['nxos.proxy_config'](commands, **kwargs) else: return _nxapi_config(commands, **kwargs)
[ "def", "_configure_device", "(", "commands", ",", "*", "*", "kwargs", ")", ":", "if", "salt", ".", "utils", ".", "platform", ".", "is_proxy", "(", ")", ":", "return", "__proxy__", "[", "'nxos.proxy_config'", "]", "(", "commands", ",", "*", "*", "kwargs",...
37.888889
19.888889