text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def add_local_lb(self, price_item_id, datacenter): """Creates a local load balancer in the specified data center. :param int price_item_id: The price item ID for the load balancer :param string datacenter: The datacenter to create the loadbalancer in :returns: A dictionary containing the product order """ product_order = { 'complexType': 'SoftLayer_Container_Product_Order_Network_' 'LoadBalancer', 'quantity': 1, 'packageId': 0, "location": self._get_location(datacenter), 'prices': [{'id': price_item_id}] } return self.client['Product_Order'].placeOrder(product_order)
[ "def", "add_local_lb", "(", "self", ",", "price_item_id", ",", "datacenter", ")", ":", "product_order", "=", "{", "'complexType'", ":", "'SoftLayer_Container_Product_Order_Network_'", "'LoadBalancer'", ",", "'quantity'", ":", "1", ",", "'packageId'", ":", "0", ",", ...
41.764706
19.705882
def install(name, link, path, priority): ''' Install symbolic links determining default commands CLI Example: .. code-block:: bash salt '*' alternatives.install editor /usr/bin/editor /usr/bin/emacs23 50 ''' cmd = [_get_cmd(), '--install', link, name, path, six.text_type(priority)] out = __salt__['cmd.run_all'](cmd, python_shell=False) if out['retcode'] > 0 and out['stderr'] != '': return out['stderr'] return out['stdout']
[ "def", "install", "(", "name", ",", "link", ",", "path", ",", "priority", ")", ":", "cmd", "=", "[", "_get_cmd", "(", ")", ",", "'--install'", ",", "link", ",", "name", ",", "path", ",", "six", ".", "text_type", "(", "priority", ")", "]", "out", ...
31.133333
25
def on_add_state_machine_after(self, observable, return_value, args): """ This method specifies what happens when a state machine is added to the state machine manager :param observable: the state machine manager :param return_value: the new state machine :param args: :return: """ self.logger.info("Execution status observer register new state machine sm_id: {}".format(args[1].state_machine_id)) self.register_states_of_state_machine(args[1])
[ "def", "on_add_state_machine_after", "(", "self", ",", "observable", ",", "return_value", ",", "args", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Execution status observer register new state machine sm_id: {}\"", ".", "format", "(", "args", "[", "1", "]",...
55.555556
21.333333
def dump_all_handler_stats(self): ''' Return handler capture statistics Return a dictionary of capture handler statistics of the form: .. code-block:: none [{ 'name': The handler's name, 'reads': The number of packet reads this handler has received 'data_read_length': The total length of the data received 'approx_data_rate': The approximate data rate for this handler }, ...] ''' stats = [] for h in self.capture_handlers: now = calendar.timegm(time.gmtime()) rot_time = calendar.timegm(h['log_rot_time']) time_delta = now - rot_time approx_data_rate = '{} bytes/second'.format(h['data_read'] / float(time_delta)) stats.append({ 'name': h['name'], 'reads': h['reads'], 'data_read_length': '{} bytes'.format(h['data_read']), 'approx_data_rate': approx_data_rate }) return stats
[ "def", "dump_all_handler_stats", "(", "self", ")", ":", "stats", "=", "[", "]", "for", "h", "in", "self", ".", "capture_handlers", ":", "now", "=", "calendar", ".", "timegm", "(", "time", ".", "gmtime", "(", ")", ")", "rot_time", "=", "calendar", ".", ...
31.363636
24.636364
def ls(ctx, name, list_instances): """List ELB instances""" session = create_session(ctx.obj['AWS_PROFILE_NAME']) client = session.client('elb') inst = {'LoadBalancerDescriptions': []} if name == '*': inst = client.describe_load_balancers() else: try: inst = client.describe_load_balancers(LoadBalancerNames=[name]) except ClientError as e: click.echo(e, err=True) for i in inst['LoadBalancerDescriptions']: click.echo(i['LoadBalancerName']) if list_instances: for ec2 in i['Instances']: health = client.describe_instance_health( LoadBalancerName=name, Instances=[ec2] ) click.echo('{0}\t{1}'.format(ec2['InstanceId'], health['InstanceStates'][0]['State']))
[ "def", "ls", "(", "ctx", ",", "name", ",", "list_instances", ")", ":", "session", "=", "create_session", "(", "ctx", ".", "obj", "[", "'AWS_PROFILE_NAME'", "]", ")", "client", "=", "session", ".", "client", "(", "'elb'", ")", "inst", "=", "{", "'LoadBa...
36.043478
16.478261
def _find_closest_trackpointaA(self,Or,Op,Oz,ar,ap,az,interp=True): """ NAME: _find_closest_trackpointaA PURPOSE: find the closest point on the stream track to a given point in frequency-angle coordinates INPUT: Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point interp= (True), if True, return the index of the interpolated track OUTPUT: index into the track of the closest track point HISTORY: 2013-12-22 - Written - Bovy (IAS) """ #Calculate angle offset along the stream parallel to the stream track, # finding first the angle among a few wraps where the point is # closest to the parallel track and then the closest trackpoint to that # point da= numpy.stack(\ numpy.meshgrid(_TWOPIWRAPS+ar-self._progenitor_angle[0], _TWOPIWRAPS+ap-self._progenitor_angle[1], _TWOPIWRAPS+az-self._progenitor_angle[2], indexing='xy')).T.reshape((len(_TWOPIWRAPS)**3,3)) dapar= self._sigMeanSign*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\ numpy.cross(da,self._dsigomeanProgDirection),axis=1))], self._dsigomeanProgDirection) if interp: dist= numpy.fabs(dapar-self._interpolatedThetasTrack) else: dist= numpy.fabs(dapar-self._thetasTrack) return numpy.argmin(dist)
[ "def", "_find_closest_trackpointaA", "(", "self", ",", "Or", ",", "Op", ",", "Oz", ",", "ar", ",", "ap", ",", "az", ",", "interp", "=", "True", ")", ":", "#Calculate angle offset along the stream parallel to the stream track,", "# finding first the angle among a few wra...
47.84375
23.84375
def after(self, idx): """Return datetime of oldest existing data record whose datetime is >= idx. Might not even be in the same year! If no such record exists, return None.""" if not isinstance(idx, datetime): raise TypeError("'%s' is not %s" % (idx, datetime)) day = max(idx.date(), self._lo_limit) while day < self._hi_limit: if day < self._rd_cache.lo or day >= self._rd_cache.hi: self._load(self._rd_cache, day) self._rd_cache.set_ptr(idx) if self._rd_cache.ptr < len(self._rd_cache.data): return self._rd_cache.data[self._rd_cache.ptr]['idx'] day = self._rd_cache.hi return None
[ "def", "after", "(", "self", ",", "idx", ")", ":", "if", "not", "isinstance", "(", "idx", ",", "datetime", ")", ":", "raise", "TypeError", "(", "\"'%s' is not %s\"", "%", "(", "idx", ",", "datetime", ")", ")", "day", "=", "max", "(", "idx", ".", "d...
42.588235
14.470588
def gpg_decrypt(cfg, gpg_config=None): """Decrypt GPG objects in configuration. Args: cfg (dict): configuration dictionary gpg_config (dict): gpg configuration dict of arguments for gpg including: homedir, binary, and keyring (require all if any) example: gpg_config = {'homedir': '~/.gnupg/', 'binary': 'gpg', 'keyring': 'pubring.kbx'} Returns: dict: decrypted configuration dictionary The aim is to find in the dictionary items which have been encrypted with gpg, then decrypt them if possible. We will either detect the encryption based on the PGP block text or a user can create a key "_gpg" in which to store the data. Either case will work. In the case of the "_gpg" key all data at this level will be replaced with the decrypted contents. For example: {'component': {'key': 'PGP Block ...'}} will transform to: {'component': {'key': 'decrypted value'}} However: {'component': {'key': {'_gpg': 'PGP Block ...', 'nothing': 'should go here'}}} will transform to: {'component': {'key': 'decrypted value'}} """ def decrypt(obj): """Decrypt the object. It is an inner function because we must first verify that gpg is ready. If we did them in the same function we would end up calling the gpg checks several times, potentially, since we are calling this recursively. 
""" if isinstance(obj, list): res_v = [] for item in obj: res_v.append(decrypt(item)) return res_v elif isinstance(obj, dict): if '_gpg' in obj: try: decrypted = gpg.decrypt(obj['_gpg']) if decrypted.ok: obj = n(decrypted.data.decode('utf-8').encode()) else: log.error("gpg error unpacking secrets %s", decrypted.stderr) except Exception as err: log.error("error unpacking secrets %s", err) else: for k, v in obj.items(): obj[k] = decrypt(v) else: try: if 'BEGIN PGP' in obj: try: decrypted = gpg.decrypt(obj) if decrypted.ok: obj = n(decrypted.data.decode('utf-8').encode()) else: log.error("gpg error unpacking secrets %s", decrypted.stderr) except Exception as err: log.error("error unpacking secrets %s", err) except TypeError: log.debug('Pass on decryption. Only decrypt strings') return obj if GPG_IMPORTED: if not gpg_config: gpg_config = {} defaults = {'homedir': '~/.gnupg/'} env_fields = {'homedir': 'FIGGYPY_GPG_HOMEDIR', 'binary': 'FIGGYPY_GPG_BINARY', 'keyring': 'FIGGYPY_GPG_KEYRING'} for k, v in env_fields.items(): gpg_config[k] = env_or_default(v, defaults[k] if k in defaults else None) try: gpg = gnupg.GPG(**gpg_config) except (OSError, RuntimeError): log.exception('Failed to configure gpg. Will be unable to decrypt secrets.') return decrypt(cfg) return cfg
[ "def", "gpg_decrypt", "(", "cfg", ",", "gpg_config", "=", "None", ")", ":", "def", "decrypt", "(", "obj", ")", ":", "\"\"\"Decrypt the object.\n\n It is an inner function because we must first verify that gpg\n is ready. If we did them in the same function we would end ...
36.968085
20.053191
def cached(fun): """ memoizing decorator for linkage functions. Parameters have been hardcoded (no ``*args``, ``**kwargs`` magic), because, the way this is coded (interchangingly using sets and frozensets) is true for this specific case. For other cases that is not necessarily guaranteed. """ _cache = {} @wraps(fun) def newfun(a, b, distance_function): frozen_a = frozenset(a) frozen_b = frozenset(b) if (frozen_a, frozen_b) not in _cache: result = fun(a, b, distance_function) _cache[(frozen_a, frozen_b)] = result return _cache[(frozen_a, frozen_b)] return newfun
[ "def", "cached", "(", "fun", ")", ":", "_cache", "=", "{", "}", "@", "wraps", "(", "fun", ")", "def", "newfun", "(", "a", ",", "b", ",", "distance_function", ")", ":", "frozen_a", "=", "frozenset", "(", "a", ")", "frozen_b", "=", "frozenset", "(", ...
32.35
19.15
def check_palette(palette): """ Check a palette argument (to the :class:`Writer` class) for validity. Returns the palette as a list if okay; raises an exception otherwise. """ # None is the default and is allowed. if palette is None: return None p = list(palette) if not (0 < len(p) <= 256): raise ProtocolError( "a palette must have between 1 and 256 entries," " see https://www.w3.org/TR/PNG/#11PLTE") seen_triple = False for i, t in enumerate(p): if len(t) not in (3, 4): raise ProtocolError( "palette entry %d: entries must be 3- or 4-tuples." % i) if len(t) == 3: seen_triple = True if seen_triple and len(t) == 4: raise ProtocolError( "palette entry %d: all 4-tuples must precede all 3-tuples" % i) for x in t: if int(x) != x or not(0 <= x <= 255): raise ProtocolError( "palette entry %d: " "values must be integer: 0 <= x <= 255" % i) return p
[ "def", "check_palette", "(", "palette", ")", ":", "# None is the default and is allowed.", "if", "palette", "is", "None", ":", "return", "None", "p", "=", "list", "(", "palette", ")", "if", "not", "(", "0", "<", "len", "(", "p", ")", "<=", "256", ")", ...
33.65625
15.09375
def check_game_end(self): '''Checks for the game's win/lose conditions and 'alters' the game state to reflect the condition found. If the game has not been won or lost then it just returns the game state unaltered.''' if self.player in self.crashes.union(self.robots): return self.end_game('You Died!') elif not self.robots: return self.end_game('You Win!') else: return self
[ "def", "check_game_end", "(", "self", ")", ":", "if", "self", ".", "player", "in", "self", ".", "crashes", ".", "union", "(", "self", ".", "robots", ")", ":", "return", "self", ".", "end_game", "(", "'You Died!'", ")", "elif", "not", "self", ".", "ro...
38.083333
19.583333
def print_stmt(self, print_loc, stmt): """ (2.6-2.7) print_stmt: 'print' ( [ test (',' test)* [','] ] | '>>' test [ (',' test)+ [','] ] ) """ stmt.keyword_loc = print_loc if stmt.loc is None: stmt.loc = print_loc else: stmt.loc = print_loc.join(stmt.loc) return stmt
[ "def", "print_stmt", "(", "self", ",", "print_loc", ",", "stmt", ")", ":", "stmt", ".", "keyword_loc", "=", "print_loc", "if", "stmt", ".", "loc", "is", "None", ":", "stmt", ".", "loc", "=", "print_loc", "else", ":", "stmt", ".", "loc", "=", "print_l...
31.083333
12.083333
def _isinstance(expr, classname): """Check whether `expr` is an instance of the class with name `classname` This is like the builtin `isinstance`, but it take the `classname` a string, instead of the class directly. Useful for when we don't want to import the class for which we want to check (also, remember that printer choose rendering method based on the class name, so this is totally ok) """ for cls in type(expr).__mro__: if cls.__name__ == classname: return True return False
[ "def", "_isinstance", "(", "expr", ",", "classname", ")", ":", "for", "cls", "in", "type", "(", "expr", ")", ".", "__mro__", ":", "if", "cls", ".", "__name__", "==", "classname", ":", "return", "True", "return", "False" ]
41.357143
19.071429
def _merge_default_values(self): """Merge default values with resource data.""" values = self._get_default_values() for key, value in values.items(): if not self.data.get(key): self.data[key] = value
[ "def", "_merge_default_values", "(", "self", ")", ":", "values", "=", "self", ".", "_get_default_values", "(", ")", "for", "key", ",", "value", "in", "values", ".", "items", "(", ")", ":", "if", "not", "self", ".", "data", ".", "get", "(", "key", ")"...
41
2.666667
def humanize_error(data, validation_error, max_sub_error_length=MAX_VALIDATION_ERROR_ITEM_LENGTH): """ Provide a more helpful + complete validation error message than that provided automatically Invalid and MultipleInvalid do not include the offending value in error messages, and MultipleInvalid.__str__ only provides the first error. """ if isinstance(validation_error, MultipleInvalid): return '\n'.join(sorted( humanize_error(data, sub_error, max_sub_error_length) for sub_error in validation_error.errors )) else: offending_item_summary = repr(_nested_getitem(data, validation_error.path)) if len(offending_item_summary) > max_sub_error_length: offending_item_summary = offending_item_summary[:max_sub_error_length - 3] + '...' return '%s. Got %s' % (validation_error, offending_item_summary)
[ "def", "humanize_error", "(", "data", ",", "validation_error", ",", "max_sub_error_length", "=", "MAX_VALIDATION_ERROR_ITEM_LENGTH", ")", ":", "if", "isinstance", "(", "validation_error", ",", "MultipleInvalid", ")", ":", "return", "'\\n'", ".", "join", "(", "sorted...
58.866667
26.333333
def taxon_table(self): """ Returns the .tests list of taxa as a pandas dataframe. By auto-generating this table from tests it means that the table itself cannot be modified unless it is returned and saved. """ if self.tests: keys = sorted(self.tests[0].keys()) if isinstance(self.tests, list): ld = [[(key, i[key]) for key in keys] for i in self.tests] dd = [dict(i) for i in ld] df = pd.DataFrame(dd) return df else: return pd.DataFrame(pd.Series(self.tests)).T else: return None
[ "def", "taxon_table", "(", "self", ")", ":", "if", "self", ".", "tests", ":", "keys", "=", "sorted", "(", "self", ".", "tests", "[", "0", "]", ".", "keys", "(", ")", ")", "if", "isinstance", "(", "self", ".", "tests", ",", "list", ")", ":", "ld...
36.611111
15.611111
def from_ashrae_revised_clear_sky(cls, location, monthly_tau_beam, monthly_tau_diffuse, timestep=1, is_leap_year=False): """Create a wea object representing an ASHRAE Revised Clear Sky ("Tau Model") ASHRAE Revised Clear Skies are intended to determine peak solar load and sizing parmeters for HVAC systems. The revised clear sky is currently the default recommended sky model used to autosize HVAC systems in EnergyPlus. For more information on the ASHRAE Revised Clear Sky model, see the EnergyPlus Engineering Reference: https://bigladdersoftware.com/epx/docs/8-9/engineering-reference/climate-calculations.html Args: location: Ladybug location object. monthly_tau_beam: A list of 12 float values indicating the beam optical depth of the sky at each month of the year. monthly_tau_diffuse: A list of 12 float values indicating the diffuse optical depth of the sky at each month of the year. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. is_leap_year: A boolean to indicate if values are representing a leap year. Default is False. 
""" # extract metadata metadata = {'source': location.source, 'country': location.country, 'city': location.city} # create sunpath and get altitude at every timestep of the year sp = Sunpath.from_location(location) sp.is_leap_year = is_leap_year altitudes = [[] for i in range(12)] dates = cls._get_datetimes(timestep, is_leap_year) for t_date in dates: sun = sp.calculate_sun_from_date_time(t_date) altitudes[sun.datetime.month - 1].append(sun.altitude) # run all of the months through the ashrae_revised_clear_sky model direct_norm, diffuse_horiz = [], [] for i_mon, alt_list in enumerate(altitudes): dir_norm_rad, dif_horiz_rad = ashrae_revised_clear_sky( alt_list, monthly_tau_beam[i_mon], monthly_tau_diffuse[i_mon]) direct_norm.extend(dir_norm_rad) diffuse_horiz.extend(dif_horiz_rad) direct_norm_rad, diffuse_horiz_rad = \ cls._get_data_collections(direct_norm, diffuse_horiz, metadata, timestep, is_leap_year) return cls(location, direct_norm_rad, diffuse_horiz_rad, timestep, is_leap_year)
[ "def", "from_ashrae_revised_clear_sky", "(", "cls", ",", "location", ",", "monthly_tau_beam", ",", "monthly_tau_diffuse", ",", "timestep", "=", "1", ",", "is_leap_year", "=", "False", ")", ":", "# extract metadata", "metadata", "=", "{", "'source'", ":", "location...
52.55102
24.55102
def fromgtf(args): """ %prog fromgtf gtffile Convert gtf to gff file. In gtf, the "transcript_id" will convert to "ID=", the "transcript_id" in exon/CDS feature will be converted to "Parent=". """ p = OptionParser(fromgtf.__doc__) p.add_option("--transcript_id", default="transcript_id", help="Field name for transcript [default: %default]") p.add_option("--gene_id", default="gene_id", help="Field name for gene [default: %default]") p.add_option("--augustus", default=False, action="store_true", help="Input is AUGUSTUS gtf [default: %default]") p.set_home("augustus") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gtffile, = args outfile = opts.outfile if opts.augustus: ahome = opts.augustus_home s = op.join(ahome, "scripts/gtf2gff.pl") cmd = "{0} --gff3 < {1} --out={2}".format(s, gtffile, outfile) sh(cmd) return gff = Gff(gtffile) fw = must_open(outfile, "w") transcript_id = opts.transcript_id gene_id = opts.gene_id nfeats = 0 for g in gff: if g.type in ("transcript", "mRNA"): g.type = "mRNA" g.update_tag(transcript_id, "ID") g.update_tag("mRNA", "ID") g.update_tag(gene_id, "Parent") g.update_tag("Gene", "Parent") elif g.type in ("exon", "CDS") or "UTR" in g.type: g.update_tag("transcript_id", "Parent") g.update_tag(g.type, "Parent") elif g.type == "gene": g.update_tag(gene_id, "ID") g.update_tag("Gene", "ID") else: assert 0, "Don't know how to deal with {0}".format(g.type) g.update_attributes() print(g, file=fw) nfeats += 1 logging.debug("A total of {0} features written.".format(nfeats))
[ "def", "fromgtf", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fromgtf", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--transcript_id\"", ",", "default", "=", "\"transcript_id\"", ",", "help", "=", "\"Field name for transcript [default: %default]...
33.517857
17.767857
def imagecapture(self, window_name=None, x=0, y=0, width=None, height=None): """ Captures screenshot of the whole desktop or given window @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param x: x co-ordinate value @type x: int @param y: y co-ordinate value @type y: int @param width: width co-ordinate value @type width: int @param height: height co-ordinate value @type height: int @return: screenshot with base64 encoded for the client @rtype: string """ if x or y or (width and width != -1) or (height and height != -1): raise LdtpServerException("Not implemented") if window_name: handle, name, app = self._get_window_handle(window_name) try: self._grabfocus(handle) except: pass rect = self._getobjectsize(handle) screenshot = CGWindowListCreateImage(NSMakeRect(rect[0], rect[1], rect[2], rect[3]), 1, 0, 0) else: screenshot = CGWindowListCreateImage(CGRectInfinite, 1, 0, 0) image = CIImage.imageWithCGImage_(screenshot) bitmapRep = NSBitmapImageRep.alloc().initWithCIImage_(image) blob = bitmapRep.representationUsingType_properties_(NSPNGFileType, None) tmpFile = tempfile.mktemp('.png', 'ldtpd_') blob.writeToFile_atomically_(tmpFile, False) rv = b64encode(open(tmpFile).read()) os.remove(tmpFile) return rv
[ "def", "imagecapture", "(", "self", ",", "window_name", "=", "None", ",", "x", "=", "0", ",", "y", "=", "0", ",", "width", "=", "None", ",", "height", "=", "None", ")", ":", "if", "x", "or", "y", "or", "(", "width", "and", "width", "!=", "-", ...
41.02439
18.097561
def _get_int64(data, position, dummy0, dummy1, dummy2): """Decode a BSON int64 to bson.int64.Int64.""" end = position + 8 return Int64(_UNPACK_LONG(data[position:end])[0]), end
[ "def", "_get_int64", "(", "data", ",", "position", ",", "dummy0", ",", "dummy1", ",", "dummy2", ")", ":", "end", "=", "position", "+", "8", "return", "Int64", "(", "_UNPACK_LONG", "(", "data", "[", "position", ":", "end", "]", ")", "[", "0", "]", "...
46.25
12.75
def iter_transform(filename, key): """Generate encrypted file with given key. This generator function reads the file in chunks and encrypts them using AES-CTR, with the specified key. :param filename: The name of the file to encrypt. :type filename: str :param key: The key used to encrypt the file. :type key: str :returns: A generator that produces encrypted file chunks. :rtype: generator """ # We are not specifying the IV here. aes = AES.new(key, AES.MODE_CTR, counter=Counter.new(128)) with open(filename, 'rb+') as f: for chunk in iter(lambda: f.read(CHUNK_SIZE), b''): yield aes.encrypt(chunk), f
[ "def", "iter_transform", "(", "filename", ",", "key", ")", ":", "# We are not specifying the IV here.", "aes", "=", "AES", ".", "new", "(", "key", ",", "AES", ".", "MODE_CTR", ",", "counter", "=", "Counter", ".", "new", "(", "128", ")", ")", "with", "ope...
33.2
14.75
def Run(self, args): """Delete all the GRR temp files in path. If path is a directory, look in the top level for filenames beginning with Client.tempfile_prefix, and delete them. If path is a regular file and starts with Client.tempfile_prefix delete it. Args: args: pathspec pointing to directory containing temp files to be deleted, or a single file to be deleted. Returns: deleted: array of filename strings that were deleted Raises: ErrorBadPath: if path doesn't exist or is not a regular file or directory """ allowed_temp_dirs = [ GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"] ] if args.path: # Normalize the path, so DeleteGRRTempFile can correctly check if # it is within Client.tempdir. path = utils.NormalizePath(args.path) if platform.system() == "Windows": # TODO: On non-Windows systems `CanonicalPathToLocalPath` # is equivalent to `SmartStr`, so it does nothing except for breaking # the types. However, a lot of code actually depends on this behaviour # so we cannot easily change it. As a workaround for now we simply do # not call it on Linux and macOS but ideally we should get rid of this # `SmartStr` call and not branch here. path = client_utils.CanonicalPathToLocalPath(path) paths = [path] else: paths = allowed_temp_dirs deleted = [] errors = [] for path in paths: if os.path.isdir(path): for filename in os.listdir(path): abs_filename = os.path.join(path, filename) try: DeleteGRRTempFile(abs_filename) deleted.append(abs_filename) except Exception as e: # pylint: disable=broad-except # The error we are most likely to get is ErrorNotTempFile but # especially on Windows there might be locking issues that raise # various WindowsErrors so we just catch them all and continue # deleting all other temp files in this directory. 
errors.append(e) elif os.path.isfile(path): DeleteGRRTempFile(path) deleted = [path] elif path not in allowed_temp_dirs: if not os.path.exists(path): raise ErrorBadPath("File %s does not exist" % path) else: raise ErrorBadPath("Not a regular file or directory: %s" % path) reply = "" if deleted: reply = "Deleted: %s." % deleted else: reply = "Nothing deleted." if errors: reply += "\n%s" % errors self.SendReply(rdf_client.LogMessage(data=reply))
[ "def", "Run", "(", "self", ",", "args", ")", ":", "allowed_temp_dirs", "=", "[", "GetTempDirForRoot", "(", "root", ")", "for", "root", "in", "config", ".", "CONFIG", "[", "\"Client.tempdir_roots\"", "]", "]", "if", "args", ".", "path", ":", "# Normalize th...
34.6
22.76
def get_hex(self): """ Returns a HEX String, separated by spaces every byte """ s = binascii.hexlify(self.get_raw()).decode("ascii") return " ".join(s[i:i + 2] for i in range(0, len(s), 2))
[ "def", "get_hex", "(", "self", ")", ":", "s", "=", "binascii", ".", "hexlify", "(", "self", ".", "get_raw", "(", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "return", "\" \"", ".", "join", "(", "s", "[", "i", ":", "i", "+", "2", "]", "for",...
32
18
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1., pressure=101325., dni_extra=1364.): """ Calculate the clear sky GHI, DNI, and DHI according to the simplified Solis model [1]_. Reference [1]_ describes the accuracy of the model as being 15, 20, and 18 W/m^2 for the beam, global, and diffuse components. Reference [2]_ provides comparisons with other clear sky models. Parameters ---------- apparent_elevation : numeric The apparent elevation of the sun above the horizon (deg). aod700 : numeric, default 0.1 The aerosol optical depth at 700 nm (unitless). Algorithm derived for values between 0 and 0.45. precipitable_water : numeric, default 1.0 The precipitable water of the atmosphere (cm). Algorithm derived for values between 0.2 and 10 cm. Values less than 0.2 will be assumed to be equal to 0.2. pressure : numeric, default 101325.0 The atmospheric pressure (Pascals). Algorithm derived for altitudes between sea level and 7000 m, or 101325 and 41000 Pascals. dni_extra : numeric, default 1364.0 Extraterrestrial irradiance. The units of ``dni_extra`` determine the units of the output. Returns ------- clearsky : DataFrame (if Series input) or OrderedDict of arrays DataFrame/OrderedDict contains the columns/keys ``'dhi', 'dni', 'ghi'``. References ---------- .. [1] P. Ineichen, "A broadband simplified version of the Solis clear sky model," Solar Energy, 82, 758-762 (2008). .. [2] P. Ineichen, "Validation of models that estimate the clear sky global and beam solar irradiance," Solar Energy, 132, 332-344 (2016). 
""" p = pressure w = precipitable_water # algorithm fails for pw < 0.2 w = np.maximum(w, 0.2) # this algorithm is reasonably fast already, but it could be made # faster by precalculating the powers of aod700, the log(p/p0), and # the log(w) instead of repeating the calculations as needed in each # function i0p = _calc_i0p(dni_extra, w, aod700, p) taub = _calc_taub(w, aod700, p) b = _calc_b(w, aod700) taug = _calc_taug(w, aod700, p) g = _calc_g(w, aod700) taud = _calc_taud(w, aod700, p) d = _calc_d(aod700, p) # this prevents the creation of nans at night instead of 0s # it's also friendly to scalar and series inputs sin_elev = np.maximum(1.e-30, np.sin(np.radians(apparent_elevation))) dni = i0p * np.exp(-taub/sin_elev**b) ghi = i0p * np.exp(-taug/sin_elev**g) * sin_elev dhi = i0p * np.exp(-taud/sin_elev**d) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(dni, pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads
[ "def", "simplified_solis", "(", "apparent_elevation", ",", "aod700", "=", "0.1", ",", "precipitable_water", "=", "1.", ",", "pressure", "=", "101325.", ",", "dni_extra", "=", "1364.", ")", ":", "p", "=", "pressure", "w", "=", "precipitable_water", "# algorithm...
31.674157
22.325843
def arg_list(self, ending_char=TokenTypes.RPAREN): """ arglist : expression, arglist arglist : expression arglist : """ args = [] while not self.cur_token.type == ending_char: args.append(self.expression()) if self.cur_token.type == TokenTypes.COMMA: self.eat(TokenTypes.COMMA) return args
[ "def", "arg_list", "(", "self", ",", "ending_char", "=", "TokenTypes", ".", "RPAREN", ")", ":", "args", "=", "[", "]", "while", "not", "self", ".", "cur_token", ".", "type", "==", "ending_char", ":", "args", ".", "append", "(", "self", ".", "expression...
30.076923
11.923077
def lazy_gettext(self, singular, plural=None, n=1, locale=None) -> LazyProxy: """ Lazy get text :param singular: :param plural: :param n: :param locale: :return: """ return LazyProxy(self.gettext, singular, plural, n, locale)
[ "def", "lazy_gettext", "(", "self", ",", "singular", ",", "plural", "=", "None", ",", "n", "=", "1", ",", "locale", "=", "None", ")", "->", "LazyProxy", ":", "return", "LazyProxy", "(", "self", ".", "gettext", ",", "singular", ",", "plural", ",", "n"...
26.181818
20.181818
def unlock(thing_name, key, session=None): """Unlock a thing """ return _request('get', '/unlock/{0}'.format(thing_name), params={'key': key}, session=session)
[ "def", "unlock", "(", "thing_name", ",", "key", ",", "session", "=", "None", ")", ":", "return", "_request", "(", "'get'", ",", "'/unlock/{0}'", ".", "format", "(", "thing_name", ")", ",", "params", "=", "{", "'key'", ":", "key", "}", ",", "session", ...
42
15
def exec_(controller, cmd, *args): """Executes a subprocess in the foreground, blocking until returned.""" controller.logger.info("Executing: {0} {1}", cmd, " ".join(args)) try: subprocess.check_call([cmd] + list(args)) except (OSError, subprocess.CalledProcessError) as err: controller.logger.error("Failed to execute process: {0}", err)
[ "def", "exec_", "(", "controller", ",", "cmd", ",", "*", "args", ")", ":", "controller", ".", "logger", ".", "info", "(", "\"Executing: {0} {1}\"", ",", "cmd", ",", "\" \"", ".", "join", "(", "args", ")", ")", "try", ":", "subprocess", ".", "check_call...
45.5
20.625
def extern_call(self, context_handle, func, args_ptr, args_len): """Given a callable, call it.""" c = self._ffi.from_handle(context_handle) runnable = c.from_value(func[0]) args = tuple(c.from_value(arg[0]) for arg in self._ffi.unpack(args_ptr, args_len)) return self.call(c, runnable, args)
[ "def", "extern_call", "(", "self", ",", "context_handle", ",", "func", ",", "args_ptr", ",", "args_len", ")", ":", "c", "=", "self", ".", "_ffi", ".", "from_handle", "(", "context_handle", ")", "runnable", "=", "c", ".", "from_value", "(", "func", "[", ...
51
13.333333
def iglob(pathname):
    """
    Return an iterator over paths matching *pathname*, walking the
    directory tree recursively (note: unlike :func:`glob.glob`, which
    matches at a single level) and without storing all results
    simultaneously.

    Parameters
    ----------
    pathname : string
        A glob pattern; its directory part is the walk root and its
        basename part is matched against every file found beneath it.

    Returns
    -------
    iterator
        An iterator instance which will yield full path names
    """
    # A pattern with no directory part ('*.txt') gives dirname == '',
    # and os.walk('') yields nothing at all — fall back to the cwd.
    dirname = os.path.dirname(pathname) or '.'
    basename_pattern = os.path.basename(pathname)
    for root, dirs, files in os.walk(dirname):
        for basename in files:
            if fnmatch.fnmatch(basename, basename_pattern):
                yield os.path.join(root, basename)
[ "def", "iglob", "(", "pathname", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "pathname", ")", "basename_pattern", "=", "os", ".", "path", ".", "basename", "(", "pathname", ")", "for", "root", ",", "dirs", ",", "files", "in", "os...
28.636364
20
def add_existence(self, rev):
    """Add an existence constraint for the field.

    This is necessary because the normal meaning of 'x > 0' is:
    x > 0 and is present. Without the existence constraint, MongoDB will
    treat 'x > 0' as: 'x' > 0 *or* is absent.

    If the single existing constraint is already about existence (or is a
    strict equality), nothing is done.

    :rtype: None
    """
    if len(self.constraints) == 1:
        only_op = self.constraints[0].op
        # both 'exists' and strict equality don't require the extra clause
        if only_op.is_exists() or only_op.is_equality():
            return
    present = not rev  # value is False if reversed, otherwise True
    self._existence_constraints.append(
        Constraint(self._field, ConstraintOperator.EXISTS, present))
[ "def", "add_existence", "(", "self", ",", "rev", ")", ":", "if", "len", "(", "self", ".", "constraints", ")", "==", "1", "and", "(", "# both 'exists' and strict equality don't require the extra clause", "self", ".", "constraints", "[", "0", "]", ".", "op", "."...
48.235294
25
def calc_qpout_v1(self):
    """Sum the MA and AR outputs into QPOut for every response function.

    Required derived parameter: |Nmb|
    Required flux sequences: |QMA| |QAR|
    Calculated flux sequence: |QPOut|

    For each of the ``nmb`` response functions, the total output is the
    sum of its moving-average part and its autoregressive part.
    """
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for jdx in range(derived.nmb):
        fluxes.qpout[jdx] = fluxes.qma[jdx] + fluxes.qar[jdx]
[ "def", "calc_qpout_v1", "(", "self", ")", ":", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "for", "idx", "in", "range", "(", "der", ".", "nmb", ")", ...
26.368421
18.842105
def columnInfo(self):
    """ display metadata about the table, size, number of rows, columns and their data type """
    # PROC CONTENTS restricted (via ODS) to the Variables table yields
    # per-column name/type/length metadata.
    code = "proc contents data=" + self.libref + '.' + self.table + ' ' + self._dsopts() + ";ods select Variables;run;"

    if self.sas.nosub:
        # 'nosub' mode: show the generated SAS code instead of running it.
        print(code)
        return

    if self.results.upper() == 'PANDAS':
        # Route the Variables table through ODS into a work dataset and
        # pull it back as a pandas DataFrame.
        code = "proc contents data=%s.%s %s ;ods output Variables=work._variables ;run;" % (self.libref, self.table, self._dsopts())
        # NOTE(review): local name 'pd' shadows the conventional pandas
        # alias — confirm pandas isn't referenced as 'pd' below this point.
        pd = self._returnPD(code, '_variables')
        # SAS pads the Type column with trailing blanks; strip them.
        pd['Type'] = pd['Type'].str.rstrip()
        return pd
    else:
        ll = self._is_valid()
        if self.HTML:
            if not ll:
                ll = self.sas._io.submit(code)
            if not self.sas.batch:
                # Interactive: render the HTML listing inline.
                self.sas.DISPLAY(self.sas.HTML(ll['LST']))
            else:
                # Batch mode: hand the raw log/listing dict back to the caller.
                return ll
        else:
            if not ll:
                ll = self.sas._io.submit(code, "text")
            if not self.sas.batch:
                print(ll['LST'])
            else:
                return ll
[ "def", "columnInfo", "(", "self", ")", ":", "code", "=", "\"proc contents data=\"", "+", "self", ".", "libref", "+", "'.'", "+", "self", ".", "table", "+", "' '", "+", "self", ".", "_dsopts", "(", ")", "+", "\";ods select Variables;run;\"", "if", "self", ...
35.8125
21.3125
def get_connection_by_id(self, id):
    '''Search for a connection on this port by its ID.'''
    with self._mutex:
        matches = [conn for conn in self.connections if conn.id == id]
    return matches[0] if matches else None
[ "def", "get_connection_by_id", "(", "self", ",", "id", ")", ":", "with", "self", ".", "_mutex", ":", "for", "conn", "in", "self", ".", "connections", ":", "if", "conn", ".", "id", "==", "id", ":", "return", "conn", "return", "None" ]
35.571429
10.714286
def cache_response(self, request, response, body=None): """ Algorithm for caching requests. This assumes a requests Response object. """ # From httplib2: Don't cache 206's since we aren't going to # handle byte range requests if response.status not in [200, 203, 300, 301]: return response_headers = CaseInsensitiveDict(response.headers) cc_req = self.parse_cache_control(request.headers) cc = self.parse_cache_control(response_headers) cache_url = self.cache_url(request.url) # Delete it from the cache if we happen to have it stored there no_store = cc.get('no-store') or cc_req.get('no-store') if no_store and self.cache.get(cache_url): self.cache.delete(cache_url) # If we've been given an etag, then keep the response if self.cache_etags and 'etag' in response_headers: self.cache.set( cache_url, self.serializer.dumps(request, response, body=body), ) # Add to the cache any 301s. We do this before looking that # the Date headers. elif response.status == 301: self.cache.set( cache_url, self.serializer.dumps(request, response) ) # Add to the cache if the response headers demand it. If there # is no date header then we can't do anything about expiring # the cache. elif 'date' in response_headers: # cache when there is a max-age > 0 if cc and cc.get('max-age'): if int(cc['max-age']) > 0: self.cache.set( cache_url, self.serializer.dumps(request, response, body=body), ) # If the request can expire, it means we should cache it # in the meantime. elif 'expires' in response_headers: if response_headers['expires']: self.cache.set( cache_url, self.serializer.dumps(request, response, body=body), )
[ "def", "cache_response", "(", "self", ",", "request", ",", "response", ",", "body", "=", "None", ")", ":", "# From httplib2: Don't cache 206's since we aren't going to", "# handle byte range requests", "if", "response", ".", "status", "not", "in", "[", "2...
37.241379
18.862069
def enableBranch(self, enabled):
    """Set ``enabled`` on this node and, recursively, on every child node."""
    self.enabled = enabled
    for node in self.childItems:
        node.enableBranch(enabled)
[ "def", "enableBranch", "(", "self", ",", "enabled", ")", ":", "self", ".", "enabled", "=", "enabled", "for", "child", "in", "self", ".", "childItems", ":", "child", ".", "enableBranch", "(", "enabled", ")" ]
39
3.666667
def mass_loss_loon05(L, Teff):
    '''
    Mass-loss rate relation of van Loon etal (2005).

    Parameters
    ----------
    L : float
        L in L_sun.
    Teff : float
        Teff in K.

    Returns
    -------
    Mdot : float
        log10 of the mass-loss rate in Msun/yr (the relation is
        expressed in log space: -5.65 + log10(L/1e4) - 6.3*log10(Teff/3500)).

    Notes
    -----
    ref: van Loon etal 2005, A&A 438, 273
    '''
    # The original used old_div from the Python-2 'past' compat package;
    # with float divisors plain '/' is already true division, so the shim
    # is unnecessary and the external dependency can be dropped.
    Mdot = -5.65 + np.log10(L / 10.**4) - 6.3 * np.log10(Teff / 3500.)
    return Mdot
[ "def", "mass_loss_loon05", "(", "L", ",", "Teff", ")", ":", "Mdot", "=", "-", "5.65", "+", "np", ".", "log10", "(", "old_div", "(", "L", ",", "10.", "**", "4", ")", ")", "-", "6.3", "*", "np", ".", "log10", "(", "old_div", "(", "Teff", ",", "...
16.916667
26.583333
def check_for_update(self, force=True, download=False):
    """ Returns a :class:`~plexapi.base.Release` object containing release info.

        Parameters:
            force (bool): Force server to check for new releases
            download (bool): Download if a update is available.
    """
    download_flag = 1 if download else 0
    if force:
        self.query('/updater/check?download=%s' % download_flag,
                   method=self._session.put)
    releases = self.fetchItems('/updater/status')
    return releases[0] if len(releases) else None
[ "def", "check_for_update", "(", "self", ",", "force", "=", "True", ",", "download", "=", "False", ")", ":", "part", "=", "'/updater/check?download=%s'", "%", "(", "1", "if", "download", "else", "0", ")", "if", "force", ":", "self", ".", "query", "(", "...
42.615385
17.769231
def __get_WIOD_env_extension(root_path, year, ll_co, para):
    """ Parses the wiod environmental extension

    Extension can either be given as original .zip files or as extracted
    data in a folder with the same name as the corresponding zip file
    (without the extension).

    This function is based on the structure of the extensions from _may12.

    Note
    ----
    The function deletes 'secQ' which is not present in the economic tables.

    Parameters
    ----------
    root_path : string
        Path to the WIOD data or the path with the extension data folder
        or zip file.
    year : str or int
        Year to return for the extension = valid sheetname for the xls file.
    ll_co : list like
        List of countries in WIOD - used for finding and matching extension
        data in the given folder.
    para : dict
        Defining the parameters for reading the extension.

    Returns
    -------
    dict with keys
        F : pd.DataFrame with index 'stressor' and columns 'region', 'sector'
        FY : pd.Dataframe with index 'stressor' and column 'region'
            This data is for household stressors - must be applied to the
            right final demand column afterwards.
        unit : pd.DataFrame with index 'stressor' and column 'unit'
    """
    # Locate the raw data (zip or extracted folder) for this extension.
    ll_root_content = [ff for ff in os.listdir(root_path) if
                       ff.startswith(para['start'])]
    if len(ll_root_content) < 1:
        warnings.warn(
            'Extension data for {} not found - '
            'Extension not included'.format(para['start']), ParserWarning)
        return None
    elif len(ll_root_content) > 1:
        raise ParserError(
            'Several raw data for extension'
            '{} available - clean extension folder.'.format(para['start']))
    pf_env = os.path.join(root_path, ll_root_content[0])

    # List the per-country spreadsheet files, either inside the zip or
    # directly in the extracted folder.
    if pf_env.endswith('.zip'):
        rf_zip = zipfile.ZipFile(pf_env)
        ll_env_content = [ff for ff in rf_zip.namelist() if
                          ff.endswith(para['ext'])]
    else:
        ll_env_content = [ff for ff in os.listdir(pf_env) if
                          ff.endswith(para['ext'])]

    dl_env = dict()      # per-country industry stressor data
    dl_env_hh = dict()   # per-country household (FC_HH) stressor data
    for co in ll_co:
        ll_pff_read = [ff for ff in ll_env_content
                       if ff.endswith(para['ext']) and
                       (ff.startswith(co.upper()) or
                        ff.startswith(co.lower()))]
        if len(ll_pff_read) < 1:
            raise ParserError('Country data not complete for Extension '
                              '{} - missing {}.'.format(para['start'], co))
        elif len(ll_pff_read) > 1:
            raise ParserError('Multiple country data for Extension '
                              '{} - country {}.'.format(para['start'], co))
        pff_read = ll_pff_read[0]

        if pf_env.endswith('.zip'):
            ff_excel = pd.ExcelFile(rf_zip.open(pff_read))
        else:
            ff_excel = pd.ExcelFile(os.path.join(pf_env, pff_read))
        if str(year) in ff_excel.sheet_names:
            df_env = ff_excel.parse(sheet_name=str(year),
                                    index_col=None,
                                    header=0
                                    )
        else:
            warnings.warn('Extension {} does not include'
                          'data for the year {} - '
                          'Extension not included'.format(para['start'],
                                                          year),
                          ParserWarning)
            return None

        if not df_env.index.is_numeric():
            # upper case letter extensions gets parsed with multiindex, not
            # quite sure why...
            df_env.reset_index(inplace=True)

        # unit can be taken from the first cell in the excel sheet
        if df_env.columns[0] != 'level_0':
            para['unit']['all'] = df_env.columns[0]

        # two clean up cases - can be identified by lower/upper case extension
        # description
        if para['start'].islower():
            pass
        elif para['start'].isupper():
            df_env = df_env.iloc[:, 1:]
        else:
            raise ParserError('Format of extension not given.')

        # Drop aggregate rows and the 'secQ' sector (not in economic tables).
        df_env.dropna(axis=0, how='all', inplace=True)
        df_env = df_env[df_env.iloc[:, 0] != 'total']
        df_env = df_env[df_env.iloc[:, 0] != 'secTOT']
        df_env = df_env[df_env.iloc[:, 0] != 'secQ']
        # NOTE(review): astype() has no 'inplace' parameter in modern pandas
        # (and returns a new object) — this line is likely a no-op; confirm
        # the intended dtype conversion.
        df_env.iloc[:, 0].astype(str, inplace=True)
        df_env.iloc[:, 0].replace(to_replace='sec', value='',
                                  regex=True, inplace=True)
        df_env.set_index([df_env.columns[0]], inplace=True)
        df_env.index.names = ['sector']
        df_env = df_env.T

        # Split out the household column; the rest is industry data.
        ikc_hh = 'FC_HH'
        dl_env_hh[co] = df_env[ikc_hh]
        del df_env[ikc_hh]
        dl_env[co] = df_env

    # Assemble multi-country frames in the order given by ll_co.
    df_F = pd.concat(dl_env, axis=1)[ll_co]
    df_FY = pd.concat(dl_env_hh, axis=1)[ll_co]
    df_F.fillna(0, inplace=True)
    df_FY.fillna(0, inplace=True)

    df_F.columns.names = IDX_NAMES['F_col']
    df_F.index.names = IDX_NAMES['F_row_single']
    df_FY.columns.names = IDX_NAMES['Y_col1']
    df_FY.index.names = IDX_NAMES['F_row_single']

    # build the unit df
    df_unit = pd.DataFrame(index=df_F.index, columns=['unit'])
    _ss_unit = para['unit'].get('all', 'undef')
    for ikr in df_unit.index:
        # NOTE(review): DataFrame.ix was removed in pandas 1.0 — .loc would
        # be required on current pandas; confirm the supported version.
        df_unit.ix[ikr, 'unit'] = para['unit'].get(ikr, _ss_unit)
    df_unit.columns.names = ['unit']
    df_unit.index.names = ['stressor']

    if pf_env.endswith('.zip'):
        rf_zip.close()

    return {'F': df_F,
            'FY': df_FY,
            'unit': df_unit
            }
[ "def", "__get_WIOD_env_extension", "(", "root_path", ",", "year", ",", "ll_co", ",", "para", ")", ":", "ll_root_content", "=", "[", "ff", "for", "ff", "in", "os", ".", "listdir", "(", "root_path", ")", "if", "ff", ".", "startswith", "(", "para", "[", "...
34.924528
20.484277
def border(self, ax, wcs, **kw_mpl_pathpatch):
    """Draw the MOC's border(s) onto a matplotlib axis.

    The sky coordinates defining the MOC perimeter are projected into the
    pixel frame described by *wcs*, and styling keywords are forwarded to
    `matplotlib.patches.PathPatch` (see the
    `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).

    Parameters
    ----------
    ax : `matplotlib.axes.Axes`
        Matplotlib axis.
    wcs : `astropy.wcs.WCS`
        WCS defining the World system <-> Image system projection.
    kw_mpl_pathpatch
        Plotting arguments for `matplotlib.patches.PathPatch`
    """
    # All projection and drawing work is delegated to the border module.
    border.border(self, ax, wcs, **kw_mpl_pathpatch)
[ "def", "border", "(", "self", ",", "ax", ",", "wcs", ",", "*", "*", "kw_mpl_pathpatch", ")", ":", "border", ".", "border", "(", "self", ",", "ax", ",", "wcs", ",", "*", "*", "kw_mpl_pathpatch", ")" ]
44.767442
20.348837
def make_federation_entity(config, eid='', httpcli=None, verify_ssl=True):
    """
    Construct a :py:class:`fedoidcmsg.entity.FederationEntity` instance
    based on given configuration. The concrete subclass returned depends
    on which of 'sms_dir' / 'mds_service' / 'mdss_endpoint' the config
    contains (checked in that order).

    :param config: Federation entity configuration
    :param eid: Entity ID
    :param httpcli: A http client instance to use when sending HTTP requests
    :param verify_ssl: Whether TLS/SSL certificates should be verified
    :return: A :py:class:`fedoidcmsg.entity.FederationEntity` instance
    """
    args = {}

    # Fall back to the configured entity_id when no eid was supplied.
    if not eid:
        try:
            eid = config['entity_id']
        except KeyError:
            pass

    if 'self_signer' in config:
        self_signer = make_internal_signing_service(config['self_signer'],
                                                    eid)
        args['self_signer'] = self_signer

    try:
        bundle_cnf = config['fo_bundle']
    except KeyError:
        pass
    else:
        # Build a key jar only from the recognised key-jar spec entries.
        _args = dict([(k, v) for k, v in bundle_cnf.items() if k in KJ_SPECS])
        if _args:
            _kj = init_key_jar(**_args)
        else:
            _kj = None

        # File-system backed bundle when a directory is given, otherwise
        # an in-memory bundle.
        if 'dir' in bundle_cnf:
            jb = FSJWKSBundle(eid, _kj, bundle_cnf['dir'],
                              key_conv={'to': quote_plus,
                                        'from': unquote_plus})
        else:
            jb = JWKSBundle(eid, _kj)
        args['fo_bundle'] = jb

    # Copy optional pass-through settings when present.
    for item in ['context', 'entity_id', 'fo_priority', 'mds_owner']:
        try:
            args[item] = config[item]
        except KeyError:
            pass

    if 'entity_id' not in args:
        args['entity_id'] = eid

    # These are mutually exclusive
    if 'sms_dir' in config:
        args['sms_dir'] = config['sms_dir']
        return FederationEntityOOB(httpcli, iss=eid, **args)
    elif 'mds_service' in config:
        args['verify_ssl'] = verify_ssl
        args['mds_service'] = config['mds_service']
        return FederationEntityAMS(httpcli, iss=eid, **args)
    elif 'mdss_endpoint' in config:
        args['verify_ssl'] = verify_ssl
        # These are mandatory for this type of entity
        for key in ['mdss_endpoint', 'mdss_owner', 'mdss_keys']:
            args[key] = config[key]
        return FederationEntitySwamid(httpcli, iss=eid, **args)
[ "def", "make_federation_entity", "(", "config", ",", "eid", "=", "''", ",", "httpcli", "=", "None", ",", "verify_ssl", "=", "True", ")", ":", "args", "=", "{", "}", "if", "not", "eid", ":", "try", ":", "eid", "=", "config", "[", "'entity_id'", "]", ...
33.307692
20.046154
def timelimit(timeout):
    """Decorator that runs the wrapped function in a daemon thread and
    raises ``TimeoutError`` if it has not finished within *timeout* seconds.

    Exceptions raised by the wrapped function are re-raised in the caller's
    thread with their original traceback.  (borrowed from web.py, modernized
    for Python 3: ``raise X, y`` syntax, ``Thread.isAlive`` and ``setDaemon``
    are all gone in current interpreters.)
    """
    def _1(function):
        def _2(*args, **kw):
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None
                    # Daemon so a stuck worker never blocks interpreter exit.
                    self.daemon = True
                    self.start()

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except BaseException:
                        # Capture (type, value, traceback) for re-raising
                        # in the calling thread.
                        self.error = sys.exc_info()

            c = Dispatch()
            c.join(timeout)
            if c.is_alive():
                # Worker still running: stop waiting.  The daemon thread
                # continues in the background until interpreter exit.
                raise TimeoutError('took too long')
            if c.error:
                raise c.error[1].with_traceback(c.error[2])
            return c.result
        return _2
    return _1
[ "def", "timelimit", "(", "timeout", ")", ":", "def", "_1", "(", "function", ")", ":", "def", "_2", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "class", "Dispatch", "(", "threading", ".", "Thread", ")", ":", "def", "__init__", "(", "self", ")...
29.5
14.5
def geocode(self, query, **kwargs):
    """ Given a string to search for, return the results from OpenCage's Geocoder.

    :param string query: String to search for

    :returns: Dict results
    :raises InvalidInputError: if the query string is not a unicode string
    :raises RateLimitExceededError: if you have exceeded the number of
        queries you can make. Exception says when you can try again
    :raises UnknownError: if something goes wrong with the OpenCage API
    """
    if six.PY2:
        # py3 doesn't have unicode() function, and instead we check the text_type later
        try:
            query = unicode(query)
        except UnicodeDecodeError:
            raise InvalidInputError(bad_value=query)

    if not isinstance(query, six.text_type):
        raise InvalidInputError(bad_value=query)

    data = {
        'q': query,
        'key': self.key
    }
    # Add user parameters
    data.update(kwargs)

    url = self.url
    response = requests.get(url, params=data)

    # 402 (payment required) and 429 (too many requests) both signal that
    # the quota is exhausted.
    if (response.status_code == 402 or response.status_code == 429):
        # Rate limit exceeded
        reset_time = datetime.utcfromtimestamp(
            response.json()['rate']['reset'])
        raise RateLimitExceededError(
            reset_to=int(response.json()['rate']['limit']),
            reset_time=reset_time)
    elif response.status_code == 500:
        raise UnknownError("500 status code from API")

    try:
        response_json = response.json()
    except ValueError:
        raise UnknownError("Non-JSON result from server")

    if 'results' not in response_json:
        raise UnknownError("JSON from API doesn't have a 'results' key")

    # Normalise lat/lng values to floats before handing back.
    return floatify_latlng(response_json['results'])
[ "def", "geocode", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "if", "six", ".", "PY2", ":", "# py3 doesn't have unicode() function, and instead we check the text_type later", "try", ":", "query", "=", "unicode", "(", "query", ")", "except", "Un...
35.6
25.56
def set_progress_brackets(self, start, end):
    """Define the opening and closing brackets drawn around a progress bar."""
    self.sep_start, self.sep_end = start, end
[ "def", "set_progress_brackets", "(", "self", ",", "start", ",", "end", ")", ":", "self", ".", "sep_start", "=", "start", "self", ".", "sep_end", "=", "end" ]
39
7
def handle_start_stateful_processing(self, start_msg):
    """Handle an incoming StartInstanceStatefulProcessing message.

    :param start_msg: StartInstanceStatefulProcessing type
    """
    checkpoint = start_msg.checkpoint_id
    Log.info("Received start stateful processing for %s" % checkpoint)
    self.is_stateful_started = True
    self.start_instance_if_possible()
[ "def", "handle_start_stateful_processing", "(", "self", ",", "start_msg", ")", ":", "Log", ".", "info", "(", "\"Received start stateful processing for %s\"", "%", "start_msg", ".", "checkpoint_id", ")", "self", ".", "is_stateful_started", "=", "True", "self", ".", "...
49
11.857143
def add_filter(self, filter_key, operator, value):
    """Append a filter clause, translating the key through the metadata map."""
    mapped_key = self._metadata_map.get(filter_key, filter_key)
    clause = {'filter': mapped_key, 'operator': operator, 'value': value}
    self.filters.append(clause)
[ "def", "add_filter", "(", "self", ",", "filter_key", ",", "operator", ",", "value", ")", ":", "filter_key", "=", "self", ".", "_metadata_map", ".", "get", "(", "filter_key", ",", "filter_key", ")", "self", ".", "filters", ".", "append", "(", "{", "'filte...
66.75
21.5
def get(self, record_id):
    """Fetch a single record by its Airtable id.

    >>> record = airtable.get('recwPQIfs4wKPyc9D')

    Args:
        record_id(``str``): Airtable record id

    Returns:
        record (``dict``): Record
    """
    return self._get(self.record_url(record_id))
[ "def", "get", "(", "self", ",", "record_id", ")", ":", "record_url", "=", "self", ".", "record_url", "(", "record_id", ")", "return", "self", ".", "_get", "(", "record_url", ")" ]
24.928571
15.928571
def get_logger(level=None, name=None, filename=None):
    """
    Create a logger or return the current one if already instantiated.

    Parameters
    ----------
    level : int
        one of the logger.level constants
    name : string
        name of the logger
    filename : string
        name of the log file

    Returns
    -------
    logger.logger
    """
    level = settings.log_level if level is None else level
    name = settings.log_name if name is None else name
    filename = settings.log_filename if filename is None else filename

    logger = lg.getLogger(name)

    # Only attach a file handler once per logger name; the flag on the
    # logger object records that setup already happened.
    if not getattr(logger, 'handler_set', None):
        # construct a dated log filename inside the configured logs folder
        todays_date = dt.datetime.today().strftime('%Y_%m_%d')
        log_filename = os.path.join(settings.logs_folder,
                                    '{}_{}.log'.format(filename, todays_date))

        if not os.path.exists(settings.logs_folder):
            os.makedirs(settings.logs_folder)

        handler = lg.FileHandler(log_filename, encoding='utf-8')
        handler.setFormatter(
            lg.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(level)
        logger.handler_set = True

    return logger
[ "def", "get_logger", "(", "level", "=", "None", ",", "name", "=", "None", ",", "filename", "=", "None", ")", ":", "if", "level", "is", "None", ":", "level", "=", "settings", ".", "log_level", "if", "name", "is", "None", ":", "name", "=", "settings", ...
29.659574
21.319149
def _calculate_block_structure(self, inequalities, equalities,
                               momentinequalities, momentequalities,
                               extramomentmatrix, removeequalities,
                               block_struct=None):
    """Calculates the block_struct array for the output file.

    Fills ``self.block_struct`` (SDP block sizes) and
    ``self.localizing_monomial_sets`` from the supplied constraints,
    unless an explicit ``block_struct`` is given.
    """
    if block_struct is None:
        if self.verbose > 0:
            print("Calculating block structure...")
        self.block_struct = []
        # Scalar (1x1) blocks for free parameters, then one block per
        # moment matrix (and per extra moment matrix copy).
        if self.parameters is not None:
            self.block_struct += [1 for _ in self.parameters]
        for monomials in self.monomial_sets:
            if len(monomials) > 0 and isinstance(monomials[0], list):
                self.block_struct.append(len(monomials[0]))
            else:
                self.block_struct.append(len(monomials))
        if extramomentmatrix is not None:
            for _ in extramomentmatrix:
                for monomials in self.monomial_sets:
                    if len(monomials) > 0 and \
                            isinstance(monomials[0], list):
                        self.block_struct.append(len(monomials[0]))
                    else:
                        self.block_struct.append(len(monomials))
    else:
        self.block_struct = block_struct
    degree_warning = False
    if inequalities is not None:
        self._n_inequalities = len(inequalities)
        n_tmp_inequalities = len(inequalities)
    else:
        self._n_inequalities = 0
        n_tmp_inequalities = 0
    constraints = flatten([inequalities])
    if momentinequalities is not None:
        self._n_inequalities += len(momentinequalities)
        constraints += momentinequalities
    if not removeequalities:
        constraints += flatten([equalities])
    monomial_sets = []
    for k, constraint in enumerate(constraints):
        # Find the order of the localizing matrix
        if k < n_tmp_inequalities or k >= self._n_inequalities:
            if isinstance(constraint, str):
                ineq_order = 2 * self.level
            else:
                if constraint.is_Relational:
                    constraint = convert_relational(constraint)
                ineq_order = ncdegree(constraint)
                if iscomplex(constraint):
                    self.complex_matrix = True
            if ineq_order > 2 * self.level:
                degree_warning = True
            localization_order = (2*self.level - ineq_order)//2
            if self.level == -1:
                localization_order = 0
            # Use caller-supplied localizing monomials when available,
            # otherwise pick them up to the localization order.
            if self.localizing_monomial_sets is not None and \
                    self.localizing_monomial_sets[k] is not None:
                localizing_monomials = self.localizing_monomial_sets[k]
            else:
                index = find_variable_set(self.variables, constraint)
                localizing_monomials = \
                    pick_monomials_up_to_degree(self.monomial_sets[index],
                                                localization_order)
            ln = len(localizing_monomials)
            if ln == 0:
                localizing_monomials = [S.One]
        else:
            # Moment inequalities only need a scalar block.
            localizing_monomials = [S.One]
            ln = 1
        localizing_monomials = unique(localizing_monomials)
        monomial_sets.append(localizing_monomials)
        if k < self._n_inequalities:
            self.block_struct.append(ln)
        else:
            # Equalities are modeled as pairs of scalar blocks; pad the
            # monomial sets so indexing stays aligned with block_struct.
            monomial_sets += [None for _ in range(ln*(ln+1)//2-1)]
            monomial_sets.append(localizing_monomials)
            monomial_sets += [None for _ in range(ln*(ln+1)//2-1)]
            self.block_struct += [1 for _ in range(ln*(ln+1))]
    if degree_warning and self.verbose > 0:
        print("A constraint has degree %d. Either choose a higher level "
              "relaxation or ensure that a mixed-order relaxation has the"
              " necessary monomials" % (ineq_order), file=sys.stderr)

    if momentequalities is not None:
        for moment_eq in momentequalities:
            self._moment_equalities.append(moment_eq)
            if not removeequalities:
                # Each retained moment equality adds two scalar blocks.
                monomial_sets += [[S.One], [S.One]]
                self.block_struct += [1, 1]
    self.localizing_monomial_sets = monomial_sets
[ "def", "_calculate_block_structure", "(", "self", ",", "inequalities", ",", "equalities", ",", "momentinequalities", ",", "momentequalities", ",", "extramomentmatrix", ",", "removeequalities", ",", "block_struct", "=", "None", ")", ":", "if", "block_struct", "is", "...
48.11828
15.709677
def _normalize(x, cmin=None, cmax=None, clip=True):
    """Normalize an array from the range [cmin, cmax] to [0,1],
    with optional clipping."""
    arr = x if isinstance(x, np.ndarray) else np.array(x)
    lo = arr.min() if cmin is None else cmin
    hi = arr.max() if cmax is None else cmax
    # Degenerate range: everything maps to the midpoint.
    if lo == hi:
        return .5 * np.ones(arr.shape)
    lo, hi = float(lo), float(hi)
    scaled = (arr - lo) * 1. / (hi - lo)
    return np.clip(scaled, 0., 1.) if clip else scaled
[ "def", "_normalize", "(", "x", ",", "cmin", "=", "None", ",", "cmax", "=", "None", ",", "clip", "=", "True", ")", ":", "if", "not", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "x", "=", "np", ".", "array", "(", "x", ")", "if"...
29.823529
13.176471
def next_token(self, tokenum, value, scol): """Determine what to do with the next token""" # Make self.current reflect these values self.current.set(tokenum, value, scol) # Determine indent_type based on this token if self.current.tokenum == INDENT and self.current.value: self.indent_type = self.current.value[0] # Only proceed if we shouldn't ignore this token if not self.ignore_token(): # Determining if this token is whitespace self.determine_if_whitespace() # Determine if inside a container self.determine_inside_container() # Change indentation as necessary self.determine_indentation() # See if we are force inserting this token if self.forced_insert(): return # If we have a newline after an inserted line, then we don't need to worry about semicolons if self.inserted_line and self.current.tokenum == NEWLINE: self.inserted_line = False # If we have a non space, non comment after an inserted line, then insert a semicolon if self.result and not self.is_space and self.inserted_line: if self.current.tokenum != COMMENT: self.result.append((OP, ';')) self.inserted_line = False # Progress the tracker self.progress() # Add a newline if we just skipped a single if self.single and self.single.skipped: self.single.skipped = False self.result.append((NEWLINE, '\n')) # Set after_space so next line knows if it is after space self.after_space = self.is_space
[ "def", "next_token", "(", "self", ",", "tokenum", ",", "value", ",", "scol", ")", ":", "# Make self.current reflect these values", "self", ".", "current", ".", "set", "(", "tokenum", ",", "value", ",", "scol", ")", "# Determine indent_type based on this token", "i...
38.488889
19.777778
def _load_paths(self, paths, depth=0):
    '''
    Goes recursevly through the given list of paths in order to find and
    pass all preset files to ```_load_preset()```

    Recursion is bounded by ``self.MAX_DEPTH``; only files ending in
    ``.json`` are treated as presets.  In non-strict mode failures are
    logged instead of raised.
    '''
    if depth > self.MAX_DEPTH:
        # guard against runaway recursion (e.g. symlink cycles)
        return
    for path in paths:
        try:
            # avoid empty string
            if not path:
                continue
            # cleanUp path (only for the user-supplied top-level paths)
            if depth == 0:
                path = os.path.expanduser(path)   # replace ~
                path = os.path.expandvars(path)   # replace vars
                path = os.path.normpath(path)     # replace /../ , "" will be converted to "."
            if not os.path.exists(path):
                raise PresetException("does not exists or is a broken link or not enough permissions to read")
            elif os.path.isdir(path):
                try:
                    # recurse into each directory entry, one level deeper
                    for child in os.listdir(path):
                        self._load_paths([os.path.join(path, child)], depth + 1)
                except OSError as e:
                    raise PresetException("IOError: " + e.strerror)
            elif os.path.isfile(path):
                # non-.json files are silently skipped
                if path.endswith(".json"):
                    self._load_preset(path)
            else:
                raise PresetException("not regular file")
        except PresetException as e:
            # annotate the error with the offending path; strict mode
            # propagates, otherwise it is only logged
            e.message = "Failed to load preset: \"{}\" [ {} ]".format(path, e.message)
            if self.strict:
                raise
            logger.error(str(e))
[ "def", "_load_paths", "(", "self", ",", "paths", ",", "depth", "=", "0", ")", ":", "if", "depth", ">", "self", ".", "MAX_DEPTH", ":", "return", "for", "path", "in", "paths", ":", "try", ":", "# avoid empty string", "if", "not", "path", ":", "continue",...
40.307692
20.666667
def get_or_create_iexact(self, **kwargs):
    """
    Case insensitive title version of ``get_or_create``. Also allows for
    multiple existing results (the first match wins).
    """
    lookup = dict(kwargs)
    if "title" in lookup:
        lookup["title__iexact"] = lookup.pop("title")
    try:
        return self.filter(**lookup)[0], False
    except IndexError:
        return self.create(**kwargs), True
[ "def", "get_or_create_iexact", "(", "self", ",", "*", "*", "kwargs", ")", ":", "lookup", "=", "dict", "(", "*", "*", "kwargs", ")", "try", ":", "lookup", "[", "\"title__iexact\"", "]", "=", "lookup", ".", "pop", "(", "\"title\"", ")", "except", "KeyErr...
31.928571
13.071429
def color(self, x, y, paint_method):
    """Queue a color/paint operation at pixel (x, y).

    :param paint_method: 'point' or 'replace' or 'floodfill' or
                         'filltoborder' or 'reset'
    :type paint_method: str or pgmagick.PaintMethod
    """
    method = _convert_paintmethod(paint_method)
    drawable = pgmagick.DrawableColor(x, y, method)
    self.drawer.append(drawable)
[ "def", "color", "(", "self", ",", "x", ",", "y", ",", "paint_method", ")", ":", "paint_method", "=", "_convert_paintmethod", "(", "paint_method", ")", "color", "=", "pgmagick", ".", "DrawableColor", "(", "x", ",", "y", ",", "paint_method", ")", "self", "...
42.444444
11.333333
def audiosamples(language, word, key=''):
    '''Returns a list of URLs to suitable audiosamples for a given word.'''
    from lltk.audiosamples import forvo, google
    # Forvo results first, then Google, matching the original ordering.
    return forvo(language, word, key) + google(language, word)
[ "def", "audiosamples", "(", "language", ",", "word", ",", "key", "=", "''", ")", ":", "from", "lltk", ".", "audiosamples", "import", "forvo", ",", "google", "urls", "=", "[", "]", "urls", "+=", "forvo", "(", "language", ",", "word", ",", "key", ")", ...
27.666667
21.444444
def parse(cls, fptr, offset, length):
    """Parse JPX free box.

    Parameters
    ----------
    f : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    FreeBox
        Instance of the current free box.
    """
    # Consume the remainder of the box so the file pointer ends up at
    # its boundary; the content itself is irrelevant for a free box.
    remaining = offset + length - fptr.tell()
    fptr.read(remaining)
    return cls(length=length, offset=offset)
[ "def", "parse", "(", "cls", ",", "fptr", ",", "offset", ",", "length", ")", ":", "# Must seek to end of box.", "nbytes", "=", "offset", "+", "length", "-", "fptr", ".", "tell", "(", ")", "fptr", ".", "read", "(", "nbytes", ")", "return", "cls", "(", ...
24.952381
15.285714
def save(self, filename, format=None):
    """
    Saves the SArray to file.

    The saved SArray will be in a directory named with the `targetfile`
    parameter.

    Parameters
    ----------
    filename : string
        A local path or a remote URL.  If format is 'text', it will be
        saved as a text file. If format is 'binary', a directory will be
        created at the location which will contain the SArray.

    format : {'binary', 'text', 'csv'}, optional
        Format in which to save the SFrame. Binary saved SArrays can be
        loaded much faster and without any format conversion losses.
        'text' and 'csv' are synonymous: Each SArray row will be written
        as a single line in an output text file. If not given, will try
        to infer the format from filename given. If file name ends with
        '.csv', '.txt' or '.csv.gz', then save as 'csv' format,
        otherwise save as 'binary' format.
    """
    from .sframe import SFrame as _SFrame

    if format is None:
        # Infer from the extension.  Note '.txt' includes the dot: the
        # previous check used bare 'txt', which also (wrongly) matched
        # names like 'foo_txt'.
        if filename.endswith(('.csv', '.csv.gz', '.txt')):
            format = 'text'
        else:
            format = 'binary'

    if format == 'binary':
        with cython_context():
            self.__proxy__.save(_make_internal_url(filename))
    elif format in ('text', 'csv'):
        # Wrap the SArray into a one-column SFrame to reuse the CSV writer.
        sf = _SFrame({'X1': self})
        with cython_context():
            sf.__proxy__.save_as_csv(_make_internal_url(filename),
                                     {'header': False})
    else:
        raise ValueError("Unsupported format: {}".format(format))
[ "def", "save", "(", "self", ",", "filename", ",", "format", "=", "None", ")", ":", "from", ".", "sframe", "import", "SFrame", "as", "_SFrame", "if", "format", "is", "None", ":", "if", "filename", ".", "endswith", "(", "(", "'.csv'", ",", "'.csv.gz'", ...
41.820513
21.512821
def _do_put(self):
    """Issue an authenticated HTTP PUT request and return the response."""
    credentials = (self._email, self._api_token)
    return requests.put(self._url,
                        data=self._data,
                        headers=self._headers,
                        auth=credentials)
[ "def", "_do_put", "(", "self", ")", ":", "return", "requests", ".", "put", "(", "self", ".", "_url", ",", "data", "=", "self", ".", "_data", ",", "headers", "=", "self", ".", "_headers", ",", "auth", "=", "(", "self", ".", "_email", ",", "self", ...
35.8
22.6
def _check_pool_attr(self, attr, req_attr=None):
    """ Check pool attributes.

        Validates attribute names against the known pool attributes and
        normalizes/validates the default prefix lengths in place.

        :param attr: dict of pool attributes to check (modified in place;
            prefix lengths are coerced to int).
        :param req_attr: list of attribute names that must be present.
        :raises NipapValueError: if a default prefix length is not an
            integer in 1-32 (IPv4) or 1-128 (IPv6).
    """
    if req_attr is None:
        req_attr = []

    # check attribute names
    self._check_attr(attr, req_attr, _pool_attrs)

    # Validate default prefix lengths for both address families.  The
    # logic only differs in the upper bound, so handle both in one loop.
    # NOTE: this also rejects an explicit 0, which the previous IPv6
    # truthiness check silently accepted.
    for key, family, max_len in (
            ('ipv4_default_prefix_length', 4, 32),
            ('ipv6_default_prefix_length', 6, 128)):
        if attr.get(key) is None:
            continue
        try:
            attr[key] = int(attr[key])
            if not 1 <= attr[key] <= max_len:
                raise ValueError()
        except ValueError:
            raise NipapValueError(
                'Default IPv%d prefix length must be an integer between 1 and %d.'
                % (family, max_len))
[ "def", "_check_pool_attr", "(", "self", ",", "attr", ",", "req_attr", "=", "None", ")", ":", "if", "req_attr", "is", "None", ":", "req_attr", "=", "[", "]", "# check attribute names", "self", ".", "_check_attr", "(", "attr", ",", "req_attr", ",", "_pool_at...
38.30303
20.818182
def get(self, name_or_klass):
    """
    Gets a mode by name (or class)

    :param name_or_klass: The name or the class of the mode to get
    :type name_or_klass: str or type
    :rtype: pyqode.core.api.Mode
    """
    # A class argument is looked up by its class name.
    key = name_or_klass if isinstance(name_or_klass, str) \
        else name_or_klass.__name__
    return self._modes[key]
[ "def", "get", "(", "self", ",", "name_or_klass", ")", ":", "if", "not", "isinstance", "(", "name_or_klass", ",", "str", ")", ":", "name_or_klass", "=", "name_or_klass", ".", "__name__", "return", "self", ".", "_modes", "[", "name_or_klass", "]" ]
33.818182
9.454545
def process(self, user, timestamp, data=None):
    """
    Processes a user event.

    :Parameters:
        user : `hashable`
            A hashable value to identify a user (`int` or `str` are OK)
        timestamp : :class:`mwtypes.Timestamp`
            The timestamp of the event
        data : `mixed`
            Event meta data

    :Returns:
        A generator of :class:`~mwsessions.Session` expired after
        processing the user event.
    """
    event = Event(user, mwtypes.Timestamp(timestamp), self.event_i, data)
    self.event_i += 1

    # Emit every session that this event's timestamp pushes past expiry.
    for expired_user, expired_events in self._clear_expired(event.timestamp):
        yield Session(expired_user, unpack_events(expired_events))

    # Record the event against the user's active event list (creating
    # the list on first sight of the user).
    events = self.active_users.setdefault(event.user, [])
    self.recently_active.push(
        ActiveSession(event.timestamp, event.i, events))
    events.append(event)
[ "def", "process", "(", "self", ",", "user", ",", "timestamp", ",", "data", "=", "None", ")", ":", "event", "=", "Event", "(", "user", ",", "mwtypes", ".", "Timestamp", "(", "timestamp", ")", ",", "self", ".", "event_i", ",", "data", ")", "self", "....
33.65625
18.90625
def add_or_return_host(self, host):
    """
    Returns a tuple (host, new), where ``host`` is a Host instance,
    and ``new`` is a bool indicating whether the host was newly added.
    """
    with self._hosts_lock:
        # Return the already-registered host for this endpoint, if any.
        if host.endpoint in self._hosts:
            return self._hosts[host.endpoint], False
        self._hosts[host.endpoint] = host
        return host, True
[ "def", "add_or_return_host", "(", "self", ",", "host", ")", ":", "with", "self", ".", "_hosts_lock", ":", "try", ":", "return", "self", ".", "_hosts", "[", "host", ".", "endpoint", "]", ",", "False", "except", "KeyError", ":", "self", ".", "_hosts", "[...
35.083333
10.75
def get_br(self):
    """Returns the bottom right border of the cell"""
    # The bottom-right border is the right border of the cell below.
    below = CellBorders(self.cell_attributes,
                        *self.cell.get_below_key_rect())
    return below.get_r()
[ "def", "get_br", "(", "self", ")", ":", "cell_below", "=", "CellBorders", "(", "self", ".", "cell_attributes", ",", "*", "self", ".", "cell", ".", "get_below_key_rect", "(", ")", ")", "return", "cell_below", ".", "get_r", "(", ")" ]
37.666667
18.166667
def apply_correlation(self, sites, imt, residuals, stddev_intra): """ Apply correlation to randomly sampled residuals. See Parent function """ # stddev_intra is repeated if it is only 1 value for all the residuals if stddev_intra.shape[0] == 1: stddev_intra = numpy.matlib.repmat( stddev_intra, len(sites.complete), 1) # Reshape 'stddev_intra' if needed stddev_intra = stddev_intra.squeeze() if not stddev_intra.shape: stddev_intra = stddev_intra[None] if self.uncertainty_multiplier == 0: # No uncertainty # residuals were sampled from a normal distribution with # stddev_intra standard deviation. 'residuals_norm' are residuals # normalized, sampled from a standard normal distribution. # For this, every row of 'residuals' (every site) is divided by its # corresponding standard deviation element. residuals_norm = residuals / stddev_intra[sites.sids, None] # Lower diagonal of the Cholesky decomposition from/to cache try: cormaLow = self.cache[imt] except KeyError: # Note that instead of computing the whole correlation matrix # corresponding to sites.complete, here we compute only the # correlation matrix corresponding to sites. cormaLow = numpy.linalg.cholesky( numpy.diag(stddev_intra[sites.sids]) * self._get_correlation_matrix(sites, imt) * numpy.diag(stddev_intra[sites.sids])) self.cache[imt] = cormaLow # Apply correlation return numpy.dot(cormaLow, residuals_norm) else: # Variability (uncertainty) is included nsim = len(residuals[1]) nsites = len(residuals) # Re-sample all the residuals residuals_correlated = residuals * 0 for isim in range(0, nsim): corma = self._get_correlation_matrix(sites, imt) cov = (numpy.diag(stddev_intra[sites.sids]) * corma * numpy.diag(stddev_intra[sites.sids])) residuals_correlated[0:, isim] = ( numpy.random.multivariate_normal( numpy.zeros(nsites), cov, 1)) return residuals_correlated
[ "def", "apply_correlation", "(", "self", ",", "sites", ",", "imt", ",", "residuals", ",", "stddev_intra", ")", ":", "# stddev_intra is repeated if it is only 1 value for all the residuals", "if", "stddev_intra", ".", "shape", "[", "0", "]", "==", "1", ":", "stddev_i...
43.8
19.290909
def create(quiet, name, base_uri, symlink_path):
    """Create a proto dataset.

    CLI command: validates the name, builds admin metadata, creates the
    proto dataset at ``base_uri`` (handling the special ``symlink``
    scheme), writes an empty README, and prints next-step hints unless
    ``quiet`` is set.
    """
    _validate_name(name)

    admin_metadata = dtoolcore.generate_admin_metadata(name)
    parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)

    # A symlink-scheme base URI requires an explicit symlink path.
    if parsed_base_uri.scheme == "symlink":
        if symlink_path is None:
            raise click.UsageError("Need to specify symlink path using the -s/--symlink-path option")  # NOQA

    if symlink_path:
        # Rewrite the base URI to the symlink scheme and re-parse it.
        base_uri = dtoolcore.utils.sanitise_uri(
            "symlink:" + parsed_base_uri.path
        )
        parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)

    # Create the dataset.
    proto_dataset = dtoolcore.generate_proto_dataset(
        admin_metadata=admin_metadata,
        base_uri=dtoolcore.utils.urlunparse(parsed_base_uri),
        config_path=CONFIG_PATH)

    # If we are creating a symlink dataset we need to set the symlink_path
    # attribute on the storage broker.
    if symlink_path:
        symlink_abspath = os.path.abspath(symlink_path)
        proto_dataset._storage_broker.symlink_path = symlink_abspath
    try:
        proto_dataset.create()
    except dtoolcore.storagebroker.StorageBrokerOSError as err:
        raise click.UsageError(str(err))

    proto_dataset.put_readme("")

    if quiet:
        click.secho(proto_dataset.uri)
    else:
        # Give the user some feedback and hints on what to do next.
        click.secho("Created proto dataset ", nl=False, fg="green")
        click.secho(proto_dataset.uri)
        click.secho("Next steps: ")

        step = 1

        if parsed_base_uri.scheme != "symlink":
            click.secho("{}. Add raw data, eg:".format(step))
            click.secho(
                " dtool add item my_file.txt {}".format(proto_dataset.uri),
                fg="cyan")

            if parsed_base_uri.scheme == "file":
                # Find the abspath of the data directory for user feedback.
                data_path = proto_dataset._storage_broker._data_abspath
                click.secho(" Or use your system commands, e.g: ")
                click.secho(
                    " mv my_data_directory {}/".format(data_path),
                    fg="cyan"
                )
            # Only count the raw-data step when it was actually shown,
            # so symlink datasets start numbering at 1 below.
            step = step + 1

        click.secho("{}. Add descriptive metadata, e.g: ".format(step))
        click.secho(
            " dtool readme interactive {}".format(proto_dataset.uri),
            fg="cyan")
        step = step + 1

        click.secho(
            "{}. Convert the proto dataset into a dataset: ".format(step)
        )
        click.secho(" dtool freeze {}".format(proto_dataset.uri), fg="cyan")
[ "def", "create", "(", "quiet", ",", "name", ",", "base_uri", ",", "symlink_path", ")", ":", "_validate_name", "(", "name", ")", "admin_metadata", "=", "dtoolcore", ".", "generate_admin_metadata", "(", "name", ")", "parsed_base_uri", "=", "dtoolcore", ".", "uti...
36.338028
22.859155
def decodeEntities(self, len, what, end, end2, end3):
    """This function is deprecated, we now always process entities
       content through xmlStringDecodeEntities TODO: remove it in next
       major release. [67] Reference ::= EntityRef | CharRef
       [69] PEReference ::= '%' Name ';' """
    # Thin binding straight through to the C-level helper.
    return libxml2mod.xmlDecodeEntities(
        self._o, len, what, end, end2, end3)
[ "def", "decodeEntities", "(", "self", ",", "len", ",", "what", ",", "end", ",", "end2", ",", "end3", ")", ":", "ret", "=", "libxml2mod", ".", "xmlDecodeEntities", "(", "self", ".", "_o", ",", "len", ",", "what", ",", "end", ",", "end2", ",", "end3"...
58.142857
18.857143
def compile_insert_get_id(self, query, values, sequence=None):
    """
    Compile an insert and get ID statement into SQL.

    :param query: A QueryBuilder instance
    :type query: QueryBuilder

    :param values: The values to insert
    :type values: dict

    :param sequence: The id sequence
    :type sequence: str

    :return: The compiled statement
    :rtype: str
    """
    # Default to the conventional "id" column when no sequence is given.
    returning = self.wrap(sequence if sequence is not None else "id")
    return "%s RETURNING %s" % (
        self.compile_insert(query, values),
        returning,
    )
[ "def", "compile_insert_get_id", "(", "self", ",", "query", ",", "values", ",", "sequence", "=", "None", ")", ":", "if", "sequence", "is", "None", ":", "sequence", "=", "\"id\"", "return", "\"%s RETURNING %s\"", "%", "(", "self", ".", "compile_insert", "(", ...
25.695652
16.391304
def update(self, value=None):
    """
    Update progress bar via the console or notebook accordingly.
    """
    # No explicit value means "advance by one step".
    value = self._current_value + 1 if value is None else value
    self._current_value = value

    if not self._ipython_widget:
        self._update_console(value)
    else:
        # Widget updates can fail (e.g. frontend gone); ignore that.
        try:
            self._update_ipython_widget(value)
        except RuntimeError:
            pass
[ "def", "update", "(", "self", ",", "value", "=", "None", ")", ":", "# Update self.value", "if", "value", "is", "None", ":", "value", "=", "self", ".", "_current_value", "+", "1", "self", ".", "_current_value", "=", "value", "# Choose the appropriate environmen...
27.5
14.277778
def remove(self, num):
    """Remove a finished (completed or dead) job."""
    if num not in self.all:
        error('Job #%s not found' % num)
        return

    job = self.all[num]
    code = job.stat_code
    if code == self._s_running:
        # Running jobs cannot be removed.
        error('Job #%s is still running, it can not be removed.' % num)
    elif code == self._s_completed:
        self.completed.remove(job)
    elif code == self._s_dead:
        self.dead.remove(job)
[ "def", "remove", "(", "self", ",", "num", ")", ":", "try", ":", "job", "=", "self", ".", "all", "[", "num", "]", "except", "KeyError", ":", "error", "(", "'Job #%s not found'", "%", "num", ")", "else", ":", "stat_code", "=", "job", ".", "stat_code", ...
34.5625
13.9375
def _default_data(self, *args, **kwargs):
    """
    Generate a one-time signature and other data required to send a
    secure POST request to the Bitstamp API.
    """
    data = super(Trading, self)._default_data(*args, **kwargs)

    # Sign nonce + username + API key with the secret (HMAC-SHA256,
    # upper-case hex) as required by the Bitstamp authentication scheme.
    nonce = self.get_nonce()
    message = (str(nonce) + self.username + self.key).encode('utf-8')
    signature = hmac.new(
        self.secret.encode('utf-8'),
        msg=message,
        digestmod=hashlib.sha256).hexdigest().upper()

    data['key'] = self.key
    data['nonce'] = nonce
    data['signature'] = signature
    return data
[ "def", "_default_data", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "super", "(", "Trading", ",", "self", ")", ".", "_default_data", "(", "*", "args", ",", "*", "*", "kwargs", ")", "data", "[", "'key'", "]", "="...
37.3125
13.9375
def load_annot(self):
    """Action: load a file for annotations."""
    # Suggest a default path derived from the currently open recording.
    if self.parent.info.filename is None:
        default = None
    else:
        default = splitext(self.parent.info.filename)[0] + '_scores.xml'

    filename, _ = QFileDialog.getOpenFileName(self, 'Load annotation file',
                                              default,
                                              'Annotation File (*.xml)')
    if filename == '':
        # Dialog cancelled.
        return

    try:
        self.update_notes(filename, False)
    except FileNotFoundError:
        msg = 'Annotation file not found'
        self.parent.statusBar().showMessage(msg)
        lg.info(msg)
[ "def", "load_annot", "(", "self", ")", ":", "if", "self", ".", "parent", ".", "info", ".", "filename", "is", "not", "None", ":", "filename", "=", "splitext", "(", "self", ".", "parent", ".", "info", ".", "filename", ")", "[", "0", "]", "+", "'_scor...
35.35
21.45
def down(self, migration_id):
    """Rollback to migration."""
    if not self.check_directory():
        return

    for migration in self.get_migrations_to_down(migration_id):
        logger.info('Rollback migration %s' % migration.filename)
        module = self.load_migration_file(migration.filename)
        if hasattr(module, 'down'):
            module.down(self.db)
        else:
            logger.info('No down method on %s' % migration.filename)
        # Forget the applied migration either way.
        self.collection.remove({'filename': migration.filename})
[ "def", "down", "(", "self", ",", "migration_id", ")", ":", "if", "not", "self", ".", "check_directory", "(", ")", ":", "return", "for", "migration", "in", "self", ".", "get_migrations_to_down", "(", "migration_id", ")", ":", "logger", ".", "info", "(", "...
38.933333
22.933333
def do_notification_type_list(mc, args):
    '''List notification types supported by monasca.'''
    try:
        notification_types = mc.notificationtypes.list()
    except (osc_exc.ClientException, k_exc.HttpError) as he:
        raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))

    # Raw JSON output when requested, otherwise a formatted table.
    if args.json:
        print(utils.json_formatter(notification_types))
        return
    formatters = {'types': lambda x: x["type"]}
    utils.print_list(notification_types, ["types"], formatters=formatters)
[ "def", "do_notification_type_list", "(", "mc", ",", "args", ")", ":", "try", ":", "notification_types", "=", "mc", ".", "notificationtypes", ".", "list", "(", ")", "except", "(", "osc_exc", ".", "ClientException", ",", "k_exc", ".", "HttpError", ")", "as", ...
42.666667
25.466667
def get_mac_address_table_input_request_type_get_interface_based_request_forwarding_interface_interface_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_mac_address_table = ET.Element("get_mac_address_table")
    config = get_mac_address_table

    # Build the nested input/request-type/... element chain.
    node = ET.SubElement(get_mac_address_table, "input")
    for tag in ("request-type", "get-interface-based-request",
                "forwarding-interface"):
        node = ET.SubElement(node, tag)
    leaf = ET.SubElement(node, "interface-name")
    leaf.text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_mac_address_table_input_request_type_get_interface_based_request_forwarding_interface_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_mac_address_table", "=", "ET", ".", "Elemen...
56.333333
26.133333
def setDayWidth(self, width):
    """
    Sets the day width that will be used for drawing this gantt widget
    and marks the widget dirty so it is re-rendered.

    :param      width | <int>
    """
    self._dayWidth = width
    # NOTE: the previous implementation also read
    # self.ganttWidget().dateStart()/dateEnd() into unused locals; those
    # dead reads were removed (assumed to be side-effect-free getters).
    self._dirty = True
[ "def", "setDayWidth", "(", "self", ",", "width", ")", ":", "self", ".", "_dayWidth", "=", "width", "start", "=", "self", ".", "ganttWidget", "(", ")", ".", "dateStart", "(", ")", "end", "=", "self", ".", "ganttWidget", "(", ")", ".", "dateEnd", "(", ...
28.166667
14.833333
def display(self, complete=False):
    """
    Display information about the point source.

    :param complete : if True, displays also information on fixed parameters
    :return: (none)
    """
    # Switch on the complete display flag
    self._complete_display = bool(complete)

    try:
        # This will automatically choose the best representation among
        # repr and repr_html
        super(Model, self).display()
    finally:
        # Go back to default even if the base-class display raised;
        # previously an exception left the flag stuck at True.
        self._complete_display = False
[ "def", "display", "(", "self", ",", "complete", "=", "False", ")", ":", "# Switch on the complete display flag", "self", ".", "_complete_display", "=", "bool", "(", "complete", ")", "# This will automatically choose the best representation among repr and repr_html", "super", ...
27.388889
21.833333
def initialize_ui(self):
    """
    Initializes the Component ui.

    Builds the components model, swaps the Designer-created tree view
    for a custom ``Components_QTreeView`` bound to that model, wires up
    context-menu actions and selection/refresh signals.

    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__))

    self.__model = ComponentsModel(self, horizontal_headers=self.__headers)
    self.set_components()

    # Replace the placeholder tree view from the .ui file with the
    # custom view, reusing the grid layout cell (0, 0).
    self.Components_Manager_Ui_treeView.setParent(None)
    self.Components_Manager_Ui_treeView = Components_QTreeView(self, self.__model)
    self.Components_Manager_Ui_treeView.setObjectName("Components_Manager_Ui_treeView")
    self.Components_Manager_Ui_gridLayout.setContentsMargins(self.__tree_view_inner_margins)
    self.Components_Manager_Ui_gridLayout.addWidget(self.Components_Manager_Ui_treeView, 0, 0)
    self.__view = self.Components_Manager_Ui_treeView
    self.__view.setContextMenuPolicy(Qt.ActionsContextMenu)
    self.__view_add_actions()

    self.Components_Informations_textBrowser.setText(self.__components_informations_default_text)

    # Give (almost) all the splitter space to the tree view pane.
    self.Components_Manager_Ui_splitter.setSizes([16777215, 1])

    # Signals / Slots.
    self.__view.selectionModel().selectionChanged.connect(self.__view_selectionModel__selectionChanged)
    self.refresh_nodes.connect(self.__model__refresh_nodes)

    self.initialized_ui = True
    return True
[ "def", "initialize_ui", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"> Initializing '{0}' Component ui.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "__model", "=", "ComponentsModel", "(", "self", ",", "...
41.65625
30.09375
def is_attribute_deprecated(self, attribute):
    """
    Check if the attribute is deprecated by the current KMIP version.

    Args:
        attribute (string): The name of the attribute
            (e.g., 'Unique Identifier'). Required.

    Returns:
        bool: True if the attribute is deprecated at or before the
            current KMIP version; False otherwise, including when the
            attribute is unknown or was never deprecated.
    """
    rule_set = self._attribute_rule_sets.get(attribute)
    if rule_set is None:
        # Unknown attribute: previously this raised AttributeError on
        # the None rule set; treat it as "not deprecated" instead.
        return False
    if not rule_set.version_deprecated:
        return False
    return self._version >= rule_set.version_deprecated
[ "def", "is_attribute_deprecated", "(", "self", ",", "attribute", ")", ":", "rule_set", "=", "self", ".", "_attribute_rule_sets", ".", "get", "(", "attribute", ")", "if", "rule_set", ".", "version_deprecated", ":", "if", "self", ".", "_version", ">=", "rule_set...
33.1875
16.6875
def predict_topk(self, dataset, output_type="probability", k=3, batch_size=64):
    """
    Return top-k predictions for the ``dataset``, using the trained model.
    Predictions are returned as an SFrame with three columns: `id`,
    `class`, and `probability`, `margin`, or `rank`, depending on the
    ``output_type`` parameter. Input dataset size must be the same as for
    training of the model.

    Parameters
    ----------
    dataset : SFrame | SArray | turicreate.Image
        Images to be classified.
        If dataset is an SFrame, it must include columns with the same
        names as the features used for model training, but does not require
        a target column. Additional columns are ignored.

    output_type : {'probability', 'rank', 'margin'}, optional
        Choose the return type of the prediction:

        - `probability`: Probability associated with each label in the
                         prediction.
        - `rank`       : Rank associated with each label in the prediction.
        - `margin`     : Margin associated with each label in the
                         prediction.

    k : int, optional
        Number of classes to return for each input example.

    batch_size : int, optional
        Must be >= 1; validated below but currently not forwarded to the
        underlying classifier (see note in the body).

    Returns
    -------
    out : SFrame
        An SFrame with model predictions.

    See Also
    --------
    predict, classify, evaluate

    Examples
    --------
    >>> pred = m.predict_topk(validation_data, k=3)
    >>> pred
    +----+-------+-------------------+
    | id | class |    probability    |
    +----+-------+-------------------+
    | 0  |   4   |   0.995623886585  |
    | 0  |   9   |  0.0038311756216  |
    | 0  |   7   | 0.000301006948575 |
    | 1  |   1   |   0.928708016872  |
    | 1  |   3   |  0.0440889261663  |
    | 1  |   2   |  0.0176190119237  |
    | 2  |   3   |   0.996967732906  |
    | 2  |   2   |  0.00151345680933 |
    | 2  |   7   | 0.000637513934635 |
    | 3  |   1   |   0.998070061207  |
    | .. |  ...  |        ...        |
    +----+-------+-------------------+
    [35688 rows x 3 columns]
    """
    if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
        raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')
    if(batch_size < 1):
        raise ValueError("'batch_size' must be greater than or equal to 1")

    # NOTE(review): batch_size is validated but never passed on —
    # presumably the feature extractor uses its own default; confirm.
    dataset, _ = self._canonize_input(dataset)
    extracted_features = self._extract_features(dataset)
    return self.classifier.predict_topk(extracted_features, output_type = output_type, k = k)
[ "def", "predict_topk", "(", "self", ",", "dataset", ",", "output_type", "=", "\"probability\"", ",", "k", "=", "3", ",", "batch_size", "=", "64", ")", ":", "if", "not", "isinstance", "(", "dataset", ",", "(", "_tc", ".", "SFrame", ",", "_tc", ".", "S...
40.875
21.75
def prep_patterns(filenames):
    """Load pattern files passed via options and return list of patterns.

    Each file contributes one pattern per line (trailing newline
    stripped).  Exits the program with status 1 if a file cannot be
    read or if no patterns were loaded at all.

    :param filenames: iterable of pattern-file paths.
    :return: set of unique patterns.
    """
    patterns = []
    for filename in filenames:
        try:
            with open(filename) as file:
                patterns += [l.rstrip('\n') for l in file]
        except OSError:
            # Was a bare `except:` — narrowed so SystemExit /
            # KeyboardInterrupt are no longer swallowed.
            LOGGER.error("Unable to load pattern file '%s'" % filename)
            sys.exit(1)

    if patterns:
        # return a set to eliminate duplicates
        return set(patterns)
    LOGGER.error('No terms were loaded')
    sys.exit(1)
[ "def", "prep_patterns", "(", "filenames", ")", ":", "patterns", "=", "[", "]", "for", "filename", "in", "filenames", ":", "try", ":", "with", "open", "(", "filename", ")", "as", "file", ":", "patterns", "+=", "[", "l", ".", "rstrip", "(", "'\\n'", ")...
29.368421
18.736842
def package_url(self):
    """Return the package URL associated with this metadata"""
    is_metatab_resource = (
        self.resource_file == DEFAULT_METATAB_FILE
        or self.target_format in ('txt', 'ipynb'))

    if is_metatab_resource:
        # Point at the enclosing directory with the metapack scheme.
        u = self.inner.clone().clear_fragment()
        u.path = dirname(self.path) + '/'
        u.scheme_extension = 'metapack'
    else:
        u = self

    return MetapackPackageUrl(str(u.clear_fragment()),
                              downloader=self._downloader)
[ "def", "package_url", "(", "self", ")", ":", "if", "self", ".", "resource_file", "==", "DEFAULT_METATAB_FILE", "or", "self", ".", "target_format", "in", "(", "'txt'", ",", "'ipynb'", ")", ":", "u", "=", "self", ".", "inner", ".", "clone", "(", ")", "."...
40.181818
24.181818
def draw_edge_visibility(gl, v, e, f, hidden_wireframe=True):
    """Assumes camera is set up correctly in gl context.

    Renders edges with their 1-based index encoded as an RGB color and
    reads the framebuffer back, returning a 2-D array of per-pixel edge
    indices (-1 where no edge was drawn).
    """
    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    # Encode each edge's 1-based index into 8-bit R/G/B channels so the
    # rendered pixel colors identify which edge is visible there.
    ec = np.arange(1, len(e)+1)
    ec = np.tile(col(ec), (1, 3))
    ec[:, 0] = ec[:, 0] & 255
    ec[:, 1] = (ec[:, 1] >> 8 ) & 255
    ec[:, 2] = (ec[:, 2] >> 16 ) & 255
    ec = np.asarray(ec, dtype=np.uint8)
    draw_colored_primitives(gl, v, e, ec)

    if hidden_wireframe:
        # Fill faces in black with a polygon offset so they occlude
        # hidden edges without z-fighting against visible ones.
        gl.Enable(GL_POLYGON_OFFSET_FILL)
        gl.PolygonOffset(10.0, 1.0)
        draw_colored_primitives(gl, v, f, fc=np.zeros(f.shape))
        gl.Disable(GL_POLYGON_OFFSET_FILL)

    # Decode the framebuffer back into edge indices; background decodes
    # to 0, hence the trailing -1.
    raw = np.asarray(gl.getImage(), np.uint32)
    raw = raw[:,:,0] + raw[:,:,1]*256 + raw[:,:,2]*256*256 - 1
    return raw
[ "def", "draw_edge_visibility", "(", "gl", ",", "v", ",", "e", ",", "f", ",", "hidden_wireframe", "=", "True", ")", ":", "gl", ".", "Clear", "(", "GL_COLOR_BUFFER_BIT", "|", "GL_DEPTH_BUFFER_BIT", ")", "ec", "=", "np", ".", "arange", "(", "1", ",", "len...
34.681818
14.545455
def set_config(new_config=None):
    """
    Reset config options to defaults, and then update (optionally) with
    the provided dictionary of options.

    :param new_config: optional dict of overrides applied on top of the
        defaults.  (Previously a mutable default argument ``{}``.)
    """
    # The default base configuration.
    flask_app.base_config = dict(working_directory='.',
                                 template='collapse-input',
                                 debug=False,
                                 port=None)

    update_config(new_config if new_config is not None else {})
[ "def", "set_config", "(", "new_config", "=", "{", "}", ")", ":", "# The default base configuration.", "flask_app", ".", "base_config", "=", "dict", "(", "working_directory", "=", "'.'", ",", "template", "=", "'collapse-input'", ",", "debug", "=", "False", ",", ...
46.222222
7.333333
def formatted_command(self, command):
    """Issue a raw, formatted command to the device.

    This function is invoked by both query and command and is the point
    where we actually send bytes out over the network.  This function
    does the wrapping and formatting required by the Anthem API so that
    the higher-level function can just operate with regular strings
    without the burden of byte encoding and terminating device requests.

    :param command: Any command as documented in the Anthem API
    :type command: str

    :Example:

    >>> formatted_command('Z1VOL-50')
    """
    # (Removed a no-op `command = command` self-assignment.)
    command = command.encode()
    self.log.debug('> %s', command)
    try:
        self.transport.write(command)
        # Brief pause so back-to-back commands are not coalesced.
        time.sleep(0.01)
    except Exception:
        # Was a bare `except:`; Exception still covers the missing-
        # transport case without swallowing SystemExit etc.
        self.log.warning('No transport found, unable to send command')
[ "def", "formatted_command", "(", "self", ",", "command", ")", ":", "command", "=", "command", "command", "=", "command", ".", "encode", "(", ")", "self", ".", "log", ".", "debug", "(", "'> %s'", ",", "command", ")", "try", ":", "self", ".", "transport"...
36.04
22.4
def mangle(self, name, x):
    """
    Mangle the name by hashing the I{name} and appending I{x}.
    @return: the mangled name.
    """
    digest = hashlib.md5(name.encode('utf8')).hexdigest()
    return '{}-{}'.format(digest, x)
[ "def", "mangle", "(", "self", ",", "name", ",", "x", ")", ":", "h", "=", "hashlib", ".", "md5", "(", "name", ".", "encode", "(", "'utf8'", ")", ")", ".", "hexdigest", "(", ")", "return", "'%s-%s'", "%", "(", "h", ",", "x", ")" ]
33.571429
10.142857
def unlock_keychain(username):
    """
    If the user is running via SSH, their Keychain must be unlocked first.
    """
    # Nothing to do outside SSH, or if this user was already handled.
    if 'SSH_TTY' not in os.environ or username in _unlocked:
        return
    _unlocked.add(username)

    if sys.platform == 'darwin':
        sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
        subprocess.call(['security', 'unlock-keychain'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[ "def", "unlock_keychain", "(", "username", ")", ":", "if", "'SSH_TTY'", "not", "in", "os", ".", "environ", ":", "return", "# Don't unlock if we've already seen this user.", "if", "username", "in", "_unlocked", ":", "return", "_unlocked", ".", "add", "(", "username...
34.266667
26
def unzip(x, split_dim, current_length, num_splits=2, name=None):
    """Splits a tensor by unzipping along the split_dim.

    For example the following array split into 2 would be:
        [1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]
    and by 3:
        [1, 2, 3, 4] -> [1, 4], [2], [3]

    Args:
      x: The tensor to split.
      split_dim: The dimension to split along.
      current_length: Current length along the split_dim.
      num_splits: The number of splits.
      name: Optional name for this op.
    Returns:
      A length num_splits sequence.
    """
    with tf.name_scope(name, 'unzip', [x]) as scope:
        x = tf.convert_to_tensor(x, name='x')
        # There is probably a more efficient way to do this.
        all_splits = tf.split(
            value=x, num_or_size_splits=current_length, axis=split_dim,
            name=scope)
        # `range` instead of the Python-2-only `xrange` so this also
        # runs on Python 3.
        splits = [[] for _ in range(num_splits)]
        # Deal the unit slices round-robin into the output buckets.
        for i in range(current_length):
            splits[i % num_splits].append(all_splits[i])
        return [tf.concat(s, split_dim) for s in splits]
[ "def", "unzip", "(", "x", ",", "split_dim", ",", "current_length", ",", "num_splits", "=", "2", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "'unzip'", ",", "[", "x", "]", ")", "as", "scope", ":", "x", "=...
36.961538
14.615385
def addVariantSet(self): """ Adds a new VariantSet into this repo. """ self._openRepo() dataset = self._repo.getDatasetByName(self._args.datasetName) dataUrls = self._args.dataFiles name = self._args.name if len(dataUrls) == 1: if self._args.name is None: name = getNameFromPath(dataUrls[0]) if os.path.isdir(dataUrls[0]): # Read in the VCF files from the directory. # TODO support uncompressed VCF and BCF files vcfDir = dataUrls[0] pattern = os.path.join(vcfDir, "*.vcf.gz") dataUrls = glob.glob(pattern) if len(dataUrls) == 0: raise exceptions.RepoManagerException( "Cannot find any VCF files in the directory " "'{}'.".format(vcfDir)) dataUrls[0] = self._getFilePath(dataUrls[0], self._args.relativePath) elif self._args.name is None: raise exceptions.RepoManagerException( "Cannot infer the intended name of the VariantSet when " "more than one VCF file is provided. Please provide a " "name argument using --name.") parsed = urlparse.urlparse(dataUrls[0]) if parsed.scheme not in ['http', 'ftp']: dataUrls = map(lambda url: self._getFilePath( url, self._args.relativePath), dataUrls) # Now, get the index files for the data files that we've now obtained. indexFiles = self._args.indexFiles if indexFiles is None: # First check if all the paths exist locally, as they must # if we are making a default index path. for dataUrl in dataUrls: if not os.path.exists(dataUrl): raise exceptions.MissingIndexException( "Cannot find file '{}'. All variant files must be " "stored locally if the default index location is " "used. 
If you are trying to create a VariantSet " "based on remote URLs, please download the index " "files to the local file system and provide them " "with the --indexFiles argument".format(dataUrl)) # We assume that the indexes are made by adding .tbi indexSuffix = ".tbi" # TODO support BCF input properly here by adding .csi indexFiles = [filename + indexSuffix for filename in dataUrls] indexFiles = map(lambda url: self._getFilePath( url, self._args.relativePath), indexFiles) variantSet = variants.HtslibVariantSet(dataset, name) variantSet.populateFromFile(dataUrls, indexFiles) # Get the reference set that is associated with the variant set. referenceSetName = self._args.referenceSetName if referenceSetName is None: # Try to find a reference set name from the VCF header. referenceSetName = variantSet.getVcfHeaderReferenceSetName() if referenceSetName is None: raise exceptions.RepoManagerException( "Cannot infer the ReferenceSet from the VCF header. Please " "specify the ReferenceSet to associate with this " "VariantSet using the --referenceSetName option") referenceSet = self._repo.getReferenceSetByName(referenceSetName) variantSet.setReferenceSet(referenceSet) variantSet.setAttributes(json.loads(self._args.attributes)) # Now check for annotations annotationSets = [] if variantSet.isAnnotated() and self._args.addAnnotationSets: ontologyName = self._args.ontologyName if ontologyName is None: raise exceptions.RepoManagerException( "A sequence ontology name must be provided") ontology = self._repo.getOntologyByName(ontologyName) self._checkSequenceOntology(ontology) for annotationSet in variantSet.getVariantAnnotationSets(): annotationSet.setOntology(ontology) annotationSets.append(annotationSet) # Add the annotation sets and the variant set as an atomic update def updateRepo(): self._repo.insertVariantSet(variantSet) for annotationSet in annotationSets: self._repo.insertVariantAnnotationSet(annotationSet) self._updateRepo(updateRepo)
[ "def", "addVariantSet", "(", "self", ")", ":", "self", ".", "_openRepo", "(", ")", "dataset", "=", "self", ".", "_repo", ".", "getDatasetByName", "(", "self", ".", "_args", ".", "datasetName", ")", "dataUrls", "=", "self", ".", "_args", ".", "dataFiles",...
52.639535
17.360465
def near_dupe_hashes(labels, values, languages=None, **kw): """ Hash the given address into normalized strings that can be used to group similar addresses together for more detailed pairwise comparison. This can be thought of as the blocking function in record linkage or locally-sensitive hashing in the document near-duplicate detection. Required -------- @param labels: array of component labels as either Unicode or UTF-8 encoded strings e.g. ["house_number", "road", "postcode"] @param values: array of component values as either Unicode or UTF-8 encoded strings e.g. ["123", "Broadway", "11216"]. Note len(values) must be equal to len(labels). Options ------- @param languages: a tuple or list of ISO language code strings (e.g. "en", "fr", "de", etc.) to use in expansion. If None is passed, use language classifier to detect language automatically. @param with_name: use name in the hashes @param with_address: use house_number & street in the hashes @param with_unit: use secondary unit as part of the hashes @param with_city_or_equivalent: use the city, city_district, suburb, or island name as one of the geo qualifiers @param with_small_containing_boundaries: use small containing boundaries (currently state_district) as one of the geo qualifiers @param with_postal_code: use postal code as one of the geo qualifiers @param with_latlon: use geohash + neighbors as one of the geo qualifiers @param latitude: latitude (Y coordinate) @param longitude: longitude (X coordinate) @param geohash_precision: geohash tile size (default = 6) @param name_and_address_keys: include keys with name + address + geo @param name_only_keys: include keys with name + geo @param address_only_keys: include keys with address + geo """ return _near_dupe.near_dupe_hashes(labels, values, languages=languages, **kw)
[ "def", "near_dupe_hashes", "(", "labels", ",", "values", ",", "languages", "=", "None", ",", "*", "*", "kw", ")", ":", "return", "_near_dupe", ".", "near_dupe_hashes", "(", "labels", ",", "values", ",", "languages", "=", "languages", ",", "*", "*", "kw",...
55.702703
28.567568
def unregister_area(self, area_code, index): """'Unshares' a memory area previously shared with Srv_RegisterArea(). That memory block will be no longer visible by the clients. """ return self.library.Srv_UnregisterArea(self.pointer, area_code, index)
[ "def", "unregister_area", "(", "self", ",", "area_code", ",", "index", ")", ":", "return", "self", ".", "library", ".", "Srv_UnregisterArea", "(", "self", ".", "pointer", ",", "area_code", ",", "index", ")" ]
55.6
13.8
def transfer(self, user): """Transfers app to given username's account.""" r = self._h._http_resource( method='PUT', resource=('apps', self.name), data={'app[transfer_owner]': user} ) return r.ok
[ "def", "transfer", "(", "self", ",", "user", ")", ":", "r", "=", "self", ".", "_h", ".", "_http_resource", "(", "method", "=", "'PUT'", ",", "resource", "=", "(", "'apps'", ",", "self", ".", "name", ")", ",", "data", "=", "{", "'app[transfer_owner]'"...
28.444444
14.888889
def show_prediction(estimator, doc, **kwargs): """ Return an explanation of estimator prediction as an IPython.display.HTML object. Use this function to show information about classifier prediction in IPython. :func:`show_prediction` accepts all :func:`eli5.explain_prediction` arguments and all :func:`eli5.formatters.html.format_as_html` keyword arguments, so it is possible to get explanation and customize formatting in a single call. Parameters ---------- estimator : object Estimator instance. This argument must be positional. doc : object Example to run estimator on. Estimator makes a prediction for this example, and :func:`show_prediction` tries to show information about this prediction. Pass a single element, not a one-element array: if you fitted your estimator on ``X``, that would be ``X[i]`` for most containers, and ``X.iloc[i]`` for ``pandas.DataFrame``. top : int or (int, int) tuple, optional Number of features to show. When ``top`` is int, ``top`` features with a highest absolute values are shown. When it is (pos, neg) tuple, no more than ``pos`` positive features and no more than ``neg`` negative features is shown. ``None`` value means no limit (default). This argument may be supported or not, depending on estimator type. top_targets : int, optional Number of targets to show. When ``top_targets`` is provided, only specified number of targets with highest scores are shown. Negative value means targets with lowest scores are shown. Must not be given with ``targets`` argument. ``None`` value means no limit: all targets are shown (default). This argument may be supported or not, depending on estimator type. target_names : list[str] or {'old_name': 'new_name'} dict, optional Names of targets or classes. This argument can be used to provide human-readable class/target names for estimators which don't expose clss names themselves. It can be also used to rename estimator-provided classes before displaying them. 
This argument may be supported or not, depending on estimator type. targets : list, optional Order of class/target names to show. This argument can be also used to show information only for a subset of classes. It should be a list of class / target names which match either names provided by an estimator or names defined in ``target_names`` parameter. In case of binary classification you can use this argument to set the class which probability or score should be displayed, with an appropriate explanation. By default a result for predicted class is shown. For example, you can use ``targets=[True]`` to always show result for a positive class, even if the predicted label is False. This argument may be supported or not, depending on estimator type. feature_names : list, optional A list of feature names. It allows to specify feature names when they are not provided by an estimator object. This argument may be supported or not, depending on estimator type. feature_re : str, optional Only feature names which match ``feature_re`` regex are shown (more precisely, ``re.search(feature_re, x)`` is checked). feature_filter : Callable[[str, float], bool], optional Only feature names for which ``feature_filter`` function returns True are shown. It must accept feature name and feature value. Missing features always have a NaN value. show : List[str], optional List of sections to show. Allowed values: * 'targets' - per-target feature weights; * 'transition_features' - transition features of a CRF model; * 'feature_importances' - feature importances of a decision tree or an ensemble-based estimator; * 'decision_tree' - decision tree in a graphical form; * 'method' - a string with explanation method; * 'description' - description of explanation method and its caveats. ``eli5.formatters.fields`` provides constants that cover common cases: ``INFO`` (method and description), ``WEIGHTS`` (all the rest), and ``ALL`` (all). 
horizontal_layout : bool When True, feature weight tables are printed horizontally (left to right); when False, feature weight tables are printed vertically (top to down). Default is True. highlight_spaces : bool or None, optional Whether to highlight spaces in feature names. This is useful if you work with text and have ngram features which may include spaces at left or right. Default is None, meaning that the value used is set automatically based on vectorizer and feature values. include_styles : bool Most styles are inline, but some are included separately in <style> tag; you can omit them by passing ``include_styles=False``. Default is True. force_weights : bool When True, a table with feature weights is displayed even if all features are already highlighted in text. Default is False. preserve_density: bool or None This argument currently only makes sense when used with text data and vectorizers from scikit-learn. If preserve_density is True, then color for longer fragments will be less intensive than for shorter fragments, so that "sum" of intensities will correspond to feature weight. If preserve_density is None, then it's value is chosen depending on analyzer kind: it is preserved for "char" and "char_wb" analyzers, and not preserved for "word" analyzers. Default is None. show_feature_values : bool When True, feature values are shown along with feature contributions. Default is False. **kwargs: dict Keyword arguments. All keyword arguments are passed to concrete explain_prediction... implementations. Returns ------- IPython.display.HTML The result is printed in IPython notebook as an HTML widget. 
If you need to display several explanations as an output of a single cell, or if you want to display it from a function then use IPython.display.display:: from IPython.display import display display(eli5.show_weights(clf1)) display(eli5.show_weights(clf2)) """ format_kwargs, explain_kwargs = _split_kwargs(kwargs) expl = explain_prediction(estimator, doc, **explain_kwargs) html = format_as_html(expl, **format_kwargs) return HTML(html)
[ "def", "show_prediction", "(", "estimator", ",", "doc", ",", "*", "*", "kwargs", ")", ":", "format_kwargs", ",", "explain_kwargs", "=", "_split_kwargs", "(", "kwargs", ")", "expl", "=", "explain_prediction", "(", "estimator", ",", "doc", ",", "*", "*", "ex...
44.57047
26.328859
def provStacks(self, offs, size): ''' Returns a stream of provenance stacks at the given offset ''' for _, iden in self.provseq.slice(offs, size): stack = self.getProvStack(iden) if stack is None: continue yield (iden, stack)
[ "def", "provStacks", "(", "self", ",", "offs", ",", "size", ")", ":", "for", "_", ",", "iden", "in", "self", ".", "provseq", ".", "slice", "(", "offs", ",", "size", ")", ":", "stack", "=", "self", ".", "getProvStack", "(", "iden", ")", "if", "sta...
33.444444
15.888889
def replace_entity_resource(model, oldres, newres): ''' Replace one entity in the model with another with the same links :param model: Versa model to be updated :param oldres: old/former resource IRI to be replaced :param newres: new/replacement resource IRI :return: None ''' oldrids = set() for rid, link in model: if link[ORIGIN] == oldres or link[TARGET] == oldres or oldres in link[ATTRIBUTES].values(): oldrids.add(rid) new_link = (newres if o == oldres else o, r, newres if t == oldres else t, dict((k, newres if v == oldres else v) for k, v in a.items())) model.add(*new_link) model.delete(oldrids) return
[ "def", "replace_entity_resource", "(", "model", ",", "oldres", ",", "newres", ")", ":", "oldrids", "=", "set", "(", ")", "for", "rid", ",", "link", "in", "model", ":", "if", "link", "[", "ORIGIN", "]", "==", "oldres", "or", "link", "[", "TARGET", "]"...
40.352941
27.176471
def get_assets_metadata(self): """Gets the metadata for the assets. return: (osid.Metadata) - metadata for the assets *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template metadata = dict(self._mdata['assets']) metadata.update({'existing_assets_values': self._my_map['assetIds']}) return Metadata(**metadata)
[ "def", "get_assets_metadata", "(", "self", ")", ":", "# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'assets'", "]", ")", "metadata", ".", "update", "(", "{", "'existin...
42
21.545455
def send_ready_for_review(build_id, release_name, release_number): """Sends an email indicating that the release is ready for review.""" build = models.Build.query.get(build_id) if not build.send_email: logging.debug( 'Not sending ready for review email because build does not have ' 'email enabled. build_id=%r', build.id) return ops = operations.BuildOps(build_id) release, run_list, stats_dict, _ = ops.get_release( release_name, release_number) if not run_list: logging.debug( 'Not sending ready for review email because there are ' ' no runs. build_id=%r, release_name=%r, release_number=%d', build.id, release.name, release.number) return title = '%s: %s - Ready for review' % (build.name, release.name) email_body = render_template( 'email_ready_for_review.html', build=build, release=release, run_list=run_list, stats_dict=stats_dict) recipients = [] if build.email_alias: recipients.append(build.email_alias) else: for user in build.owners: recipients.append(user.email_address) if not recipients: logging.debug( 'Not sending ready for review email because there are no ' 'recipients. build_id=%r, release_name=%r, release_number=%d', build.id, release.name, release.number) return message = Message(title, recipients=recipients) message.html = email_body logging.info('Sending ready for review email for build_id=%r, ' 'release_name=%r, release_number=%d to %r', build.id, release.name, release.number, recipients) return render_or_send(send_ready_for_review, message)
[ "def", "send_ready_for_review", "(", "build_id", ",", "release_name", ",", "release_number", ")", ":", "build", "=", "models", ".", "Build", ".", "query", ".", "get", "(", "build_id", ")", "if", "not", "build", ".", "send_email", ":", "logging", ".", "debu...
33.75
21.576923
def by_land_area_in_sqmi(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.land_area_in_sqmi.name, ascending=False, returns=DEFAULT_LIMIT): """ Search zipcode information by land area / sq miles range. """ return self.query( land_area_in_sqmi_lower=lower, land_area_in_sqmi_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns, )
[ "def", "by_land_area_in_sqmi", "(", "self", ",", "lower", "=", "-", "1", ",", "upper", "=", "2", "**", "31", ",", "zipcode_type", "=", "ZipcodeType", ".", "Standard", ",", "sort_by", "=", "SimpleZipcode", ".", "land_area_in_sqmi", ".", "name", ",", "ascend...
40.9375
11.6875
def body_block_content_render(tag, recursive=False, base_url=None): """ Render the tag as body content and call recursively if the tag has child tags """ block_content_list = [] tag_content = OrderedDict() if tag.name == "p": for block_content in body_block_paragraph_render(tag, base_url=base_url): if block_content != {}: block_content_list.append(block_content) else: tag_content = body_block_content(tag, base_url=base_url) nodenames = body_block_nodenames() tag_content_content = [] # Collect the content of the tag but only for some tags if tag.name not in ["p", "fig", "table-wrap", "list", "media", "disp-quote", "code"]: for child_tag in tag: if not(hasattr(child_tag, 'name')): continue if child_tag.name == "p": # Ignore paragraphs that start with DOI: if node_text(child_tag) and len(remove_doi_paragraph([child_tag])) <= 0: continue for block_content in body_block_paragraph_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) elif child_tag.name == "fig" and tag.name == "fig-group": # Do not fig inside fig-group a second time pass elif child_tag.name == "media" and tag.name == "fig-group": # Do not include a media video inside fig-group a second time if child_tag.get("mimetype") == "video": pass else: for block_content in body_block_content_render(child_tag, recursive=True, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) if len(tag_content_content) > 0: if tag.name in nodenames or recursive is False: tag_content["content"] = [] for block_content in tag_content_content: tag_content["content"].append(block_content) block_content_list.append(tag_content) else: # Not a block tag, e.g. a caption tag, let the content pass through block_content_list = tag_content_content else: block_content_list.append(tag_content) return block_content_list
[ "def", "body_block_content_render", "(", "tag", ",", "recursive", "=", "False", ",", "base_url", "=", "None", ")", ":", "block_content_list", "=", "[", "]", "tag_content", "=", "OrderedDict", "(", ")", "if", "tag", ".", "name", "==", "\"p\"", ":", "for", ...
40.482759
22.241379
def replace(self, left=None, lower=None, upper=None, right=None, ignore_inf=True): """ Create a new interval based on the current one and the provided values. If current interval is not atomic, it is extended or restricted such that its enclosure satisfies the new bounds. In other words, its new enclosure will be equal to self.to_atomic().replace(left, lower, upper, right). Callable can be passed instead of values. In that case, it is called with the current corresponding value except if ignore_inf if set (default) and the corresponding bound is an infinity. :param left: (a function of) left boundary. :param lower: (a function of) value of the lower bound. :param upper: (a function of) value of the upper bound. :param right: (a function of) right boundary. :param ignore_inf: ignore infinities if functions are provided (default is True). :return: an Interval instance """ enclosure = self.to_atomic() if callable(left): left = left(enclosure._left) else: left = enclosure._left if left is None else left if callable(lower): lower = enclosure._lower if ignore_inf and enclosure._lower in [-inf, inf] else lower(enclosure._lower) else: lower = enclosure._lower if lower is None else lower if callable(upper): upper = enclosure._upper if ignore_inf and enclosure._upper in [-inf, inf] else upper(enclosure._upper) else: upper = enclosure._upper if upper is None else upper if callable(right): right = right(enclosure._right) else: right = enclosure._right if right is None else right n_interval = self & AtomicInterval(left, lower, upper, right) if len(n_interval) > 1: lowest = n_interval[0].replace(left=left, lower=lower) highest = n_interval[-1].replace(upper=upper, right=right) return Interval(*[lowest] + n_interval[1:-1] + [highest]) else: return Interval(n_interval[0].replace(left, lower, upper, right))
[ "def", "replace", "(", "self", ",", "left", "=", "None", ",", "lower", "=", "None", ",", "upper", "=", "None", ",", "right", "=", "None", ",", "ignore_inf", "=", "True", ")", ":", "enclosure", "=", "self", ".", "to_atomic", "(", ")", "if", "callabl...
43.714286
28.367347
def kill_all(self): """ Kill all currently running jobs. """ logger.info('Job {0} killing all currently running tasks'.format(self.name)) for task in self.tasks.itervalues(): if task.started_at and not task.completed_at: task.kill()
[ "def", "kill_all", "(", "self", ")", ":", "logger", ".", "info", "(", "'Job {0} killing all currently running tasks'", ".", "format", "(", "self", ".", "name", ")", ")", "for", "task", "in", "self", ".", "tasks", ".", "itervalues", "(", ")", ":", "if", "...
46.5
16.5