text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def add_spec(self, *specs):
    """Register one or more component specs with this topology.

    :type specs: HeronComponentSpec
    :param specs: specs to add to the topology
    :raises TypeError: if an argument is not a HeronComponentSpec
    :raises ValueError: if a spec is unnamed, uses the reserved name
        ``config``, or duplicates an already-registered name
    """
    for component in specs:
        if not isinstance(component, HeronComponentSpec):
            raise TypeError("Argument to add_spec needs to be HeronComponentSpec, given: %s"
                            % str(component))
        if component.name is None:
            raise ValueError("TopologyBuilder cannot take a spec without name")
        if component.name == "config":
            raise ValueError("config is a reserved name")
        if component.name in self._specs:
            raise ValueError("Attempting to add duplicate spec name: %r %r"
                             % (component.name, component))
        self._specs[component.name] = component
[ "def", "add_spec", "(", "self", ",", "*", "specs", ")", ":", "for", "spec", "in", "specs", ":", "if", "not", "isinstance", "(", "spec", ",", "HeronComponentSpec", ")", ":", "raise", "TypeError", "(", "\"Argument to add_spec needs to be HeronComponentSpec, given: %...
38.333333
17.611111
def _register_mecab_loc(location): ''' Set MeCab binary location ''' global MECAB_LOC if not os.path.isfile(location): logging.getLogger(__name__).warning("Provided mecab binary location does not exist {}".format(location)) logging.getLogger(__name__).info("Mecab binary is switched to: {}".format(location)) MECAB_LOC = location
[ "def", "_register_mecab_loc", "(", "location", ")", ":", "global", "MECAB_LOC", "if", "not", "os", ".", "path", ".", "isfile", "(", "location", ")", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "warning", "(", "\"Provided mecab binary location ...
51
23.857143
def _generate_examples(
        self, image_dir, annotation_dir, split_type, has_annotation=True):
    """Generate examples as dicts.

    Args:
      image_dir: `str`, directory containing the images
      annotation_dir: `str`, directory containing the annotation files
      split_type: `str`, <split_name><year> (ex: train2014)
      has_annotation: `bool`, when False (for the testing set), the
        annotations are not recorded

    Yields:
      Generator yielding the next samples
    """
    if has_annotation:
        instance_filename = "instances_{}.json"
    else:
        instance_filename = "image_info_{}.json"

    # Load the label names and images
    instance_path = os.path.join(
        annotation_dir,
        "annotations",
        instance_filename.format(split_type),
    )
    coco_annotation = CocoAnnotation(instance_path)
    # Each category is a dict:
    # {
    #     'id': 51,  # From 1-91, some entry missing
    #     'name': 'bowl',
    #     'supercategory': 'kitchen',
    # }
    categories = coco_annotation.categories
    # Each image is a dict:
    # {
    #     'id': 262145,
    #     'file_name': 'COCO_train2014_000000262145.jpg'
    #     'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
    #     'coco_url': 'http://images.cocodataset.org/train2014/xyz.jpg',
    #     'license': 2,
    #     'date_captured': '2013-11-20 02:07:55',
    #     'height': 427,
    #     'width': 640,
    # }
    images = coco_annotation.images

    # TODO(b/121375022): ClassLabel names should also contains 'id' and
    # and 'supercategory' (in addition to 'name')
    # Warning: As Coco only use 80 out of the 91 labels, the c['id'] and
    # dataset names ids won't match.
    self.info.features["objects"]["label"].names = [
        c["name"] for c in categories
    ]
    # TODO(b/121375022): Conversion should be done by ClassLabel
    categories_id2name = {c["id"]: c["name"] for c in categories}

    # Iterate over all images, sorted for deterministic example ordering.
    annotation_skipped = 0
    for image_info in sorted(images, key=lambda x: x["id"]):
        if has_annotation:
            # Each instance annotation is a dict:
            # {
            #     'iscrowd': 0,
            #     'bbox': [116.95, 305.86, 285.3, 266.03],
            #     'image_id': 480023,
            #     'segmentation': [[312.29, 562.89, 402.25, ...]],
            #     'category_id': 58,
            #     'area': 54652.9556,
            #     'id': 86,
            # }
            instances = coco_annotation.get_annotations(img_id=image_info["id"])
        else:
            instances = []  # No annotations

        if not instances:
            annotation_skipped += 1

        def build_bbox(x, y, width, height):
            # pylint: disable=cell-var-from-loop
            # build_bbox is only used within the loop so it is ok to use image_info
            # Converts COCO's absolute [x, y, w, h] into normalized BBox corners.
            return tfds.features.BBox(
                ymin=y / image_info["height"],
                xmin=x / image_info["width"],
                ymax=(y + height) / image_info["height"],
                xmax=(x + width) / image_info["width"],
            )
            # pylint: enable=cell-var-from-loop

        yield {
            "image": os.path.join(image_dir, split_type, image_info["file_name"]),
            "image/filename": image_info["file_name"],
            "objects": [{
                "bbox": build_bbox(*instance_info["bbox"]),
                "label": categories_id2name[instance_info["category_id"]],
                "is_crowd": bool(instance_info["iscrowd"]),
            } for instance_info in instances],
        }

    logging.info(
        "%d/%d images do not contains any annotations",
        annotation_skipped,
        len(images),
    )
[ "def", "_generate_examples", "(", "self", ",", "image_dir", ",", "annotation_dir", ",", "split_type", ",", "has_annotation", "=", "True", ")", ":", "if", "has_annotation", ":", "instance_filename", "=", "\"instances_{}.json\"", "else", ":", "instance_filename", "=",...
34.147059
18.882353
def _construct_unique_id(self, id_prefix, lines): """Constructs a unique ID for a particular prompt in this case, based on the id_prefix and the lines in the prompt. """ text = [] for line in lines: if isinstance(line, str): text.append(line) elif isinstance(line, CodeAnswer): text.append(line.dump()) return id_prefix + '\n' + '\n'.join(text)
[ "def", "_construct_unique_id", "(", "self", ",", "id_prefix", ",", "lines", ")", ":", "text", "=", "[", "]", "for", "line", "in", "lines", ":", "if", "isinstance", "(", "line", ",", "str", ")", ":", "text", ".", "append", "(", "line", ")", "elif", ...
39.818182
8.181818
def bind_and_save(self, lxc):
    """Bind metadata to an LXC, persist it, and return the bound copy."""
    bound = self.bind(lxc)
    bound.save()
    return bound
[ "def", "bind_and_save", "(", "self", ",", "lxc", ")", ":", "bound_meta", "=", "self", ".", "bind", "(", "lxc", ")", "bound_meta", ".", "save", "(", ")", "return", "bound_meta" ]
33
9.2
def calculate(self, T, method):
    r'''Method to calculate surface tension of a liquid at temperature `T`
    with a given method.

    This method has no exception handling; see `T_dependent_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature at which to calculate surface tension, [K]
    method : str
        Name of the method to use

    Returns
    -------
    sigma : float
        Surface tension of the liquid at T, [N/m]
    '''
    # Dispatch on the requested correlation; each branch pulls its
    # fitted coefficients or critical constants off the instance.
    if method == STREFPROP:
        sigma0, n0, sigma1, n1, sigma2, n2, Tc = self.STREFPROP_coeffs
        sigma = REFPROP(T, Tc=Tc, sigma0=sigma0, n0=n0, sigma1=sigma1,
                        n1=n1, sigma2=sigma2, n2=n2)
    elif method == VDI_PPDS:
        sigma = EQ106(T, self.VDI_PPDS_Tc, *self.VDI_PPDS_coeffs)
    elif method == SOMAYAJULU2:
        A, B, C = self.SOMAYAJULU2_coeffs
        sigma = Somayajulu(T, Tc=self.SOMAYAJULU2_Tc, A=A, B=B, C=C)
    elif method == SOMAYAJULU:
        A, B, C = self.SOMAYAJULU_coeffs
        sigma = Somayajulu(T, Tc=self.SOMAYAJULU_Tc, A=A, B=B, C=C)
    elif method == JASPER:
        sigma = Jasper(T, a=self.JASPER_coeffs[0], b=self.JASPER_coeffs[1])
    elif method == BROCK_BIRD:
        sigma = Brock_Bird(T, self.Tb, self.Tc, self.Pc)
    elif method == SASTRI_RAO:
        sigma = Sastri_Rao(T, self.Tb, self.Tc, self.Pc)
    elif method == PITZER:
        sigma = Pitzer(T, self.Tc, self.Pc, self.omega)
    elif method == ZUO_STENBY:
        sigma = Zuo_Stenby(T, self.Tc, self.Pc, self.omega)
    elif method == MIQUEU:
        sigma = Miqueu(T, self.Tc, self.Vc, self.omega)
    elif method == ALEEM:
        # Cpl and Vml may be stored either as callables (T -> value) or
        # as plain constants; support both forms.
        Cpl = self.Cpl(T) if hasattr(self.Cpl, '__call__') else self.Cpl
        Vml = self.Vml(T) if hasattr(self.Vml, '__call__') else self.Vml
        rhol = Vm_to_rho(Vml, self.MW)
        sigma = Aleem(T=T, MW=self.MW, Tb=self.Tb, rhol=rhol,
                      Hvap_Tb=self.Hvap_Tb, Cpl=Cpl)
    elif method in self.tabular_data:
        sigma = self.interpolate(T, method)
    # NOTE(review): an unrecognized method leaves `sigma` unbound and
    # raises NameError here — presumably callers only pass known methods.
    return sigma
[ "def", "calculate", "(", "self", ",", "T", ",", "method", ")", ":", "if", "method", "==", "STREFPROP", ":", "sigma0", ",", "n0", ",", "sigma1", ",", "n1", ",", "sigma2", ",", "n2", ",", "Tc", "=", "self", ".", "STREFPROP_coeffs", "sigma", "=", "REF...
42.235294
20.235294
def dump_credibilities(self, output):
    """Dump credibilities of all products, one JSON object per line.

    Args:
      output: a writable object.
    """
    for product in self.products:
        record = {
            "product_id": product.name,
            "credibility": self.credibility(product)
        }
        json.dump(record, output)
        output.write("\n")
[ "def", "dump_credibilities", "(", "self", ",", "output", ")", ":", "for", "p", "in", "self", ".", "products", ":", "json", ".", "dump", "(", "{", "\"product_id\"", ":", "p", ".", "name", ",", "\"credibility\"", ":", "self", ".", "credibility", "(", "p"...
28
11.75
def find_sources_in_image(self, filename, hdu_index=0, outfile=None, rms=None, bkg=None, max_summits=None,
                          innerclip=5, outerclip=4, cores=None, rmsin=None, bkgin=None, beam=None,
                          doislandflux=False, nopositive=False, nonegative=False, mask=None, lat=None,
                          imgpsf=None, blank=False, docov=True, cube_index=None):
    """
    Run the Aegean source finder.

    Parameters
    ----------
    filename : str or HDUList
        Image filename or HDUList.

    hdu_index : int
        The index of the FITS HDU (extension).

    outfile : str
        file for printing catalog (NOT a table, just a text file of my own design)

    rms : float
        Use this rms for the entire image (will also assume that background is 0)

    max_summits : int
        Fit up to this many components to each island (extras are included but not fit)

    innerclip, outerclip : float
        The seed (inner) and flood (outer) clipping level (sigmas).

    cores : int
        Number of CPU cores to use. None means all cores.

    rmsin, bkgin : str or HDUList
        Filename or HDUList for the noise and background images.
        If either are None, then it will be calculated internally.

    beam : (major, minor, pa)
        Floats representing the synthesised beam (degrees).
        Replaces whatever is given in the FITS header.
        If the FITS header has no BMAJ/BMIN then this is required.

    doislandflux : bool
        If True then each island will also be characterized.

    nopositive, nonegative : bool
        Whether to return positive or negative sources.
        Default nopositive=False, nonegative=True.

    mask : str
        The filename of a region file created by MIMAS.
        Islands outside of this region will be ignored.

    lat : float
        The latitude of the telescope (declination of zenith).

    imgpsf : str or HDUList
        Filename or HDUList for a psf image.

    blank : bool
        Cause the output image to be blanked where islands are found.

    docov : bool
        If True then include covariance matrix in the fitting process. (default=True)

    cube_index : int
        For image cubes, cube_index determines which slice is used.

    Returns
    -------
    sources : list
        List of sources found.
    """
    # Tell numpy to be quiet
    np.seterr(invalid='ignore')
    if cores is not None:
        if not (cores >= 1):
            raise AssertionError("cores must be one or more")

    # Load image/background/noise data into module-level global state
    # shared with the fitting helpers.
    self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, rms=rms, bkg=bkg,
                      cores=cores, verb=True, mask=mask, lat=lat, psf=imgpsf, blank=blank, docov=docov,
                      cube_index=cube_index)
    global_data = self.global_data
    rmsimg = global_data.rmsimg
    data = global_data.data_pix

    self.log.info("beam = {0:5.2f}'' x {1:5.2f}'' at {2:5.2f}deg".format(
        global_data.beam.a * 3600, global_data.beam.b * 3600, global_data.beam.pa))

    # stop people from doing silly things.
    if outerclip > innerclip:
        outerclip = innerclip

    self.log.info("seedclip={0}".format(innerclip))
    self.log.info("floodclip={0}".format(outerclip))

    isle_num = 0

    if cores == 1:  # single-threaded, no parallel processing
        queue = []
    else:
        queue = pprocess.Queue(limit=cores, reuse=1)
        fit_parallel = queue.manage(pprocess.MakeReusable(self._fit_islands))

    island_group = []
    group_size = 20
    for i, xmin, xmax, ymin, ymax in self._gen_flood_wrap(data, rmsimg, innerclip, outerclip, domask=True):
        # ignore empty islands
        # This should now be impossible to trigger
        if np.size(i) < 1:
            self.log.warn("Empty island detected, this should be imposisble.")
            continue
        isle_num += 1
        scalars = (innerclip, outerclip, max_summits)
        offsets = (xmin, xmax, ymin, ymax)
        island_data = IslandFittingData(isle_num, i, scalars, offsets, doislandflux)
        # If cores==1 run fitting in main process. Otherwise build up groups of islands
        # and submit to queue for subprocesses. Passing a group of islands is more
        # efficient than passing single islands to the subprocesses.
        if cores == 1:
            res = self._fit_island(island_data)
            queue.append(res)
        else:
            island_group.append(island_data)
            # If the island group is full queue it for the subprocesses to fit
            if len(island_group) >= group_size:
                fit_parallel(island_group)
                island_group = []

    # The last partially-filled island group also needs to be queued for fitting
    # (island_group stays empty when cores == 1, so fit_parallel is never
    # referenced on the single-core path).
    if len(island_group) > 0:
        fit_parallel(island_group)

    # Write the output to the output file
    if outfile:
        print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile)
        print(OutputSource.header, file=outfile)
    sources = []
    for srcs in queue:
        if srcs:  # ignore empty lists
            for src in srcs:
                # ignore sources that we have been told to ignore
                if (src.peak_flux > 0 and nopositive) or (src.peak_flux < 0 and nonegative):
                    continue
                sources.append(src)
                if outfile:
                    print(str(src), file=outfile)
    self.sources.extend(sources)
    return sources
[ "def", "find_sources_in_image", "(", "self", ",", "filename", ",", "hdu_index", "=", "0", ",", "outfile", "=", "None", ",", "rms", "=", "None", ",", "bkg", "=", "None", ",", "max_summits", "=", "None", ",", "innerclip", "=", "5", ",", "outerclip", "=",...
39.682759
25.37931
def board(self, *, _cache: bool = False) -> chess.Board:
    """
    Gets the starting position of the game.

    Unless the ``FEN`` header tag is set, this is the default starting
    position (for the ``Variant``).
    """
    headers = self.headers
    return headers.board()
[ "def", "board", "(", "self", ",", "*", ",", "_cache", ":", "bool", "=", "False", ")", "->", "chess", ".", "Board", ":", "return", "self", ".", "headers", ".", "board", "(", ")" ]
34.125
12.875
def put_collisions(self, block_id, collisions):
    """
    Put collision state for a particular block.  Any operations checked
    at this block_id that collide with the given collision state will be
    rejected.
    """
    # Deep-copy so later mutation of the caller's structure cannot
    # alter the stored state.
    snapshot = copy.deepcopy(collisions)
    self.collisions[block_id] = snapshot
[ "def", "put_collisions", "(", "self", ",", "block_id", ",", "collisions", ")", ":", "self", ".", "collisions", "[", "block_id", "]", "=", "copy", ".", "deepcopy", "(", "collisions", ")" ]
43.428571
11.714286
def get_dates(self):
    """Get DataCite dates.

    Returns the first date value when the parsed XML holds a single date
    dict, the raw list of date entries otherwise, or None when the
    record has no ``dates`` section.
    """
    if 'dates' in self.xml:
        date = self.xml['dates']['date']
        if isinstance(date, dict):
            # The original used `.values()[0]`, which only works on
            # Python 2 (py3 dict views are not subscriptable); take the
            # first value in a version-agnostic way.
            return next(iter(date.values()))
        return date
    return None
[ "def", "get_dates", "(", "self", ")", ":", "if", "'dates'", "in", "self", ".", "xml", ":", "if", "isinstance", "(", "self", ".", "xml", "[", "'dates'", "]", "[", "'date'", "]", ",", "dict", ")", ":", "return", "self", ".", "xml", "[", "'dates'", ...
38
13.285714
def add_maxjobs_category(self, categoryName, maxJobsNum):
    """
    Add a category to this DAG called categoryName with a maxjobs of maxJobsNum.
    @param categoryName: the name of the category.
    @param maxJobsNum: maximum number of concurrent jobs for the category.
    """
    entry = (str(categoryName), str(maxJobsNum))
    self.__maxjobs_categories.append(entry)
[ "def", "add_maxjobs_category", "(", "self", ",", "categoryName", ",", "maxJobsNum", ")", ":", "self", ".", "__maxjobs_categories", ".", "append", "(", "(", "str", "(", "categoryName", ")", ",", "str", "(", "maxJobsNum", ")", ")", ")" ]
51.5
22.5
def get_endpoints(self, endpoints=None):
    """
    Universal selector method to obtain specific endpoints from the
    data set.

    Parameters
    ----------
    endpoints: str or list, optional
        Desired valid endpoints for retrieval; a single endpoint may be
        given as a string.

    Notes
    -----
    Only allows JSON format (pandas not supported).

    Raises
    ------
    IEXEndpointError
        If an invalid endpoint is specified
    IEXSymbolError
        If a symbol is invalid
    IEXQueryError
        If issues arise during query
    """
    # Use None as the default: the original mutable default ([]) was
    # shared across calls.
    if endpoints is None:
        endpoints = []
    if isinstance(endpoints, str) and endpoints in self._ENDPOINTS:
        # Wrap the single endpoint name in a list. The original called
        # list(endpoints), which splits a string into characters (e.g.
        # "chart" -> ['c','h','a','r','t']) and then always failed the
        # subset check below.
        endpoints = [endpoints]
    if not endpoints or not set(endpoints).issubset(self._ENDPOINTS):
        raise IEXEndpointError("Please provide a valid list of endpoints")
    elif len(endpoints) > 10:
        raise ValueError("Please input up to 10 valid endpoints")
    self.optional_params = {}
    self.endpoints = endpoints
    json_data = self.fetch(fmt_p=no_pandas)
    for symbol in self.symbols:
        if symbol not in json_data:
            raise IEXSymbolError(symbol)
    return json_data[self.symbols[0]] if self.n_symbols == 1 else json_data
[ "def", "get_endpoints", "(", "self", ",", "endpoints", "=", "[", "]", ")", ":", "if", "isinstance", "(", "endpoints", ",", "str", ")", "and", "endpoints", "in", "self", ".", "_ENDPOINTS", ":", "endpoints", "=", "list", "(", "endpoints", ")", "if", "not...
35.25
17.305556
def ips(self):
    """Return all the possible ips of this request, public and private."""
    header_names = ['X_FORWARDED_FOR', 'CLIENT_IP', 'X_REAL_IP', 'X_FORWARDED',
                    'X_CLUSTER_CLIENT_IP', 'FORWARDED_FOR', 'FORWARDED', 'VIA',
                    'REMOTE_ADDR']
    found = []
    for name in header_names:
        # Check both the parsed request headers and the raw environ,
        # in that order, for each candidate name.
        for raw in (self.get_header(name, ''), self.environ.get(name, '')):
            if raw:
                found.extend(part.strip() for part in raw.split(','))
    return found
[ "def", "ips", "(", "self", ")", ":", "r", "=", "[", "]", "names", "=", "[", "'X_FORWARDED_FOR'", ",", "'CLIENT_IP'", ",", "'X_REAL_IP'", ",", "'X_FORWARDED'", ",", "'X_CLUSTER_CLIENT_IP'", ",", "'FORWARDED_FOR'", ",", "'FORWARDED'", ",", "'VIA'", ",", "'REMO...
35.294118
23
def transmit_content_metadata(username, channel_code, channel_pk):
    """
    Task to send content metadata to each linked integrated channel.

    Arguments:
        username (str): The username of the User to be used for making API
            requests to retrieve content metadata.
        channel_code (str): Capitalized identifier for the integrated channel.
        channel_pk (str): Primary key for identifying integrated channel.
    """
    started_at = time.time()
    requesting_user = User.objects.get(username=username)
    channel = INTEGRATED_CHANNEL_CHOICES[channel_code].objects.get(pk=channel_pk)
    LOGGER.info('Transmitting content metadata to integrated channel using configuration: [%s]', channel)

    try:
        channel.transmit_content_metadata(requesting_user)
    except Exception:  # pylint: disable=broad-except
        # Best-effort task: log the failure with full traceback and still
        # record the duration below.
        LOGGER.exception(
            'Transmission of content metadata failed for user [%s] and for integrated '
            'channel with code [%s] and id [%s].', username, channel_code, channel_pk
        )

    elapsed = time.time() - started_at
    LOGGER.info(
        'Content metadata transmission task for integrated channel configuration [%s] took [%s] seconds',
        channel, elapsed
    )
[ "def", "transmit_content_metadata", "(", "username", ",", "channel_code", ",", "channel_pk", ")", ":", "start", "=", "time", ".", "time", "(", ")", "api_user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "username", ")", "integrated_channe...
45.888889
30.777778
def render_log_filename(ti, try_number, filename_template):
    """
    Given task instance, try_number, filename_template, return the rendered log
    filename

    :param ti: task instance
    :param try_number: try_number of the task
    :param filename_template: filename template, which can be jinja template or
        python string template
    """
    str_template, jinja_template = parse_template_string(filename_template)
    if jinja_template:
        # Jinja path: render with the full task-instance context.
        context = ti.get_template_context()
        context['try_number'] = try_number
        return jinja_template.render(**context)
    # Plain string path: format with the standard fields.
    return str_template.format(
        dag_id=ti.dag_id,
        task_id=ti.task_id,
        execution_date=ti.execution_date.isoformat(),
        try_number=try_number,
    )
[ "def", "render_log_filename", "(", "ti", ",", "try_number", ",", "filename_template", ")", ":", "filename_template", ",", "filename_jinja_template", "=", "parse_template_string", "(", "filename_template", ")", "if", "filename_jinja_template", ":", "jinja_context", "=", ...
43.6
20.8
def touch():
    """ Create a .vacationrc file if none exists. """
    rc_path = get_rc_path()
    if os.path.isfile(rc_path):
        return
    # Opening in append mode creates the file without truncating.
    open(rc_path, 'a').close()
    print('Created file: {}'.format(rc_path))
[ "def", "touch", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "get_rc_path", "(", ")", ")", ":", "open", "(", "get_rc_path", "(", ")", ",", "'a'", ")", ".", "close", "(", ")", "print", "(", "'Created file: {}'", ".", "format", ...
40.2
8.8
def get_buy_price(self, **params):
    """https://developers.coinbase.com/api/v2#get-buy-price"""
    pair = params.get('currency_pair', 'BTC-USD')
    response = self._get('v2', 'prices', pair, 'buy', params=params)
    return self._make_api_object(response, APIObject)
[ "def", "get_buy_price", "(", "self", ",", "*", "*", "params", ")", ":", "currency_pair", "=", "params", ".", "get", "(", "'currency_pair'", ",", "'BTC-USD'", ")", "response", "=", "self", ".", "_get", "(", "'v2'", ",", "'prices'", ",", "currency_pair", "...
60
17.2
def validate_protected_resource_request(self, uri, http_method='GET',
                                        body=None, headers=None, realms=None):
    """Create a request token response, with a new request token if valid.

    :param uri: The full URI of the token request.
    :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
    :param body: The request body as a string.
    :param headers: The request headers as a dict.
    :param realms: A list of realms the resource is protected under.
                   This will be supplied to the ``validate_realms``
                   method of the request validator.
    :returns: A tuple of 2 elements.
              1. True if valid, False otherwise.
              2. An oauthlib.common.Request object.
    """
    try:
        request = self._create_request(uri, http_method, body, headers)
    except errors.OAuth1Error:
        return False, None

    try:
        self._check_transport_security(request)
        self._check_mandatory_parameters(request)
    except errors.OAuth1Error:
        return False, request

    if not request.resource_owner_key:
        return False, request

    if not self.request_validator.check_access_token(
            request.resource_owner_key):
        return False, request

    if not self.request_validator.validate_timestamp_and_nonce(
            request.client_key, request.timestamp, request.nonce, request,
            access_token=request.resource_owner_key):
        return False, request

    # The server SHOULD return a 401 (Unauthorized) status code when
    # receiving a request with invalid client credentials.
    # Note: This is postponed in order to avoid timing attacks, instead
    # a dummy client is assigned and used to maintain near constant
    # time request verification.
    #
    # Note that early exit would enable client enumeration
    valid_client = self.request_validator.validate_client_key(
        request.client_key, request)
    if not valid_client:
        request.client_key = self.request_validator.dummy_client

    # The server SHOULD return a 401 (Unauthorized) status code when
    # receiving a request with invalid or expired token.
    # Note: This is postponed in order to avoid timing attacks, instead
    # a dummy token is assigned and used to maintain near constant
    # time request verification.
    #
    # Note that early exit would enable resource owner enumeration
    valid_resource_owner = self.request_validator.validate_access_token(
        request.client_key, request.resource_owner_key, request)
    if not valid_resource_owner:
        request.resource_owner_key = self.request_validator.dummy_access_token

    # Note that `realm`_ is only used in authorization headers and how
    # it should be interepreted is not included in the OAuth spec.
    # However they could be seen as a scope or realm to which the
    # client has access and as such every client should be checked
    # to ensure it is authorized access to that scope or realm.
    # .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
    #
    # Note that early exit would enable client realm access enumeration.
    #
    # The require_realm indicates this is the first step in the OAuth
    # workflow where a client requests access to a specific realm.
    # This first step (obtaining request token) need not require a realm
    # and can then be identified by checking the require_resource_owner
    # flag and abscence of realm.
    #
    # Clients obtaining an access token will not supply a realm and it will
    # not be checked. Instead the previously requested realm should be
    # transferred from the request token to the access token.
    #
    # Access to protected resources will always validate the realm but note
    # that the realm is now tied to the access token and not provided by
    # the client.
    valid_realm = self.request_validator.validate_realms(
        request.client_key, request.resource_owner_key, request,
        uri=request.uri, realms=realms)

    valid_signature = self._check_signature(request)

    # log the results to the validator_log
    # this lets us handle internal reporting and analysis
    request.validator_log['client'] = valid_client
    request.validator_log['resource_owner'] = valid_resource_owner
    request.validator_log['realm'] = valid_realm
    request.validator_log['signature'] = valid_signature

    # We delay checking validity until the very end, using dummy values for
    # calculations and fetching secrets/keys to ensure the flow of every
    # request remains almost identical regardless of whether valid values
    # have been supplied. This ensures near constant time execution and
    # prevents malicious users from guessing sensitive information
    v = all((valid_client, valid_resource_owner, valid_realm,
             valid_signature))
    if not v:
        log.info("[Failure] request verification failed.")
        log.info("Valid client: %s", valid_client)
        log.info("Valid token: %s", valid_resource_owner)
        log.info("Valid realm: %s", valid_realm)
        log.info("Valid signature: %s", valid_signature)
    return v, request
[ "def", "validate_protected_resource_request", "(", "self", ",", "uri", ",", "http_method", "=", "'GET'", ",", "body", "=", "None", ",", "headers", "=", "None", ",", "realms", "=", "None", ")", ":", "try", ":", "request", "=", "self", ".", "_create_request"...
50.216216
24.945946
def attach_file(self, locator_or_path, path=None, **kwargs):
    """
    Find a file field on the page and attach a file given its path. The file
    field can be found via its name, id, or label text. ::

        page.attach_file(locator, "/path/to/file.png")

    Args:
        locator_or_path (str): Which field to attach the file to, or the path
            of the file that will be attached.
        path (str, optional): The path of the file that will be attached.
            Defaults to ``locator_or_path``.
        **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.

    Raises:
        FileNotFound: No file exists at the given path.
    """
    # With one argument, the sole value is the file path and the field
    # is located by default rules.
    if path is None:
        locator = None
        path = locator_or_path
    else:
        locator = locator_or_path

    if not os.path.isfile(path):
        raise FileNotFound("cannot attach file, {0} does not exist".format(path))
    field = self.find("file_field", locator, **kwargs)
    field.set(path)
[ "def", "attach_file", "(", "self", ",", "locator_or_path", ",", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "path", "is", "None", ":", "locator", ",", "path", "=", "None", ",", "locator_or_path", "else", ":", "locator", "=", "locator_...
37.259259
25.555556
def migrateUp(self):
    """
    Recreate the hooks in the site store to trigger this SubScheduler.
    """
    # Reschedule based on the most recent timed event, if any exist.
    latest = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending)
    if latest is None:
        return
    self._transientSchedule(latest.time, None)
[ "def", "migrateUp", "(", "self", ")", ":", "te", "=", "self", ".", "store", ".", "findFirst", "(", "TimedEvent", ",", "sort", "=", "TimedEvent", ".", "time", ".", "descending", ")", "if", "te", "is", "not", "None", ":", "self", ".", "_transientSchedule...
38.571429
16.571429
def updateItemIcon(self, item):
    """
    Updates the items icon based on its state.

    :param      item | <QTreeWidgetItem>
    """
    self.setUpdatesEnabled(False)
    # Auto-fit the first column, but never let it shrink below its
    # previous width.
    previous_width = self.columnWidth(0)
    self.resizeColumnToContents(0)
    if self.columnWidth(0) < previous_width:
        self.setColumnWidth(0, previous_width)
    self.setUpdatesEnabled(True)
[ "def", "updateItemIcon", "(", "self", ",", "item", ")", ":", "# update the column width\r", "self", ".", "setUpdatesEnabled", "(", "False", ")", "colwidth", "=", "self", ".", "columnWidth", "(", "0", ")", "self", ".", "resizeColumnToContents", "(", "0", ")", ...
32
7.466667
def create_grid(self, grid_width, grid_height):
    """Create a grid layout with stacked widgets.

    Parameters
    ----------
    grid_width : int
        the width of the grid
    grid_height : int
        the height of the grid
    """
    # NOTE(review): uses xrange, so this module targets Python 2.
    self.grid_layout = QGridLayout()
    self.setLayout(self.grid_layout)
    self.grid_layout.setSpacing(1)
    # Map of (row, col) -> FieldWidget for later lookup.
    self.grid_wgs = {}
    for i in xrange(grid_height):
        for j in xrange(grid_width):
            self.grid_wgs[(i, j)] = FieldWidget()
            self.grid_layout.addWidget(self.grid_wgs[(i, j)], i, j)
[ "def", "create_grid", "(", "self", ",", "grid_width", ",", "grid_height", ")", ":", "self", ".", "grid_layout", "=", "QGridLayout", "(", ")", "self", ".", "setLayout", "(", "self", ".", "grid_layout", ")", "self", ".", "grid_layout", ".", "setSpacing", "("...
33.777778
11
def compose(*funcs):
    '''
    Compose an ordered list of functions. Args of a,b,c,d evaluates as
    a(b(c(d(ctx))))
    '''
    def composed(ctx):
        # Rightmost function consumes the context; the rest are applied
        # right to left to each intermediate result.
        result = funcs[-1](ctx)
        for fn in funcs[-2::-1]:
            result = fn(result)
        return result
    return composed
[ "def", "compose", "(", "*", "funcs", ")", ":", "def", "_compose", "(", "ctx", ")", ":", "# last func gets context, rest get result of previous func", "_result", "=", "funcs", "[", "-", "1", "]", "(", "ctx", ")", "for", "f", "in", "reversed", "(", "funcs", ...
29.25
22.75
def bz2_compress_stream(src, level=9):
    """Lazily bz2-compress data from `src`.

    Args:
        src (iterable): iterable that yields blocks of data to compress
        level (int): compression level (1-9) default is 9

    Yields:
        blocks of compressed data
    """
    engine = bz2.BZ2Compressor(level)
    for chunk in src:
        compressed = engine.compress(chunk)
        # The compressor buffers internally; only yield when it emits.
        if compressed:
            yield compressed
    # Flush whatever remains buffered at end of stream.
    yield engine.flush()
[ "def", "bz2_compress_stream", "(", "src", ",", "level", "=", "9", ")", ":", "compressor", "=", "bz2", ".", "BZ2Compressor", "(", "level", ")", "for", "block", "in", "src", ":", "encoded", "=", "compressor", ".", "compress", "(", "block", ")", "if", "en...
25.647059
18.176471
def get_user_info(tokens, uk):
    '''Fetch part of a user's profile information.

    For example the avatar, username, self-introduction, and follower
    count. This endpoint can query any user's info, as long as his/her
    uk is known.
    '''
    url = ''.join([
        const.PAN_URL,
        'pcloud/user/getinfo?channel=chunlei&clienttype=0&web=1',
        '&bdstoken=', tokens['bdstoken'],
        '&query_uk=', uk,
        '&t=', util.timestamp(),
    ])
    req = net.urlopen(url)
    if not req:
        return None
    info = json.loads(req.data.decode())
    if info and info['errno'] == 0:
        return info['user_info']
    return None
[ "def", "get_user_info", "(", "tokens", ",", "uk", ")", ":", "url", "=", "''", ".", "join", "(", "[", "const", ".", "PAN_URL", ",", "'pcloud/user/getinfo?channel=chunlei&clienttype=0&web=1'", ",", "'&bdstoken='", ",", "tokens", "[", "'bdstoken'", "]", ",", "'&q...
26
17.157895
def create(self, model_obj):
    """Write a record to the dict repository and return it."""
    # Populate auto-generated fields (e.g. counters) before persisting.
    model_obj = self._set_auto_fields(model_obj)

    # Key the record by its identity field and store it under this
    # repository's schema, holding the connection lock for the write.
    id_field_name = self.entity_cls.meta_.id_field.field_name
    identifier = model_obj[id_field_name]
    with self.conn['lock']:
        self.conn['data'][self.schema_name][identifier] = model_obj

    return model_obj
[ "def", "create", "(", "self", ",", "model_obj", ")", ":", "# Update the value of the counters", "model_obj", "=", "self", ".", "_set_auto_fields", "(", "model_obj", ")", "# Add the entity to the repository", "identifier", "=", "model_obj", "[", "self", ".", "entity_cl...
37.636364
17.909091
def requeue(self):
    """Loop endlessly and requeue expired jobs."""
    # Interval is configured in milliseconds; gevent.sleep wants seconds.
    interval_ms = float(
        self.config.get('sharq', 'job_requeue_interval'))
    delay_seconds = interval_ms / 1000.00
    while True:
        self.sq.requeue()
        gevent.sleep(delay_seconds)
[ "def", "requeue", "(", "self", ")", ":", "job_requeue_interval", "=", "float", "(", "self", ".", "config", ".", "get", "(", "'sharq'", ",", "'job_requeue_interval'", ")", ")", "while", "True", ":", "self", ".", "sq", ".", "requeue", "(", ")", "gevent", ...
39.142857
13.428571
def is_mass_balanced(reaction):
    """Confirm that a reaction is mass balanced.

    Sums ``coefficient * amount`` for every element over all metabolites;
    the reaction is balanced when every element nets to zero. Returns
    False immediately when any metabolite lacks parsed element data,
    since the balance cannot be verified.
    """
    balance = defaultdict(int)
    for metabolite, coefficient in reaction.metabolites.items():
        # Without a parsed formula the check is impossible.
        if metabolite.elements is None or len(metabolite.elements) == 0:
            return False
        for element, amount in metabolite.elements.items():
            balance[element] += coefficient * amount
    # Plain dict methods replace the six-style iteritems/itervalues
    # helpers; .items()/.values() behave identically on Python 2 and 3.
    return all(amount == 0 for amount in balance.values())
[ "def", "is_mass_balanced", "(", "reaction", ")", ":", "balance", "=", "defaultdict", "(", "int", ")", "for", "metabolite", ",", "coefficient", "in", "iteritems", "(", "reaction", ".", "metabolites", ")", ":", "if", "metabolite", ".", "elements", "is", "None"...
50
16.555556
def refresh_rooms(self):
    """Re-query GET /joined_rooms and refresh the local room cache."""
    joined = self.user_api.get_joined_rooms()["joined_rooms"]
    for room_id in joined:
        self._rooms[room_id] = MatrixRoom(room_id, self.user_api)
[ "def", "refresh_rooms", "(", "self", ")", ":", "for", "room_id", "in", "self", ".", "user_api", ".", "get_joined_rooms", "(", ")", "[", "\"joined_rooms\"", "]", ":", "self", ".", "_rooms", "[", "room_id", "]", "=", "MatrixRoom", "(", "room_id", ",", "sel...
56.25
19.25
def create_db_in_shard(db_name, shard, client=None):
    """
    In a sharded cluster, create a database in a particular shard.

    :param db_name: name of the database to create (must not exist yet).
    :param shard: id of the shard that should be the database's primary.
    :param client: optional pymongo client; a default ``MongoClient`` is
        created when omitted.
    :return: a human-readable success message.
    :raises RuntimeError: if flushing the router config or moving the
        primary fails.
    :raises ValueError: if the shard is unknown, the database already
        exists, or leftover collections are found after creation.
    """
    client = client or pymongo.MongoClient()
    # flush the router config to ensure it's not stale
    res = client.admin.command('flushRouterConfig')
    if not res.get('ok'):
        raise RuntimeError("unable to flush router config")
    if shard not in get_ids(client.config.shards):
        raise ValueError(f"Unknown shard {shard}")
    if db_name in get_ids(client.config.databases):
        raise ValueError("database already exists")
    # MongoDB doesn't have a 'create database' command, so insert an
    # item into a collection and then drop the collection.
    # NOTE(review): Collection.insert and Database.collection_names are
    # legacy pymongo APIs (removed in pymongo 4) - confirm the pinned
    # pymongo version still provides them.
    client[db_name].foo.insert({'foo': 1})
    client[db_name].foo.drop()
    if client[db_name].collection_names():
        raise ValueError("database has collections")
    # config.databases keys its documents by database name, so a bare
    # string works as the find_one filter.
    primary = client['config'].databases.find_one(db_name)['primary']
    if primary != shard:
        res = client.admin.command(
            'movePrimary',
            value=db_name,
            to=shard)
        if not res.get('ok'):
            raise RuntimeError(str(res))
    # NOTE(review): `hostname` is a module-level name not visible in this
    # block - presumably set at import time; verify it exists.
    return (
        f"Successfully created {db_name} in {shard} via {client.nodes} "
        f"from {hostname}")
[ "def", "create_db_in_shard", "(", "db_name", ",", "shard", ",", "client", "=", "None", ")", ":", "client", "=", "client", "or", "pymongo", ".", "MongoClient", "(", ")", "# flush the router config to ensure it's not stale", "res", "=", "client", ".", "admin", "."...
43.535714
12.892857
def getTerms(self, term=None, getFingerprint=None, startIndex=0, maxResults=10):
    """Retrieve term objects from the retina.

    Args:
        term, str: A term in the retina (optional)
        getFingerprint, bool: Configure if the fingerprint should be
            returned as part of the results (optional)
        startIndex, int: The start-index for pagination (optional)
        maxResults, int: Max results per page (optional)
    Returns:
        list of Term
    Raises:
        CorticalioException: if the request was not successful
    """
    terms_api = self._terms
    return terms_api.getTerm(
        self._retina, term, getFingerprint, startIndex, maxResults)
[ "def", "getTerms", "(", "self", ",", "term", "=", "None", ",", "getFingerprint", "=", "None", ",", "startIndex", "=", "0", ",", "maxResults", "=", "10", ")", ":", "return", "self", ".", "_terms", ".", "getTerm", "(", "self", ".", "_retina", ",", "ter...
49.692308
26.923077
def kube_resourcequota(self, metric, scraper_config):
    """Report quota and current usage by resource type.

    For each sample of a supported metric, emit a gauge named
    ``<namespace>.resourcequota.<resource>.<used|limit>`` tagged with the
    sample's namespace and resourcequota labels plus any custom tags.
    Unsupported metric types are logged as errors.
    """
    metric_base_name = scraper_config['namespace'] + '.resourcequota.{}.{}'
    # kube-state-metrics reports type="used" / type="hard"; expose them
    # as "used" / "limit".
    suffixes = {'used': 'used', 'hard': 'limit'}
    if metric.type in METRIC_TYPES:
        # Hoisted out of the loop: identical for every sample.
        custom_tags = scraper_config['custom_tags']
        for sample in metric.samples:
            labels = sample[self.SAMPLE_LABELS]
            mtype = labels.get("type")
            resource = labels.get("resource")
            tags = [
                self._label_to_tag("namespace", labels, scraper_config),
                self._label_to_tag("resourcequota", labels, scraper_config),
            ] + custom_tags
            self.gauge(metric_base_name.format(resource, suffixes[mtype]), sample[self.SAMPLE_VALUE], tags)
    else:
        # FIX: use lazy %-style logging args instead of eager string
        # formatting, so the message is only built when emitted.
        self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
[ "def", "kube_resourcequota", "(", "self", ",", "metric", ",", "scraper_config", ")", ":", "metric_base_name", "=", "scraper_config", "[", "'namespace'", "]", "+", "'.resourcequota.{}.{}'", "suffixes", "=", "{", "'used'", ":", "'used'", ",", "'hard'", ":", "'limi...
62.933333
27.666667
def new(self):
    # type: () -> None
    '''
    Initialize this object as a fresh UDF Anchor Volume Structure.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Structure already initialized')

    tag = UDFTag()
    # FIXME: we should let the user set serial_number
    tag.new(2)
    self.desc_tag = tag

    # Both descriptor lengths are fixed; the extent locations are
    # placeholders that get filled in once the layout is known.
    self.main_vd_length = 32768
    self.main_vd_extent = 0
    self.reserve_vd_length = 32768
    self.reserve_vd_extent = 0

    self._initialized = True
[ "def", "new", "(", "self", ")", ":", "# type: () -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'UDF Anchor Volume Structure already initialized'", ")", "self", ".", "desc_tag", "=", "UDFTag", "(", "...
31.47619
24.428571
def enter(self):
    """Confirm the current line to the controller.

    On entering this state a
    :class:`AYABInterface.communication.host_messages.LineConfirmation`
    for ``self._line_number`` is sent, and that number is recorded as the
    communication's last requested line number.
    """
    line = self._line_number
    communication = self._communication
    communication.last_requested_line_number = line
    communication.send(LineConfirmation, line)
[ "def", "enter", "(", "self", ")", ":", "self", ".", "_communication", ".", "last_requested_line_number", "=", "self", ".", "_line_number", "self", ".", "_communication", ".", "send", "(", "LineConfirmation", ",", "self", ".", "_line_number", ")" ]
42.25
19.916667
def ecg_wave_detector(ecg, rpeaks):
    """
    Returns the localization of the P, Q, S, T waves. This function needs massive help!

    Parameters
    ----------
    ecg : list or ndarray
        ECG signal (preferably filtered).
    rpeaks : list or ndarray
        R peaks localization.

    Returns
    ----------
    ecg_waves : dict
        Wave peak location indices under the keys ``P_Waves``,
        ``Q_Waves``, ``Q_Waves_Onsets``, ``S_Waves``, ``T_Waves``,
        ``T_Waves_Onsets`` and ``T_Waves_Ends``.

    Example
    ----------
    >>> import neurokit as nk
    >>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
    >>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
    >>> rpeaks = ecg["ECG"]["R_Peaks"]
    >>> ecg = ecg["df"]["ECG_Filtered"]
    >>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)

    Notes
    ----------
    A typical heartbeat consists of a P wave (atrial depolarization), a
    QRS complex (ventricular depolarization; largest amplitude) and a T
    wave (ventricular repolarization).

    *Authors*

    - `Dominique Makowski <https://dominiquemakowski.github.io/>`_
    """
    q_waves = []
    p_waves = []
    q_waves_starts = []
    s_waves = []
    t_waves = []
    t_waves_starts = []
    t_waves_ends = []
    # Walk the R peaks; the last few are skipped so the index+1
    # look-ahead below stays in range.
    for index, rpeak in enumerate(rpeaks[:-3]):
        # --- Waves before the R peak: P, Q and the Q onset ---
        try:
            # Second half of the previous R-R interval, reversed so that
            # increasing indices move backwards in time from the R peak.
            epoch_before = np.array(ecg)[int(rpeaks[index-1]):int(rpeak)]
            epoch_before = epoch_before[int(len(epoch_before)/2):len(epoch_before)]
            epoch_before = list(reversed(epoch_before))

            # Q: the extremum closest to R; P: the maximum beyond it.
            q_wave_index = np.min(find_peaks(epoch_before))
            q_wave = rpeak - q_wave_index
            p_wave_index = q_wave_index + np.argmax(epoch_before[q_wave_index:])
            p_wave = rpeak - p_wave_index

            # Q onset: the derivative peak nearest the middle of the
            # P-Q segment.
            inter_pq = epoch_before[q_wave_index:p_wave_index]
            inter_pq_derivative = np.gradient(inter_pq, 2)
            q_start_index = find_closest_in_list(len(inter_pq_derivative)/2, find_peaks(inter_pq_derivative))
            q_start = q_wave - q_start_index

            q_waves.append(q_wave)
            p_waves.append(p_wave)
            q_waves_starts.append(q_start)
        except ValueError:
            # No usable extrema in this epoch - skip this beat.
            pass
        except IndexError:
            pass

        # --- Waves after the R peak: S, T, T onset and T end ---
        try:
            # First half of the following R-R interval.
            epoch_after = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
            epoch_after = epoch_after[0:int(len(epoch_after)/2)]

            # S: the extremum right after R; T: the maximum beyond it.
            s_wave_index = np.min(find_peaks(epoch_after))
            s_wave = rpeak + s_wave_index
            t_wave_index = s_wave_index + np.argmax(epoch_after[s_wave_index:])
            t_wave = rpeak + t_wave_index

            # T onset: the derivative peak nearest the middle of the
            # S-T segment.
            inter_st = epoch_after[s_wave_index:t_wave_index]
            inter_st_derivative = np.gradient(inter_st, 2)
            t_start_index = find_closest_in_list(len(inter_st_derivative)/2, find_peaks(inter_st_derivative))
            t_start = s_wave + t_start_index
            # T end: the first extremum after the T peak.
            t_end = np.min(find_peaks(epoch_after[t_wave_index:]))
            t_end = t_wave + t_end

            s_waves.append(s_wave)
            t_waves.append(t_wave)
            t_waves_starts.append(t_start)
            t_waves_ends.append(t_end)
        except ValueError:
            pass
        except IndexError:
            pass

    # NOTE: an earlier quarter/middle-window implementation of the P/Q/T
    # detection (plus a TODO about extracting the QT interval) used to be
    # kept here as commented-out code; removed for readability - see
    # version control history.

    ecg_waves = {"T_Waves": t_waves, "P_Waves": p_waves, "Q_Waves": q_waves, "S_Waves": s_waves, "Q_Waves_Onsets": q_waves_starts, "T_Waves_Onsets": t_waves_starts, "T_Waves_Ends": t_waves_ends}
    return(ecg_waves)
[ "def", "ecg_wave_detector", "(", "ecg", ",", "rpeaks", ")", ":", "q_waves", "=", "[", "]", "p_waves", "=", "[", "]", "q_waves_starts", "=", "[", "]", "s_waves", "=", "[", "]", "t_waves", "=", "[", "]", "t_waves_starts", "=", "[", "]", "t_waves_ends", ...
36.695946
27.763514
def get_nested(self, *args):
    """Follow ``args`` as a key path into ``self.data``.

    Returns the value at the end of the path, or ``None`` as soon as any
    key along the path is missing.
    """
    current = self.data
    for key in args:
        if key in current:
            current = current[key]
        else:
            return None
    return current
[ "def", "get_nested", "(", "self", ",", "*", "args", ")", ":", "data", "=", "self", ".", "data", "for", "key", "in", "args", ":", "if", "key", "not", "in", "data", ":", "return", "None", "data", "=", "data", "[", "key", "]", "return", "data" ]
26.6
12.2
def edges(self, nbunch=None, keys=False):
    """Yield the edges of this :class:`BreakpointGraph`.

    Delegates to :meth:`BreakpointGraph._BreakpointGraph__edges`.

    :param nbunch: optional vertex; restrict iteration to its outgoing
        edges. When omitted, all edges are yielded.
    :type nbunch: any hashable python object
    :param keys: when ``True``, yield unique edge ids alongside edges.
    :type keys: ``Boolean``
    :return: generator over edges in current :class:`BreakpointGraph`
    :rtype: ``generator``
    """
    yield from self.__edges(nbunch=nbunch, keys=keys)
[ "def", "edges", "(", "self", ",", "nbunch", "=", "None", ",", "keys", "=", "False", ")", ":", "for", "entry", "in", "self", ".", "__edges", "(", "nbunch", "=", "nbunch", ",", "keys", "=", "keys", ")", ":", "yield", "entry" ]
50.428571
26.571429
def init_auth(username, password):
    """Validate MyAnimeList credentials and return them as a tuple.

    :param username: Your MyAnimeList account username.
    :param password: Your MyAnimeList account password.
    :return: ``(username, password)`` with surrounding whitespace removed.
    :raises ValueError: if the credentials fail verification.
    """
    credentials = (username.strip(), password.strip())
    # Guard clause: reject bad credentials up front.
    if not helpers.verif_auth(credentials, header):
        raise ValueError(constants.INVALID_CREDENTIALS)
    return credentials
[ "def", "init_auth", "(", "username", ",", "password", ")", ":", "username", "=", "username", ".", "strip", "(", ")", "password", "=", "password", ".", "strip", "(", ")", "credentials", "=", "(", "username", ",", "password", ")", "if", "helpers", ".", "...
40
10.928571
def autocorrplot(trace, vars=None, fontmap = None, max_lag=100):
    """Bar plot of the autocorrelation function for a trace.

    :param trace: a MultiTrace (multiple chains) or a single NpTrace.
    :param vars: variable names to plot; defaults to all varnames of the
        first trace.  NOTE(review): shadows the `vars` builtin.
    :param fontmap: map from "level" to tick-label font size.
    :param max_lag: maximum lag to display (clipped to sample length).
    """
    try:
        # A MultiTrace exposes its chains via .traces ...
        traces = trace.traces
    except AttributeError:
        # ... while a single NpTrace is wrapped as a one-chain list.
        traces = [trace]
    if fontmap is None: fontmap = {1:10, 2:8, 3:6, 4:5, 5:4}
    if vars is None:
        vars = traces[0].varnames
    # Extract sample data: one {varname: samples} dict per chain.
    samples = [{v:trace[v] for v in vars} for trace in traces]
    chains = len(traces)
    n = len(samples[0])
    # One subplot row per variable, one column per chain.
    f, ax = subplots(n, chains, squeeze=False)
    # Cannot ask acorr for more lags than there are samples.
    max_lag = min(len(samples[0][vars[0]])-1, max_lag)
    for i, v in enumerate(vars):
        # NOTE(review): xrange is Python-2-only.
        for j in xrange(chains):
            d = np.squeeze(samples[j][v])
            ax[i,j].acorr(d, detrend=mlab.detrend_mean, maxlags=max_lag)
            if not j:
                # Only the leftmost column gets a y label.
                ax[i, j].set_ylabel("correlation")
            ax[i, j].set_xlabel("lag")
            if chains > 1:
                ax[i, j].set_title("chain {0}".format(j+1))
    # Smaller tick labels (applies to the current axes only).
    tlabels = gca().get_xticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
    tlabels = gca().get_yticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
[ "def", "autocorrplot", "(", "trace", ",", "vars", "=", "None", ",", "fontmap", "=", "None", ",", "max_lag", "=", "100", ")", ":", "try", ":", "# MultiTrace", "traces", "=", "trace", ".", "traces", "except", "AttributeError", ":", "# NpTrace", "traces", "...
23.9375
22.770833
def from_json(self, json):
    """Create a typed resource out of JSON data.

    :param json: JSON dict carrying a ``sys.type`` discriminator.
    :return: resource whose type matches the JSON data (``None`` when
        the type is not recognized).
    """
    res_type = json['sys']['type']
    # Dispatch table instead of an if/elif chain; unknown types fall
    # through to None, matching the original implicit return.
    dispatch = {
        ResourceType.Array.value: self.create_array,
        ResourceType.Entry.value: self.create_entry,
        ResourceType.Asset.value: ResourceFactory.create_asset,
        ResourceType.ContentType.value: ResourceFactory.create_content_type,
        ResourceType.Space.value: ResourceFactory.create_space,
    }
    handler = dispatch.get(res_type)
    if handler is not None:
        return handler(json)
[ "def", "from_json", "(", "self", ",", "json", ")", ":", "res_type", "=", "json", "[", "'sys'", "]", "[", "'type'", "]", "if", "ResourceType", ".", "Array", ".", "value", "==", "res_type", ":", "return", "self", ".", "create_array", "(", "json", ")", ...
40.166667
13.222222
def _get_socket(self, sid): """Return the socket object for a given session.""" try: s = self.sockets[sid] except KeyError: raise KeyError('Session not found') if s.closed: del self.sockets[sid] raise KeyError('Session is disconnected') return s
[ "def", "_get_socket", "(", "self", ",", "sid", ")", ":", "try", ":", "s", "=", "self", ".", "sockets", "[", "sid", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "'Session not found'", ")", "if", "s", ".", "closed", ":", "del", "self", "."...
32.4
13.5
def make_form(fields=None, layout=None, layout_class=None, base_class=None,
              get_form_field=None, name=None, rules=None, **kwargs):
    """
    Build a Form subclass from a declarative description.

    :param fields: sequence of field descriptions; each entry is either a
        ``BaseField`` instance or a dict such as
        ``{'name':'x', 'type':'str', 'label':'X', 'rules':{'required': ...}}``
        (rule values may carry a ``back|front`` suffix: back means server
        side, front means front side).
    :param layout: optional layout description, e.g. ``{'rows':[...]}``.
    :param layout_class: layout class, or a name registered under
        ``[FORM_LAYOUT_CLASSES]`` in settings.ini; defaults to
        BootstrapLayout.
    :param base_class: base form class; ``Form`` when omitted.
    :param get_form_field: ``callback(name, field)`` returning a custom
        field instance, or None to fall back to default construction.
    :param name: class name of the generated form (named forms can be
        cached by callers).
    :param rules: optional validation rules.
    :return: a newly created Form subclass.
    """
    from uliweb.utils.sorteddict import SortedDict

    field_factory = get_form_field or (lambda name, f: None)

    # Collect field instances, preserving declaration order.
    props = SortedDict({})
    for f in fields or []:
        if isinstance(f, BaseField):
            props[f.name] = field_factory(f.name, f) or f
        else:
            props[f['name']] = field_factory(f['name'], f) or make_field(**f)

    # Copy over the optional class attributes that were supplied.
    if layout:
        props['layout'] = layout
    if layout_class:
        props['layout_class'] = layout_class
    if rules:
        props['rules'] = rules
    layout_class_args = kwargs.pop('layout_class_args', None)
    if layout_class_args:
        props['layout_class_args'] = layout_class_args

    return type(name or 'MakeForm_', (base_class or Form,), props)
[ "def", "make_form", "(", "fields", "=", "None", ",", "layout", "=", "None", ",", "layout_class", "=", "None", ",", "base_class", "=", "None", ",", "get_form_field", "=", "None", ",", "name", "=", "None", ",", "rules", "=", "None", ",", "*", "*", "kwa...
31.15
20.583333
def walk(self, topdown=True):
    """
    Artifact tree generator - analogue of `os.walk`.

    :param topdown: scan directories top-down when True (default);
        bottom-up when False.
    :rtype: collections.Iterator[
        (str, list[yagocd.resources.artifact.Artifact],
         list[yagocd.resources.artifact.Artifact])
    ]
    """
    manager = self._manager
    return manager.walk(top=self._path, topdown=topdown)
[ "def", "walk", "(", "self", ",", "topdown", "=", "True", ")", ":", "return", "self", ".", "_manager", ".", "walk", "(", "top", "=", "self", ".", "_path", ",", "topdown", "=", "topdown", ")" ]
41.083333
22.916667
def get_status(self, response, finished=False):
    """Map the stdout of the command returned by :meth:`cmd_status` to
    one of the status codes defined in :mod:`clusterjob.status`.

    The output is tabular: the header line (starting with ``JOBID``)
    fixes the column where the ``STAT`` field begins; the first data
    line whose status token appears in ``self.status_mapping`` wins.

    :param response: stdout of the status command.
    :param finished: unused; kept for interface compatibility.
    :return: a mapped status code, or ``None`` when no known status (or
        no ``STAT`` column) is found.
    """
    status_pos = 0
    for line in response.split("\n"):
        if line.startswith('JOBID'):
            # BUGFIX: str.find never raises ValueError - it returns -1
            # when 'STAT' is absent, which the old `except ValueError`
            # could never catch. Detect the sentinel explicitly.
            status_pos = line.find('STAT')
            if status_pos < 0:
                return None
        else:
            tokens = line[status_pos:].split()
            if not tokens:
                # BUGFIX: blank/short lines (e.g. the trailing newline)
                # used to crash on `.split()[0]`; skip them instead.
                continue
            status = tokens[0]
            if status in self.status_mapping:
                return self.status_mapping[status]
    return None
[ "def", "get_status", "(", "self", ",", "response", ",", "finished", "=", "False", ")", ":", "status_pos", "=", "0", "for", "line", "in", "response", ".", "split", "(", "\"\\n\"", ")", ":", "if", "line", ".", "startswith", "(", "'JOBID'", ")", ":", "t...
41.933333
10.066667
def sync_projects(self):
    """Sync projects.

    Retrieve every project from keystone and populate it into the DFA
    database and DCNM, skipping names listed in
    ``not_create_project_name``.
    """
    projects = self.keystone_event._service.projects.list()
    for proj in projects:
        if proj.name in not_create_project_name:
            continue
        # FIX: lazy %-style logging args instead of eager string
        # formatting - the message is only built when emitted.
        LOG.info("Syncing project %s", proj.name)
        self.project_create_func(proj.id, proj=proj)
[ "def", "sync_projects", "(", "self", ")", ":", "p", "=", "self", ".", "keystone_event", ".", "_service", ".", "projects", ".", "list", "(", ")", "for", "proj", "in", "p", ":", "if", "proj", ".", "name", "in", "not_create_project_name", ":", "continue", ...
35.75
14.416667
def get_profile(name=None, **kwargs):
    """Return the profile for ``name`` (default profile when omitted).

    Passing an existing :class:`Profile` instance returns it unchanged;
    otherwise the matching profile class is instantiated with ``kwargs``.
    """
    if isinstance(name, Profile):
        return name
    profile_cls = get_profile_class(name or 'default')
    return profile_cls(**kwargs)
[ "def", "get_profile", "(", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "name", ",", "Profile", ")", ":", "return", "name", "clazz", "=", "get_profile_class", "(", "name", "or", "'default'", ")", "return", "clazz", "...
28.111111
12.555556
def find_handfile(names=None):
    """Try to locate the ``handfile``: either test the explicitly given
    name(s), or search upward through parent directories.

    :param str names: optional list of file names to look for (mainly
        for debugging); defaults to the name configured in
        ``env.handfile``.
    :return: absolute path of the handfile, or ``None`` if not found.
    :rtype: str
    """
    # Fall back to the configured name. FIX: copy the list so we never
    # mutate the caller's argument (the old code appended to it below).
    names = list(names) if names else [env.handfile]
    # Without a '.py' extension, also try the name with '.py' appended.
    if not names[0].endswith('.py'):
        names += [names[0] + '.py']
    if os.path.dirname(names[0]):
        # The name carries a path component: expand '~' and test it
        # directly.
        for name in names:
            expanded = os.path.expanduser(name)
            if os.path.exists(expanded):
                if name.endswith('.py') or _is_package(expanded):
                    return os.path.abspath(expanded)
    else:
        # Otherwise walk upward from the current directory, trying each
        # candidate name at every level.
        path = '.'
        # Stop once the filesystem root has been reached.
        while os.path.split(os.path.abspath(path))[1]:
            for name in names:
                joined = os.path.join(path, name)
                if os.path.exists(joined):
                    if name.endswith('.py') or _is_package(joined):
                        return os.path.abspath(joined)
            path = os.path.join('..', path)
    return None
[ "def", "find_handfile", "(", "names", "=", "None", ")", ":", "# 如果没有明确指定,则包含 env 中的值", "names", "=", "names", "or", "[", "env", ".", "handfile", "]", "# 若无 ``.py`` 扩展名,则作为待查询名称,追加到 names 末尾", "if", "not", "names", "[", "0", "]", ".", "endswith", "(", "'.py'", ...
30
15.243243
def parse_alert(server_handshake_bytes):
    """
    Scan the handshake for a protocol alert record.

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        None or a 2-element tuple of integers:
         0: 1 (warning) or 2 (fatal)
         1: The alert description
            (see https://tools.ietf.org/html/rfc5246#section-7.2)
    """
    for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
        # 0x15 is the TLS "alert" content type; skip everything else.
        if record_type != b'\x15':
            continue
        if len(record_data) != 2:
            return None
        level = int_from_bytes(record_data[0:1])
        description = int_from_bytes(record_data[1:2])
        return (level, description)
    return None
[ "def", "parse_alert", "(", "server_handshake_bytes", ")", ":", "for", "record_type", ",", "_", ",", "record_data", "in", "parse_tls_records", "(", "server_handshake_bytes", ")", ":", "if", "record_type", "!=", "b'\\x15'", ":", "continue", "if", "len", "(", "reco...
33.55
20.15
def model(self):
    """The Android code name for the device."""
    if self.is_bootloader:
        # In bootloader mode, ask fastboot. Output is never empty
        # because fastboot writes a 'total time' message to stderr.
        out = self.fastboot.getvar('product').strip()
        lines = out.decode('utf-8').split('\n', 1)
        if lines:
            tokens = lines[0].split(' ')
            if len(tokens) > 1:
                return tokens[1].lower()
        return None
    product = self.adb.getprop('ro.build.product').lower()
    if product == 'sprout':
        return product
    return self.adb.getprop('ro.product.name').lower()
[ "def", "model", "(", "self", ")", ":", "# If device is in bootloader mode, get mode name from fastboot.", "if", "self", ".", "is_bootloader", ":", "out", "=", "self", ".", "fastboot", ".", "getvar", "(", "'product'", ")", ".", "strip", "(", ")", "# 'out' is never ...
40.833333
14.333333
def writeByteArray(self, n):
    """
    Encode a L{ByteArray} to the AMF3 data stream.

    @param n: The L{ByteArray} data to be encoded to the AMF3 data stream.
    @type n: L{ByteArray}
    """
    stream = self.stream
    stream.write(TYPE_BYTEARRAY)

    ref = self.context.getObjectReference(n)
    if ref != -1:
        # Already serialized: emit a back-reference instead of the body.
        self._writeInteger(ref << 1)
        return

    self.context.addObject(n)
    buf = str(n)
    # Length is shifted left one bit; the low bit marks inline data.
    self._writeInteger(len(buf) << 1 | REFERENCE_BIT)
    stream.write(buf)
[ "def", "writeByteArray", "(", "self", ",", "n", ")", ":", "self", ".", "stream", ".", "write", "(", "TYPE_BYTEARRAY", ")", "ref", "=", "self", ".", "context", ".", "getObjectReference", "(", "n", ")", "if", "ref", "!=", "-", "1", ":", "self", ".", ...
23.954545
19.409091
def _autocorr_func2(mags, lag, maglen, magmed, magstd):
    '''Alternative autocorrelation estimator (first definition at
    https://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations).

    Parameters
    ----------
    mags : np.array
        The magnitudes array. MUST NOT have any nans.
    lag : float
        The lag to evaluate; MUST be less than the number of
        observations in `mags`.
    maglen : int
        The number of elements in the `mags` array.
    magmed : float
        The median of the `mags` array.
    magstd : float
        The standard deviation of the `mags` array (unused by this
        estimator; kept for interface parity).

    Returns
    -------
    float
        The auto-correlation at this specific `lag` value.
    '''
    lagindex = nparange(0, maglen - lag)
    deviations = mags[lagindex] - magmed
    lagged_deviations = mags[lagindex + lag] - magmed
    autocovarfunc = npsum(deviations * lagged_deviations)/lagindex.size
    varfunc = npsum(deviations * deviations)/mags.size
    return autocovarfunc/varfunc
[ "def", "_autocorr_func2", "(", "mags", ",", "lag", ",", "maglen", ",", "magmed", ",", "magstd", ")", ":", "lagindex", "=", "nparange", "(", "0", ",", "maglen", "-", "lag", ")", "products", "=", "(", "mags", "[", "lagindex", "]", "-", "magmed", ")", ...
23.978261
27.5
def get_cds_ranges_for_transcript(self, transcript_id):
    """Fetch the CDS (start, end) ranges of a transcript from ensembl."""
    headers = {"content-type": "application/json"}
    # Reset the retry counter before issuing a new request.
    self.attempt = 0
    ext = "/overlap/id/{}?feature=cds".format(transcript_id)
    response = self.ensembl_request(ext, headers)
    # Keep only CDS entries that belong to the requested transcript.
    return [
        (entry["start"], entry["end"])
        for entry in json.loads(response)
        if entry["Parent"] == transcript_id
    ]
[ "def", "get_cds_ranges_for_transcript", "(", "self", ",", "transcript_id", ")", ":", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", "}", "self", ".", "attempt", "=", "0", "ext", "=", "\"/overlap/id/{}?feature=cds\"", ".", "format", "(", "tra...
30.904762
15.761905
def getBWTRange(self, start, end):
    '''
    Retrieve bwt[start:end] from the compressed BWT representation,
    hiding the complexity of the block-compressed format.

    @param start - the beginning of the range to retrieve
    @param end - the end of the range in normal python notation
                 (bwt[end] is not part of the return)
    @return - a range of integers representing the characters in the
              bwt from start to end
    '''
    # Which compressed blocks cover the requested range?
    firstBlock = start >> self.bitPower
    lastBlock = int(math.floor(float(end)/self.binSize))
    # Offset of the first decompressed symbol within the overall BWT.
    blockOffset = firstBlock*self.binSize
    decompressed = self.decompressBlocks(firstBlock, lastBlock)
    return decompressed[start-blockOffset:end-blockOffset]
[ "def", "getBWTRange", "(", "self", ",", "start", ",", "end", ")", ":", "#set aside an array block to fill", "startBlockIndex", "=", "start", ">>", "self", ".", "bitPower", "endBlockIndex", "=", "int", "(", "math", ".", "floor", "(", "float", "(", "end", ")",...
55.357143
29.071429
def parse(self):
    """Parse the data, then narrow it down to the ``item`` elements."""
    super(OpenWisp, self).parse()
    items = self.parsed_data.getElementsByTagName('item')
    self.parsed_data = items
[ "def", "parse", "(", "self", ")", ":", "super", "(", "OpenWisp", ",", "self", ")", ".", "parse", "(", ")", "self", ".", "parsed_data", "=", "self", ".", "parsed_data", ".", "getElementsByTagName", "(", "'item'", ")" ]
37.75
14.75
def run(self):
    """Entry point: execute the full ssGSEA workflow on the input data."""
    self._logger.info("Parsing data files for ssGSEA...........................")
    # load data
    data = self.load_data()
    # normalized samples, and rank
    normdat = self.norm_samples(data)
    # filtering out gene sets and build gene sets dictionary
    gmt = self.load_gmt(gene_list=normdat.index.values, gmt=self.gene_sets)
    self._logger.info("%04d gene_sets used for further statistical testing....."% len(gmt))
    # set cpu numbers
    self._set_cores()
    # start analysis
    self._logger.info("Start to run ssGSEA...Might take a while................")
    if self.permutation_num == 0 :
        # ssGSEA without permutation
        self.runSamples(df=normdat, gmt=gmt)
    else:
        # run permutation procedure and calculate pvals, fdrs
        self._logger.warning("run ssGSEA with permutation procedure, don't use these part of results for publication.")
        self.runSamplesPermu(df=normdat, gmt=gmt)
    # clean up all outputs if _outdir is None (results were written to a
    # temporary directory in that case)
    if self._outdir is None:
        self._tmpdir.cleanup()
[ "def", "run", "(", "self", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"Parsing data files for ssGSEA...........................\"", ")", "# load data", "data", "=", "self", ".", "load_data", "(", ")", "# normalized samples, and rank", "normdat", "=", "sel...
47.583333
20.625
def get_csig(self):
    """
    Return this node's content signature (the digested signature of its
    content), computing and caching it on first use.

    returns - the content signature
    """
    try:
        # Cached from a previous call (or loaded from the signature DB).
        return self.ninfo.csig
    except AttributeError:
        pass

    csig = SCons.Util.MD5signature(self.get_contents())
    self.get_ninfo().csig = csig
    return csig
[ "def", "get_csig", "(", "self", ")", ":", "try", ":", "return", "self", ".", "ninfo", ".", "csig", "except", "AttributeError", ":", "pass", "contents", "=", "self", ".", "get_contents", "(", ")", "csig", "=", "SCons", ".", "Util", ".", "MD5signature", ...
27.055556
15.944444
def _get_label(self, urn):
    """ Provisional route for GetLabel request

    :param urn: URN to filter the resource
    :return: GetLabel response as a Flask tuple
        (XML body, HTTP 200, content-type header)
    """
    node = self.resolver.getTextualNode(textId=urn)
    # Render the CTS GetLabel XML reply; each metadata field is pulled
    # off the node's RDF graph as a list of (language, value) pairs.
    r = render_template(
        "cts/GetLabel.xml",
        request_urn=str(urn),
        full_urn=node.urn,
        metadata={
            "groupname": [(literal.language, str(literal)) for literal in node.metadata.get(RDF_NAMESPACES.CTS.groupname)],
            "title": [(literal.language, str(literal)) for literal in node.metadata.get(RDF_NAMESPACES.CTS.title)],
            "description": [(literal.language, str(literal)) for literal in node.metadata.get(RDF_NAMESPACES.CTS.description)],
            "label": [(literal.language, str(literal)) for literal in node.metadata.get(RDF_NAMESPACES.CTS.label)]
        },
        # Markup prevents the pre-rendered citation XML from being
        # escaped by the template engine.
        citation=Markup(node.citation.export(Mimetypes.XML.CTS))
    )
    return r, 200, {"content-type": "application/xml"}
[ "def", "_get_label", "(", "self", ",", "urn", ")", ":", "node", "=", "self", ".", "resolver", ".", "getTextualNode", "(", "textId", "=", "urn", ")", "r", "=", "render_template", "(", "\"cts/GetLabel.xml\"", ",", "request_urn", "=", "str", "(", "urn", ")"...
50
27.380952
def fromdict(dict):
    """Build a new Challenge object from its dictionary representation.

    :param dict: the dictionary to convert
    """
    decoded_seed = hb_decode(dict['seed'])
    return Challenge(decoded_seed, dict['index'])
[ "def", "fromdict", "(", "dict", ")", ":", "seed", "=", "hb_decode", "(", "dict", "[", "'seed'", "]", ")", "index", "=", "dict", "[", "'index'", "]", "return", "Challenge", "(", "seed", ",", "index", ")" ]
31.888889
9.777778
def create_html_select( options, name=None, selected=None, disabled=None, multiple=False, attrs=None, **other_attrs): """ Create an HTML select box. >>> print create_html_select(["foo", "bar"], selected="bar", name="baz") <select name="baz"> <option selected="selected" value="bar"> bar </option> <option value="foo"> foo </option> </select> >>> print create_html_select([("foo", "oof"), ("bar", "rab")], selected="bar", name="baz") <select name="baz"> <option value="foo"> oof </option> <option selected="selected" value="bar"> rab </option> </select> @param options: this can either be a sequence of strings, or a sequence of couples or a map of C{key->value}. In the former case, the C{select} tag will contain a list of C{option} tags (in alphabetical order), where the C{value} attribute is not specified. In the latter case, the C{value} attribute will be set to the C{key}, while the body of the C{option} will be set to C{value}. @type options: sequence or map @param name: the name of the form element. @type name: string @param selected: optional key(s)/value(s) to select by default. In case a map has been used for options. @type selected: string (or list of string) @param disabled: optional key(s)/value(s) to disable. @type disabled: string (or list of string) @param multiple: whether a multiple select box must be created. @type mutable: bool @param attrs: optional attributes to create the select tag. @type attrs: dict @param other_attrs: other optional attributes. @return: the HTML output. @rtype: string @note: the values and keys will be escaped for HTML. @note: it is important that parameter C{value} is always specified, in case some browser plugin play with the markup, for eg. when translating the page. 
""" body = [] if selected is None: selected = [] elif isinstance(selected, (str, unicode)): selected = [selected] if disabled is None: disabled = [] elif isinstance(disabled, (str, unicode)): disabled = [disabled] if name is not None and multiple and not name.endswith('[]'): name += "[]" if isinstance(options, dict): items = options.items() items.sort(lambda item1, item2: cmp(item1[1], item2[1])) elif isinstance(options, (list, tuple)): options = list(options) items = [] for item in options: if isinstance(item, (str, unicode)): items.append((item, item)) elif isinstance(item, (tuple, list)) and len(item) == 2: items.append(tuple(item)) else: raise ValueError( 'Item "%s" of incompatible type: %s' % (item, type(item))) else: raise ValueError('Options of incompatible type: %s' % type(options)) for key, value in items: option_attrs = {} if key in selected: option_attrs['selected'] = 'selected' if key in disabled: option_attrs['disabled'] = 'disabled' body.append( create_tag( "option", body=value, escape_body=True, value=key, attrs=option_attrs)) if attrs is None: attrs = {} if name is not None: attrs['name'] = name if multiple: attrs['multiple'] = 'multiple' return create_tag( "select", body='\n'.join(body), attrs=attrs, **other_attrs)
[ "def", "create_html_select", "(", "options", ",", "name", "=", "None", ",", "selected", "=", "None", ",", "disabled", "=", "None", ",", "multiple", "=", "False", ",", "attrs", "=", "None", ",", "*", "*", "other_attrs", ")", ":", "body", "=", "[", "]"...
34.009174
18.100917
def get_lists(client): ''' Gets all the client's lists ''' response = client.authenticated_request(client.api.Endpoints.LISTS) return response.json()
[ "def", "get_lists", "(", "client", ")", ":", "response", "=", "client", ".", "authenticated_request", "(", "client", ".", "api", ".", "Endpoints", ".", "LISTS", ")", "return", "response", ".", "json", "(", ")" ]
39.5
16
def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0): """ The input parameters describe an option with only certain values allowed. They are returned with an appropriate converter and validator appended. The result is usable for input to Variables.Add(). 'key' and 'default' are the values to be passed on to Variables.Add(). 'help' will be appended by the allowed values automatically 'allowed_values' is a list of strings, which are allowed as values for this option. The 'map'-dictionary may be used for converting the input value into canonical values (e.g. for aliases). 'ignorecase' defines the behaviour of the validator: If ignorecase == 0, the validator/converter are case-sensitive. If ignorecase == 1, the validator/converter are case-insensitive. If ignorecase == 2, the validator/converter is case-insensitive and the converted value will always be lower-case. The 'validator' tests whether the value is in the list of allowed values. The 'converter' converts input values according to the given 'map'-dictionary (unmapped input values are returned unchanged). """ help = '%s (%s)' % (help, '|'.join(allowed_values)) # define validator if ignorecase >= 1: validator = lambda key, val, env: \ _validator(key, val.lower(), env, allowed_values) else: validator = lambda key, val, env: \ _validator(key, val, env, allowed_values) # define converter if ignorecase == 2: converter = lambda val: map.get(val.lower(), val).lower() elif ignorecase == 1: converter = lambda val: map.get(val.lower(), val) else: converter = lambda val: map.get(val, val) return (key, help, default, validator, converter)
[ "def", "EnumVariable", "(", "key", ",", "help", ",", "default", ",", "allowed_values", ",", "map", "=", "{", "}", ",", "ignorecase", "=", "0", ")", ":", "help", "=", "'%s (%s)'", "%", "(", "help", ",", "'|'", ".", "join", "(", "allowed_values", ")", ...
41.697674
26.813953
def delete(self, id): """Deletes a grant. Args: id (str): The id of the custom domain to delete See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/delete_custom_domains_by_id """ url = self._url('%s' % (id)) return self.client.delete(url)
[ "def", "delete", "(", "self", ",", "id", ")", ":", "url", "=", "self", ".", "_url", "(", "'%s'", "%", "(", "id", ")", ")", "return", "self", ".", "client", ".", "delete", "(", "url", ")" ]
27.454545
22.545455
def adapt_meta(self, meta): """Convert meta from error response to href and surge_id attributes.""" surge = meta.get('surge_confirmation') href = surge.get('href') surge_id = surge.get('surge_confirmation_id') return href, surge_id
[ "def", "adapt_meta", "(", "self", ",", "meta", ")", ":", "surge", "=", "meta", ".", "get", "(", "'surge_confirmation'", ")", "href", "=", "surge", ".", "get", "(", "'href'", ")", "surge_id", "=", "surge", ".", "get", "(", "'surge_confirmation_id'", ")", ...
33.25
16.375
def return_val(self): """ Returns the return value of the function, as a ParamDoc with an empty name: >>> comments = parse_comments_for_file('examples/module_closure.js') >>> fn1 = FunctionDoc(comments[1]) >>> fn1.return_val.name '' >>> fn1.return_val.doc 'Some value' >>> fn1.return_val.type 'String' >>> fn2 = FunctionDoc(comments[2]) >>> fn2.return_val.doc 'Some property of the elements.' >>> fn2.return_val.type 'Array<String>' """ ret = self.get('return') or self.get('returns') type = self.get('type') if '{' in ret and '}' in ret: if not '} ' in ret: # Ensure that name is empty ret = ret.replace('} ', '} ') return ParamDoc(ret) if ret and type: return ParamDoc('{%s} %s' % (type, ret)) return ParamDoc(ret)
[ "def", "return_val", "(", "self", ")", ":", "ret", "=", "self", ".", "get", "(", "'return'", ")", "or", "self", ".", "get", "(", "'returns'", ")", "type", "=", "self", ".", "get", "(", "'type'", ")", "if", "'{'", "in", "ret", "and", "'}'", "in", ...
30.193548
14.903226
def _on_completions_refreshed(self, new_completer): """Swap the completer object in cli with the newly created completer. """ with self._completer_lock: self.completer = new_completer # When cli is first launched we call refresh_completions before # instantiating the cli object. So it is necessary to check if cli # exists before trying the replace the completer object in cli. if self.cli: self.cli.current_buffer.completer = new_completer if self.cli: # After refreshing, redraw the CLI to clear the statusbar # "Refreshing completions..." indicator self.cli.request_redraw()
[ "def", "_on_completions_refreshed", "(", "self", ",", "new_completer", ")", ":", "with", "self", ".", "_completer_lock", ":", "self", ".", "completer", "=", "new_completer", "# When cli is first launched we call refresh_completions before", "# instantiating the cli object. So i...
47.266667
18.066667
def get(self, url): """ Do a GET request """ r = requests.get(self._format_url(url), headers=self.headers, timeout=TIMEOUT) self._check_response(r, 200) return r.json()
[ "def", "get", "(", "self", ",", "url", ")", ":", "r", "=", "requests", ".", "get", "(", "self", ".", "_format_url", "(", "url", ")", ",", "headers", "=", "self", ".", "headers", ",", "timeout", "=", "TIMEOUT", ")", "self", ".", "_check_response", "...
26.25
18
def version(self): """ This attribute retrieve the API version. >>> Works().version '1.0.0' """ request_params = dict(self.request_params) request_url = str(self.request_url) result = self.do_http_request( 'get', request_url, data=request_params, custom_header=str(self.etiquette) ).json() return result['message-version']
[ "def", "version", "(", "self", ")", ":", "request_params", "=", "dict", "(", "self", ".", "request_params", ")", "request_url", "=", "str", "(", "self", ".", "request_url", ")", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url...
24.888889
15.222222
def get_sorted_hdrgo2usrgos(self, hdrgos, flat_list=None, hdrgo_prt=True, hdrgo_sort=True): """Return GO IDs sorting using go2nt's namedtuple.""" # Return user-specfied sort or default sort of header and user GO IDs sorted_hdrgos_usrgos = [] h2u_get = self.grprobj.hdrgo2usrgos.get # Sort GO group headers using GO info in go2nt hdr_go2nt = self._get_go2nt(hdrgos) if hdrgo_sort is True: hdr_go2nt = sorted(hdr_go2nt.items(), key=lambda t: self.hdrgo_sortby(t[1])) for hdrgo_id, hdrgo_nt in hdr_go2nt: if flat_list is not None: if hdrgo_prt or hdrgo_id in self.grprobj.usrgos: flat_list.append(hdrgo_nt) # Sort user GOs which are under the current GO header usrgos_unsorted = h2u_get(hdrgo_id) if usrgos_unsorted: usrgo2nt = self._get_go2nt(usrgos_unsorted) usrgont_sorted = sorted(usrgo2nt.items(), key=lambda t: self.usrgo_sortby(t[1])) usrgos_sorted, usrnts_sorted = zip(*usrgont_sorted) if flat_list is not None: flat_list.extend(usrnts_sorted) sorted_hdrgos_usrgos.append((hdrgo_id, usrgos_sorted)) else: sorted_hdrgos_usrgos.append((hdrgo_id, [])) return cx.OrderedDict(sorted_hdrgos_usrgos)
[ "def", "get_sorted_hdrgo2usrgos", "(", "self", ",", "hdrgos", ",", "flat_list", "=", "None", ",", "hdrgo_prt", "=", "True", ",", "hdrgo_sort", "=", "True", ")", ":", "# Return user-specfied sort or default sort of header and user GO IDs", "sorted_hdrgos_usrgos", "=", "[...
54.76
18.08
def generate_source_catalogs(imglist, **pars): """Generates a dictionary of source catalogs keyed by image name. Parameters ---------- imglist : list List of one or more calibrated fits images that will be used for source detection. Returns ------- sourcecatalogdict : dictionary a dictionary (keyed by image name) of two element dictionaries which in tern contain 1) a dictionary of the detector-specific processing parameters and 2) an astropy table of position and photometry information of all detected sources """ output = pars.get('output', False) sourcecatalogdict = {} for imgname in imglist: log.info("Image name: {}".format(imgname)) sourcecatalogdict[imgname] = {} # open image imghdu = fits.open(imgname) imgprimaryheader = imghdu[0].header instrument = imgprimaryheader['INSTRUME'].lower() detector = imgprimaryheader['DETECTOR'].lower() # get instrument/detector-specific image alignment parameters if instrument in detector_specific_params.keys(): if detector in detector_specific_params[instrument].keys(): detector_pars = detector_specific_params[instrument][detector] # to allow generate_source_catalog to get detector specific parameters detector_pars.update(pars) sourcecatalogdict[imgname]["params"] = detector_pars else: sys.exit("ERROR! Unrecognized detector '{}'. Exiting...".format(detector)) log.error("ERROR! Unrecognized detector '{}'. Exiting...".format(detector)) else: sys.exit("ERROR! Unrecognized instrument '{}'. Exiting...".format(instrument)) log.error("ERROR! Unrecognized instrument '{}'. Exiting...".format(instrument)) # Identify sources in image, convert coords from chip x, y form to reference WCS sky RA, Dec form. imgwcs = HSTWCS(imghdu, 1) fwhmpsf_pix = sourcecatalogdict[imgname]["params"]['fwhmpsf']/imgwcs.pscale #Convert fwhmpsf from arsec to pixels sourcecatalogdict[imgname]["catalog_table"] = amutils.generate_source_catalog(imghdu, fwhm=fwhmpsf_pix, **detector_pars) # write out coord lists to files for diagnostic purposes. 
Protip: To display the sources in these files in DS9, # set the "Coordinate System" option to "Physical" when loading the region file. imgroot = os.path.basename(imgname).split('_')[0] numSci = amutils.countExtn(imghdu) # Allow user to decide when and how to write out catalogs to files if output: for chip in range(1,numSci+1): chip_cat = sourcecatalogdict[imgname]["catalog_table"][chip] if chip_cat and len(chip_cat) > 0: regfilename = "{}_sci{}_src.reg".format(imgroot, chip) out_table = Table(chip_cat) out_table.write(regfilename, include_names=["xcentroid", "ycentroid"], format="ascii.fast_commented_header") log.info("Wrote region file {}\n".format(regfilename)) imghdu.close() return(sourcecatalogdict)
[ "def", "generate_source_catalogs", "(", "imglist", ",", "*", "*", "pars", ")", ":", "output", "=", "pars", ".", "get", "(", "'output'", ",", "False", ")", "sourcecatalogdict", "=", "{", "}", "for", "imgname", "in", "imglist", ":", "log", ".", "info", "...
50.83871
30.129032
def RegisterHelper(cls, resolver_helper): """Registers a path specification resolver helper. Args: resolver_helper (ResolverHelper): resolver helper. Raises: KeyError: if resolver helper object is already set for the corresponding type indicator. """ if resolver_helper.type_indicator in cls._resolver_helpers: raise KeyError(( 'Resolver helper object already set for type indicator: ' '{0!s}.').format(resolver_helper.type_indicator)) cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper
[ "def", "RegisterHelper", "(", "cls", ",", "resolver_helper", ")", ":", "if", "resolver_helper", ".", "type_indicator", "in", "cls", ".", "_resolver_helpers", ":", "raise", "KeyError", "(", "(", "'Resolver helper object already set for type indicator: '", "'{0!s}.'", ")"...
35.4375
23.25
def paintEvent(self, event): """ Overloads the paint event to support rendering of hints if there are no items in the tree. :param event | <QPaintEvent> """ super(XListWidget, self).paintEvent(event) if not self.visibleCount() and self.hint(): text = self.hint() rect = self.rect() # modify the padding on the rect w = min(250, rect.width() - 30) x = (rect.width() - w) / 2 rect.setX(x) rect.setY(rect.y() + 15) rect.setWidth(w) rect.setHeight(rect.height() - 30) align = int(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop) # setup the coloring options clr = self.hintColor() # paint the hint with XPainter(self.viewport()) as painter: painter.setPen(clr) painter.drawText(rect, align | QtCore.Qt.TextWordWrap, text)
[ "def", "paintEvent", "(", "self", ",", "event", ")", ":", "super", "(", "XListWidget", ",", "self", ")", ".", "paintEvent", "(", "event", ")", "if", "not", "self", ".", "visibleCount", "(", ")", "and", "self", ".", "hint", "(", ")", ":", "text", "=...
34.419355
14.032258
def update_resource(self, resource, underlined=None): """Update the cache for global names in `resource`""" try: pymodule = self.project.get_pymodule(resource) modname = self._module_name(resource) self._add_names(pymodule, modname, underlined) except exceptions.ModuleSyntaxError: pass
[ "def", "update_resource", "(", "self", ",", "resource", ",", "underlined", "=", "None", ")", ":", "try", ":", "pymodule", "=", "self", ".", "project", ".", "get_pymodule", "(", "resource", ")", "modname", "=", "self", ".", "_module_name", "(", "resource", ...
43.875
14.25
def get_rule(self, template_name): """Find a matching compilation rule for a function. Raises a :exc:`ValueError` if no matching rule can be found. :param template_name: the name of the template """ for regex, render_func in self.rules: if re.match(regex, template_name): return render_func raise ValueError("no matching rule")
[ "def", "get_rule", "(", "self", ",", "template_name", ")", ":", "for", "regex", ",", "render_func", "in", "self", ".", "rules", ":", "if", "re", ".", "match", "(", "regex", ",", "template_name", ")", ":", "return", "render_func", "raise", "ValueError", "...
35.909091
13.545455
def generate_signed_url_v4( credentials, resource, expiration, api_access_endpoint=DEFAULT_ENDPOINT, method="GET", content_md5=None, content_type=None, response_type=None, response_disposition=None, generation=None, headers=None, query_parameters=None, _request_timestamp=None, # for testing only ): """Generate a V4 signed URL to provide query-string auth'n to a resource. .. note:: Assumes ``credentials`` implements the :class:`google.auth.credentials.Signing` interface. Also assumes ``credentials`` has a ``service_account_email`` property which identifies the credentials. .. note:: If you are on Google Compute Engine, you can't generate a signed URL. Follow `Issue 922`_ for updates on this. If you'd like to be able to generate a signed URL from GCE, you can use a standard service account from a JSON file rather than a GCE service account. See headers `reference`_ for more details on optional arguments. .. _Issue 922: https://github.com/GoogleCloudPlatform/\ google-cloud-python/issues/922 .. _reference: https://cloud.google.com/storage/docs/reference-headers :type credentials: :class:`google.auth.credentials.Signing` :param credentials: Credentials object with an associated private key to sign text. :type resource: str :param resource: A pointer to a specific resource (typically, ``/bucket-name/path/to/blob.txt``). :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] :param expiration: Point in time when the signed URL should expire. :type api_access_endpoint: str :param api_access_endpoint: Optional URI base. Defaults to "https://storage.googleapis.com/" :type method: str :param method: The HTTP verb that will be used when requesting the URL. Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the signature will additionally contain the `x-goog-resumable` header, and the method changed to POST. 
See the signed URL docs regarding this flow: https://cloud.google.com/storage/docs/access-control/signed-urls :type content_md5: str :param content_md5: (Optional) The MD5 hash of the object referenced by ``resource``. :type content_type: str :param content_type: (Optional) The content type of the object referenced by ``resource``. :type response_type: str :param response_type: (Optional) Content type of responses to requests for the signed URL. Used to over-ride the content type of the underlying resource. :type response_disposition: str :param response_disposition: (Optional) Content disposition of responses to requests for the signed URL. :type generation: str :param generation: (Optional) A value that indicates which generation of the resource to fetch. :type headers: dict :param headers: (Optional) Additional HTTP headers to be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers Requests using the signed URL *must* pass the specified header (name and value) with each request for the URL. :type query_parameters: dict :param query_parameters: (Optional) Additional query paramtersto be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers#query :raises: :exc:`TypeError` when expiration is not a valid type. :raises: :exc:`AttributeError` if credentials is not an instance of :class:`google.auth.credentials.Signing`. :rtype: str :returns: A signed URL you can use to access the resource until expiration. 
""" ensure_signed_credentials(credentials) expiration_seconds = get_expiration_seconds_v4(expiration) if _request_timestamp is None: now = NOW() request_timestamp = now.strftime("%Y%m%dT%H%M%SZ") datestamp = now.date().strftime("%Y%m%d") else: request_timestamp = _request_timestamp datestamp = _request_timestamp[:8] client_email = credentials.signer_email credential_scope = "{}/auto/storage/goog4_request".format(datestamp) credential = "{}/{}".format(client_email, credential_scope) if headers is None: headers = {} if content_type is not None: headers["Content-Type"] = content_type if content_md5 is not None: headers["Content-MD5"] = content_md5 header_names = [key.lower() for key in headers] if "host" not in header_names: headers["Host"] = "storage.googleapis.com" if method.upper() == "RESUMABLE": method = "POST" headers["x-goog-resumable"] = "start" canonical_headers, ordered_headers = get_canonical_headers(headers) canonical_header_string = ( "\n".join(canonical_headers) + "\n" ) # Yes, Virginia, the extra newline is part of the spec. 
signed_headers = ";".join([key for key, _ in ordered_headers]) if query_parameters is None: query_parameters = {} else: query_parameters = {key: value or "" for key, value in query_parameters.items()} query_parameters["X-Goog-Algorithm"] = "GOOG4-RSA-SHA256" query_parameters["X-Goog-Credential"] = credential query_parameters["X-Goog-Date"] = request_timestamp query_parameters["X-Goog-Expires"] = expiration_seconds query_parameters["X-Goog-SignedHeaders"] = signed_headers if response_type is not None: query_parameters["response-content-type"] = response_type if response_disposition is not None: query_parameters["response-content-disposition"] = response_disposition if generation is not None: query_parameters["generation"] = generation ordered_query_parameters = sorted(query_parameters.items()) canonical_query_string = six.moves.urllib.parse.urlencode(ordered_query_parameters) canonical_elements = [ method, resource, canonical_query_string, canonical_header_string, signed_headers, "UNSIGNED-PAYLOAD", ] canonical_request = "\n".join(canonical_elements) canonical_request_hash = hashlib.sha256( canonical_request.encode("ascii") ).hexdigest() string_elements = [ "GOOG4-RSA-SHA256", request_timestamp, credential_scope, canonical_request_hash, ] string_to_sign = "\n".join(string_elements) signature_bytes = credentials.sign_bytes(string_to_sign.encode("ascii")) signature = binascii.hexlify(signature_bytes).decode("ascii") return "{}{}?{}&X-Goog-Signature={}".format( api_access_endpoint, resource, canonical_query_string, signature )
[ "def", "generate_signed_url_v4", "(", "credentials", ",", "resource", ",", "expiration", ",", "api_access_endpoint", "=", "DEFAULT_ENDPOINT", ",", "method", "=", "\"GET\"", ",", "content_md5", "=", "None", ",", "content_type", "=", "None", ",", "response_type", "=...
35.880829
23.621762
def _process_binary_trigger(trigger_value, condition): """Create an InputTrigger object.""" ops = { 0: ">", 1: "<", 2: ">=", 3: "<=", 4: "==", 5: 'always' } sources = { 0: 'value', 1: 'count' } encoded_source = condition & 0b1 encoded_op = condition >> 1 oper = ops.get(encoded_op, None) source = sources.get(encoded_source, None) if oper is None: raise ArgumentError("Unknown operation in binary trigger", condition=condition, operation=encoded_op, known_ops=ops) if source is None: raise ArgumentError("Unknown value source in binary trigger", source=source, known_sources=sources) if oper == 'always': return TrueTrigger() return InputTrigger(source, oper, trigger_value)
[ "def", "_process_binary_trigger", "(", "trigger_value", ",", "condition", ")", ":", "ops", "=", "{", "0", ":", "\">\"", ",", "1", ":", "\"<\"", ",", "2", ":", "\">=\"", ",", "3", ":", "\"<=\"", ",", "4", ":", "\"==\"", ",", "5", ":", "'always'", "}...
24.6875
26.75
def to_dict(self): """ This method converts the DictCell into a python `dict`. This is useful for JSON serialization. """ output = {} for key, value in self.__dict__['p'].iteritems(): if value is None or isinstance(value, SIMPLE_TYPES): output[key] = value elif hasattr(value, 'to_dot'): output[key] = value.to_dot() elif hasattr(value, 'to_dict'): output[key] = value.to_dict() elif isinstance(value, datetime.date): # Convert date/datetime to ms-since-epoch ("new Date()"). ms = time.mktime(value.utctimetuple()) * 1000 ms += getattr(value, 'microseconds', 0) / 1000 output[key] = int(ms) elif isinstance(value, dict): output[key] = [] else: raise ValueError('cannot encode ' + repr(key)) return output
[ "def", "to_dict", "(", "self", ")", ":", "output", "=", "{", "}", "for", "key", ",", "value", "in", "self", ".", "__dict__", "[", "'p'", "]", ".", "iteritems", "(", ")", ":", "if", "value", "is", "None", "or", "isinstance", "(", "value", ",", "SI...
39.791667
14.708333
def matrix_is_equivalent(X, Y): """ Checks matrix equivalence with numpy, scipy and pandas """ return X is Y or (isinstance(X, Y.__class__) and X.shape == Y.shape and np.sum((X != Y).sum()) == 0)
[ "def", "matrix_is_equivalent", "(", "X", ",", "Y", ")", ":", "return", "X", "is", "Y", "or", "(", "isinstance", "(", "X", ",", "Y", ".", "__class__", ")", "and", "X", ".", "shape", "==", "Y", ".", "shape", "and", "np", ".", "sum", "(", "(", "X"...
38
12
def balance_ions(anions, cations, anion_zs=None, cation_zs=None, anion_concs=None, cation_concs=None, rho_w=997.1, method='increase dominant', selected_ion=None): r'''Performs an ion balance to adjust measured experimental ion compositions to electroneutrality. Can accept either the actual mole fractions of the ions, or their concentrations in units of [mg/L] as well for convinience. The default method will locate the most prevalent ion in the type of ion not in excess - and increase it until the two ion types balance. Parameters ---------- anions : list(ChemicalMetadata) List of all negatively charged ions measured as being in the solution; ChemicalMetadata instances or simply objects with the attributes `MW` and `charge`, [-] cations : list(ChemicalMetadata) List of all positively charged ions measured as being in the solution; ChemicalMetadata instances or simply objects with the attributes `MW` and `charge`, [-] anion_zs : list, optional Mole fractions of each anion as measured in the aqueous solution, [-] cation_zs : list, optional Mole fractions of each cation as measured in the aqueous solution, [-] anion_concs : list, optional Concentrations of each anion in the aqueous solution in the units often reported (for convinience only) [mg/L] cation_concs : list, optional Concentrations of each cation in the aqueous solution in the units often reported (for convinience only) [mg/L] rho_w : float, optional Density of the aqueous solutionr at the temperature and pressure the anion and cation concentrations were measured (if specified), [kg/m^3] method : str, optional The method to use to balance the ionimbalance; one of 'dominant', 'decrease dominant', 'increase dominant', 'proportional insufficient ions increase', 'proportional excess ions decrease', 'proportional cation adjustment', 'proportional anion adjustment', 'Na or Cl increase', 'Na or Cl decrease', 'adjust', 'increase', 'decrease', 'makeup']. 
selected_ion : ChemicalMetadata, optional Some methods adjust only one user-specified ion; this is that input. For the case of the 'makeup' method, this is a tuple of (anion, cation) ChemicalMetadata instances and only the ion type not in excess will be used. Returns ------- anions : list(ChemicalMetadata) List of all negatively charged ions measured as being in the solution; ChemicalMetadata instances after potentially adding in an ion which was not present but specified by the user, [-] cations : list(ChemicalMetadata) List of all positively charged ions measured as being in the solution; ChemicalMetadata instances after potentially adding in an ion which was not present but specified by the user, [-] anion_zs : list, Mole fractions of each anion in the aqueous solution after the charge balance, [-] cation_zs : list Mole fractions of each cation in the aqueous solution after the charge balance, [-] z_water : float Mole fraction of the water in the solution, [-] Notes ----- The methods perform the charge balance as follows: * 'dominant' : The ion with the largest mole fraction in solution has its concentration adjusted up or down as necessary to balance the solution. * 'decrease dominant' : The ion with the largest mole fraction in the type of ion with *excess* charge has its own mole fraction decreased to balance the solution. * 'increase dominant' : The ion with the largest mole fraction in the type of ion with *insufficient* charge has its own mole fraction decreased to balance the solution. * 'proportional insufficient ions increase' : The ion charge type which is present insufficiently has each of the ions mole fractions *increased* proportionally until the solution is balanced. * 'proportional excess ions decrease' : The ion charge type which is present in excess has each of the ions mole fractions *decreased* proportionally until the solution is balanced. 
* 'proportional cation adjustment' : All *cations* have their mole fractions increased or decreased proportionally as necessary to balance the solution. * 'proportional anion adjustment' : All *anions* have their mole fractions increased or decreased proportionally as necessary to balance the solution. * 'Na or Cl increase' : Either Na+ or Cl- is *added* to the solution until the solution is balanced; the species will be added if they were not present initially as well. * 'Na or Cl decrease' : Either Na+ or Cl- is *removed* from the solution until the solution is balanced; the species will be added if they were not present initially as well. * 'adjust' : An ion specified with the parameter `selected_ion` has its mole fraction *increased or decreased* as necessary to balance the solution. An exception is raised if the specified ion alone cannot balance the solution. * 'increase' : An ion specified with the parameter `selected_ion` has its mole fraction *increased* as necessary to balance the solution. An exception is raised if the specified ion alone cannot balance the solution. * 'decrease' : An ion specified with the parameter `selected_ion` has its mole fraction *decreased* as necessary to balance the solution. An exception is raised if the specified ion alone cannot balance the solution. * 'makeup' : Two ions ase specified as a tuple with the parameter `selected_ion`. Whichever ion type is present in the solution insufficiently is added; i.e. if the ions were Mg+2 and Cl-, and there was too much negative charge in the solution, Mg+2 would be added until the solution was balanced. Examples -------- >>> anions_n = ['Cl-', 'HCO3-', 'SO4-2'] >>> cations_n = ['Na+', 'K+', 'Ca+2', 'Mg+2'] >>> cations = [pubchem_db.search_name(i) for i in cations_n] >>> anions = [pubchem_db.search_name(i) for i in anions_n] >>> an_res, cat_res, an_zs, cat_zs, z_water = balance_ions(anions, cations, ... anion_zs=[0.02557, 0.00039, 0.00026], cation_zs=[0.0233, 0.00075, ... 
0.00262, 0.00119], method='proportional excess ions decrease') >>> an_zs [0.02557, 0.00039, 0.00026] >>> cat_zs [0.01948165456267761, 0.0006270918850647299, 0.0021906409851594564, 0.0009949857909693717] >>> z_water 0.9504856267761288 References ---------- ''' anions = list(anions) cations = list(cations) n_anions = len(anions) n_cations = len(cations) ions = anions + cations anion_charges = [i.charge for i in anions] cation_charges = [i.charge for i in cations] charges = anion_charges + cation_charges + [0] MW_water = [18.01528] rho_w = rho_w/1000 # Convert to kg/liter if anion_concs is not None and cation_concs is not None: anion_ws = [i*1E-6/rho_w for i in anion_concs] cation_ws = [i*1E-6/rho_w for i in cation_concs] w_water = 1 - sum(anion_ws) - sum(cation_ws) anion_MWs = [i.MW for i in anions] cation_MWs = [i.MW for i in cations] MWs = anion_MWs + cation_MWs + MW_water zs = ws_to_zs(anion_ws + cation_ws + [w_water], MWs) else: if anion_zs is None or cation_zs is None: raise Exception('Either both of anion_concs and cation_concs or ' 'anion_zs and cation_zs must be specified.') else: zs = anion_zs + cation_zs zs = zs + [1 - sum(zs)] impacts = [zi*ci for zi, ci in zip(zs, charges)] balance_error = sum(impacts) if abs(balance_error) < 1E-7: anion_zs = zs[0:n_anions] cation_zs = zs[n_anions:n_cations+n_anions] z_water = zs[-1] return anions, cations, anion_zs, cation_zs, z_water if 'dominant' in method: anion_zs, cation_zs, z_water = ion_balance_dominant(impacts, balance_error, charges, zs, n_anions, n_cations, method) return anions, cations, anion_zs, cation_zs, z_water elif 'proportional' in method: anion_zs, cation_zs, z_water = ion_balance_proportional( anion_charges, cation_charges, zs, n_anions, n_cations, balance_error, method) return anions, cations, anion_zs, cation_zs, z_water elif method == 'Na or Cl increase': increase = True if balance_error < 0: selected_ion = pubchem_db.search_name('Na+') else: selected_ion = pubchem_db.search_name('Cl-') elif method 
== 'Na or Cl decrease': increase = False if balance_error > 0: selected_ion = pubchem_db.search_name('Na+') else: selected_ion = pubchem_db.search_name('Cl-') # All of the below work with the variable selected_ion elif method == 'adjust': # A single ion will be increase or decreased to fix the balance automatically increase = None elif method == 'increase': increase = True # Raise exception if approach doesn't work elif method == 'decrease': increase = False # Raise exception if approach doesn't work elif method == 'makeup': # selected ion starts out as a tuple in this case; always adding the compound increase = True if balance_error < 0: selected_ion = selected_ion[1] else: selected_ion = selected_ion[0] else: raise Exception('Method not recognized') if selected_ion is None: raise Exception("For methods 'adjust', 'increase', 'decrease', and " "'makeup', an ion must be specified with the " "`selected_ion` parameter") anion_zs, cation_zs, z_water = ion_balance_adjust_wrapper(charges, zs, n_anions, n_cations, anions, cations, selected_ion, increase=increase) return anions, cations, anion_zs, cation_zs, z_water
[ "def", "balance_ions", "(", "anions", ",", "cations", ",", "anion_zs", "=", "None", ",", "cation_zs", "=", "None", ",", "anion_concs", "=", "None", ",", "cation_concs", "=", "None", ",", "rho_w", "=", "997.1", ",", "method", "=", "'increase dominant'", ","...
46.468182
23.295455
def get_rel_attr(self, attr_name, model): """For a related attribute specification, returns (related model, attribute). Returns (None, None) if model is not found, or (model, None) if attribute is not found. """ rel_attr_name, attr_name = attr_name.split(".", 1) rel_attr = getattr(self.model, rel_attr_name, None) rel_model = None attr = None if rel_attr is not None: rel_model = rel_attr.property.mapper.class_ attr = getattr(rel_model, attr_name, None) return rel_model, attr
[ "def", "get_rel_attr", "(", "self", ",", "attr_name", ",", "model", ")", ":", "rel_attr_name", ",", "attr_name", "=", "attr_name", ".", "split", "(", "\".\"", ",", "1", ")", "rel_attr", "=", "getattr", "(", "self", ".", "model", ",", "rel_attr_name", ","...
33.941176
17.823529
def skip(mapping): """ :param mapping: generator :return: filtered generator """ found = set() for m in mapping: matched_atoms = set(m.values()) if found.intersection(matched_atoms): continue found.update(matched_atoms) yield m
[ "def", "skip", "(", "mapping", ")", ":", "found", "=", "set", "(", ")", "for", "m", "in", "mapping", ":", "matched_atoms", "=", "set", "(", "m", ".", "values", "(", ")", ")", "if", "found", ".", "intersection", "(", "matched_atoms", ")", ":", "cont...
23.666667
11.666667
def getExceptionClass(errorCode): """ Converts the specified error code into the corresponding class object. Raises a KeyError if the errorCode is not found. """ classMap = {} for name, class_ in inspect.getmembers(sys.modules[__name__]): if inspect.isclass(class_) and issubclass(class_, BaseServerException): classMap[class_.getErrorCode()] = class_ return classMap[errorCode]
[ "def", "getExceptionClass", "(", "errorCode", ")", ":", "classMap", "=", "{", "}", "for", "name", ",", "class_", "in", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ")", ":", "if", "inspect", ".", "isclass", "(", "clas...
41.7
16.3
def fetch_source(self) -> None: """Download the tar archive that contains the source code for the library. """ import requests # Do not import at the top that this file can be imported by setup.py with TemporaryFile() as temp_file: # Download the source archive request = requests.get(self.src_tar_gz_url) temp_file.write(request.content) # Rewind the file temp_file.seek(0) # Extract the content of the archive tar_file = tarfile.open(fileobj=temp_file) tar_file.extractall(path=_DEPS_PATH)
[ "def", "fetch_source", "(", "self", ")", "->", "None", ":", "import", "requests", "# Do not import at the top that this file can be imported by setup.py", "with", "TemporaryFile", "(", ")", "as", "temp_file", ":", "# Download the source archive", "request", "=", "requests",...
46.769231
10.538462
def bind(self, graph, reset=True, initialize=True): '''Bind this layer into a computation graph. This method is a wrapper for performing common initialization tasks. It calls :func:`resolve`, :func:`setup`, and :func:`log`. Parameters ---------- graph : :class:`Network <theanets.graph.Network>` A computation network in which this layer is to be bound. reset : bool, optional If ``True`` (the default), reset the resolved layers for this layer. initialize : bool, optional If ``True`` (the default), initialize the parameters for this layer by calling :func:`setup`. Raises ------ theanets.util.ConfigurationError : If an input cannot be resolved. ''' if reset: for k in self._input_shapes: self._input_shapes[k] = None for k in self._output_shapes: self._output_shapes[k] = None self.resolve_inputs(graph.layers) self.resolve_outputs() self.activate = activations.build( self.kwargs.get('activation', 'relu'), self) if initialize: self.setup() self.log()
[ "def", "bind", "(", "self", ",", "graph", ",", "reset", "=", "True", ",", "initialize", "=", "True", ")", ":", "if", "reset", ":", "for", "k", "in", "self", ".", "_input_shapes", ":", "self", ".", "_input_shapes", "[", "k", "]", "=", "None", "for",...
36.69697
18.030303
def observation_input(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space, and add input encoder of the appropriate type. ''' placeholder = observation_placeholder(ob_space, batch_size, name) return placeholder, encode_observation(ob_space, placeholder)
[ "def", "observation_input", "(", "ob_space", ",", "batch_size", "=", "None", ",", "name", "=", "'Ob'", ")", ":", "placeholder", "=", "observation_placeholder", "(", "ob_space", ",", "batch_size", ",", "name", ")", "return", "placeholder", ",", "encode_observatio...
44.5
32
def patch_string(s): """ Reorganize a String in such a way that surrogates are printable and lonely surrogates are escaped. :param s: input string :return: string with escaped lonely surrogates and 32bit surrogates """ res = '' it = PeekIterator(s) for c in it: if (ord(c) >> 10) == 0b110110: # High surrogate # Check for the next n = it.peek() if n and (ord(n) >> 10) == 0b110111: # Next is a low surrogate! Merge them together res += chr(((ord(c) & 0x3ff) << 10 | (ord(n) & 0x3ff)) + 0x10000) # Skip next char, as we already consumed it next(it) else: # Lonely high surrogate res += "\\u{:04x}".format(ord(c)) elif (ord(c) >> 10) == 0b110111: # Lonely low surrogate res += "\\u{:04x}".format(ord(c)) else: # Looks like a normal char... res += c return res
[ "def", "patch_string", "(", "s", ")", ":", "res", "=", "''", "it", "=", "PeekIterator", "(", "s", ")", "for", "c", "in", "it", ":", "if", "(", "ord", "(", "c", ")", ">>", "10", ")", "==", "0b110110", ":", "# High surrogate", "# Check for the next", ...
33.233333
15.433333
def get_mapping_variable(variable_name, variables_mapping): """ get variable from variables_mapping. Args: variable_name (str): variable name variables_mapping (dict): variables mapping Returns: mapping variable value. Raises: exceptions.VariableNotFound: variable is not found. """ try: return variables_mapping[variable_name] except KeyError: raise exceptions.VariableNotFound("{} is not found.".format(variable_name))
[ "def", "get_mapping_variable", "(", "variable_name", ",", "variables_mapping", ")", ":", "try", ":", "return", "variables_mapping", "[", "variable_name", "]", "except", "KeyError", ":", "raise", "exceptions", ".", "VariableNotFound", "(", "\"{} is not found.\"", ".", ...
26.833333
22.777778
def regions(self): """ This method will return all the available regions within the DigitalOcean cloud. """ json = self.request('/regions', method='GET') status = json.get('status') if status == 'OK': regions_json = json.get('regions', []) regions = [Region.from_json(region) for region in regions_json] return regions else: message = json.get('message') raise DOPException('[%s]: %s' % (status, message))
[ "def", "regions", "(", "self", ")", ":", "json", "=", "self", ".", "request", "(", "'/regions'", ",", "method", "=", "'GET'", ")", "status", "=", "json", ".", "get", "(", "'status'", ")", "if", "status", "==", "'OK'", ":", "regions_json", "=", "json"...
36.857143
14.571429
def fullmatch(pattern, string, flags=0): """Try to apply the pattern at the start of the string, returning a match object if the whole string matches, or None if no match was found.""" # Build a version of the pattern with a non-capturing group around it. # This is needed to get m.end() to correctly report the size of the # matched expression (as per the final doctest above). grouped_pattern = re.compile("^(?:%s)$" % pattern.pattern, pattern.flags) m = grouped_pattern.match(string) if m and m.end() < len(string): # Incomplete match (which should never happen because of the $ at the # end of the regexp), treat as failure. m = None # pragma no cover return m
[ "def", "fullmatch", "(", "pattern", ",", "string", ",", "flags", "=", "0", ")", ":", "# Build a version of the pattern with a non-capturing group around it.", "# This is needed to get m.end() to correctly report the size of the", "# matched expression (as per the final doctest above).", ...
54.846154
15.769231
def labels(self): """Retrieve or set labels assigned to this bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets#labels .. note:: The getter for this property returns a dict which is a *copy* of the bucket's labels. Mutating that dict has no effect unless you then re-assign the dict via the setter. E.g.: >>> labels = bucket.labels >>> labels['new_key'] = 'some-label' >>> del labels['old_key'] >>> bucket.labels = labels >>> bucket.update() :setter: Set labels for this bucket. :getter: Gets the labels for this bucket. :rtype: :class:`dict` :returns: Name-value pairs (string->string) labelling the bucket. """ labels = self._properties.get("labels") if labels is None: return {} return copy.deepcopy(labels)
[ "def", "labels", "(", "self", ")", ":", "labels", "=", "self", ".", "_properties", ".", "get", "(", "\"labels\"", ")", "if", "labels", "is", "None", ":", "return", "{", "}", "return", "copy", ".", "deepcopy", "(", "labels", ")" ]
32.357143
20.107143
def demo_update(self): """ Performs a demonstration update by calling the demo optimization operation. Note that the batch data does not have to be fetched from the demo memory as this is now part of the TensorFlow operation of the demo update. """ fetches = self.demo_optimization_output self.monitored_session.run(fetches=fetches)
[ "def", "demo_update", "(", "self", ")", ":", "fetches", "=", "self", ".", "demo_optimization_output", "self", ".", "monitored_session", ".", "run", "(", "fetches", "=", "fetches", ")" ]
42.333333
21.666667
def tree_model_natsort(model, row1, row2, user_data=None): '''用natural sorting算法对TreeModel的一个column进行排序''' sort_column, sort_type = model.get_sort_column_id() value1 = model.get_value(row1, sort_column) value2 = model.get_value(row2, sort_column) sort_list1 = util.natsort(value1) sort_list2 = util.natsort(value2) status = sort_list1 < sort_list2 if sort_list1 < sort_list2: return -1 else: return 1
[ "def", "tree_model_natsort", "(", "model", ",", "row1", ",", "row2", ",", "user_data", "=", "None", ")", ":", "sort_column", ",", "sort_type", "=", "model", ".", "get_sort_column_id", "(", ")", "value1", "=", "model", ".", "get_value", "(", "row1", ",", ...
36.75
12.916667
def parse_mode(mode, default_bitdepth=None): """Parse PIL-style mode and return tuple (grayscale, alpha, bitdeph)""" # few special cases if mode == 'P': # Don't know what is pallette raise Error('Unknown colour mode:' + mode) elif mode == '1': # Logical return (True, False, 1) elif mode == 'I': # Integer return (True, False, 16) # here we go if mode.startswith('L'): grayscale = True mode = mode[1:] elif mode.startswith('RGB'): grayscale = False mode = mode[3:] else: raise Error('Unknown colour mode:' + mode) if mode.startswith('A'): alpha = True mode = mode[1:] else: alpha = False bitdepth = default_bitdepth if mode.startswith(';'): mode = mode[1:] if mode: try: bitdepth = int(mode) except (TypeError, ValueError): raise Error('Unsupported bitdepth mode:' + mode) return (grayscale, alpha, bitdepth)
[ "def", "parse_mode", "(", "mode", ",", "default_bitdepth", "=", "None", ")", ":", "# few special cases", "if", "mode", "==", "'P'", ":", "# Don't know what is pallette", "raise", "Error", "(", "'Unknown colour mode:'", "+", "mode", ")", "elif", "mode", "==", "'1...
26.864865
16.459459
def get_gradebook_column(self, gradebook_column_id): """Gets the ``GradebookColumn`` specified by its ``Id``. In plenary mode, the exact ``Id`` is found or a ``NotFound`` results. Otherwise, the returned ``GradebookColumn`` may have a different ``Id`` than requested, such as the case where a duplicate ``Id`` was assigned to a ``GradebookColumn`` and retained for compatibility. arg: gradebook_column_id (osid.id.Id): ``Id`` of the ``GradebookColumn`` return: (osid.grading.GradebookColumn) - the gradebook column raise: NotFound - ``gradebook_column_id`` not found raise: NullArgument - ``gradebook_column_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method is must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resource # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('grading', collection='GradebookColumn', runtime=self._runtime) result = collection.find_one( dict({'_id': ObjectId(self._get_id(gradebook_column_id, 'grading').get_identifier())}, **self._view_filter())) return objects.GradebookColumn(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_gradebook_column", "(", "self", ",", "gradebook_column_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resource", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'grad...
53
22.413793
def download_handler(feed, placeholders): import shlex """ Parse and execute the download handler """ value = feed.retrieve_config('downloadhandler', 'greg') if value == 'greg': while os.path.isfile(placeholders.fullpath): placeholders.fullpath = placeholders.fullpath + '_' placeholders.filename = placeholders.filename + '_' urlretrieve(placeholders.link, placeholders.fullpath) else: value_list = shlex.split(value) instruction_list = [substitute_placeholders(part, placeholders) for part in value_list] returncode = subprocess.call(instruction_list) if returncode: raise URLError
[ "def", "download_handler", "(", "feed", ",", "placeholders", ")", ":", "import", "shlex", "value", "=", "feed", ".", "retrieve_config", "(", "'downloadhandler'", ",", "'greg'", ")", "if", "value", "==", "'greg'", ":", "while", "os", ".", "path", ".", "isfi...
39.222222
14.555556
def removePadding(str, blocksize=AES_blocksize, mode='CMS'): ''' Remove padding from string Input: (str) str - String to be padded (int) blocksize - block size of the algorithm (string) mode - padding scheme one in (CMS, Bit, ZeroLen, Null, Space, Random) Return:(string) Decrypted string without padding ''' if mode not in (0,'CMS'): for k in MODES.keys(): if mode in k: return globals()['append'+k[1]+'Padding'](str, blocksize) else: return removeCMSPadding(str, blocksize) else: return removeCMSPadding(str, blocksize)
[ "def", "removePadding", "(", "str", ",", "blocksize", "=", "AES_blocksize", ",", "mode", "=", "'CMS'", ")", ":", "if", "mode", "not", "in", "(", "0", ",", "'CMS'", ")", ":", "for", "k", "in", "MODES", ".", "keys", "(", ")", ":", "if", "mode", "in...
40
18.266667
def _parser_jsonip(text): """Parse response text like the one returned by http://jsonip.com/.""" import json try: return str(json.loads(text).get("ip")) except ValueError as exc: LOG.debug("Text '%s' could not be parsed", exc_info=exc) return None
[ "def", "_parser_jsonip", "(", "text", ")", ":", "import", "json", "try", ":", "return", "str", "(", "json", ".", "loads", "(", "text", ")", ".", "get", "(", "\"ip\"", ")", ")", "except", "ValueError", "as", "exc", ":", "LOG", ".", "debug", "(", "\"...
35
16.75
def cancel_task(all, task_id): """ Executor for `globus task cancel` """ if bool(all) + bool(task_id) != 1: raise click.UsageError( "You must pass EITHER the special --all flag " "to cancel all in-progress tasks OR a single " "task ID to cancel." ) client = get_client() if all: from sys import maxsize task_ids = [ task_row["task_id"] for task_row in client.task_list( filter="type:TRANSFER,DELETE/status:ACTIVE,INACTIVE", fields="task_id", num_results=maxsize, # FIXME want to ask for "unlimited" set ) ] task_count = len(task_ids) if not task_ids: raise click.ClickException("You have no in-progress tasks.") def cancellation_iterator(): for i in task_ids: yield (i, client.cancel_task(i).data) def json_converter(res): return { "results": [x for i, x in cancellation_iterator()], "task_ids": task_ids, } def _custom_text(res): for (i, (task_id, data)) in enumerate(cancellation_iterator(), start=1): safeprint( u"{} ({} of {}): {}".format(task_id, i, task_count, data["message"]) ) # FIXME: this is kind of an abuse of formatted_print because the # text format and json converter are doing their own thing, not really # interacting with the "response data" (None). Is there a better way of # handling this? formatted_print(None, text_format=_custom_text, json_converter=json_converter) else: res = client.cancel_task(task_id) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
[ "def", "cancel_task", "(", "all", ",", "task_id", ")", ":", "if", "bool", "(", "all", ")", "+", "bool", "(", "task_id", ")", "!=", "1", ":", "raise", "click", ".", "UsageError", "(", "\"You must pass EITHER the special --all flag \"", "\"to cancel all in-progres...
32.214286
23.321429
def wait_for_elements( self, using, value, timeout=10000, interval=1000, asserter=is_displayed): """Wait for elements till satisfy the given condition Support: Android iOS Web(WebView) Args: using(str): The element location strategy. value(str): The value of the location strategy. timeout(int): How long we should be retrying stuff. interval(int): How long between retries. asserter(callable): The asserter func to determine the result. Returns: Return the list of Element if any of them satisfy the condition. Raises: WebDriverException. """ if not callable(asserter): raise TypeError('Asserter must be callable.') @retry( retry_on_exception=lambda ex: isinstance(ex, WebDriverException), stop_max_delay=timeout, wait_fixed=interval ) def _wait_for_elements(ctx, using, value): els = ctx.elements(using, value) if not len(els): raise WebDriverException('no such element') else: el = els[0] asserter(el) return els return _wait_for_elements(self, using, value)
[ "def", "wait_for_elements", "(", "self", ",", "using", ",", "value", ",", "timeout", "=", "10000", ",", "interval", "=", "1000", ",", "asserter", "=", "is_displayed", ")", ":", "if", "not", "callable", "(", "asserter", ")", ":", "raise", "TypeError", "("...
33.605263
19.131579
def write_temp_file(self, content, filename=None, mode='w'): """Write content to a temporary file. Args: content (bytes|str): The file content. If passing binary data the mode needs to be set to 'wb'. filename (str, optional): The filename to use when writing the file. mode (str, optional): The file write mode which could be either 'w' or 'wb'. Returns: str: Fully qualified path name for the file. """ if filename is None: filename = str(uuid.uuid4()) fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename) with open(fqpn, mode) as fh: fh.write(content) return fqpn
[ "def", "write_temp_file", "(", "self", ",", "content", ",", "filename", "=", "None", ",", "mode", "=", "'w'", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "fqpn", "=", "os", ".", ...
39.833333
22.833333