text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def create_all_recommendations(self, cores, ip_views=False):
    """Calculate the recommendations for all records.

    Publishes this instance's store through the module-level ``_store``
    global (so the module-level helper and any workers it spawns can
    reach it), then delegates to ``_create_all_recommendations``.

    :param cores: number of worker cores, passed through to the helper.
    :param ip_views: whether to use IP-based views; defaults to False.
    """
    global _store
    # Expose the store module-wide for _create_all_recommendations.
    _store = self.store
    _create_all_recommendations(cores, ip_views, self.config)
[ "def", "create_all_recommendations", "(", "self", ",", "cores", ",", "ip_views", "=", "False", ")", ":", "global", "_store", "_store", "=", "self", ".", "store", "_create_all_recommendations", "(", "cores", ",", "ip_views", ",", "self", ".", "config", ")" ]
46.6
15.4
def get_from_cache(self, org_id, id):
    '''Fetch a cached object for ``org_id``/``id``, or None when absent/expired.

    Lookup order:
      1. the in-memory cache (``self.memory_cache``);
      2. each on-disk cache directory, primary first
         (``self.cache_path`` followed by ``self.secondary_cache_paths``).

    A fresh object found on disk is promoted into the memory cache,
    evicting the oldest entry once ``self.max_memory_cache`` is exceeded.
    Expired entries are skipped so callers can re-fetch and overwrite.

    :param org_id: organisation namespace the object is stored under.
    :param id: identifier of the cached object within the namespace.
    :return: the unpickled object, or None when nothing fresh was found.
    '''
    current_time = datetime.now()
    # Check the in-memory cache first -- cheapest lookup.
    if id in self.memory_cache[org_id]:
        obj = self.memory_cache[org_id][id]
        if obj.created_at > current_time - self.expire_records_after:
            return obj
    for cache in [self.cache_path] + self.secondary_cache_paths:
        read_path = os.path.join(cache, org_id, id)
        try:
            with open(read_path, 'rb') as f:
                obj = pickle.load(f)
        except Exception:
            # Missing or unreadable/corrupt entry in this cache dir: try
            # the next one.  (Was a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt; Exception lets those
            # propagate while still covering OSError/pickle errors.)
            pass
        else:
            # Loaded successfully -- return it if still fresh, else keep looking.
            if obj.created_at > current_time - self.expire_records_after:
                # It wasn't fresh in the memory cache (checked above), so promote it.
                self.memory_cache[org_id][id] = obj
                if len(self.memory_cache[org_id]) > self.max_memory_cache:
                    # Evict the oldest insertion (memory_cache values are
                    # assumed to be OrderedDict-like -- popitem(last=False)).
                    self.memory_cache[org_id].popitem(last=False)
                return obj
    # Nothing found, or everything found was expired.
    return None
[ "def", "get_from_cache", "(", "self", ",", "org_id", ",", "id", ")", ":", "current_time", "=", "datetime", ".", "now", "(", ")", "# Check memory cache first", "if", "id", "in", "self", ".", "memory_cache", "[", "org_id", "]", ":", "obj", "=", "self", ".", "memory_cache", "[", "org_id", "]", "[", "id", "]", "if", "obj", ".", "created_at", ">", "current_time", "-", "self", ".", "expire_records_after", ":", "return", "obj", "for", "cache", "in", "[", "self", ".", "cache_path", "]", "+", "self", ".", "secondary_cache_paths", ":", "read_path", "=", "os", ".", "path", ".", "join", "(", "cache", ",", "org_id", ",", "id", ")", "try", ":", "with", "open", "(", "read_path", ",", "'rb'", ")", "as", "f", ":", "obj", "=", "pickle", ".", "load", "(", "f", ")", "except", ":", "# Continue to try the next cache", "pass", "else", ":", "# It worked so we have obj", "# Check for expiry date; if it's not expired return it else continue", "if", "obj", ".", "created_at", ">", "current_time", "-", "self", ".", "expire_records_after", ":", "# If we're here it mustn't be in the memory cache", "self", ".", "memory_cache", "[", "org_id", "]", "[", "id", "]", "=", "obj", "if", "len", "(", "self", ".", "memory_cache", "[", "org_id", "]", ")", ">", "self", ".", "max_memory_cache", ":", "self", ".", "memory_cache", "[", "org_id", "]", ".", "popitem", "(", "last", "=", "False", ")", "return", "obj", "# Else continue looking", "# We found nothing (or all expired)", "return", "None" ]
39.02381
22.785714
def call_checkout_api(self, request_data, action, **kwargs):
    """Call the Adyen checkout API.

    The x-api key, merchant account and platform are taken from this
    object when set, otherwise from the keyword arguments.  An
    AdyenResult is returned on a 200 response; otherwise an exception
    is raised.

    Args:
        request_data (dict): The request to place, structured per the
            Adyen API.
            https://docs.adyen.com/developers/checkout/api-integration
        action (str): The specific action of the API service to be called.

    Raises:
        AdyenInvalidRequestError: when no xapikey is configured anywhere.
        TypeError: when the resolved platform is missing or not a string.
        ValueError: when the platform is not 'live' or 'test'.
    """
    if not self.http_init:
        self.http_client = HTTPClient(self.app_name,
                                      self.USER_AGENT_SUFFIX,
                                      self.LIB_VERSION,
                                      self.http_force)
        self.http_init = True

    # xapikey at self object has highest priority, falling back to the
    # keyword argument.  Pop it from kwargs unconditionally so it is not
    # forwarded a second time to http_client.request below (which would
    # raise a duplicate-keyword TypeError).  The original code left
    # ``xapikey`` unbound when neither source was set, producing an
    # UnboundLocalError instead of the intended error below.
    kw_xapikey = kwargs.pop("xapikey", None)
    xapikey = self.xapikey or kw_xapikey
    if not xapikey:
        errorstring = """Please set your webservice xapikey.
        You can do this by running 'Adyen.xapikey = 'Your xapikey'"""
        raise AdyenInvalidRequestError(errorstring)

    # platform at self object has highest priority, falling back to the
    # keyword argument; it must be 'live' or 'test'.  As above, pop it
    # so it is not forwarded twice, and validate it even when it was
    # never provided (None fails the isinstance check).
    kw_platform = kwargs.pop('platform', None)
    platform = self.platform or kw_platform
    if not isinstance(platform, str):
        errorstring = "'platform' value must be type of string"
        raise TypeError(errorstring)
    elif platform.lower() not in ['live', 'test']:
        errorstring = "'platform' must be the value of 'live' or 'test'"
        raise ValueError(errorstring)

    if not request_data.get('merchantAccount'):
        request_data['merchantAccount'] = self.merchant_account

    request_data['applicationInfo'] = {
        "adyenLibrary": {
            "name": settings.LIB_NAME,
            "version": settings.LIB_VERSION
        }
    }

    # Adyen requires this header to be set and uses the combination of
    # merchant account and merchant reference to determine uniqueness.
    headers = {}

    url = self._determine_checkout_url(platform, action)

    raw_response, raw_request, status_code, headers = \
        self.http_client.request(url, json=request_data, xapikey=xapikey,
                                 headers=headers, **kwargs)

    # Creates AdyenResponse if request was successful, raises error if not.
    adyen_result = self._handle_response(url, raw_response, raw_request,
                                         status_code, headers, request_data)
    return adyen_result
[ "def", "call_checkout_api", "(", "self", ",", "request_data", ",", "action", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "http_init", ":", "self", ".", "http_client", "=", "HTTPClient", "(", "self", ".", "app_name", ",", "self", ".", "USER_AGENT_SUFFIX", ",", "self", ".", "LIB_VERSION", ",", "self", ".", "http_force", ")", "self", ".", "http_init", "=", "True", "# xapi at self object has highest priority. fallback to root module", "# and ensure that it is set.", "if", "self", ".", "xapikey", ":", "xapikey", "=", "self", ".", "xapikey", "elif", "'xapikey'", "in", "kwargs", ":", "xapikey", "=", "kwargs", ".", "pop", "(", "\"xapikey\"", ")", "if", "not", "xapikey", ":", "errorstring", "=", "\"\"\"Please set your webservice xapikey.\n You can do this by running 'Adyen.xapikey = 'Your xapikey'\"\"\"", "raise", "AdyenInvalidRequestError", "(", "errorstring", ")", "# platform at self object has highest priority. fallback to root module", "# and ensure that it is set to either 'live' or 'test'.", "if", "self", ".", "platform", ":", "platform", "=", "self", ".", "platform", "elif", "'platform'", "in", "kwargs", ":", "platform", "=", "kwargs", ".", "pop", "(", "'platform'", ")", "if", "not", "isinstance", "(", "platform", ",", "str", ")", ":", "errorstring", "=", "\"'platform' value must be type of string\"", "raise", "TypeError", "(", "errorstring", ")", "elif", "platform", ".", "lower", "(", ")", "not", "in", "[", "'live'", ",", "'test'", "]", ":", "errorstring", "=", "\"'platform' must be the value of 'live' or 'test'\"", "raise", "ValueError", "(", "errorstring", ")", "if", "not", "request_data", ".", "get", "(", "'merchantAccount'", ")", ":", "request_data", "[", "'merchantAccount'", "]", "=", "self", ".", "merchant_account", "request_data", "[", "'applicationInfo'", "]", "=", "{", "\"adyenLibrary\"", ":", "{", "\"name\"", ":", "settings", ".", "LIB_NAME", ",", "\"version\"", ":", "settings", ".", "LIB_VERSION", "}", "}", "# Adyen requires this header 
to be set and uses the combination of", "# merchant account and merchant reference to determine uniqueness.", "headers", "=", "{", "}", "url", "=", "self", ".", "_determine_checkout_url", "(", "platform", ",", "action", ")", "raw_response", ",", "raw_request", ",", "status_code", ",", "headers", "=", "self", ".", "http_client", ".", "request", "(", "url", ",", "json", "=", "request_data", ",", "xapikey", "=", "xapikey", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "# Creates AdyenResponse if request was successful, raises error if not.", "adyen_result", "=", "self", ".", "_handle_response", "(", "url", ",", "raw_response", ",", "raw_request", ",", "status_code", ",", "headers", ",", "request_data", ")", "return", "adyen_result" ]
43.25
21.319444
def get_target_list(self, scan_id):
    """Return the list of targets registered for the given scan ID."""
    entries = self.scans_table[scan_id]['targets']
    return [target for target, _, _ in entries]
[ "def", "get_target_list", "(", "self", ",", "scan_id", ")", ":", "target_list", "=", "[", "]", "for", "target", ",", "_", ",", "_", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'targets'", "]", ":", "target_list", ".", "append", "(", "target", ")", "return", "target_list" ]
32.714286
14.571429
def increase_counter(self, *path, **kwargs):
    """Increase a counter within the application's statsd namespace.

    The elements of *path* form the metric path; joining and
    normalization are handled by the statsd client.

    :param path: elements of the metric path to increment.
    :keyword amount: amount to add to the counter; defaults to '1'.
    """
    amount = kwargs.get('amount', '1')
    self.application.statsd.send(path, amount, 'c')
[ "def", "increase_counter", "(", "self", ",", "*", "path", ",", "*", "*", "kwargs", ")", ":", "self", ".", "application", ".", "statsd", ".", "send", "(", "path", ",", "kwargs", ".", "get", "(", "'amount'", ",", "'1'", ")", ",", "'c'", ")" ]
41.333333
22.6
def profile(event_type, extra_data=None):
    """Profile a span of time so it appears in the timeline visualization.

    Only works in the raylet code path.  Use it as a context manager, on
    the driver or within a task:

    .. code-block:: python

        with ray.profile("custom event", extra_data={'key': 'value'}):
            # Do some computation here.

    The optional ``extra_data`` dictionary is merged into the timeline
    JSON objects; a "cname" key overrides the box color and a "name" key
    overrides the displayed text, while other entries show up in the
    chrome tracing GUI when the box is clicked.

    Args:
        event_type: A string describing the type of the event.
        extra_data: A dictionary mapping strings to strings, added to
            the timeline JSON for this span.

    Returns:
        An object that can profile a span of time via a "with" statement.
    """
    profiler = ray.worker.global_worker.profiler
    return RayLogSpanRaylet(profiler, event_type, extra_data=extra_data)
[ "def", "profile", "(", "event_type", ",", "extra_data", "=", "None", ")", ":", "worker", "=", "ray", ".", "worker", ".", "global_worker", "return", "RayLogSpanRaylet", "(", "worker", ".", "profiler", ",", "event_type", ",", "extra_data", "=", "extra_data", ")" ]
44.40625
27.96875
def find_trigger_value(psd_var, idx, start, sample_rate):
    """Find the PSD variation value at particular trigger times.

    Parameters
    ----------
    psd_var : TimeSeries
        Time series of the variability in the PSD estimation.
    idx : numpy.ndarray
        Time indices of the triggers.
    start : float
        GPS start time.
    sample_rate : float
        Sample rate defined in the ini file.

    Returns
    -------
    vals : Array
        PSD variation value at each trigger time.
    """
    # GPS times of the triggers.
    trigger_times = start + idx / sample_rate
    # Locate each trigger within the PSD-variation time series
    # (digitize returns 1-based bin indices, hence the -1).
    positions = numpy.digitize(trigger_times, psd_var.sample_times) - 1
    return psd_var[positions]
[ "def", "find_trigger_value", "(", "psd_var", ",", "idx", ",", "start", ",", "sample_rate", ")", ":", "# Find gps time of the trigger", "time", "=", "start", "+", "idx", "/", "sample_rate", "# Find where in the psd variation time series the trigger belongs", "ind", "=", "numpy", ".", "digitize", "(", "time", ",", "psd_var", ".", "sample_times", ")", "ind", "-=", "1", "vals", "=", "psd_var", "[", "ind", "]", "return", "vals" ]
26.666667
19.111111
def ext_publish(self, instance, loop, *args, **kwargs):
    """Notify external event systems via the configured signaller.

    If an ``external_signaller`` is configured, call its publish method;
    otherwise do nothing.  For internal usage only, but documented
    because it is part of the interface with external notification
    systems.
    """
    signaller = self.external_signaller
    if signaller is None:
        return None
    # The loop is assumed to be managed by the external handler.
    return signaller.publish_signal(self, instance, loop, args, kwargs)
[ "def", "ext_publish", "(", "self", ",", "instance", ",", "loop", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "external_signaller", "is", "not", "None", ":", "# Assumes that the loop is managed by the external handler", "return", "self", ".", "external_signaller", ".", "publish_signal", "(", "self", ",", "instance", ",", "loop", ",", "args", ",", "kwargs", ")" ]
53
20.090909
def cmd_cammsg_old(self, args):
    '''Send the legacy DIGICAM_CONTROL message.'''
    print("Sent old DIGICAM_CONTROL")
    target = self.settings.target_system
    # target_system, target_component (0), then the eight control
    # fields -- only the fifth one is set to 1.
    self.master.mav.digicam_control_send(target, 0, 0, 0, 0, 0, 1, 0, 0, 0)
[ "def", "cmd_cammsg_old", "(", "self", ",", "args", ")", ":", "print", "(", "\"Sent old DIGICAM_CONTROL\"", ")", "self", ".", "master", ".", "mav", ".", "digicam_control_send", "(", "self", ".", "settings", ".", "target_system", ",", "# target_system", "0", ",", "# target_component", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "0", ",", "0", ",", "0", ")" ]
33.25
12.5
def InsertIntArg(self, string='', **unused_kwargs):
    """Inserts an argument parsed from *string* as an integer.

    Raises:
        errors.ParseError: when *string* is not a valid integer.
    """
    try:
        value = int(string)
    except (TypeError, ValueError):
        raise errors.ParseError('{0:s} is not a valid integer.'.format(string))
    return self.InsertArg(value)
[ "def", "InsertIntArg", "(", "self", ",", "string", "=", "''", ",", "*", "*", "unused_kwargs", ")", ":", "try", ":", "int_value", "=", "int", "(", "string", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "errors", ".", "ParseError", "(", "'{0:s} is not a valid integer.'", ".", "format", "(", "string", ")", ")", "return", "self", ".", "InsertArg", "(", "int_value", ")" ]
39.142857
14.285714
def _init_options(self, kwargs): """ Initializes self.options """ self.options = self.task_config.options if self.options is None: self.options = {} if kwargs: self.options.update(kwargs) # Handle dynamic lookup of project_config values via $project_config.attr for option, value in list(self.options.items()): try: if value.startswith("$project_config."): attr = value.replace("$project_config.", "", 1) self.options[option] = getattr(self.project_config, attr, None) except AttributeError: pass
[ "def", "_init_options", "(", "self", ",", "kwargs", ")", ":", "self", ".", "options", "=", "self", ".", "task_config", ".", "options", "if", "self", ".", "options", "is", "None", ":", "self", ".", "options", "=", "{", "}", "if", "kwargs", ":", "self", ".", "options", ".", "update", "(", "kwargs", ")", "# Handle dynamic lookup of project_config values via $project_config.attr", "for", "option", ",", "value", "in", "list", "(", "self", ".", "options", ".", "items", "(", ")", ")", ":", "try", ":", "if", "value", ".", "startswith", "(", "\"$project_config.\"", ")", ":", "attr", "=", "value", ".", "replace", "(", "\"$project_config.\"", ",", "\"\"", ",", "1", ")", "self", ".", "options", "[", "option", "]", "=", "getattr", "(", "self", ".", "project_config", ",", "attr", ",", "None", ")", "except", "AttributeError", ":", "pass" ]
40.625
18.125
def get_account_certificate(self, account_id, cert_id, **kwargs):  # noqa: E501
    """Get trusted certificate by ID.  # noqa: E501

    An endpoint for retrieving a trusted certificate by ID.
    **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'`

    Synchronous by default; pass asynchronous=True for an async request:
    >>> thread = api.get_account_certificate(account_id, cert_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str cert_id: The ID of the trusted certificate to be retrieved. (required)
    :return: TrustedCertificateInternalResp, or the request thread when
             the method is called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths issue the same call;
    # the 'asynchronous' flag is consumed by the _with_http_info helper.
    return self.get_account_certificate_with_http_info(
        account_id, cert_id, **kwargs)  # noqa: E501
[ "def", "get_account_certificate", "(", "self", ",", "account_id", ",", "cert_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_account_certificate_with_http_info", "(", "account_id", ",", "cert_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_account_certificate_with_http_info", "(", "account_id", ",", "cert_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
58.818182
32.272727
def rm_fstab(name, device, config='/etc/fstab'):
    '''
    .. versionchanged:: 2016.3.2

    Remove the mount point from the fstab

    name
        The mount point to remove.
    device
        The device of the entry to remove.
    config
        Path of the fstab file; defaults to '/etc/fstab'.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.rm_fstab /mnt/foo /dev/sdg
    '''
    modified = False

    # SunOS uses /etc/vfstab with a different entry format.
    if __grains__['kernel'] == 'SunOS':
        criteria = _vfstab_entry(name=name, device=device)
    else:
        criteria = _fstab_entry(name=name, device=device)

    lines = []
    try:
        with salt.utils.files.fopen(config, 'r') as ifile:
            for line in ifile:
                line = salt.utils.stringutils.to_unicode(line)
                try:
                    # Drop lines matching the target entry; keep everything else.
                    if criteria.match(line):
                        modified = True
                    else:
                        lines.append(line)
                except _fstab_entry.ParseError:
                    # Unparseable lines (comments, blanks) are preserved as-is.
                    lines.append(line)
                except _vfstab_entry.ParseError:
                    lines.append(line)
    except (IOError, OSError) as exc:
        msg = "Couldn't read from {0}: {1}"
        raise CommandExecutionError(msg.format(config, exc))

    if modified:
        # Rewrite the file only when an entry was actually removed.
        try:
            with salt.utils.files.fopen(config, 'wb') as ofile:
                ofile.writelines(salt.utils.data.encode(lines))
        except (IOError, OSError) as exc:
            msg = "Couldn't write to {0}: {1}"
            raise CommandExecutionError(msg.format(config, exc))

    # Note: not clear why we always return 'True'
    # --just copying previous behavior at this point...
    return True
[ "def", "rm_fstab", "(", "name", ",", "device", ",", "config", "=", "'/etc/fstab'", ")", ":", "modified", "=", "False", "if", "__grains__", "[", "'kernel'", "]", "==", "'SunOS'", ":", "criteria", "=", "_vfstab_entry", "(", "name", "=", "name", ",", "device", "=", "device", ")", "else", ":", "criteria", "=", "_fstab_entry", "(", "name", "=", "name", ",", "device", "=", "device", ")", "lines", "=", "[", "]", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "config", ",", "'r'", ")", "as", "ifile", ":", "for", "line", "in", "ifile", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "try", ":", "if", "criteria", ".", "match", "(", "line", ")", ":", "modified", "=", "True", "else", ":", "lines", ".", "append", "(", "line", ")", "except", "_fstab_entry", ".", "ParseError", ":", "lines", ".", "append", "(", "line", ")", "except", "_vfstab_entry", ".", "ParseError", ":", "lines", ".", "append", "(", "line", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "exc", ":", "msg", "=", "\"Couldn't read from {0}: {1}\"", "raise", "CommandExecutionError", "(", "msg", ".", "format", "(", "config", ",", "exc", ")", ")", "if", "modified", ":", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "config", ",", "'wb'", ")", "as", "ofile", ":", "ofile", ".", "writelines", "(", "salt", ".", "utils", ".", "data", ".", "encode", "(", "lines", ")", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "exc", ":", "msg", "=", "\"Couldn't write to {0}: {1}\"", "raise", "CommandExecutionError", "(", "msg", ".", "format", "(", "config", ",", "exc", ")", ")", "# Note: not clear why we always return 'True'", "# --just copying previous behavior at this point...", "return", "True" ]
29.68
19.84
def register(self, collector):
    """Add a collector to the registry.

    Raises ValueError if any of the collector's timeseries names is
    already registered.
    """
    with self._lock:
        names = self._get_names(collector)
        clashes = set(self._names_to_collectors).intersection(names)
        if clashes:
            raise ValueError(
                'Duplicated timeseries in CollectorRegistry: {0}'.format(
                    clashes))
        self._names_to_collectors.update(dict.fromkeys(names, collector))
        self._collector_to_names[collector] = names
[ "def", "register", "(", "self", ",", "collector", ")", ":", "with", "self", ".", "_lock", ":", "names", "=", "self", ".", "_get_names", "(", "collector", ")", "duplicates", "=", "set", "(", "self", ".", "_names_to_collectors", ")", ".", "intersection", "(", "names", ")", "if", "duplicates", ":", "raise", "ValueError", "(", "'Duplicated timeseries in CollectorRegistry: {0}'", ".", "format", "(", "duplicates", ")", ")", "for", "name", "in", "names", ":", "self", ".", "_names_to_collectors", "[", "name", "]", "=", "collector", "self", ".", "_collector_to_names", "[", "collector", "]", "=", "names" ]
44.75
14.416667
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
          order_by=None, limit=None, offset=0, **keywords):
    """Query the OpenSearch API with an area of interest, a sensing-date
    interval and any other search keywords accepted by the API.

    Parameters
    ----------
    area : str, optional
        The area of interest formatted as a Well-Known Text string.
    date : tuple of (str or datetime) or str, optional
        Filter on the Sensing Start Time of the products: a
        (start, end) tuple -- each element a Python datetime or a
        server-side date string (e.g. "yyyyMMdd", ISO-8601 timestamps,
        "NOW", "NOW-1DAY", "NOW+<n>DAY(S)", "NOW/DAY") -- or an
        already formatted interval such as "[NOW-1DAY TO NOW]".
    raw : str, optional
        Additional query text appended verbatim to the query.
    area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
        Relation used for testing the AOI, case insensitive:
        Intersects (default) -- AOI and footprint intersect;
        Contains -- AOI lies inside the footprint;
        IsWithin -- footprint lies inside the AOI.
    order_by : str, optional
        Comma-separated fields for server-side ordering.  Prefix a
        field with '+' or '-' for ascending or descending order;
        ascending is used when the prefix is omitted.
    limit : int, optional
        Maximum number of products returned.  Defaults to no limit.
    offset : int, optional
        Number of results to skip.  Defaults to 0.
    **keywords
        Any other query parameters, e.g. ``relativeorbitnumber=70``.
        Two-element tuples express ranges, with ``None`` for one-sided
        ranges; fully unbounded ranges are omitted from the query.
        The `date` interval formats also work for the other
        time-interval parameters ('beginposition', 'endposition',
        'date', 'creationdate', 'ingestiondate').

    Returns
    -------
    dict[string, dict]
        Products returned by the query, keyed by product ID with each
        product's attribute dictionary as the value.
    """
    search_query = self.format_query(area, date, raw, area_relation, **keywords)
    self.logger.debug(
        "Running query: order_by=%s, limit=%s, offset=%s, query=%s",
        order_by, limit, offset, search_query)
    response, count = self._load_query(
        search_query, _format_order_by(order_by), limit, offset)
    self.logger.info("Found %s products", count)
    return _parse_opensearch_response(response)
[ "def", "query", "(", "self", ",", "area", "=", "None", ",", "date", "=", "None", ",", "raw", "=", "None", ",", "area_relation", "=", "'Intersects'", ",", "order_by", "=", "None", ",", "limit", "=", "None", ",", "offset", "=", "0", ",", "*", "*", "keywords", ")", ":", "query", "=", "self", ".", "format_query", "(", "area", ",", "date", ",", "raw", ",", "area_relation", ",", "*", "*", "keywords", ")", "self", ".", "logger", ".", "debug", "(", "\"Running query: order_by=%s, limit=%s, offset=%s, query=%s\"", ",", "order_by", ",", "limit", ",", "offset", ",", "query", ")", "formatted_order_by", "=", "_format_order_by", "(", "order_by", ")", "response", ",", "count", "=", "self", ".", "_load_query", "(", "query", ",", "formatted_order_by", ",", "limit", ",", "offset", ")", "self", ".", "logger", ".", "info", "(", "\"Found %s products\"", ",", "count", ")", "return", "_parse_opensearch_response", "(", "response", ")" ]
50.027397
28.123288
def short_stack():
    """Return a string summarizing the call stack, one frame per line."""
    # Skip this function's own frame; print outermost frame first.
    frames = inspect.stack()[1:]
    return "\n".join(
        "%30s : %s @%d" % (frame[3], frame[1], frame[2])
        for frame in reversed(frames))
[ "def", "short_stack", "(", ")", ":", "stack", "=", "inspect", ".", "stack", "(", ")", "[", ":", "0", ":", "-", "1", "]", "return", "\"\\n\"", ".", "join", "(", "[", "\"%30s : %s @%d\"", "%", "(", "t", "[", "3", "]", ",", "t", "[", "1", "]", ",", "t", "[", "2", "]", ")", "for", "t", "in", "stack", "]", ")" ]
44.5
15.25
def print_ldamodel_topic_words(topic_word_distrib, vocab, n_top=10, row_labels=DEFAULT_TOPIC_NAME_FMT):
    """Print the `n_top` highest-valued words per topic from an LDA
    model's topic-word distributions."""
    print_ldamodel_distribution(topic_word_distrib,
                                row_labels=row_labels,
                                val_labels=vocab,
                                top_n=n_top)
[ "def", "print_ldamodel_topic_words", "(", "topic_word_distrib", ",", "vocab", ",", "n_top", "=", "10", ",", "row_labels", "=", "DEFAULT_TOPIC_NAME_FMT", ")", ":", "print_ldamodel_distribution", "(", "topic_word_distrib", ",", "row_labels", "=", "row_labels", ",", "val_labels", "=", "vocab", ",", "top_n", "=", "n_top", ")" ]
78.5
29.75
def polling(self, system_code=0xffff, request_code=0, time_slots=0):
    """Acquire and identify a card with the Type 3 Tag Polling command.

    The Polling command detects Type 3 Tags in the field and is also
    used for initialization and anti-collision.  *system_code* selects
    the card system to activate; a 0xff byte acts as a wildcard, so the
    default 0xffff activates the very first system on the card.

    *request_code* controls optional response data: 0 requests none
    (default), 1 asks the card to also return the system code, and 2
    asks for communication performance data (not provided by all
    cards).

    *time_slots* sets how many response slots are available when
    multiple tags are in the field, and thereby how long the reader
    waits for responses.  Only 0, 1, 3, 7 and 15 should be used; other
    values may produce unexpected results depending on the tag product.

    Returns either the tuple (IDm, PMm) or the tuple
    (IDm, PMm, additional information), depending on the response
    length, all as bytearrays.  Command execution errors raise
    :exc:`~nfc.tag.TagCommandError`.
    """
    log.debug("polling for system 0x{0:04x}".format(system_code))

    if time_slots not in (0, 1, 3, 7, 15):
        log.debug("invalid number of time slots: {0}".format(time_slots))
        raise ValueError("invalid number of time slots")
    if request_code not in (0, 1, 2):
        log.debug("invalid request code value: {0}".format(request_code))
        raise ValueError("invalid request code for polling")

    # Wait time grows with the number of anti-collision time slots.
    timeout = 0.003625 + time_slots * 0.001208
    cmd_data = pack(">HBB", system_code, request_code, time_slots)
    rsp = self.send_cmd_recv_rsp(0x00, cmd_data, timeout, send_idm=False)

    expected_length = 16 if request_code == 0 else 18
    if len(rsp) != expected_length:
        log.debug("unexpected polling response length")
        raise Type3TagCommandError(DATA_SIZE_ERROR)

    idm, pmm = rsp[0:8], rsp[8:16]
    if len(rsp) == 16:
        return (idm, pmm)
    return (idm, pmm, rsp[16:18])
[ "def", "polling", "(", "self", ",", "system_code", "=", "0xffff", ",", "request_code", "=", "0", ",", "time_slots", "=", "0", ")", ":", "log", ".", "debug", "(", "\"polling for system 0x{0:04x}\"", ".", "format", "(", "system_code", ")", ")", "if", "time_slots", "not", "in", "(", "0", ",", "1", ",", "3", ",", "7", ",", "15", ")", ":", "log", ".", "debug", "(", "\"invalid number of time slots: {0}\"", ".", "format", "(", "time_slots", ")", ")", "raise", "ValueError", "(", "\"invalid number of time slots\"", ")", "if", "request_code", "not", "in", "(", "0", ",", "1", ",", "2", ")", ":", "log", ".", "debug", "(", "\"invalid request code value: {0}\"", ".", "format", "(", "request_code", ")", ")", "raise", "ValueError", "(", "\"invalid request code for polling\"", ")", "timeout", "=", "0.003625", "+", "time_slots", "*", "0.001208", "data", "=", "pack", "(", "\">HBB\"", ",", "system_code", ",", "request_code", ",", "time_slots", ")", "data", "=", "self", ".", "send_cmd_recv_rsp", "(", "0x00", ",", "data", ",", "timeout", ",", "send_idm", "=", "False", ")", "if", "len", "(", "data", ")", "!=", "(", "16", "if", "request_code", "==", "0", "else", "18", ")", ":", "log", ".", "debug", "(", "\"unexpected polling response length\"", ")", "raise", "Type3TagCommandError", "(", "DATA_SIZE_ERROR", ")", "return", "(", "data", "[", "0", ":", "8", "]", ",", "data", "[", "8", ":", "16", "]", ")", "if", "len", "(", "data", ")", "==", "16", "else", "(", "data", "[", "0", ":", "8", "]", ",", "data", "[", "8", ":", "16", "]", ",", "data", "[", "16", ":", "18", "]", ")" ]
51.919355
25.83871
def predict_factors(self, counts_df, maxiter=10, ncores=1, random_seed=1, stop_thr=1e-3, return_all=False): """ Gets latent factors for a user given her item counts This is similar to obtaining topics for a document in LDA. Note ---- This function will NOT modify any of the item parameters. Note ---- This function only works with one user at a time. Note ---- This function is prone to producing all NaNs values. Parameters ---------- counts_df : DataFrame or array (nsamples, 2) Data Frame with columns 'ItemId' and 'Count', indicating the non-zero item counts for a user for whom it's desired to obtain latent factors. maxiter : int Maximum number of iterations to run. ncores : int Number of threads/cores to use. With data for only one user, it's unlikely that using multiple threads would give a significant speed-up, and it might even end up making the function slower due to the overhead. If passing -1, it will determine the maximum number of cores in the system and use that. random_seed : int Random seed used to initialize parameters. stop_thr : float If the l2-norm of the difference between values of Theta_{u} between interations is less than this, it will stop. Smaller values of 'k' should require smaller thresholds. return_all : bool Whether to return also the intermediate calculations (Gamma_shp, Gamma_rte). 
When passing True here, the output will be a tuple containing (Theta, Gamma_shp, Gamma_rte, Phi) Returns ------- latent_factors : array (k,) Calculated latent factors for the user, given the input data """ ncores, random_seed, stop_thr, maxiter = self._check_input_predict_factors(ncores, random_seed, stop_thr, maxiter) ## processing the data counts_df = self._process_data_single(counts_df) ## calculating the latent factors Theta = np.empty(self.k, dtype='float32') temp = cython_loops.calc_user_factors( self.a, self.a_prime, self.b_prime, self.c, self.c_prime, self.d_prime, counts_df.Count.values, counts_df.ItemId.values, Theta, self.Beta, self.Lambda_shp, self.Lambda_rte, cython_loops.cast_ind_type(counts_df.shape[0]), cython_loops.cast_ind_type(self.k), cython_loops.cast_int(int(maxiter)), cython_loops.cast_int(ncores), cython_loops.cast_int(int(random_seed)), cython_loops.cast_float(stop_thr), cython_loops.cast_int(bool(return_all)) ) if np.isnan(Theta).sum() > 0: raise ValueError("NaNs encountered in the result. Failed to produce latent factors.") if return_all: return (Theta, temp[0], temp[1], temp[2]) else: return Theta
[ "def", "predict_factors", "(", "self", ",", "counts_df", ",", "maxiter", "=", "10", ",", "ncores", "=", "1", ",", "random_seed", "=", "1", ",", "stop_thr", "=", "1e-3", ",", "return_all", "=", "False", ")", ":", "ncores", ",", "random_seed", ",", "stop_thr", ",", "maxiter", "=", "self", ".", "_check_input_predict_factors", "(", "ncores", ",", "random_seed", ",", "stop_thr", ",", "maxiter", ")", "## processing the data", "counts_df", "=", "self", ".", "_process_data_single", "(", "counts_df", ")", "## calculating the latent factors", "Theta", "=", "np", ".", "empty", "(", "self", ".", "k", ",", "dtype", "=", "'float32'", ")", "temp", "=", "cython_loops", ".", "calc_user_factors", "(", "self", ".", "a", ",", "self", ".", "a_prime", ",", "self", ".", "b_prime", ",", "self", ".", "c", ",", "self", ".", "c_prime", ",", "self", ".", "d_prime", ",", "counts_df", ".", "Count", ".", "values", ",", "counts_df", ".", "ItemId", ".", "values", ",", "Theta", ",", "self", ".", "Beta", ",", "self", ".", "Lambda_shp", ",", "self", ".", "Lambda_rte", ",", "cython_loops", ".", "cast_ind_type", "(", "counts_df", ".", "shape", "[", "0", "]", ")", ",", "cython_loops", ".", "cast_ind_type", "(", "self", ".", "k", ")", ",", "cython_loops", ".", "cast_int", "(", "int", "(", "maxiter", ")", ")", ",", "cython_loops", ".", "cast_int", "(", "ncores", ")", ",", "cython_loops", ".", "cast_int", "(", "int", "(", "random_seed", ")", ")", ",", "cython_loops", ".", "cast_float", "(", "stop_thr", ")", ",", "cython_loops", ".", "cast_int", "(", "bool", "(", "return_all", ")", ")", ")", "if", "np", ".", "isnan", "(", "Theta", ")", ".", "sum", "(", ")", ">", "0", ":", "raise", "ValueError", "(", "\"NaNs encountered in the result. Failed to produce latent factors.\"", ")", "if", "return_all", ":", "return", "(", "Theta", ",", "temp", "[", "0", "]", ",", "temp", "[", "1", "]", ",", "temp", "[", "2", "]", ")", "else", ":", "return", "Theta" ]
36.082192
26.90411
def centroids(self, instrument, min_abundance=1e-4, points_per_fwhm=25): """ Estimates centroided peaks for a given instrument model. :param instrument: instrument model :param min_abundance: minimum abundance for including a peak :param points_per_fwhm: grid density used for envelope calculation :returns: peaks visible with the instrument used :rtype: TheoreticalSpectrum """ assert self.ptr != ffi.NULL centroids = ims.spectrum_envelope_centroids(self.ptr, instrument.ptr, min_abundance, points_per_fwhm) return _new_spectrum(CentroidedSpectrum, centroids)
[ "def", "centroids", "(", "self", ",", "instrument", ",", "min_abundance", "=", "1e-4", ",", "points_per_fwhm", "=", "25", ")", ":", "assert", "self", ".", "ptr", "!=", "ffi", ".", "NULL", "centroids", "=", "ims", ".", "spectrum_envelope_centroids", "(", "self", ".", "ptr", ",", "instrument", ".", "ptr", ",", "min_abundance", ",", "points_per_fwhm", ")", "return", "_new_spectrum", "(", "CentroidedSpectrum", ",", "centroids", ")" ]
49.142857
20.428571
def _read_stderr(self, encoding='utf-8'): """Reads self.proc.stderr. Usually, this should be read in a thread, to prevent blocking the read from stdout of the stderr buffer is filled, and this function is not called becuase the program is busy in the stderr reading loop. Reads self.proc.stderr (self.proc is the subprocess running the git command), and reads / writes self.failed_message (the message sent to stderr when git fails, usually one line). """ for line in self.proc.stderr: err_line = line.decode(encoding, errors='surrogateescape') if self.proc.returncode != 0: # If the subprocess didn't finish successfully, we expect # the last line in stderr to provide the cause if self.failed_message is not None: # We had a message, there is a newer line, print it logger.debug("Git log stderr: " + self.failed_message) self.failed_message = err_line else: # The subprocess is successfully up to now, print the line logger.debug("Git log stderr: " + err_line)
[ "def", "_read_stderr", "(", "self", ",", "encoding", "=", "'utf-8'", ")", ":", "for", "line", "in", "self", ".", "proc", ".", "stderr", ":", "err_line", "=", "line", ".", "decode", "(", "encoding", ",", "errors", "=", "'surrogateescape'", ")", "if", "self", ".", "proc", ".", "returncode", "!=", "0", ":", "# If the subprocess didn't finish successfully, we expect", "# the last line in stderr to provide the cause", "if", "self", ".", "failed_message", "is", "not", "None", ":", "# We had a message, there is a newer line, print it", "logger", ".", "debug", "(", "\"Git log stderr: \"", "+", "self", ".", "failed_message", ")", "self", ".", "failed_message", "=", "err_line", "else", ":", "# The subprocess is successfully up to now, print the line", "logger", ".", "debug", "(", "\"Git log stderr: \"", "+", "err_line", ")" ]
47.72
21.76
def drop_namespaces(self): """Drop all namespaces.""" self.session.query(NamespaceEntry).delete() self.session.query(Namespace).delete() self.session.commit()
[ "def", "drop_namespaces", "(", "self", ")", ":", "self", ".", "session", ".", "query", "(", "NamespaceEntry", ")", ".", "delete", "(", ")", "self", ".", "session", ".", "query", "(", "Namespace", ")", ".", "delete", "(", ")", "self", ".", "session", ".", "commit", "(", ")" ]
37.2
8.4
def from_list(cls, l): """Return a Point instance from a given list""" if len(l) == 3: x, y, z = map(float, l) return cls(x, y, z) elif len(l) == 2: x, y = map(float, l) return cls(x, y) else: raise AttributeError
[ "def", "from_list", "(", "cls", ",", "l", ")", ":", "if", "len", "(", "l", ")", "==", "3", ":", "x", ",", "y", ",", "z", "=", "map", "(", "float", ",", "l", ")", "return", "cls", "(", "x", ",", "y", ",", "z", ")", "elif", "len", "(", "l", ")", "==", "2", ":", "x", ",", "y", "=", "map", "(", "float", ",", "l", ")", "return", "cls", "(", "x", ",", "y", ")", "else", ":", "raise", "AttributeError" ]
30.4
11.1
def doStuff(ABFfolder,analyze=False,convert=False,index=True,overwrite=True, launch=True): """Inelegant for now, but lets you manually analyze every ABF in a folder.""" IN=INDEX(ABFfolder) if analyze: IN.analyzeAll() if convert: IN.convertImages()
[ "def", "doStuff", "(", "ABFfolder", ",", "analyze", "=", "False", ",", "convert", "=", "False", ",", "index", "=", "True", ",", "overwrite", "=", "True", ",", "launch", "=", "True", ")", ":", "IN", "=", "INDEX", "(", "ABFfolder", ")", "if", "analyze", ":", "IN", ".", "analyzeAll", "(", ")", "if", "convert", ":", "IN", ".", "convertImages", "(", ")" ]
35.5
18.625
def _check_iso9660_filename(fullname, interchange_level): # type: (bytes, int) -> None ''' A function to check that a file identifier conforms to the ISO9660 rules for a particular interchange level. Parameters: fullname - The name to check. interchange_level - The interchange level to check against. Returns: Nothing. ''' # Check to ensure the name is a valid filename for the ISO according to # Ecma-119 7.5. (name, extension, version) = _split_iso9660_filename(fullname) # Ecma-119 says that filenames must end with a semicolon-number, but I have # found CDs (Ubuntu 14.04 Desktop i386, for instance) that do not follow # this. Thus we allow for names both with and without the semi+version. # Ecma-119 says that filenames must have a version number, but I have # found CDs (FreeBSD 10.1 amd64) that do not have any version number. # Allow for this. if version != b'' and (int(version) < 1 or int(version) > 32767): raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must have a version between 1 and 32767') # Ecma-119 section 7.5.1 specifies that filenames must have at least one # character in either the name or the extension. if not name and not extension: raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must have a non-empty name or extension') if b';' in name or b';' in extension: raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must contain exactly one semicolon') if interchange_level == 1: # According to Ecma-119, section 10.1, at level 1 the filename can # only be up to 8 d-characters or d1-characters, and the extension can # only be up to 3 d-characters or 3 d1-characters. if len(name) > 8 or len(extension) > 3: raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames at interchange level 1 cannot have more than 8 characters or 3 characters in the extension') else: # For all other interchange levels, the maximum filename length is # specified in Ecma-119 7.5.2. 
However, I have found CDs (Ubuntu 14.04 # Desktop i386, for instance) that don't conform to this. Skip the # check until we know how long is allowed. pass # Ecma-119 section 7.5.1 says that the file name and extension each contain # zero or more d-characters or d1-characters. While the definition of # d-characters and d1-characters is not specified in Ecma-119, # http://wiki.osdev.org/ISO_9660 suggests that this consists of A-Z, 0-9, _ # which seems to correlate with empirical evidence. Thus we check for that # here. if interchange_level < 4: _check_d1_characters(name) _check_d1_characters(extension)
[ "def", "_check_iso9660_filename", "(", "fullname", ",", "interchange_level", ")", ":", "# type: (bytes, int) -> None", "# Check to ensure the name is a valid filename for the ISO according to", "# Ecma-119 7.5.", "(", "name", ",", "extension", ",", "version", ")", "=", "_split_iso9660_filename", "(", "fullname", ")", "# Ecma-119 says that filenames must end with a semicolon-number, but I have", "# found CDs (Ubuntu 14.04 Desktop i386, for instance) that do not follow", "# this. Thus we allow for names both with and without the semi+version.", "# Ecma-119 says that filenames must have a version number, but I have", "# found CDs (FreeBSD 10.1 amd64) that do not have any version number.", "# Allow for this.", "if", "version", "!=", "b''", "and", "(", "int", "(", "version", ")", "<", "1", "or", "int", "(", "version", ")", ">", "32767", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'ISO9660 filenames must have a version between 1 and 32767'", ")", "# Ecma-119 section 7.5.1 specifies that filenames must have at least one", "# character in either the name or the extension.", "if", "not", "name", "and", "not", "extension", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'ISO9660 filenames must have a non-empty name or extension'", ")", "if", "b';'", "in", "name", "or", "b';'", "in", "extension", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'ISO9660 filenames must contain exactly one semicolon'", ")", "if", "interchange_level", "==", "1", ":", "# According to Ecma-119, section 10.1, at level 1 the filename can", "# only be up to 8 d-characters or d1-characters, and the extension can", "# only be up to 3 d-characters or 3 d1-characters.", "if", "len", "(", "name", ")", ">", "8", "or", "len", "(", "extension", ")", ">", "3", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'ISO9660 filenames at interchange level 1 cannot have more than 8 characters or 3 characters in the extension'", 
")", "else", ":", "# For all other interchange levels, the maximum filename length is", "# specified in Ecma-119 7.5.2. However, I have found CDs (Ubuntu 14.04", "# Desktop i386, for instance) that don't conform to this. Skip the", "# check until we know how long is allowed.", "pass", "# Ecma-119 section 7.5.1 says that the file name and extension each contain", "# zero or more d-characters or d1-characters. While the definition of", "# d-characters and d1-characters is not specified in Ecma-119,", "# http://wiki.osdev.org/ISO_9660 suggests that this consists of A-Z, 0-9, _", "# which seems to correlate with empirical evidence. Thus we check for that", "# here.", "if", "interchange_level", "<", "4", ":", "_check_d1_characters", "(", "name", ")", "_check_d1_characters", "(", "extension", ")" ]
46.711864
31.220339
def format_output(self, rendered_widgets): """ Render the ``icekit_events/recurrence_rule_widget/format_output.html`` template with the following context: preset A choice field for preset recurrence rules. natural An input field for natural language recurrence rules. rfc A text field for RFC compliant recurrence rules. The default template positions the ``preset`` field above the ``natural`` and ``rfc`` fields. """ template = loader.get_template( 'icekit_events/recurrence_rule_widget/format_output.html') preset, natural, rfc = rendered_widgets context = Context({ 'preset': preset, 'natural': natural, 'rfc': rfc, }) return template.render(context)
[ "def", "format_output", "(", "self", ",", "rendered_widgets", ")", ":", "template", "=", "loader", ".", "get_template", "(", "'icekit_events/recurrence_rule_widget/format_output.html'", ")", "preset", ",", "natural", ",", "rfc", "=", "rendered_widgets", "context", "=", "Context", "(", "{", "'preset'", ":", "preset", ",", "'natural'", ":", "natural", ",", "'rfc'", ":", "rfc", ",", "}", ")", "return", "template", ".", "render", "(", "context", ")" ]
35.541667
17.208333
def iter(self, keyed=False, extended=False, cast=True, relations=False): """https://github.com/frictionlessdata/tableschema-py#schema """ # Prepare unique checks if cast: unique_fields_cache = {} if self.schema: unique_fields_cache = _create_unique_fields_cache(self.schema) # Open/iterate stream self.__stream.open() iterator = self.__stream.iter(extended=True) iterator = self.__apply_processors(iterator, cast=cast) for row_number, headers, row in iterator: # Get headers if not self.__headers: self.__headers = headers # Check headers if cast: if self.schema and self.headers: if self.headers != self.schema.field_names: self.__stream.close() message = 'Table headers don\'t match schema field names' raise exceptions.CastError(message) # Check unique if cast: for indexes, cache in unique_fields_cache.items(): values = tuple(value for i, value in enumerate(row) if i in indexes) if not all(map(lambda value: value is None, values)): if values in cache['data']: self.__stream.close() message = 'Field(s) "%s" duplicates in row "%s"' message = message % (cache['name'], row_number) raise exceptions.CastError(message) cache['data'].add(values) # Resolve relations if relations: if self.schema: for foreign_key in self.schema.foreign_keys: row = _resolve_relations(row, headers, relations, foreign_key) if row is None: self.__stream.close() message = 'Foreign key "%s" violation in row "%s"' message = message % (foreign_key['fields'], row_number) raise exceptions.RelationError(message) # Form row if extended: yield (row_number, headers, row) elif keyed: yield dict(zip(headers, row)) else: yield row # Close stream self.__stream.close()
[ "def", "iter", "(", "self", ",", "keyed", "=", "False", ",", "extended", "=", "False", ",", "cast", "=", "True", ",", "relations", "=", "False", ")", ":", "# Prepare unique checks", "if", "cast", ":", "unique_fields_cache", "=", "{", "}", "if", "self", ".", "schema", ":", "unique_fields_cache", "=", "_create_unique_fields_cache", "(", "self", ".", "schema", ")", "# Open/iterate stream", "self", ".", "__stream", ".", "open", "(", ")", "iterator", "=", "self", ".", "__stream", ".", "iter", "(", "extended", "=", "True", ")", "iterator", "=", "self", ".", "__apply_processors", "(", "iterator", ",", "cast", "=", "cast", ")", "for", "row_number", ",", "headers", ",", "row", "in", "iterator", ":", "# Get headers", "if", "not", "self", ".", "__headers", ":", "self", ".", "__headers", "=", "headers", "# Check headers", "if", "cast", ":", "if", "self", ".", "schema", "and", "self", ".", "headers", ":", "if", "self", ".", "headers", "!=", "self", ".", "schema", ".", "field_names", ":", "self", ".", "__stream", ".", "close", "(", ")", "message", "=", "'Table headers don\\'t match schema field names'", "raise", "exceptions", ".", "CastError", "(", "message", ")", "# Check unique", "if", "cast", ":", "for", "indexes", ",", "cache", "in", "unique_fields_cache", ".", "items", "(", ")", ":", "values", "=", "tuple", "(", "value", "for", "i", ",", "value", "in", "enumerate", "(", "row", ")", "if", "i", "in", "indexes", ")", "if", "not", "all", "(", "map", "(", "lambda", "value", ":", "value", "is", "None", ",", "values", ")", ")", ":", "if", "values", "in", "cache", "[", "'data'", "]", ":", "self", ".", "__stream", ".", "close", "(", ")", "message", "=", "'Field(s) \"%s\" duplicates in row \"%s\"'", "message", "=", "message", "%", "(", "cache", "[", "'name'", "]", ",", "row_number", ")", "raise", "exceptions", ".", "CastError", "(", "message", ")", "cache", "[", "'data'", "]", ".", "add", "(", "values", ")", "# Resolve relations", "if", "relations", ":", "if", "self", ".", 
"schema", ":", "for", "foreign_key", "in", "self", ".", "schema", ".", "foreign_keys", ":", "row", "=", "_resolve_relations", "(", "row", ",", "headers", ",", "relations", ",", "foreign_key", ")", "if", "row", "is", "None", ":", "self", ".", "__stream", ".", "close", "(", ")", "message", "=", "'Foreign key \"%s\" violation in row \"%s\"'", "message", "=", "message", "%", "(", "foreign_key", "[", "'fields'", "]", ",", "row_number", ")", "raise", "exceptions", ".", "RelationError", "(", "message", ")", "# Form row", "if", "extended", ":", "yield", "(", "row_number", ",", "headers", ",", "row", ")", "elif", "keyed", ":", "yield", "dict", "(", "zip", "(", "headers", ",", "row", ")", ")", "else", ":", "yield", "row", "# Close stream", "self", ".", "__stream", ".", "close", "(", ")" ]
40.098361
20.868852
def query_entities(self, table_name, filter=None, select=None, num_results=None, marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, property_resolver=None, timeout=None): ''' Returns a generator to list the entities in the table specified. The generator will lazily follow the continuation tokens returned by the service and stop when all entities have been returned or num_results is reached. If num_results is specified and the account has more than that number of entities, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int num_results: The maximum number of entities to return. :param marker: An opaque continuation object. This value can be retrieved from the next_marker field of a previous generator object if max_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :type marker: obj :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. 
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call. :return: A generator which produces :class:`~azure.storage.table.models.Entity` objects. :rtype: :class:`~azure.storage.common.models.ListGenerator` ''' operation_context = _OperationContext(location_lock=True) if self.key_encryption_key is not None or self.key_resolver_function is not None: # If query already requests all properties, no need to add the metadata columns if select is not None and select != '*': select += ',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2' args = (table_name,) kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker, 'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout, '_context': operation_context} resp = self._query_entities(*args, **kwargs) return ListGenerator(resp, self._query_entities, args, kwargs)
[ "def", "query_entities", "(", "self", ",", "table_name", ",", "filter", "=", "None", ",", "select", "=", "None", ",", "num_results", "=", "None", ",", "marker", "=", "None", ",", "accept", "=", "TablePayloadFormat", ".", "JSON_MINIMAL_METADATA", ",", "property_resolver", "=", "None", ",", "timeout", "=", "None", ")", ":", "operation_context", "=", "_OperationContext", "(", "location_lock", "=", "True", ")", "if", "self", ".", "key_encryption_key", "is", "not", "None", "or", "self", ".", "key_resolver_function", "is", "not", "None", ":", "# If query already requests all properties, no need to add the metadata columns", "if", "select", "is", "not", "None", "and", "select", "!=", "'*'", ":", "select", "+=", "',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2'", "args", "=", "(", "table_name", ",", ")", "kwargs", "=", "{", "'filter'", ":", "filter", ",", "'select'", ":", "select", ",", "'max_results'", ":", "num_results", ",", "'marker'", ":", "marker", ",", "'accept'", ":", "accept", ",", "'property_resolver'", ":", "property_resolver", ",", "'timeout'", ":", "timeout", ",", "'_context'", ":", "operation_context", "}", "resp", "=", "self", ".", "_query_entities", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ListGenerator", "(", "resp", ",", "self", ".", "_query_entities", ",", "args", ",", "kwargs", ")" ]
56.095238
30.349206
def create_new_sub_account(self, account_id, account_name, account_default_group_storage_quota_mb=None, account_default_storage_quota_mb=None, account_default_user_storage_quota_mb=None, account_sis_account_id=None): """ Create a new sub-account. Add a new sub-account to a given account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - account[name] """The name of the new sub-account.""" data["account[name]"] = account_name # OPTIONAL - account[sis_account_id] """The account's identifier in the Student Information System.""" if account_sis_account_id is not None: data["account[sis_account_id]"] = account_sis_account_id # OPTIONAL - account[default_storage_quota_mb] """The default course storage quota to be used, if not otherwise specified.""" if account_default_storage_quota_mb is not None: data["account[default_storage_quota_mb]"] = account_default_storage_quota_mb # OPTIONAL - account[default_user_storage_quota_mb] """The default user storage quota to be used, if not otherwise specified.""" if account_default_user_storage_quota_mb is not None: data["account[default_user_storage_quota_mb]"] = account_default_user_storage_quota_mb # OPTIONAL - account[default_group_storage_quota_mb] """The default group storage quota to be used, if not otherwise specified.""" if account_default_group_storage_quota_mb is not None: data["account[default_group_storage_quota_mb]"] = account_default_group_storage_quota_mb self.logger.debug("POST /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/accounts/{account_id}/sub_accounts".format(**path), data=data, params=params, single_item=True)
[ "def", "create_new_sub_account", "(", "self", ",", "account_id", ",", "account_name", ",", "account_default_group_storage_quota_mb", "=", "None", ",", "account_default_storage_quota_mb", "=", "None", ",", "account_default_user_storage_quota_mb", "=", "None", ",", "account_sis_account_id", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"account_id\"", "]", "=", "account_id", "# REQUIRED - account[name]\r", "\"\"\"The name of the new sub-account.\"\"\"", "data", "[", "\"account[name]\"", "]", "=", "account_name", "# OPTIONAL - account[sis_account_id]\r", "\"\"\"The account's identifier in the Student Information System.\"\"\"", "if", "account_sis_account_id", "is", "not", "None", ":", "data", "[", "\"account[sis_account_id]\"", "]", "=", "account_sis_account_id", "# OPTIONAL - account[default_storage_quota_mb]\r", "\"\"\"The default course storage quota to be used, if not otherwise specified.\"\"\"", "if", "account_default_storage_quota_mb", "is", "not", "None", ":", "data", "[", "\"account[default_storage_quota_mb]\"", "]", "=", "account_default_storage_quota_mb", "# OPTIONAL - account[default_user_storage_quota_mb]\r", "\"\"\"The default user storage quota to be used, if not otherwise specified.\"\"\"", "if", "account_default_user_storage_quota_mb", "is", "not", "None", ":", "data", "[", "\"account[default_user_storage_quota_mb]\"", "]", "=", "account_default_user_storage_quota_mb", "# OPTIONAL - account[default_group_storage_quota_mb]\r", "\"\"\"The default group storage quota to be used, if not otherwise specified.\"\"\"", "if", "account_default_group_storage_quota_mb", "is", "not", "None", ":", "data", "[", "\"account[default_group_storage_quota_mb]\"", "]", "=", "account_default_group_storage_quota_mb", "self", ".", "logger", ".", "debug", "(", "\"POST /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: 
{data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"POST\"", ",", "\"/api/v1/accounts/{account_id}/sub_accounts\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "single_item", "=", "True", ")" ]
51.7
28.6
def get_all(): ''' Return all installed services CLI Example: .. code-block:: bash salt '*' service.get_all ''' ret = set() cmd = '/usr/bin/svcs -aH -o FMRI,STATE -s FMRI' lines = __salt__['cmd.run'](cmd).splitlines() for line in lines: comps = line.split() if not comps: continue ret.add(comps[0]) return sorted(ret)
[ "def", "get_all", "(", ")", ":", "ret", "=", "set", "(", ")", "cmd", "=", "'/usr/bin/svcs -aH -o FMRI,STATE -s FMRI'", "lines", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", ".", "splitlines", "(", ")", "for", "line", "in", "lines", ":", "comps", "=", "line", ".", "split", "(", ")", "if", "not", "comps", ":", "continue", "ret", ".", "add", "(", "comps", "[", "0", "]", ")", "return", "sorted", "(", "ret", ")" ]
20.315789
21.789474
def get_pull_request(project, num, auth=False): """get pull request info by number """ url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num) if auth: header = make_auth_header() else: header = None response = requests.get(url, headers=header) response.raise_for_status() return json.loads(response.text, object_hook=Obj)
[ "def", "get_pull_request", "(", "project", ",", "num", ",", "auth", "=", "False", ")", ":", "url", "=", "\"https://api.github.com/repos/{project}/pulls/{num}\"", ".", "format", "(", "project", "=", "project", ",", "num", "=", "num", ")", "if", "auth", ":", "header", "=", "make_auth_header", "(", ")", "else", ":", "header", "=", "None", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "header", ")", "response", ".", "raise_for_status", "(", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ",", "object_hook", "=", "Obj", ")" ]
36.090909
15.909091
def from_dir(cls, ID, datadir, parser, pattern='*.fcs', recursive=False, readdata_kwargs={}, readmeta_kwargs={}, **ID_kwargs): """ Create a Collection of measurements from data files contained in a directory. Parameters ---------- ID : hashable Collection ID datadir : str Path of directory containing the data files. pattern : str Only files matching the pattern will be used to create measurements. recursive : bool Recursively look for files matching pattern in subdirectories. {_bases_filename_parser} {_bases_ID_kwargs} """ datafiles = get_files(datadir, pattern, recursive) return cls.from_files(ID, datafiles, parser, readdata_kwargs=readdata_kwargs, readmeta_kwargs=readmeta_kwargs, **ID_kwargs)
[ "def", "from_dir", "(", "cls", ",", "ID", ",", "datadir", ",", "parser", ",", "pattern", "=", "'*.fcs'", ",", "recursive", "=", "False", ",", "readdata_kwargs", "=", "{", "}", ",", "readmeta_kwargs", "=", "{", "}", ",", "*", "*", "ID_kwargs", ")", ":", "datafiles", "=", "get_files", "(", "datadir", ",", "pattern", ",", "recursive", ")", "return", "cls", ".", "from_files", "(", "ID", ",", "datafiles", ",", "parser", ",", "readdata_kwargs", "=", "readdata_kwargs", ",", "readmeta_kwargs", "=", "readmeta_kwargs", ",", "*", "*", "ID_kwargs", ")" ]
41.454545
21.727273
def density_angle(rho0: Density, rho1: Density) -> bk.BKTensor: """The Fubini-Study angle between density matrices""" return fubini_study_angle(rho0.vec, rho1.vec)
[ "def", "density_angle", "(", "rho0", ":", "Density", ",", "rho1", ":", "Density", ")", "->", "bk", ".", "BKTensor", ":", "return", "fubini_study_angle", "(", "rho0", ".", "vec", ",", "rho1", ".", "vec", ")" ]
56.333333
10.666667
def write(self, data): """ write data on the OUT endpoint associated to the HID interface """ for _ in range(64 - len(data)): data.append(0) #logging.debug("send: %s", data) self.device.write(bytearray([0]) + data) return
[ "def", "write", "(", "self", ",", "data", ")", ":", "for", "_", "in", "range", "(", "64", "-", "len", "(", "data", ")", ")", ":", "data", ".", "append", "(", "0", ")", "#logging.debug(\"send: %s\", data)", "self", ".", "device", ".", "write", "(", "bytearray", "(", "[", "0", "]", ")", "+", "data", ")", "return" ]
31.222222
10.777778
def value_matrix(self): """Converted rows of tabular data. Returns: |list| or |tuple|: Table rows. """ if self.__value_matrix: return self.__value_matrix self.__value_matrix = [ [value_dp.data for value_dp in value_dp_list] for value_dp_list in self.value_dp_matrix ] return self.__value_matrix
[ "def", "value_matrix", "(", "self", ")", ":", "if", "self", ".", "__value_matrix", ":", "return", "self", ".", "__value_matrix", "self", ".", "__value_matrix", "=", "[", "[", "value_dp", ".", "data", "for", "value_dp", "in", "value_dp_list", "]", "for", "value_dp_list", "in", "self", ".", "value_dp_matrix", "]", "return", "self", ".", "__value_matrix" ]
25.066667
21.266667
def response_hook(self, response, **kwargs) -> HTMLResponse: """ Change response enconding and replace it by a HTMLResponse. """ if not response.encoding: response.encoding = DEFAULT_ENCODING return HTMLResponse._from_response(response, self)
[ "def", "response_hook", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", "->", "HTMLResponse", ":", "if", "not", "response", ".", "encoding", ":", "response", ".", "encoding", "=", "DEFAULT_ENCODING", "return", "HTMLResponse", ".", "_from_response", "(", "response", ",", "self", ")" ]
54.8
10.6
def path(self, filename=None, ext='tsv', digest=False, shard=False, encoding='utf-8'): """ Return the path for this class with a certain set of parameters. `ext` sets the extension of the file. If `hash` is true, the filename (w/o extenstion) will be hashed. If `shard` is true, the files are placed in shards, based on the first two chars of the filename (hashed). """ if self.BASE is NotImplemented: raise RuntimeError('BASE directory must be set.') params = dict(self.get_params()) if filename is None: parts = [] for name, param in self.get_params(): if not param.significant: continue if name == 'date' and is_closest_date_parameter(self, 'date'): parts.append('date-%s' % self.closest()) continue if hasattr(param, 'is_list') and param.is_list: es = '-'.join([str(v) for v in getattr(self, name)]) parts.append('%s-%s' % (name, es)) continue val = getattr(self, name) if isinstance(val, datetime.datetime): val = val.strftime('%Y-%m-%dT%H%M%S') elif isinstance(val, datetime.date): val = val.strftime('%Y-%m-%d') parts.append('%s-%s' % (name, val)) name = '-'.join(sorted(parts)) if len(name) == 0: name = 'output' if digest: name = hashlib.sha1(name.encode(encoding)).hexdigest() if not ext: filename = '{fn}'.format(ext=ext, fn=name) else: filename = '{fn}.{ext}'.format(ext=ext, fn=name) if shard: prefix = hashlib.sha1(filename.encode(encoding)).hexdigest()[:2] return os.path.join(self.BASE, self.TAG, self.task_family, prefix, filename) return os.path.join(self.BASE, self.TAG, self.task_family, filename)
[ "def", "path", "(", "self", ",", "filename", "=", "None", ",", "ext", "=", "'tsv'", ",", "digest", "=", "False", ",", "shard", "=", "False", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "self", ".", "BASE", "is", "NotImplemented", ":", "raise", "RuntimeError", "(", "'BASE directory must be set.'", ")", "params", "=", "dict", "(", "self", ".", "get_params", "(", ")", ")", "if", "filename", "is", "None", ":", "parts", "=", "[", "]", "for", "name", ",", "param", "in", "self", ".", "get_params", "(", ")", ":", "if", "not", "param", ".", "significant", ":", "continue", "if", "name", "==", "'date'", "and", "is_closest_date_parameter", "(", "self", ",", "'date'", ")", ":", "parts", ".", "append", "(", "'date-%s'", "%", "self", ".", "closest", "(", ")", ")", "continue", "if", "hasattr", "(", "param", ",", "'is_list'", ")", "and", "param", ".", "is_list", ":", "es", "=", "'-'", ".", "join", "(", "[", "str", "(", "v", ")", "for", "v", "in", "getattr", "(", "self", ",", "name", ")", "]", ")", "parts", ".", "append", "(", "'%s-%s'", "%", "(", "name", ",", "es", ")", ")", "continue", "val", "=", "getattr", "(", "self", ",", "name", ")", "if", "isinstance", "(", "val", ",", "datetime", ".", "datetime", ")", ":", "val", "=", "val", ".", "strftime", "(", "'%Y-%m-%dT%H%M%S'", ")", "elif", "isinstance", "(", "val", ",", "datetime", ".", "date", ")", ":", "val", "=", "val", ".", "strftime", "(", "'%Y-%m-%d'", ")", "parts", ".", "append", "(", "'%s-%s'", "%", "(", "name", ",", "val", ")", ")", "name", "=", "'-'", ".", "join", "(", "sorted", "(", "parts", ")", ")", "if", "len", "(", "name", ")", "==", "0", ":", "name", "=", "'output'", "if", "digest", ":", "name", "=", "hashlib", ".", "sha1", "(", "name", ".", "encode", "(", "encoding", ")", ")", ".", "hexdigest", "(", ")", "if", "not", "ext", ":", "filename", "=", "'{fn}'", ".", "format", "(", "ext", "=", "ext", ",", "fn", "=", "name", ")", "else", ":", "filename", "=", "'{fn}.{ext}'", ".", "format", "(", "ext", "=", "ext", ",", 
"fn", "=", "name", ")", "if", "shard", ":", "prefix", "=", "hashlib", ".", "sha1", "(", "filename", ".", "encode", "(", "encoding", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "2", "]", "return", "os", ".", "path", ".", "join", "(", "self", ".", "BASE", ",", "self", ".", "TAG", ",", "self", ".", "task_family", ",", "prefix", ",", "filename", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "BASE", ",", "self", ".", "TAG", ",", "self", ".", "task_family", ",", "filename", ")" ]
41.44
20.64
def protein_map_from_twg(twg): """Build map of entity texts to validate protein grounding. Looks at the grounding of the entity texts extracted from the statements and finds proteins where there is grounding to a human protein that maps to an HGNC name that is an exact match to the entity text. Returns a dict that can be used to update/expand the grounding map. Parameters ---------- twg : list of tuple list of tuples of the form output by agent_texts_with_grounding Returns ------- protein_map : dict dict keyed on agent text with associated values {'TEXT': agent_text, 'UP': uniprot_id}. Entries are for agent texts where the grounding map was able to find human protein grounded to this agent_text in Uniprot. """ protein_map = {} unmatched = 0 matched = 0 logger.info('Building grounding map for human proteins') for agent_text, grounding_list, _ in twg: # If 'UP' (Uniprot) not one of the grounding entries for this text, # then we skip it. if 'UP' not in [entry[0] for entry in grounding_list]: continue # Otherwise, collect all the Uniprot IDs for this protein. uniprot_ids = [entry[1] for entry in grounding_list if entry[0] == 'UP'] # For each Uniprot ID, look up the species for uniprot_id in uniprot_ids: # If it's not a human protein, skip it mnemonic = uniprot_client.get_mnemonic(uniprot_id) if mnemonic is None or not mnemonic.endswith('_HUMAN'): continue # Otherwise, look up the gene name in HGNC and match against the # agent text gene_name = uniprot_client.get_gene_name(uniprot_id) if gene_name is None: unmatched += 1 continue if agent_text.upper() == gene_name.upper(): matched += 1 protein_map[agent_text] = {'TEXT': agent_text, 'UP': uniprot_id} else: unmatched += 1 logger.info('Exact matches for %d proteins' % matched) logger.info('No match (or no gene name) for %d proteins' % unmatched) return protein_map
[ "def", "protein_map_from_twg", "(", "twg", ")", ":", "protein_map", "=", "{", "}", "unmatched", "=", "0", "matched", "=", "0", "logger", ".", "info", "(", "'Building grounding map for human proteins'", ")", "for", "agent_text", ",", "grounding_list", ",", "_", "in", "twg", ":", "# If 'UP' (Uniprot) not one of the grounding entries for this text,", "# then we skip it.", "if", "'UP'", "not", "in", "[", "entry", "[", "0", "]", "for", "entry", "in", "grounding_list", "]", ":", "continue", "# Otherwise, collect all the Uniprot IDs for this protein.", "uniprot_ids", "=", "[", "entry", "[", "1", "]", "for", "entry", "in", "grounding_list", "if", "entry", "[", "0", "]", "==", "'UP'", "]", "# For each Uniprot ID, look up the species", "for", "uniprot_id", "in", "uniprot_ids", ":", "# If it's not a human protein, skip it", "mnemonic", "=", "uniprot_client", ".", "get_mnemonic", "(", "uniprot_id", ")", "if", "mnemonic", "is", "None", "or", "not", "mnemonic", ".", "endswith", "(", "'_HUMAN'", ")", ":", "continue", "# Otherwise, look up the gene name in HGNC and match against the", "# agent text", "gene_name", "=", "uniprot_client", ".", "get_gene_name", "(", "uniprot_id", ")", "if", "gene_name", "is", "None", ":", "unmatched", "+=", "1", "continue", "if", "agent_text", ".", "upper", "(", ")", "==", "gene_name", ".", "upper", "(", ")", ":", "matched", "+=", "1", "protein_map", "[", "agent_text", "]", "=", "{", "'TEXT'", ":", "agent_text", ",", "'UP'", ":", "uniprot_id", "}", "else", ":", "unmatched", "+=", "1", "logger", ".", "info", "(", "'Exact matches for %d proteins'", "%", "matched", ")", "logger", ".", "info", "(", "'No match (or no gene name) for %d proteins'", "%", "unmatched", ")", "return", "protein_map" ]
40.836364
21.072727
def lsl(hdfs_path, user=None, recursive=False): """ Return a list of dictionaries of file properties. If ``hdfs_path`` is a file, there is only one item corresponding to the file itself; if it is a directory and ``recursive`` is :obj:`False`, each list item corresponds to a file or directory contained by it; if it is a directory and ``recursive`` is :obj:`True`, the list contains one item for every file or directory in the tree rooted at ``hdfs_path``. """ host, port, path_ = path.split(hdfs_path, user) fs = hdfs(host, port, user) if not recursive: dir_list = fs.list_directory(path_) else: treewalk = fs.walk(path_) top = next(treewalk) if top['kind'] == 'directory': dir_list = list(treewalk) else: dir_list = [top] fs.close() return dir_list
[ "def", "lsl", "(", "hdfs_path", ",", "user", "=", "None", ",", "recursive", "=", "False", ")", ":", "host", ",", "port", ",", "path_", "=", "path", ".", "split", "(", "hdfs_path", ",", "user", ")", "fs", "=", "hdfs", "(", "host", ",", "port", ",", "user", ")", "if", "not", "recursive", ":", "dir_list", "=", "fs", ".", "list_directory", "(", "path_", ")", "else", ":", "treewalk", "=", "fs", ".", "walk", "(", "path_", ")", "top", "=", "next", "(", "treewalk", ")", "if", "top", "[", "'kind'", "]", "==", "'directory'", ":", "dir_list", "=", "list", "(", "treewalk", ")", "else", ":", "dir_list", "=", "[", "top", "]", "fs", ".", "close", "(", ")", "return", "dir_list" ]
35.5
15.666667
def cluster_info(ipyclient, spacer=""): """ reports host and engine info for an ipyclient """ ## get engine data, skips busy engines. hosts = [] for eid in ipyclient.ids: engine = ipyclient[eid] if not engine.outstanding: hosts.append(engine.apply(_socket.gethostname)) ## report it hosts = [i.get() for i in hosts] result = [] for hostname in set(hosts): result.append("{}host compute node: [{} cores] on {}"\ .format(spacer, hosts.count(hostname), hostname)) print "\n".join(result)
[ "def", "cluster_info", "(", "ipyclient", ",", "spacer", "=", "\"\"", ")", ":", "## get engine data, skips busy engines. ", "hosts", "=", "[", "]", "for", "eid", "in", "ipyclient", ".", "ids", ":", "engine", "=", "ipyclient", "[", "eid", "]", "if", "not", "engine", ".", "outstanding", ":", "hosts", ".", "append", "(", "engine", ".", "apply", "(", "_socket", ".", "gethostname", ")", ")", "## report it", "hosts", "=", "[", "i", ".", "get", "(", ")", "for", "i", "in", "hosts", "]", "result", "=", "[", "]", "for", "hostname", "in", "set", "(", "hosts", ")", ":", "result", ".", "append", "(", "\"{}host compute node: [{} cores] on {}\"", ".", "format", "(", "spacer", ",", "hosts", ".", "count", "(", "hostname", ")", ",", "hostname", ")", ")", "print", "\"\\n\"", ".", "join", "(", "result", ")" ]
35.125
14.8125
def add_logging_level(name, value, method_name=None): ''' Comprehensively adds a new logging level to the ``logging`` module and the currently configured logging class. Derived from: https://stackoverflow.com/a/35804945/450917 ''' if not method_name: method_name = name.lower() # set levels logging.addLevelName(value, name) setattr(logging, name, value) level_map[name.lower()] = value if value == getattr(logging, 'EXCEPT', None): # needs traceback added def logForLevel(self, message='', *args, **kwargs): if self.isEnabledFor(value): message = (message + ' ▾\n').lstrip() + traceback.format_exc() self._log(value, message, args, **kwargs) else: def logForLevel(self, message, *args, **kwargs): if self.isEnabledFor(value): self._log(value, message, args, **kwargs) def logToRoot(message, *args, **kwargs): # may not need logging.log(value, message, *args, **kwargs) # set functions setattr(logging.getLoggerClass(), method_name, logForLevel) setattr(logging, method_name, logToRoot)
[ "def", "add_logging_level", "(", "name", ",", "value", ",", "method_name", "=", "None", ")", ":", "if", "not", "method_name", ":", "method_name", "=", "name", ".", "lower", "(", ")", "# set levels", "logging", ".", "addLevelName", "(", "value", ",", "name", ")", "setattr", "(", "logging", ",", "name", ",", "value", ")", "level_map", "[", "name", ".", "lower", "(", ")", "]", "=", "value", "if", "value", "==", "getattr", "(", "logging", ",", "'EXCEPT'", ",", "None", ")", ":", "# needs traceback added", "def", "logForLevel", "(", "self", ",", "message", "=", "''", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "isEnabledFor", "(", "value", ")", ":", "message", "=", "(", "message", "+", "' ▾\\n').", "l", "s", "trip()", " ", "+", "t", "aceback.f", "o", "rmat_exc()", "", "", "self", ".", "_log", "(", "value", ",", "message", ",", "args", ",", "*", "*", "kwargs", ")", "else", ":", "def", "logForLevel", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "isEnabledFor", "(", "value", ")", ":", "self", ".", "_log", "(", "value", ",", "message", ",", "args", ",", "*", "*", "kwargs", ")", "def", "logToRoot", "(", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# may not need", "logging", ".", "log", "(", "value", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# set functions", "setattr", "(", "logging", ".", "getLoggerClass", "(", ")", ",", "method_name", ",", "logForLevel", ")", "setattr", "(", "logging", ",", "method_name", ",", "logToRoot", ")" ]
37.866667
21
def handle_default_options(options): """ Pass in a Values instance from OptionParser. Handle settings and pythonpath options - Values from OptionParser """ if options.settings: #Set the percept_settings_module (picked up by settings in conf.base) os.environ['PERCEPT_SETTINGS_MODULE'] = options.settings if options.pythonpath: #Append the pythonpath and the directory one up from the pythonpath to sys.path for importing options.pythonpath = os.path.abspath(os.path.expanduser(options.pythonpath)) up_one_path = os.path.abspath(os.path.join(options.pythonpath, "..")) sys.path.append(options.pythonpath) sys.path.append(up_one_path) return options
[ "def", "handle_default_options", "(", "options", ")", ":", "if", "options", ".", "settings", ":", "#Set the percept_settings_module (picked up by settings in conf.base)", "os", ".", "environ", "[", "'PERCEPT_SETTINGS_MODULE'", "]", "=", "options", ".", "settings", "if", "options", ".", "pythonpath", ":", "#Append the pythonpath and the directory one up from the pythonpath to sys.path for importing", "options", ".", "pythonpath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "options", ".", "pythonpath", ")", ")", "up_one_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "options", ".", "pythonpath", ",", "\"..\"", ")", ")", "sys", ".", "path", ".", "append", "(", "options", ".", "pythonpath", ")", "sys", ".", "path", ".", "append", "(", "up_one_path", ")", "return", "options" ]
44.875
21.75
def setup_contracts_or_exit( config: Dict[str, Any], network_id: int, ) -> Dict[str, Any]: """Sets the contract deployment data depending on the network id and environment type If an invalid combination of network id and environment type is provided, exits the program with an error """ environment_type = config['environment_type'] not_allowed = ( # for now we only disallow mainnet with test configuration network_id == 1 and environment_type == Environment.DEVELOPMENT ) if not_allowed: click.secho( f'The chosen network ({ID_TO_NETWORKNAME[network_id]}) is not a testnet, ' f'but the "development" environment was selected.\n' f'This is not allowed. Please start again with a safe environment setting ' f'(--environment production).', fg='red', ) sys.exit(1) contracts = dict() contracts_version = environment_type_to_contracts_version(environment_type) config['contracts_path'] = contracts_precompiled_path(contracts_version) if network_id in ID_TO_NETWORKNAME and ID_TO_NETWORKNAME[network_id] != 'smoketest': try: deployment_data = get_contracts_deployment_info( chain_id=network_id, version=contracts_version, ) except ValueError: return contracts, False contracts = deployment_data['contracts'] return contracts
[ "def", "setup_contracts_or_exit", "(", "config", ":", "Dict", "[", "str", ",", "Any", "]", ",", "network_id", ":", "int", ",", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "environment_type", "=", "config", "[", "'environment_type'", "]", "not_allowed", "=", "(", "# for now we only disallow mainnet with test configuration", "network_id", "==", "1", "and", "environment_type", "==", "Environment", ".", "DEVELOPMENT", ")", "if", "not_allowed", ":", "click", ".", "secho", "(", "f'The chosen network ({ID_TO_NETWORKNAME[network_id]}) is not a testnet, '", "f'but the \"development\" environment was selected.\\n'", "f'This is not allowed. Please start again with a safe environment setting '", "f'(--environment production).'", ",", "fg", "=", "'red'", ",", ")", "sys", ".", "exit", "(", "1", ")", "contracts", "=", "dict", "(", ")", "contracts_version", "=", "environment_type_to_contracts_version", "(", "environment_type", ")", "config", "[", "'contracts_path'", "]", "=", "contracts_precompiled_path", "(", "contracts_version", ")", "if", "network_id", "in", "ID_TO_NETWORKNAME", "and", "ID_TO_NETWORKNAME", "[", "network_id", "]", "!=", "'smoketest'", ":", "try", ":", "deployment_data", "=", "get_contracts_deployment_info", "(", "chain_id", "=", "network_id", ",", "version", "=", "contracts_version", ",", ")", "except", "ValueError", ":", "return", "contracts", ",", "False", "contracts", "=", "deployment_data", "[", "'contracts'", "]", "return", "contracts" ]
34.452381
23.785714
def iter_all_children(obj, skipContainers=False): """ Returns an iterator over all childen and nested children using obj's get_children() method if skipContainers is true, only childless objects are returned. """ if hasattr(obj, 'get_children') and len(obj.get_children()) > 0: for child in obj.get_children(): if not skipContainers: yield child # could use `yield from` in python 3... for grandchild in iter_all_children(child, skipContainers): yield grandchild else: yield obj
[ "def", "iter_all_children", "(", "obj", ",", "skipContainers", "=", "False", ")", ":", "if", "hasattr", "(", "obj", ",", "'get_children'", ")", "and", "len", "(", "obj", ".", "get_children", "(", ")", ")", ">", "0", ":", "for", "child", "in", "obj", ".", "get_children", "(", ")", ":", "if", "not", "skipContainers", ":", "yield", "child", "# could use `yield from` in python 3...", "for", "grandchild", "in", "iter_all_children", "(", "child", ",", "skipContainers", ")", ":", "yield", "grandchild", "else", ":", "yield", "obj" ]
36
16.375
def search(self, index, query): """ Begin a map/reduce operation using a Search. This command will return an error unless executed against a Riak Search cluster. :param index: The Solr index used in the search :type index: string :param query: The search query :type query: string :rtype: :class:`RiakMapReduce` """ self._input_mode = 'query' self._inputs = {'bucket': index, 'index': index, 'query': query} return self
[ "def", "search", "(", "self", ",", "index", ",", "query", ")", ":", "self", ".", "_input_mode", "=", "'query'", "self", ".", "_inputs", "=", "{", "'bucket'", ":", "index", ",", "'index'", ":", "index", ",", "'query'", ":", "query", "}", "return", "self" ]
34.3125
11.4375
def parse(self, xml_data): """ Parse XML data """ # parse tree try: root = ET.fromstring(xml_data) except StdlibParseError as e: raise ParseError(str(e)) self.origin = root.attrib['origin'] for child in root: component = Component() component.parse(child) self.components[component.id] = component
[ "def", "parse", "(", "self", ",", "xml_data", ")", ":", "# parse tree", "try", ":", "root", "=", "ET", ".", "fromstring", "(", "xml_data", ")", "except", "StdlibParseError", "as", "e", ":", "raise", "ParseError", "(", "str", "(", "e", ")", ")", "self", ".", "origin", "=", "root", ".", "attrib", "[", "'origin'", "]", "for", "child", "in", "root", ":", "component", "=", "Component", "(", ")", "component", ".", "parse", "(", "child", ")", "self", ".", "components", "[", "component", ".", "id", "]", "=", "component" ]
26.266667
15.466667
def _getSortedString(o): """ Returns a string describing o, sorting the contents (case-insensitive on keys) if o is a dict. """ # todo: replace this with something like pprint on Python upgrade # We assume here that any container type is either list or tuple which may not always hold if isinstance(o, (dict)): pkeys = sorted(o.keys(), key=_lowercaseToStr) l = [] for k in pkeys: l.append(str(k) + ":" + _getSortedString(o[k])) return "{" + join(l, ",") + "}" else: return str(o)
[ "def", "_getSortedString", "(", "o", ")", ":", "# todo: replace this with something like pprint on Python upgrade", "# We assume here that any container type is either list or tuple which may not always hold", "if", "isinstance", "(", "o", ",", "(", "dict", ")", ")", ":", "pkeys", "=", "sorted", "(", "o", ".", "keys", "(", ")", ",", "key", "=", "_lowercaseToStr", ")", "l", "=", "[", "]", "for", "k", "in", "pkeys", ":", "l", ".", "append", "(", "str", "(", "k", ")", "+", "\":\"", "+", "_getSortedString", "(", "o", "[", "k", "]", ")", ")", "return", "\"{\"", "+", "join", "(", "l", ",", "\",\"", ")", "+", "\"}\"", "else", ":", "return", "str", "(", "o", ")" ]
34.714286
21.428571
def mkdir(self, directory, exists_okay=False): """Create the specified directory. Note this cannot create a recursive hierarchy of directories, instead each one should be created separately. """ # Execute os.mkdir command on the board. command = """ try: import os except ImportError: import uos as os os.mkdir('{0}') """.format( directory ) self._pyboard.enter_raw_repl() try: out = self._pyboard.exec_(textwrap.dedent(command)) except PyboardError as ex: # Check if this is an OSError #17, i.e. directory already exists. if ex.args[2].decode("utf-8").find("OSError: [Errno 17] EEXIST") != -1: if not exists_okay: raise DirectoryExistsError( "Directory already exists: {0}".format(directory) ) else: raise ex self._pyboard.exit_raw_repl()
[ "def", "mkdir", "(", "self", ",", "directory", ",", "exists_okay", "=", "False", ")", ":", "# Execute os.mkdir command on the board.", "command", "=", "\"\"\"\n try:\n import os\n except ImportError:\n import uos as os\n os.mkdir('{0}')\n \"\"\"", ".", "format", "(", "directory", ")", "self", ".", "_pyboard", ".", "enter_raw_repl", "(", ")", "try", ":", "out", "=", "self", ".", "_pyboard", ".", "exec_", "(", "textwrap", ".", "dedent", "(", "command", ")", ")", "except", "PyboardError", "as", "ex", ":", "# Check if this is an OSError #17, i.e. directory already exists.", "if", "ex", ".", "args", "[", "2", "]", ".", "decode", "(", "\"utf-8\"", ")", ".", "find", "(", "\"OSError: [Errno 17] EEXIST\"", ")", "!=", "-", "1", ":", "if", "not", "exists_okay", ":", "raise", "DirectoryExistsError", "(", "\"Directory already exists: {0}\"", ".", "format", "(", "directory", ")", ")", "else", ":", "raise", "ex", "self", ".", "_pyboard", ".", "exit_raw_repl", "(", ")" ]
38
15.481481
async def dump_tuple(self, elem, elem_type, params=None): """ Dumps tuple of elements to the writer. :param elem: :param elem_type: :param params: :return: """ if len(elem) != len(elem_type.f_specs()): raise ValueError('Fixed size tuple has not defined size: %s' % len(elem_type.f_specs())) elem_fields = params[0] if params else None if elem_fields is None: elem_fields = elem_type.f_specs() for idx, elem in enumerate(elem): try: self.tracker.push_index(idx) await self._dump_field(elem, elem_fields[idx], params[1:] if params else None) self.tracker.pop() except Exception as e: raise helpers.ArchiveException(e, tracker=self.tracker) from e
[ "async", "def", "dump_tuple", "(", "self", ",", "elem", ",", "elem_type", ",", "params", "=", "None", ")", ":", "if", "len", "(", "elem", ")", "!=", "len", "(", "elem_type", ".", "f_specs", "(", ")", ")", ":", "raise", "ValueError", "(", "'Fixed size tuple has not defined size: %s'", "%", "len", "(", "elem_type", ".", "f_specs", "(", ")", ")", ")", "elem_fields", "=", "params", "[", "0", "]", "if", "params", "else", "None", "if", "elem_fields", "is", "None", ":", "elem_fields", "=", "elem_type", ".", "f_specs", "(", ")", "for", "idx", ",", "elem", "in", "enumerate", "(", "elem", ")", ":", "try", ":", "self", ".", "tracker", ".", "push_index", "(", "idx", ")", "await", "self", ".", "_dump_field", "(", "elem", ",", "elem_fields", "[", "idx", "]", ",", "params", "[", "1", ":", "]", "if", "params", "else", "None", ")", "self", ".", "tracker", ".", "pop", "(", ")", "except", "Exception", "as", "e", ":", "raise", "helpers", ".", "ArchiveException", "(", "e", ",", "tracker", "=", "self", ".", "tracker", ")", "from", "e" ]
35.869565
19.434783
def AddDigitalShortIdRecord(site_service, tag, time_value, value, status_string="OK ", warn=False, chattering=False, unreliable=False, manual=False): """ This function will add a digital value to the specified eDNA service and tag, including all default point status definitions. :param site_service: The site.service where data will be pushed :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01) :param time_value: The time of the point, which MUST be in UTC Epoch format. For example, "1483926416" not "2016/01/01 01:01:01". :param value: should be either TRUE or FALSE :param status_string: a string that must be EXACTLY 16 characters :param warn: TRUE if the point is in a warning state :param chattering: TRUE if the point is in a chattering state :param unreliable: TRUE if the point is in an unreliable state :param manual: TRUE if the point was manually set :return: 0, if the data push is successful """ # Define all required variables in the correct ctypes format szService = c_char_p(site_service.encode('utf-8')) szPointId = c_char_p(tag.encode('utf-8')) tTime = c_long(int(time_value)) # TODO- check if the string is exactly 16 characters and convert szStatus = create_string_buffer(status_string.encode('utf-8')) bSet = c_int(int(value)) bDigitalWarning = c_int(int(warn)) bDigitalChattering = c_int(int(chattering)) bUnreliable = c_int(int(unreliable)) bManual = c_int(int(manual)) # Try to push the data. Function will return 0 if successful. nRet = dnaserv_dll.DnaAddDigitalShortIdRecord(szService, szPointId, tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering, bUnreliable, bManual) return nRet
[ "def", "AddDigitalShortIdRecord", "(", "site_service", ",", "tag", ",", "time_value", ",", "value", ",", "status_string", "=", "\"OK \"", ",", "warn", "=", "False", ",", "chattering", "=", "False", ",", "unreliable", "=", "False", ",", "manual", "=", "False", ")", ":", "# Define all required variables in the correct ctypes format", "szService", "=", "c_char_p", "(", "site_service", ".", "encode", "(", "'utf-8'", ")", ")", "szPointId", "=", "c_char_p", "(", "tag", ".", "encode", "(", "'utf-8'", ")", ")", "tTime", "=", "c_long", "(", "int", "(", "time_value", ")", ")", "# TODO- check if the string is exactly 16 characters and convert", "szStatus", "=", "create_string_buffer", "(", "status_string", ".", "encode", "(", "'utf-8'", ")", ")", "bSet", "=", "c_int", "(", "int", "(", "value", ")", ")", "bDigitalWarning", "=", "c_int", "(", "int", "(", "warn", ")", ")", "bDigitalChattering", "=", "c_int", "(", "int", "(", "chattering", ")", ")", "bUnreliable", "=", "c_int", "(", "int", "(", "unreliable", ")", ")", "bManual", "=", "c_int", "(", "int", "(", "manual", ")", ")", "# Try to push the data. Function will return 0 if successful.", "nRet", "=", "dnaserv_dll", ".", "DnaAddDigitalShortIdRecord", "(", "szService", ",", "szPointId", ",", "tTime", ",", "bSet", ",", "szStatus", ",", "bDigitalWarning", ",", "bDigitalChattering", ",", "bUnreliable", ",", "bManual", ")", "return", "nRet" ]
49.416667
18.972222
def delete(self, instance): '''Delete an instance''' flushdb(self.client) if flushdb else self.client.flushdb()
[ "def", "delete", "(", "self", ",", "instance", ")", ":", "flushdb", "(", "self", ".", "client", ")", "if", "flushdb", "else", "self", ".", "client", ".", "flushdb", "(", ")" ]
42.333333
15
def dice(edge=15, fn=32): """ dice """ edge = float(edge) # dice c = ops.Cube(edge, center=True) s = ops.Sphere(edge * 3 / 4, center=True) dice = c & s # points c = ops.Circle(edge / 12, _fn=fn) h = 0.7 point = c.linear_extrude(height=h) point1 = point.translate([0, 0, edge / 2 - h / 2]) point2_1 = point1.rotate(a=90, v=[1, 0, 0]).translate([edge / 6, 0, edge / 6]) point2_2 = point2_1.mirror([-edge / 6, 0, -edge / 6]) point2 = point2_1 + point2_2 point3 = point2.rotate(a=90, v=[0, 0, 1]) + point1.rotate(a=90, v=[0, 1, 0]) point4_12 = point2.rotate(a=-90, v=[0, 0, 1]) point4 = point4_12 + point4_12.mirror([0, 1, 0]) point5_123 = point3.rotate(a=90, v=[0, 0, 1]) point5 = point5_123 + point5_123.mirror([1, 0, 0]) point6_1 = point.translate([0, 0, -(edge / 2 + h / 2)]).translate([0, edge / 6, 0]) point6_2 = point6_1.translate([edge / 4, 0, 0]) point6_3 = point6_1.translate([-edge / 4, 0, 0]) point6_123 = point6_1 + point6_2 + point6_3 point6_456 = point6_123.mirror([0, 1, 0]) point6 = point6_123 + point6_456 dice_with_holes = dice - point1 - point2 - point3 - point4 - point5 - point6 dice_with_holes = dice_with_holes.mirror([0, 0, 1]) return(dice_with_holes)
[ "def", "dice", "(", "edge", "=", "15", ",", "fn", "=", "32", ")", ":", "edge", "=", "float", "(", "edge", ")", "# dice", "c", "=", "ops", ".", "Cube", "(", "edge", ",", "center", "=", "True", ")", "s", "=", "ops", ".", "Sphere", "(", "edge", "*", "3", "/", "4", ",", "center", "=", "True", ")", "dice", "=", "c", "&", "s", "# points", "c", "=", "ops", ".", "Circle", "(", "edge", "/", "12", ",", "_fn", "=", "fn", ")", "h", "=", "0.7", "point", "=", "c", ".", "linear_extrude", "(", "height", "=", "h", ")", "point1", "=", "point", ".", "translate", "(", "[", "0", ",", "0", ",", "edge", "/", "2", "-", "h", "/", "2", "]", ")", "point2_1", "=", "point1", ".", "rotate", "(", "a", "=", "90", ",", "v", "=", "[", "1", ",", "0", ",", "0", "]", ")", ".", "translate", "(", "[", "edge", "/", "6", ",", "0", ",", "edge", "/", "6", "]", ")", "point2_2", "=", "point2_1", ".", "mirror", "(", "[", "-", "edge", "/", "6", ",", "0", ",", "-", "edge", "/", "6", "]", ")", "point2", "=", "point2_1", "+", "point2_2", "point3", "=", "point2", ".", "rotate", "(", "a", "=", "90", ",", "v", "=", "[", "0", ",", "0", ",", "1", "]", ")", "+", "point1", ".", "rotate", "(", "a", "=", "90", ",", "v", "=", "[", "0", ",", "1", ",", "0", "]", ")", "point4_12", "=", "point2", ".", "rotate", "(", "a", "=", "-", "90", ",", "v", "=", "[", "0", ",", "0", ",", "1", "]", ")", "point4", "=", "point4_12", "+", "point4_12", ".", "mirror", "(", "[", "0", ",", "1", ",", "0", "]", ")", "point5_123", "=", "point3", ".", "rotate", "(", "a", "=", "90", ",", "v", "=", "[", "0", ",", "0", ",", "1", "]", ")", "point5", "=", "point5_123", "+", "point5_123", ".", "mirror", "(", "[", "1", ",", "0", ",", "0", "]", ")", "point6_1", "=", "point", ".", "translate", "(", "[", "0", ",", "0", ",", "-", "(", "edge", "/", "2", "+", "h", "/", "2", ")", "]", ")", ".", "translate", "(", "[", "0", ",", "edge", "/", "6", ",", "0", "]", ")", "point6_2", "=", "point6_1", ".", "translate", "(", "[", "edge", "/", "4", ",", "0", ",", "0", "]", 
")", "point6_3", "=", "point6_1", ".", "translate", "(", "[", "-", "edge", "/", "4", ",", "0", ",", "0", "]", ")", "point6_123", "=", "point6_1", "+", "point6_2", "+", "point6_3", "point6_456", "=", "point6_123", ".", "mirror", "(", "[", "0", ",", "1", ",", "0", "]", ")", "point6", "=", "point6_123", "+", "point6_456", "dice_with_holes", "=", "dice", "-", "point1", "-", "point2", "-", "point3", "-", "point4", "-", "point5", "-", "point6", "dice_with_holes", "=", "dice_with_holes", ".", "mirror", "(", "[", "0", ",", "0", ",", "1", "]", ")", "return", "(", "dice_with_holes", ")" ]
45.833333
16.8
def group(self, index, chunked=False): """ Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black. """ if index < 0 or index > len(self.pattern.groups): raise IndexError("no such group") if index > 0 and index <= len(self.pattern.groups): g = self.pattern.groups[index-1] if index == 0: g = self.pattern.sequence if chunked is True: return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g])) return Group(self, [w for w in self.words if self.constraint(w) in g])
[ "def", "group", "(", "self", ",", "index", ",", "chunked", "=", "False", ")", ":", "if", "index", "<", "0", "or", "index", ">", "len", "(", "self", ".", "pattern", ".", "groups", ")", ":", "raise", "IndexError", "(", "\"no such group\"", ")", "if", "index", ">", "0", "and", "index", "<=", "len", "(", "self", ".", "pattern", ".", "groups", ")", ":", "g", "=", "self", ".", "pattern", ".", "groups", "[", "index", "-", "1", "]", "if", "index", "==", "0", ":", "g", "=", "self", ".", "pattern", ".", "sequence", "if", "chunked", "is", "True", ":", "return", "Group", "(", "self", ",", "self", ".", "constituents", "(", "constraint", "=", "[", "self", ".", "pattern", ".", "sequence", ".", "index", "(", "x", ")", "for", "x", "in", "g", "]", ")", ")", "return", "Group", "(", "self", ",", "[", "w", "for", "w", "in", "self", ".", "words", "if", "self", ".", "constraint", "(", "w", ")", "in", "g", "]", ")" ]
57.066667
21.8
def get_monitor_ping(request): """MNCore.ping() → Boolean.""" response = d1_gmn.app.views.util.http_response_with_boolean_true_type() d1_gmn.app.views.headers.add_http_date_header(response) return response
[ "def", "get_monitor_ping", "(", "request", ")", ":", "response", "=", "d1_gmn", ".", "app", ".", "views", ".", "util", ".", "http_response_with_boolean_true_type", "(", ")", "d1_gmn", ".", "app", ".", "views", ".", "headers", ".", "add_http_date_header", "(", "response", ")", "return", "response" ]
43.4
17
def _subclass_must_implement(self, fn): """ Returns a NotImplementedError for a function that should be implemented. :param fn: name of the function """ m = "Missing function implementation in {}: {}".format(type(self), fn) return NotImplementedError(m)
[ "def", "_subclass_must_implement", "(", "self", ",", "fn", ")", ":", "m", "=", "\"Missing function implementation in {}: {}\"", ".", "format", "(", "type", "(", "self", ")", ",", "fn", ")", "return", "NotImplementedError", "(", "m", ")" ]
42.142857
11.857143
def refreshCompositeOf(self, single_keywords, composite_keywords, store=None, namespace=None): """Re-check sub-parts of this keyword. This should be called after the whole RDF was processed, because it is using a cache of single keywords and if that one is incomplete, you will not identify all parts. """ def _get_ckw_components(new_vals, label): if label in single_keywords: new_vals.append(single_keywords[label]) elif ('Composite.%s' % label) in composite_keywords: for l in composite_keywords['Composite.{0}'.format(label)].compositeof: # noqa _get_ckw_components(new_vals, l) elif label in composite_keywords: for l in composite_keywords[label].compositeof: _get_ckw_components(new_vals, l) else: # One single or composite keyword is missing from the taxonomy. # This is due to an error in the taxonomy description. message = "The composite term \"%s\""\ " should be made of single keywords,"\ " but at least one is missing." % self.id if store is not None: message += "Needed components: %s"\ % list(store.objects(self.id, namespace["compositeOf"])) message += " Missing is: %s" % label raise TaxonomyError(message) if self.compositeof: new_vals = [] try: for label in self.compositeof: _get_ckw_components(new_vals, label) self.compositeof = new_vals except TaxonomyError as err: # the composites will be empty # (better than to have confusing, partial matches) self.compositeof = [] current_app.logger.error(err)
[ "def", "refreshCompositeOf", "(", "self", ",", "single_keywords", ",", "composite_keywords", ",", "store", "=", "None", ",", "namespace", "=", "None", ")", ":", "def", "_get_ckw_components", "(", "new_vals", ",", "label", ")", ":", "if", "label", "in", "single_keywords", ":", "new_vals", ".", "append", "(", "single_keywords", "[", "label", "]", ")", "elif", "(", "'Composite.%s'", "%", "label", ")", "in", "composite_keywords", ":", "for", "l", "in", "composite_keywords", "[", "'Composite.{0}'", ".", "format", "(", "label", ")", "]", ".", "compositeof", ":", "# noqa", "_get_ckw_components", "(", "new_vals", ",", "l", ")", "elif", "label", "in", "composite_keywords", ":", "for", "l", "in", "composite_keywords", "[", "label", "]", ".", "compositeof", ":", "_get_ckw_components", "(", "new_vals", ",", "l", ")", "else", ":", "# One single or composite keyword is missing from the taxonomy.", "# This is due to an error in the taxonomy description.", "message", "=", "\"The composite term \\\"%s\\\"\"", "\" should be made of single keywords,\"", "\" but at least one is missing.\"", "%", "self", ".", "id", "if", "store", "is", "not", "None", ":", "message", "+=", "\"Needed components: %s\"", "%", "list", "(", "store", ".", "objects", "(", "self", ".", "id", ",", "namespace", "[", "\"compositeOf\"", "]", ")", ")", "message", "+=", "\" Missing is: %s\"", "%", "label", "raise", "TaxonomyError", "(", "message", ")", "if", "self", ".", "compositeof", ":", "new_vals", "=", "[", "]", "try", ":", "for", "label", "in", "self", ".", "compositeof", ":", "_get_ckw_components", "(", "new_vals", ",", "label", ")", "self", ".", "compositeof", "=", "new_vals", "except", "TaxonomyError", "as", "err", ":", "# the composites will be empty", "# (better than to have confusing, partial matches)", "self", ".", "compositeof", "=", "[", "]", "current_app", ".", "logger", ".", "error", "(", "err", ")" ]
48.926829
17.292683
def get_diff_str(self, element, length): '''get_diff_str High-level api: Produce a string that indicates the difference between two models. Parameters ---------- element : `Element` A node in model tree. length : `int` String length that has been consumed. Returns ------- str A string that indicates the difference between two models. ''' spaces = ' '*(self.get_width(element) - length) return spaces + element.get('diff')
[ "def", "get_diff_str", "(", "self", ",", "element", ",", "length", ")", ":", "spaces", "=", "' '", "*", "(", "self", ".", "get_width", "(", "element", ")", "-", "length", ")", "return", "spaces", "+", "element", ".", "get", "(", "'diff'", ")" ]
22.791667
25.125
def genes_with_peak(self, peaks, transform_func=None, split=False, intersect_kwargs=None, id_attribute='ID', *args, **kwargs): """ Returns a boolean index of genes that have a peak nearby. Parameters ---------- peaks : string or pybedtools.BedTool If string, then assume it's a filename to a BED/GFF/GTF file of intervals; otherwise use the pybedtools.BedTool object directly. transform_func : callable This function will be applied to each gene object returned by self.features(). Additional args and kwargs are passed to `transform_func`. For example, if you're looking for peaks within 1kb upstream of TSSs, then pybedtools.featurefuncs.TSS would be a useful `transform_func`, and you could supply additional kwargs of `upstream=1000` and `downstream=0`. This function can return iterables of features, too. For example, you might want to look for peaks falling within the exons of a gene. In this case, `transform_func` should return an iterable of pybedtools.Interval objects. The only requirement is that the `name` field of any feature matches the index of the dataframe. intersect_kwargs : dict kwargs passed to pybedtools.BedTool.intersect. id_attribute : str The attribute in the GTF or GFF file that contains the id of the gene. For meaningful results to be returned, a gene's ID be also found in the index of the dataframe. For GFF files, typically you'd use `id_attribute="ID"`. For GTF files, you'd typically use `id_attribute="gene_id"`. 
""" def _transform_func(x): """ In order to support transform funcs that return a single feature or an iterable of features, we need to wrap it """ result = transform_func(x) if isinstance(result, pybedtools.Interval): result = [result] for i in result: if i: yield result intersect_kwargs = intersect_kwargs or {} if not self._cached_features: self._cached_features = pybedtools\ .BedTool(self.features())\ .saveas() if transform_func: if split: features = self._cached_features\ .split(_transform_func, *args, **kwargs) else: features = self._cached_features\ .each(transform_func, *args, **kwargs) else: features = self._cached_features hits = list(set([i[id_attribute] for i in features.intersect( peaks, **intersect_kwargs)])) return self.data.index.isin(hits)
[ "def", "genes_with_peak", "(", "self", ",", "peaks", ",", "transform_func", "=", "None", ",", "split", "=", "False", ",", "intersect_kwargs", "=", "None", ",", "id_attribute", "=", "'ID'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "_transform_func", "(", "x", ")", ":", "\"\"\"\n In order to support transform funcs that return a single feature or\n an iterable of features, we need to wrap it\n \"\"\"", "result", "=", "transform_func", "(", "x", ")", "if", "isinstance", "(", "result", ",", "pybedtools", ".", "Interval", ")", ":", "result", "=", "[", "result", "]", "for", "i", "in", "result", ":", "if", "i", ":", "yield", "result", "intersect_kwargs", "=", "intersect_kwargs", "or", "{", "}", "if", "not", "self", ".", "_cached_features", ":", "self", ".", "_cached_features", "=", "pybedtools", ".", "BedTool", "(", "self", ".", "features", "(", ")", ")", ".", "saveas", "(", ")", "if", "transform_func", ":", "if", "split", ":", "features", "=", "self", ".", "_cached_features", ".", "split", "(", "_transform_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "features", "=", "self", ".", "_cached_features", ".", "each", "(", "transform_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "features", "=", "self", ".", "_cached_features", "hits", "=", "list", "(", "set", "(", "[", "i", "[", "id_attribute", "]", "for", "i", "in", "features", ".", "intersect", "(", "peaks", ",", "*", "*", "intersect_kwargs", ")", "]", ")", ")", "return", "self", ".", "data", ".", "index", ".", "isin", "(", "hits", ")" ]
41.565217
21.623188
def add_arguments(self, parser): """ Entry point for subclassed commands to add custom arguments. """ subparsers = parser.add_subparsers(help='sub-command help', dest='command') add_parser = partial(_add_subparser, subparsers, parser) add_parser('list', help="list concurrency triggers") add_parser('drop', help="drop concurrency triggers") add_parser('create', help="create concurrency triggers") parser.add_argument('-d', '--database', action='store', dest='database', default=None, help='limit to this database') parser.add_argument('-t', '--trigger', action='store', dest='trigger', default=None, help='limit to this trigger name')
[ "def", "add_arguments", "(", "self", ",", "parser", ")", ":", "subparsers", "=", "parser", ".", "add_subparsers", "(", "help", "=", "'sub-command help'", ",", "dest", "=", "'command'", ")", "add_parser", "=", "partial", "(", "_add_subparser", ",", "subparsers", ",", "parser", ")", "add_parser", "(", "'list'", ",", "help", "=", "\"list concurrency triggers\"", ")", "add_parser", "(", "'drop'", ",", "help", "=", "\"drop concurrency triggers\"", ")", "add_parser", "(", "'create'", ",", "help", "=", "\"create concurrency triggers\"", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--database'", ",", "action", "=", "'store'", ",", "dest", "=", "'database'", ",", "default", "=", "None", ",", "help", "=", "'limit to this database'", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--trigger'", ",", "action", "=", "'store'", ",", "dest", "=", "'trigger'", ",", "default", "=", "None", ",", "help", "=", "'limit to this trigger name'", ")" ]
40.166667
16.583333
def find_peaks(sig): """ Find hard peaks and soft peaks in a signal, defined as follows: - Hard peak: a peak that is either /\ or \/ - Soft peak: a peak that is either /-*\ or \-*/ In this case we define the middle as the peak Parameters ---------- sig : np array The 1d signal array Returns ------- hard_peaks : numpy array Array containing the indices of the hard peaks: soft_peaks : numpy array Array containing the indices of the soft peaks """ if len(sig) == 0: return np.empty([0]), np.empty([0]) tmp = sig[1:] tmp = np.append(tmp, [sig[-1]]) tmp = sig - tmp tmp[np.where(tmp>0)] = 1 tmp[np.where(tmp==0)] = 0 tmp[np.where(tmp<0)] = -1 tmp2 = tmp[1:] tmp2 = np.append(tmp2, [0]) tmp = tmp-tmp2 hard_peaks = np.where(np.logical_or(tmp==-2, tmp==+2))[0] + 1 soft_peaks = [] for iv in np.where(np.logical_or(tmp==-1,tmp==+1))[0]: t = tmp[iv] i = iv+1 while True: if i==len(tmp) or tmp[i] == -t or tmp[i] == -2 or tmp[i] == 2: break if tmp[i] == t: soft_peaks.append(int(iv + (i - iv)/2)) break i += 1 soft_peaks = np.array(soft_peaks, dtype='int') + 1 return hard_peaks, soft_peaks
[ "def", "find_peaks", "(", "sig", ")", ":", "if", "len", "(", "sig", ")", "==", "0", ":", "return", "np", ".", "empty", "(", "[", "0", "]", ")", ",", "np", ".", "empty", "(", "[", "0", "]", ")", "tmp", "=", "sig", "[", "1", ":", "]", "tmp", "=", "np", ".", "append", "(", "tmp", ",", "[", "sig", "[", "-", "1", "]", "]", ")", "tmp", "=", "sig", "-", "tmp", "tmp", "[", "np", ".", "where", "(", "tmp", ">", "0", ")", "]", "=", "1", "tmp", "[", "np", ".", "where", "(", "tmp", "==", "0", ")", "]", "=", "0", "tmp", "[", "np", ".", "where", "(", "tmp", "<", "0", ")", "]", "=", "-", "1", "tmp2", "=", "tmp", "[", "1", ":", "]", "tmp2", "=", "np", ".", "append", "(", "tmp2", ",", "[", "0", "]", ")", "tmp", "=", "tmp", "-", "tmp2", "hard_peaks", "=", "np", ".", "where", "(", "np", ".", "logical_or", "(", "tmp", "==", "-", "2", ",", "tmp", "==", "+", "2", ")", ")", "[", "0", "]", "+", "1", "soft_peaks", "=", "[", "]", "for", "iv", "in", "np", ".", "where", "(", "np", ".", "logical_or", "(", "tmp", "==", "-", "1", ",", "tmp", "==", "+", "1", ")", ")", "[", "0", "]", ":", "t", "=", "tmp", "[", "iv", "]", "i", "=", "iv", "+", "1", "while", "True", ":", "if", "i", "==", "len", "(", "tmp", ")", "or", "tmp", "[", "i", "]", "==", "-", "t", "or", "tmp", "[", "i", "]", "==", "-", "2", "or", "tmp", "[", "i", "]", "==", "2", ":", "break", "if", "tmp", "[", "i", "]", "==", "t", ":", "soft_peaks", ".", "append", "(", "int", "(", "iv", "+", "(", "i", "-", "iv", ")", "/", "2", ")", ")", "break", "i", "+=", "1", "soft_peaks", "=", "np", ".", "array", "(", "soft_peaks", ",", "dtype", "=", "'int'", ")", "+", "1", "return", "hard_peaks", ",", "soft_peaks" ]
25.94
20.5
def _get_nsamps_samples_n(res): """ Helper function for calculating the number of samples Parameters ---------- res : :class:`~dynesty.results.Results` instance The :class:`~dynesty.results.Results` instance taken from a previous nested sampling run. Returns ------- nsamps: int The total number of samples samples_n: array Number of live points at a given iteration """ try: # Check if the number of live points explicitly changes. samples_n = res.samples_n nsamps = len(samples_n) except: # If the number of live points is constant, compute `samples_n`. niter = res.niter nlive = res.nlive nsamps = len(res.logvol) if nsamps == niter: samples_n = np.ones(niter, dtype='int') * nlive elif nsamps == (niter + nlive): samples_n = np.append(np.ones(niter, dtype='int') * nlive, np.arange(1, nlive + 1)[::-1]) else: raise ValueError("Final number of samples differs from number of " "iterations and number of live points.") return nsamps, samples_n
[ "def", "_get_nsamps_samples_n", "(", "res", ")", ":", "try", ":", "# Check if the number of live points explicitly changes.", "samples_n", "=", "res", ".", "samples_n", "nsamps", "=", "len", "(", "samples_n", ")", "except", ":", "# If the number of live points is constant, compute `samples_n`.", "niter", "=", "res", ".", "niter", "nlive", "=", "res", ".", "nlive", "nsamps", "=", "len", "(", "res", ".", "logvol", ")", "if", "nsamps", "==", "niter", ":", "samples_n", "=", "np", ".", "ones", "(", "niter", ",", "dtype", "=", "'int'", ")", "*", "nlive", "elif", "nsamps", "==", "(", "niter", "+", "nlive", ")", ":", "samples_n", "=", "np", ".", "append", "(", "np", ".", "ones", "(", "niter", ",", "dtype", "=", "'int'", ")", "*", "nlive", ",", "np", ".", "arange", "(", "1", ",", "nlive", "+", "1", ")", "[", ":", ":", "-", "1", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Final number of samples differs from number of \"", "\"iterations and number of live points.\"", ")", "return", "nsamps", ",", "samples_n" ]
33.514286
20.657143
def QA_fetch_future_min( code, start, end, format='numpy', frequence='1min', collections=DATABASE.future_min): '获取股票分钟线' if frequence in ['1min', '1m']: frequence = '1min' elif frequence in ['5min', '5m']: frequence = '5min' elif frequence in ['15min', '15m']: frequence = '15min' elif frequence in ['30min', '30m']: frequence = '30min' elif frequence in ['60min', '60m']: frequence = '60min' __data = [] code = QA_util_code_tolist(code, auto_fill=False) cursor = collections.find({ 'code': {'$in': code}, "time_stamp": { "$gte": QA_util_time_stamp(start), "$lte": QA_util_time_stamp(end) }, 'type': frequence }, batch_size=10000) if format in ['dict', 'json']: return [data for data in cursor] for item in cursor: __data.append([str(item['code']), float(item['open']), float(item['high']), float( item['low']), float(item['close']), float(item['position']), float(item['price']), float(item['trade']), item['datetime'], item['tradetime'], item['time_stamp'], item['date'], item['type']]) __data = DataFrame(__data, columns=[ 'code', 'open', 'high', 'low', 'close', 'position', 'price', 'trade', 'datetime', 'tradetime', 'time_stamp', 'date', 'type']) __data['datetime'] = pd.to_datetime(__data['datetime']) __data = __data.set_index('datetime', drop=False) if format in ['numpy', 'np', 'n']: return numpy.asarray(__data) elif format in ['list', 'l', 'L']: return numpy.asarray(__data).tolist() elif format in ['P', 'p', 'pandas', 'pd']: return __data
[ "def", "QA_fetch_future_min", "(", "code", ",", "start", ",", "end", ",", "format", "=", "'numpy'", ",", "frequence", "=", "'1min'", ",", "collections", "=", "DATABASE", ".", "future_min", ")", ":", "if", "frequence", "in", "[", "'1min'", ",", "'1m'", "]", ":", "frequence", "=", "'1min'", "elif", "frequence", "in", "[", "'5min'", ",", "'5m'", "]", ":", "frequence", "=", "'5min'", "elif", "frequence", "in", "[", "'15min'", ",", "'15m'", "]", ":", "frequence", "=", "'15min'", "elif", "frequence", "in", "[", "'30min'", ",", "'30m'", "]", ":", "frequence", "=", "'30min'", "elif", "frequence", "in", "[", "'60min'", ",", "'60m'", "]", ":", "frequence", "=", "'60min'", "__data", "=", "[", "]", "code", "=", "QA_util_code_tolist", "(", "code", ",", "auto_fill", "=", "False", ")", "cursor", "=", "collections", ".", "find", "(", "{", "'code'", ":", "{", "'$in'", ":", "code", "}", ",", "\"time_stamp\"", ":", "{", "\"$gte\"", ":", "QA_util_time_stamp", "(", "start", ")", ",", "\"$lte\"", ":", "QA_util_time_stamp", "(", "end", ")", "}", ",", "'type'", ":", "frequence", "}", ",", "batch_size", "=", "10000", ")", "if", "format", "in", "[", "'dict'", ",", "'json'", "]", ":", "return", "[", "data", "for", "data", "in", "cursor", "]", "for", "item", "in", "cursor", ":", "__data", ".", "append", "(", "[", "str", "(", "item", "[", "'code'", "]", ")", ",", "float", "(", "item", "[", "'open'", "]", ")", ",", "float", "(", "item", "[", "'high'", "]", ")", ",", "float", "(", "item", "[", "'low'", "]", ")", ",", "float", "(", "item", "[", "'close'", "]", ")", ",", "float", "(", "item", "[", "'position'", "]", ")", ",", "float", "(", "item", "[", "'price'", "]", ")", ",", "float", "(", "item", "[", "'trade'", "]", ")", ",", "item", "[", "'datetime'", "]", ",", "item", "[", "'tradetime'", "]", ",", "item", "[", "'time_stamp'", "]", ",", "item", "[", "'date'", "]", ",", "item", "[", "'type'", "]", "]", ")", "__data", "=", "DataFrame", "(", "__data", ",", "columns", "=", "[", "'code'", ",", 
"'open'", ",", "'high'", ",", "'low'", ",", "'close'", ",", "'position'", ",", "'price'", ",", "'trade'", ",", "'datetime'", ",", "'tradetime'", ",", "'time_stamp'", ",", "'date'", ",", "'type'", "]", ")", "__data", "[", "'datetime'", "]", "=", "pd", ".", "to_datetime", "(", "__data", "[", "'datetime'", "]", ")", "__data", "=", "__data", ".", "set_index", "(", "'datetime'", ",", "drop", "=", "False", ")", "if", "format", "in", "[", "'numpy'", ",", "'np'", ",", "'n'", "]", ":", "return", "numpy", ".", "asarray", "(", "__data", ")", "elif", "format", "in", "[", "'list'", ",", "'l'", ",", "'L'", "]", ":", "return", "numpy", ".", "asarray", "(", "__data", ")", ".", "tolist", "(", ")", "elif", "format", "in", "[", "'P'", ",", "'p'", ",", "'pandas'", ",", "'pd'", "]", ":", "return", "__data" ]
38.090909
17.772727
def decode_response(client_message, to_object=None): """ Decode response from client message""" parameters = dict(response=None) if not client_message.read_bool(): parameters['response'] = to_object(client_message.read_data()) return parameters
[ "def", "decode_response", "(", "client_message", ",", "to_object", "=", "None", ")", ":", "parameters", "=", "dict", "(", "response", "=", "None", ")", "if", "not", "client_message", ".", "read_bool", "(", ")", ":", "parameters", "[", "'response'", "]", "=", "to_object", "(", "client_message", ".", "read_data", "(", ")", ")", "return", "parameters" ]
43.833333
11.166667
def get_include_fields(request): """Retrieve include_fields values from the request """ include_fields = [] rif = request.get("include_fields", "") if "include_fields" in request: include_fields = [x.strip() for x in rif.split(",") if x.strip()] if "include_fields[]" in request: include_fields = request['include_fields[]'] return include_fields
[ "def", "get_include_fields", "(", "request", ")", ":", "include_fields", "=", "[", "]", "rif", "=", "request", ".", "get", "(", "\"include_fields\"", ",", "\"\"", ")", "if", "\"include_fields\"", "in", "request", ":", "include_fields", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "rif", ".", "split", "(", "\",\"", ")", "if", "x", ".", "strip", "(", ")", "]", "if", "\"include_fields[]\"", "in", "request", ":", "include_fields", "=", "request", "[", "'include_fields[]'", "]", "return", "include_fields" ]
35.916667
6.5
def Reorder(x, params, output=None, **kwargs): """Reorder a tuple into another tuple. For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y). The output argument specifies how to re-order, using integers that refer to indices in the input tuple. For example, if input = (x, y, z) then Reorder(input, output=(1, 0, 2)) = (y, x, z) Reorder(input, output=(0, 0)) = (x, x) Reorder(input, output=(0, (1, 1))) = (x, (y, y)) Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y)) By default (if no output is given) Reorder does nothing (Identity). Args: x: the input tuple to re-order. params: layer parameters (unused). output: the specification of the output tuple: a nested tuple of ints. **kwargs: other arguments (unused). Returns: The re-ordered tuple with the same shape as output. """ del params, kwargs if output is None: return x return base.nested_map(output, lambda i: x[i])
[ "def", "Reorder", "(", "x", ",", "params", ",", "output", "=", "None", ",", "*", "*", "kwargs", ")", ":", "del", "params", ",", "kwargs", "if", "output", "is", "None", ":", "return", "x", "return", "base", ".", "nested_map", "(", "output", ",", "lambda", "i", ":", "x", "[", "i", "]", ")" ]
30.677419
22.258065
def automain(self, function): """ Decorator that defines *and runs* the main function of the experiment. The decorated function is marked as the default command for this experiment, and the command-line interface is automatically run when the file is executed. The method decorated by this should be last in the file because is equivalent to: .. code-block:: python @ex.main def my_main(): pass if __name__ == '__main__': ex.run_commandline() """ captured = self.main(function) if function.__module__ == '__main__': # Ensure that automain is not used in interactive mode. import inspect main_filename = inspect.getfile(function) if (main_filename == '<stdin>' or (main_filename.startswith('<ipython-input-') and main_filename.endswith('>'))): raise RuntimeError('Cannot use @ex.automain decorator in ' 'interactive mode. Use @ex.main instead.') self.run_commandline() return captured
[ "def", "automain", "(", "self", ",", "function", ")", ":", "captured", "=", "self", ".", "main", "(", "function", ")", "if", "function", ".", "__module__", "==", "'__main__'", ":", "# Ensure that automain is not used in interactive mode.", "import", "inspect", "main_filename", "=", "inspect", ".", "getfile", "(", "function", ")", "if", "(", "main_filename", "==", "'<stdin>'", "or", "(", "main_filename", ".", "startswith", "(", "'<ipython-input-'", ")", "and", "main_filename", ".", "endswith", "(", "'>'", ")", ")", ")", ":", "raise", "RuntimeError", "(", "'Cannot use @ex.automain decorator in '", "'interactive mode. Use @ex.main instead.'", ")", "self", ".", "run_commandline", "(", ")", "return", "captured" ]
35.545455
20.878788
def _expand_address(addy): ''' Convert the libcloud GCEAddress object into something more serializable. ''' ret = {} ret.update(addy.__dict__) ret['extra']['zone'] = addy.region.name return ret
[ "def", "_expand_address", "(", "addy", ")", ":", "ret", "=", "{", "}", "ret", ".", "update", "(", "addy", ".", "__dict__", ")", "ret", "[", "'extra'", "]", "[", "'zone'", "]", "=", "addy", ".", "region", ".", "name", "return", "ret" ]
26.75
23
def flavor_create(self, name, # pylint: disable=C0103 flavor_id=0, # pylint: disable=C0103 ram=0, disk=0, vcpus=1, is_public=True): ''' Create a flavor ''' nt_ks = self.compute_conn nt_ks.flavors.create( name=name, flavorid=flavor_id, ram=ram, disk=disk, vcpus=vcpus, is_public=is_public ) return {'name': name, 'id': flavor_id, 'ram': ram, 'disk': disk, 'vcpus': vcpus, 'is_public': is_public}
[ "def", "flavor_create", "(", "self", ",", "name", ",", "# pylint: disable=C0103", "flavor_id", "=", "0", ",", "# pylint: disable=C0103", "ram", "=", "0", ",", "disk", "=", "0", ",", "vcpus", "=", "1", ",", "is_public", "=", "True", ")", ":", "nt_ks", "=", "self", ".", "compute_conn", "nt_ks", ".", "flavors", ".", "create", "(", "name", "=", "name", ",", "flavorid", "=", "flavor_id", ",", "ram", "=", "ram", ",", "disk", "=", "disk", ",", "vcpus", "=", "vcpus", ",", "is_public", "=", "is_public", ")", "return", "{", "'name'", ":", "name", ",", "'id'", ":", "flavor_id", ",", "'ram'", ":", "ram", ",", "'disk'", ":", "disk", ",", "'vcpus'", ":", "vcpus", ",", "'is_public'", ":", "is_public", "}" ]
33.6
16.5
def merge(self, other): # type: (TentativeType) -> None """ Merge two TentativeType instances """ for hashables in other.types_hashable: self.add(hashables) for non_hashbles in other.types: self.add(non_hashbles)
[ "def", "merge", "(", "self", ",", "other", ")", ":", "# type: (TentativeType) -> None", "for", "hashables", "in", "other", ".", "types_hashable", ":", "self", ".", "add", "(", "hashables", ")", "for", "non_hashbles", "in", "other", ".", "types", ":", "self", ".", "add", "(", "non_hashbles", ")" ]
30.666667
4.444444
def state_to_modelparams(self, state): """ Converts a QuTiP-represented state into a model parameter vector. :param qutip.Qobj state: State to be converted. :rtype: :class:`np.ndarray` :return: The representation of the given state in this basis, as a vector of real parameters. """ basis = self.flat() data = state.data.todense().view(np.ndarray).flatten() # NB: assumes Hermitian state and basis! return np.real(np.dot(basis.conj(), data))
[ "def", "state_to_modelparams", "(", "self", ",", "state", ")", ":", "basis", "=", "self", ".", "flat", "(", ")", "data", "=", "state", ".", "data", ".", "todense", "(", ")", ".", "view", "(", "np", ".", "ndarray", ")", ".", "flatten", "(", ")", "# NB: assumes Hermitian state and basis!", "return", "np", ".", "real", "(", "np", ".", "dot", "(", "basis", ".", "conj", "(", ")", ",", "data", ")", ")" ]
37.285714
15.714286
def CreateAttachment(self, document_link, attachment, options=None): """Creates an attachment in a document. :param str document_link: The link to the document. :param dict attachment: The Azure Cosmos attachment to create. :param dict options: The request options for the request. :return: The created Attachment. :rtype: dict """ if options is None: options = {} document_id, path = self._GetItemIdWithPathForAttachment(attachment, document_link) return self.Create(attachment, path, 'attachments', document_id, None, options)
[ "def", "CreateAttachment", "(", "self", ",", "document_link", ",", "attachment", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "document_id", ",", "path", "=", "self", ".", "_GetItemIdWithPathForAttachment", "(", "attachment", ",", "document_link", ")", "return", "self", ".", "Create", "(", "attachment", ",", "path", ",", "'attachments'", ",", "document_id", ",", "None", ",", "options", ")" ]
30.538462
16.153846
def recover_and_supervise(recovery_file): """ Retrieve monitor data from recovery_file and resume monitoring """ try: logging.info("Attempting to recover Supervisor data from " + recovery_file) with open(recovery_file) as rf: recovery_data = json.load(rf) monitor_data = recovery_data['monitor_data'] dependencies = recovery_data['dependencies'] args = recovery_data['args'] except: logging.error("Could not recover monitor data, exiting...") return 1 logging.info("Data successfully loaded, resuming Supervisor") supervise_until_complete(monitor_data, dependencies, args, recovery_file)
[ "def", "recover_and_supervise", "(", "recovery_file", ")", ":", "try", ":", "logging", ".", "info", "(", "\"Attempting to recover Supervisor data from \"", "+", "recovery_file", ")", "with", "open", "(", "recovery_file", ")", "as", "rf", ":", "recovery_data", "=", "json", ".", "load", "(", "rf", ")", "monitor_data", "=", "recovery_data", "[", "'monitor_data'", "]", "dependencies", "=", "recovery_data", "[", "'dependencies'", "]", "args", "=", "recovery_data", "[", "'args'", "]", "except", ":", "logging", ".", "error", "(", "\"Could not recover monitor data, exiting...\"", ")", "return", "1", "logging", ".", "info", "(", "\"Data successfully loaded, resuming Supervisor\"", ")", "supervise_until_complete", "(", "monitor_data", ",", "dependencies", ",", "args", ",", "recovery_file", ")" ]
42.125
20.75
def swap_buffers(self): """ Headless window currently don't support double buffering. We only increment the frame counter here. """ self.frames += 1 if self.headless_frames and self.frames >= self.headless_frames: self.close()
[ "def", "swap_buffers", "(", "self", ")", ":", "self", ".", "frames", "+=", "1", "if", "self", ".", "headless_frames", "and", "self", ".", "frames", ">=", "self", ".", "headless_frames", ":", "self", ".", "close", "(", ")" ]
31
17.222222
def split_lines(tokenlist): """ Take a single list of (Token, text) tuples and yield one such list for each line. Just like str.split, this will yield at least one item. :param tokenlist: List of (token, text) or (token, text, mouse_handler) tuples. """ line = [] for item in tokenlist: # For (token, text) tuples. if len(item) == 2: token, string = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part)) yield line line = [] line.append((token, parts[-1])) # Note that parts[-1] can be empty, and that's fine. It happens # in the case of [(Token.SetCursorPosition, '')]. # For (token, text, mouse_handler) tuples. # I know, partly copy/paste, but understandable and more efficient # than many tests. else: token, string, mouse_handler = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part, mouse_handler)) yield line line = [] line.append((token, parts[-1], mouse_handler)) # Always yield the last line, even when this is an empty line. This ensures # that when `tokenlist` ends with a newline character, an additional empty # line is yielded. (Otherwise, there's no way to differentiate between the # cases where `tokenlist` does and doesn't end with a newline.) yield line
[ "def", "split_lines", "(", "tokenlist", ")", ":", "line", "=", "[", "]", "for", "item", "in", "tokenlist", ":", "# For (token, text) tuples.", "if", "len", "(", "item", ")", "==", "2", ":", "token", ",", "string", "=", "item", "parts", "=", "string", ".", "split", "(", "'\\n'", ")", "for", "part", "in", "parts", "[", ":", "-", "1", "]", ":", "if", "part", ":", "line", ".", "append", "(", "(", "token", ",", "part", ")", ")", "yield", "line", "line", "=", "[", "]", "line", ".", "append", "(", "(", "token", ",", "parts", "[", "-", "1", "]", ")", ")", "# Note that parts[-1] can be empty, and that's fine. It happens", "# in the case of [(Token.SetCursorPosition, '')].", "# For (token, text, mouse_handler) tuples.", "# I know, partly copy/paste, but understandable and more efficient", "# than many tests.", "else", ":", "token", ",", "string", ",", "mouse_handler", "=", "item", "parts", "=", "string", ".", "split", "(", "'\\n'", ")", "for", "part", "in", "parts", "[", ":", "-", "1", "]", ":", "if", "part", ":", "line", ".", "append", "(", "(", "token", ",", "part", ",", "mouse_handler", ")", ")", "yield", "line", "line", "=", "[", "]", "line", ".", "append", "(", "(", "token", ",", "parts", "[", "-", "1", "]", ",", "mouse_handler", ")", ")", "# Always yield the last line, even when this is an empty line. This ensures", "# that when `tokenlist` ends with a newline character, an additional empty", "# line is yielded. (Otherwise, there's no way to differentiate between the", "# cases where `tokenlist` does and doesn't end with a newline.)", "yield", "line" ]
34.847826
21.456522
def modify(self, fd, eventmask): """ Change the bit-mask of events associated with a previously-registered descriptor. :param fd: The descriptor to modify. :param eventmask: New bit-mask of events that will be monitored. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page. """ if self._epfd < 0: _err_closed() ev = epoll_event() ev.events = eventmask ev.data.fd = fd epoll_ctl(self._epfd, EPOLL_CTL_MOD, fd, byref(ev))
[ "def", "modify", "(", "self", ",", "fd", ",", "eventmask", ")", ":", "if", "self", ".", "_epfd", "<", "0", ":", "_err_closed", "(", ")", "ev", "=", "epoll_event", "(", ")", "ev", ".", "events", "=", "eventmask", "ev", ".", "data", ".", "fd", "=", "fd", "epoll_ctl", "(", "self", ".", "_epfd", ",", "EPOLL_CTL_MOD", ",", "fd", ",", "byref", "(", "ev", ")", ")" ]
32.761905
15.52381
def ensure_topic(self): """Verify the pub/sub topic exists. Returns the topic qualified name. """ client = self.session.client('pubsub', 'v1', 'projects.topics') topic = self.get_topic_param() try: client.execute_command('get', {'topic': topic}) except HttpError as e: if e.resp.status != 404: raise else: return topic # bug in discovery doc.. apis say body must be empty but its required in the # discovery api for create. client.execute_command('create', {'name': topic, 'body': {}}) return topic
[ "def", "ensure_topic", "(", "self", ")", ":", "client", "=", "self", ".", "session", ".", "client", "(", "'pubsub'", ",", "'v1'", ",", "'projects.topics'", ")", "topic", "=", "self", ".", "get_topic_param", "(", ")", "try", ":", "client", ".", "execute_command", "(", "'get'", ",", "{", "'topic'", ":", "topic", "}", ")", "except", "HttpError", "as", "e", ":", "if", "e", ".", "resp", ".", "status", "!=", "404", ":", "raise", "else", ":", "return", "topic", "# bug in discovery doc.. apis say body must be empty but its required in the", "# discovery api for create.", "client", ".", "execute_command", "(", "'create'", ",", "{", "'name'", ":", "topic", ",", "'body'", ":", "{", "}", "}", ")", "return", "topic" ]
33.157895
18.526316
def Ctrl_W(self, delay=0): """Ctrl + W shortcut. """ self._delay(delay) self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1))) self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.W, 1))) self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
[ "def", "Ctrl_W", "(", "self", ",", "delay", "=", "0", ")", ":", "self", ".", "_delay", "(", "delay", ")", "self", ".", "add", "(", "Command", "(", "\"KeyDown\"", ",", "'KeyDown \"%s\", %s'", "%", "(", "BoardKey", ".", "Ctrl", ",", "1", ")", ")", ")", "self", ".", "add", "(", "Command", "(", "\"KeyPress\"", ",", "'KeyPress \"%s\", %s'", "%", "(", "BoardKey", ".", "W", ",", "1", ")", ")", ")", "self", ".", "add", "(", "Command", "(", "\"KeyUp\"", ",", "'KeyUp \"%s\", %s'", "%", "(", "BoardKey", ".", "Ctrl", ",", "1", ")", ")", ")" ]
45.428571
19.142857
def create_effect(self, label: str, name: str, *args, **kwargs) -> Effect: """ Create an effect instance adding it to the internal effects dictionary using the label as key. Args: label (str): The unique label for the effect instance name (str): Name or full python path to the effect class we want to instantiate args: Positional arguments to the effect initializer kwargs: Keyword arguments to the effect initializer Returns: The newly created Effect instance """ effect_cls = effects.find_effect_class(name) effect = effect_cls(*args, **kwargs) effect._label = label if label in self._effects: raise ValueError("An effect with label '{}' already exists".format(label)) self._effects[label] = effect return effect
[ "def", "create_effect", "(", "self", ",", "label", ":", "str", ",", "name", ":", "str", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "Effect", ":", "effect_cls", "=", "effects", ".", "find_effect_class", "(", "name", ")", "effect", "=", "effect_cls", "(", "*", "args", ",", "*", "*", "kwargs", ")", "effect", ".", "_label", "=", "label", "if", "label", "in", "self", ".", "_effects", ":", "raise", "ValueError", "(", "\"An effect with label '{}' already exists\"", ".", "format", "(", "label", ")", ")", "self", ".", "_effects", "[", "label", "]", "=", "effect", "return", "effect" ]
38.26087
25.043478
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''): """Override `log()` if bot is not initialized with a database connection. Do not call this method directly. """ today = datetime.utcnow() # TODO: Use self.locale['timezone'] for changing time date = today.strftime(self.locale['date_fmt']) time = today.strftime(self.locale['time_fmt']) datetime_s = today.strftime(self.locale['short_datetime_fmt']) datetime_l = today.strftime(self.locale['long_datetime_fmt']) if target == self.nickname and etype in ('privmsg', 'action'): target = source if etype in self.log_formats: file_path = os.path.expanduser(self.log_file.format(**locals())) file_dir = os.path.dirname(file_path) if not os.path.isdir(file_dir): os.makedirs(file_dir) line = self.log_formats[etype].format(**locals()) with open(file_path, 'a') as log: log.write(line+'\n')
[ "def", "_log_to_file", "(", "self", ",", "etype", ",", "source", ",", "msg", "=", "''", ",", "target", "=", "''", ",", "hostmask", "=", "''", ",", "params", "=", "''", ")", ":", "today", "=", "datetime", ".", "utcnow", "(", ")", "# TODO: Use self.locale['timezone'] for changing time", "date", "=", "today", ".", "strftime", "(", "self", ".", "locale", "[", "'date_fmt'", "]", ")", "time", "=", "today", ".", "strftime", "(", "self", ".", "locale", "[", "'time_fmt'", "]", ")", "datetime_s", "=", "today", ".", "strftime", "(", "self", ".", "locale", "[", "'short_datetime_fmt'", "]", ")", "datetime_l", "=", "today", ".", "strftime", "(", "self", ".", "locale", "[", "'long_datetime_fmt'", "]", ")", "if", "target", "==", "self", ".", "nickname", "and", "etype", "in", "(", "'privmsg'", ",", "'action'", ")", ":", "target", "=", "source", "if", "etype", "in", "self", ".", "log_formats", ":", "file_path", "=", "os", ".", "path", ".", "expanduser", "(", "self", ".", "log_file", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "file_dir", "=", "os", ".", "path", ".", "dirname", "(", "file_path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "file_dir", ")", ":", "os", ".", "makedirs", "(", "file_dir", ")", "line", "=", "self", ".", "log_formats", "[", "etype", "]", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "with", "open", "(", "file_path", ",", "'a'", ")", "as", "log", ":", "log", ".", "write", "(", "line", "+", "'\\n'", ")" ]
45.391304
17.73913
def _inherit_parent_kwargs(self, kwargs): """Extract any necessary attributes from parent serializer to propagate down to child serializer. """ if not self.parent or not self._is_dynamic: return kwargs if 'request_fields' not in kwargs: # If 'request_fields' isn't explicitly set, pull it from the # parent serializer. request_fields = self._get_request_fields_from_parent() if request_fields is None: # Default to 'id_only' for nested serializers. request_fields = True kwargs['request_fields'] = request_fields if self.embed and kwargs.get('request_fields') is True: # If 'embed' then make sure we fetch the full object. kwargs['request_fields'] = {} if hasattr(self.parent, 'sideloading'): kwargs['sideloading'] = self.parent.sideloading if hasattr(self.parent, 'debug'): kwargs['debug'] = self.parent.debug return kwargs
[ "def", "_inherit_parent_kwargs", "(", "self", ",", "kwargs", ")", ":", "if", "not", "self", ".", "parent", "or", "not", "self", ".", "_is_dynamic", ":", "return", "kwargs", "if", "'request_fields'", "not", "in", "kwargs", ":", "# If 'request_fields' isn't explicitly set, pull it from the", "# parent serializer.", "request_fields", "=", "self", ".", "_get_request_fields_from_parent", "(", ")", "if", "request_fields", "is", "None", ":", "# Default to 'id_only' for nested serializers.", "request_fields", "=", "True", "kwargs", "[", "'request_fields'", "]", "=", "request_fields", "if", "self", ".", "embed", "and", "kwargs", ".", "get", "(", "'request_fields'", ")", "is", "True", ":", "# If 'embed' then make sure we fetch the full object.", "kwargs", "[", "'request_fields'", "]", "=", "{", "}", "if", "hasattr", "(", "self", ".", "parent", ",", "'sideloading'", ")", ":", "kwargs", "[", "'sideloading'", "]", "=", "self", ".", "parent", ".", "sideloading", "if", "hasattr", "(", "self", ".", "parent", ",", "'debug'", ")", ":", "kwargs", "[", "'debug'", "]", "=", "self", ".", "parent", ".", "debug", "return", "kwargs" ]
36.678571
17.178571
async def listen_notifications(self, fallback_callback=None): """Listen for notifications from the device forever. Use :func:on_notification: to register what notifications to listen to. """ tasks = [] async def handle_notification(notification): if type(notification) not in self.callbacks: if not fallback_callback: _LOGGER.debug("No callbacks for %s", notification) # _LOGGER.debug("Existing callbacks for: %s" % self.callbacks) else: await fallback_callback(notification) return for cb in self.callbacks[type(notification)]: await cb(notification) for serv in self.services.values(): tasks.append( asyncio.ensure_future( serv.listen_all_notifications(handle_notification) ) ) try: print(await asyncio.gather(*tasks)) except Exception as ex: # TODO: do a slightly restricted exception handling? # Notify about disconnect await handle_notification(ConnectChange(connected=False, exception=ex)) return
[ "async", "def", "listen_notifications", "(", "self", ",", "fallback_callback", "=", "None", ")", ":", "tasks", "=", "[", "]", "async", "def", "handle_notification", "(", "notification", ")", ":", "if", "type", "(", "notification", ")", "not", "in", "self", ".", "callbacks", ":", "if", "not", "fallback_callback", ":", "_LOGGER", ".", "debug", "(", "\"No callbacks for %s\"", ",", "notification", ")", "# _LOGGER.debug(\"Existing callbacks for: %s\" % self.callbacks)", "else", ":", "await", "fallback_callback", "(", "notification", ")", "return", "for", "cb", "in", "self", ".", "callbacks", "[", "type", "(", "notification", ")", "]", ":", "await", "cb", "(", "notification", ")", "for", "serv", "in", "self", ".", "services", ".", "values", "(", ")", ":", "tasks", ".", "append", "(", "asyncio", ".", "ensure_future", "(", "serv", ".", "listen_all_notifications", "(", "handle_notification", ")", ")", ")", "try", ":", "print", "(", "await", "asyncio", ".", "gather", "(", "*", "tasks", ")", ")", "except", "Exception", "as", "ex", ":", "# TODO: do a slightly restricted exception handling?", "# Notify about disconnect", "await", "handle_notification", "(", "ConnectChange", "(", "connected", "=", "False", ",", "exception", "=", "ex", ")", ")", "return" ]
38.21875
20.375
def binary(self): """Returns the binary that builds the pex for this lambda.""" dependencies = self.dependencies if len(dependencies) != 1: raise TargetDefinitionException(self, 'An app must define exactly one binary ' 'dependency, have: {}'.format(dependencies)) binary = dependencies[0] if not isinstance(binary, PythonBinary): raise TargetDefinitionException(self, 'Expected binary dependency to be a python_binary ' 'target, found {}'.format(binary)) return binary
[ "def", "binary", "(", "self", ")", ":", "dependencies", "=", "self", ".", "dependencies", "if", "len", "(", "dependencies", ")", "!=", "1", ":", "raise", "TargetDefinitionException", "(", "self", ",", "'An app must define exactly one binary '", "'dependency, have: {}'", ".", "format", "(", "dependencies", ")", ")", "binary", "=", "dependencies", "[", "0", "]", "if", "not", "isinstance", "(", "binary", ",", "PythonBinary", ")", ":", "raise", "TargetDefinitionException", "(", "self", ",", "'Expected binary dependency to be a python_binary '", "'target, found {}'", ".", "format", "(", "binary", ")", ")", "return", "binary" ]
52.909091
23.727273
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'cell_id') and self.cell_id is not None: _dict['cell_id'] = self.cell_id if hasattr(self, 'location') and self.location is not None: _dict['location'] = self.location if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'row_index_begin') and self.row_index_begin is not None: _dict['row_index_begin'] = self.row_index_begin if hasattr(self, 'row_index_end') and self.row_index_end is not None: _dict['row_index_end'] = self.row_index_end if hasattr( self, 'column_index_begin') and self.column_index_begin is not None: _dict['column_index_begin'] = self.column_index_begin if hasattr(self, 'column_index_end') and self.column_index_end is not None: _dict['column_index_end'] = self.column_index_end return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'cell_id'", ")", "and", "self", ".", "cell_id", "is", "not", "None", ":", "_dict", "[", "'cell_id'", "]", "=", "self", ".", "cell_id", "if", "hasattr", "(", "self", ",", "'location'", ")", "and", "self", ".", "location", "is", "not", "None", ":", "_dict", "[", "'location'", "]", "=", "self", ".", "location", "if", "hasattr", "(", "self", ",", "'text'", ")", "and", "self", ".", "text", "is", "not", "None", ":", "_dict", "[", "'text'", "]", "=", "self", ".", "text", "if", "hasattr", "(", "self", ",", "'row_index_begin'", ")", "and", "self", ".", "row_index_begin", "is", "not", "None", ":", "_dict", "[", "'row_index_begin'", "]", "=", "self", ".", "row_index_begin", "if", "hasattr", "(", "self", ",", "'row_index_end'", ")", "and", "self", ".", "row_index_end", "is", "not", "None", ":", "_dict", "[", "'row_index_end'", "]", "=", "self", ".", "row_index_end", "if", "hasattr", "(", "self", ",", "'column_index_begin'", ")", "and", "self", ".", "column_index_begin", "is", "not", "None", ":", "_dict", "[", "'column_index_begin'", "]", "=", "self", ".", "column_index_begin", "if", "hasattr", "(", "self", ",", "'column_index_end'", ")", "and", "self", ".", "column_index_end", "is", "not", "None", ":", "_dict", "[", "'column_index_end'", "]", "=", "self", ".", "column_index_end", "return", "_dict" ]
48.681818
20.181818
def inherit_doc(cls): """ A decorator that makes a class inherit documentation from its parents. """ for name, func in vars(cls).items(): # only inherit docstring for public functions if name.startswith("_"): continue if not func.__doc__: for parent in cls.__bases__: parent_func = getattr(parent, name, None) if parent_func and getattr(parent_func, "__doc__", None): func.__doc__ = parent_func.__doc__ break return cls
[ "def", "inherit_doc", "(", "cls", ")", ":", "for", "name", ",", "func", "in", "vars", "(", "cls", ")", ".", "items", "(", ")", ":", "# only inherit docstring for public functions", "if", "name", ".", "startswith", "(", "\"_\"", ")", ":", "continue", "if", "not", "func", ".", "__doc__", ":", "for", "parent", "in", "cls", ".", "__bases__", ":", "parent_func", "=", "getattr", "(", "parent", ",", "name", ",", "None", ")", "if", "parent_func", "and", "getattr", "(", "parent_func", ",", "\"__doc__\"", ",", "None", ")", ":", "func", ".", "__doc__", "=", "parent_func", ".", "__doc__", "break", "return", "cls" ]
36.333333
14.066667
def read_core_state_eigen(self): """ Read the core state eigenenergies at each ionic step. Returns: A list of dict over the atom such as [{"AO":[core state eig]}]. The core state eigenenergie list for each AO is over all ionic step. Example: The core state eigenenergie of the 2s AO of the 6th atom of the structure at the last ionic step is [5]["2s"][-1] """ with zopen(self.filename, "rt") as foutcar: line = foutcar.readline() while line != "": line = foutcar.readline() if "NIONS =" in line: natom = int(line.split("NIONS =")[1]) cl = [defaultdict(list) for i in range(natom)] if "the core state eigen" in line: iat = -1 while line != "": line = foutcar.readline() # don't know number of lines to parse without knowing # specific species, so stop parsing when we reach # "E-fermi" instead if "E-fermi" in line: break data = line.split() # data will contain odd number of elements if it is # the start of a new entry, or even number of elements # if it continues the previous entry if len(data) % 2 == 1: iat += 1 # started parsing a new ion data = data[1:] # remove element with ion number for i in range(0, len(data), 2): cl[iat][data[i]].append(float(data[i + 1])) return cl
[ "def", "read_core_state_eigen", "(", "self", ")", ":", "with", "zopen", "(", "self", ".", "filename", ",", "\"rt\"", ")", "as", "foutcar", ":", "line", "=", "foutcar", ".", "readline", "(", ")", "while", "line", "!=", "\"\"", ":", "line", "=", "foutcar", ".", "readline", "(", ")", "if", "\"NIONS =\"", "in", "line", ":", "natom", "=", "int", "(", "line", ".", "split", "(", "\"NIONS =\"", ")", "[", "1", "]", ")", "cl", "=", "[", "defaultdict", "(", "list", ")", "for", "i", "in", "range", "(", "natom", ")", "]", "if", "\"the core state eigen\"", "in", "line", ":", "iat", "=", "-", "1", "while", "line", "!=", "\"\"", ":", "line", "=", "foutcar", ".", "readline", "(", ")", "# don't know number of lines to parse without knowing", "# specific species, so stop parsing when we reach", "# \"E-fermi\" instead", "if", "\"E-fermi\"", "in", "line", ":", "break", "data", "=", "line", ".", "split", "(", ")", "# data will contain odd number of elements if it is", "# the start of a new entry, or even number of elements", "# if it continues the previous entry", "if", "len", "(", "data", ")", "%", "2", "==", "1", ":", "iat", "+=", "1", "# started parsing a new ion", "data", "=", "data", "[", "1", ":", "]", "# remove element with ion number", "for", "i", "in", "range", "(", "0", ",", "len", "(", "data", ")", ",", "2", ")", ":", "cl", "[", "iat", "]", "[", "data", "[", "i", "]", "]", ".", "append", "(", "float", "(", "data", "[", "i", "+", "1", "]", ")", ")", "return", "cl" ]
44.75
19.25
def list_subadressen_by_huisnummer(self, huisnummer): ''' List all `subadressen` for a :class:`Huisnummer`. :param huisnummer: The :class:`Huisnummer` for which the \ `subadressen` are wanted. OR A huisnummer id. :rtype: A :class:`list` of :class:`Gebouw` ''' try: id = huisnummer.id except AttributeError: id = huisnummer def creator(): res = crab_gateway_request( self.client, 'ListSubadressenWithStatusByHuisnummerId', id ) try: return [ Subadres( r.SubadresId, r.Subadres, r.StatusSubadres )for r in res.SubadresWithStatusItem ] except AttributeError: return [] if self.caches['short'].is_configured: key = 'ListSubadressenWithStatusByHuisnummerId#%s' % (id) subadressen = self.caches['short'].get_or_create(key, creator) else: subadressen = creator() for s in subadressen: s.set_gateway(self) return subadressen
[ "def", "list_subadressen_by_huisnummer", "(", "self", ",", "huisnummer", ")", ":", "try", ":", "id", "=", "huisnummer", ".", "id", "except", "AttributeError", ":", "id", "=", "huisnummer", "def", "creator", "(", ")", ":", "res", "=", "crab_gateway_request", "(", "self", ".", "client", ",", "'ListSubadressenWithStatusByHuisnummerId'", ",", "id", ")", "try", ":", "return", "[", "Subadres", "(", "r", ".", "SubadresId", ",", "r", ".", "Subadres", ",", "r", ".", "StatusSubadres", ")", "for", "r", "in", "res", ".", "SubadresWithStatusItem", "]", "except", "AttributeError", ":", "return", "[", "]", "if", "self", ".", "caches", "[", "'short'", "]", ".", "is_configured", ":", "key", "=", "'ListSubadressenWithStatusByHuisnummerId#%s'", "%", "(", "id", ")", "subadressen", "=", "self", ".", "caches", "[", "'short'", "]", ".", "get_or_create", "(", "key", ",", "creator", ")", "else", ":", "subadressen", "=", "creator", "(", ")", "for", "s", "in", "subadressen", ":", "s", ".", "set_gateway", "(", "self", ")", "return", "subadressen" ]
34.484848
17.636364
def _inclusiveNamespacePrefixes(node, context, unsuppressedPrefixes): '''http://www.w3.org/TR/xml-exc-c14n/ InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that are handled in the manner described by the Canonical XML Recommendation''' inclusive = [] if node.prefix: usedPrefixes = ['xmlns:%s' %node.prefix] else: usedPrefixes = ['xmlns'] for a in _attrs(node): if a.nodeName.startswith('xmlns') or not a.prefix: continue usedPrefixes.append('xmlns:%s' %a.prefix) unused_namespace_dict = {} for attr in context: n = attr.nodeName if n in unsuppressedPrefixes: inclusive.append(attr) elif n.startswith('xmlns:') and n[6:] in unsuppressedPrefixes: inclusive.append(attr) elif n.startswith('xmlns') and n[5:] in unsuppressedPrefixes: inclusive.append(attr) elif attr.nodeName in usedPrefixes: inclusive.append(attr) elif n.startswith('xmlns:'): unused_namespace_dict[n] = attr.value return inclusive, unused_namespace_dict
[ "def", "_inclusiveNamespacePrefixes", "(", "node", ",", "context", ",", "unsuppressedPrefixes", ")", ":", "inclusive", "=", "[", "]", "if", "node", ".", "prefix", ":", "usedPrefixes", "=", "[", "'xmlns:%s'", "%", "node", ".", "prefix", "]", "else", ":", "usedPrefixes", "=", "[", "'xmlns'", "]", "for", "a", "in", "_attrs", "(", "node", ")", ":", "if", "a", ".", "nodeName", ".", "startswith", "(", "'xmlns'", ")", "or", "not", "a", ".", "prefix", ":", "continue", "usedPrefixes", ".", "append", "(", "'xmlns:%s'", "%", "a", ".", "prefix", ")", "unused_namespace_dict", "=", "{", "}", "for", "attr", "in", "context", ":", "n", "=", "attr", ".", "nodeName", "if", "n", "in", "unsuppressedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "n", ".", "startswith", "(", "'xmlns:'", ")", "and", "n", "[", "6", ":", "]", "in", "unsuppressedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "n", ".", "startswith", "(", "'xmlns'", ")", "and", "n", "[", "5", ":", "]", "in", "unsuppressedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "attr", ".", "nodeName", "in", "usedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "n", ".", "startswith", "(", "'xmlns:'", ")", ":", "unused_namespace_dict", "[", "n", "]", "=", "attr", ".", "value", "return", "inclusive", ",", "unused_namespace_dict" ]
37.965517
17.827586
def GET_AUTH(self, courseid, f=None, t=None): # pylint: disable=arguments-differ """ GET request """ course, __ = self.get_course_and_check_rights(courseid) tasks = course.get_tasks() now = datetime.now().replace(minute=0, second=0, microsecond=0) error = None if f == None and t == None: daterange = [now - timedelta(days=14), now] else: try: daterange = [datetime.strptime(x[0:16], "%Y-%m-%dT%H:%M") for x in (f,t)] except: error = "Invalid dates" daterange = [now - timedelta(days=14), now] stats_tasks = self._tasks_stats(courseid, tasks, daterange) stats_users = self._users_stats(courseid, daterange) stats_graph = self._graph_stats(courseid, daterange) return self.template_helper.get_renderer().course_admin.stats(course, stats_graph, stats_tasks, stats_users, daterange, error)
[ "def", "GET_AUTH", "(", "self", ",", "courseid", ",", "f", "=", "None", ",", "t", "=", "None", ")", ":", "# pylint: disable=arguments-differ", "course", ",", "__", "=", "self", ".", "get_course_and_check_rights", "(", "courseid", ")", "tasks", "=", "course", ".", "get_tasks", "(", ")", "now", "=", "datetime", ".", "now", "(", ")", ".", "replace", "(", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "error", "=", "None", "if", "f", "==", "None", "and", "t", "==", "None", ":", "daterange", "=", "[", "now", "-", "timedelta", "(", "days", "=", "14", ")", ",", "now", "]", "else", ":", "try", ":", "daterange", "=", "[", "datetime", ".", "strptime", "(", "x", "[", "0", ":", "16", "]", ",", "\"%Y-%m-%dT%H:%M\"", ")", "for", "x", "in", "(", "f", ",", "t", ")", "]", "except", ":", "error", "=", "\"Invalid dates\"", "daterange", "=", "[", "now", "-", "timedelta", "(", "days", "=", "14", ")", ",", "now", "]", "stats_tasks", "=", "self", ".", "_tasks_stats", "(", "courseid", ",", "tasks", ",", "daterange", ")", "stats_users", "=", "self", ".", "_users_stats", "(", "courseid", ",", "daterange", ")", "stats_graph", "=", "self", ".", "_graph_stats", "(", "courseid", ",", "daterange", ")", "return", "self", ".", "template_helper", ".", "get_renderer", "(", ")", ".", "course_admin", ".", "stats", "(", "course", ",", "stats_graph", ",", "stats_tasks", ",", "stats_users", ",", "daterange", ",", "error", ")" ]
44.857143
26.809524
def end_date(self): """账户的交易结束日期(只在回测中使用) Raises: RuntimeWarning -- [description] Returns: [type] -- [description] """ if self.start_==None: if len(self.time_index_max) > 0: return str(max(self.time_index_max))[0:10] else: print( RuntimeWarning( 'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE' ) ) else: return self.end_
[ "def", "end_date", "(", "self", ")", ":", "if", "self", ".", "start_", "==", "None", ":", "if", "len", "(", "self", ".", "time_index_max", ")", ">", "0", ":", "return", "str", "(", "max", "(", "self", ".", "time_index_max", ")", ")", "[", "0", ":", "10", "]", "else", ":", "print", "(", "RuntimeWarning", "(", "'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'", ")", ")", "else", ":", "return", "self", ".", "end_" ]
26.2
17.5
def get_music_service_information(self, search_type, search, start=0, max_items=100): """Search for music service information items. :param search_type: The type of search to perform, possible values are: 'artists', 'albums', 'tracks' and 'playlists' :type search_type: str :param search: The search string to use :type search: str :param start: The starting index of the returned items :type start: int :param max_items: The maximum number of returned items :type max_items: int Note: Un-intuitively the playlist search returns MSAlbumList items. See note in class doc string for details. """ # Check input if search_type not in ['artists', 'albums', 'tracks', 'playlists']: message = 'The requested search {} is not valid'\ .format(search_type) raise ValueError(message) # Transform search: tracks -> tracksearch search_type = '{}earch'.format(search_type) parent_id = SEARCH_PREFIX.format(search_type=search_type, search=search) # Perform search body = self._search_body(search_type, search, start, max_items) headers = _get_header('search') response = _post(self._url, headers, body, **self._http_vars) self._check_for_errors(response) result_dom = XML.fromstring(response.text.encode('utf-8')) # Parse results search_result = result_dom.find('.//' + _ns_tag('', 'searchResult')) out = {'item_list': []} for element in ['index', 'count', 'total']: out[element] = search_result.findtext(_ns_tag('', element)) if search_type == 'tracksearch': item_name = 'mediaMetadata' else: item_name = 'mediaCollection' for element in search_result.findall(_ns_tag('', item_name)): out['item_list'].append(get_ms_item(element, self, parent_id)) return out
[ "def", "get_music_service_information", "(", "self", ",", "search_type", ",", "search", ",", "start", "=", "0", ",", "max_items", "=", "100", ")", ":", "# Check input", "if", "search_type", "not", "in", "[", "'artists'", ",", "'albums'", ",", "'tracks'", ",", "'playlists'", "]", ":", "message", "=", "'The requested search {} is not valid'", ".", "format", "(", "search_type", ")", "raise", "ValueError", "(", "message", ")", "# Transform search: tracks -> tracksearch", "search_type", "=", "'{}earch'", ".", "format", "(", "search_type", ")", "parent_id", "=", "SEARCH_PREFIX", ".", "format", "(", "search_type", "=", "search_type", ",", "search", "=", "search", ")", "# Perform search", "body", "=", "self", ".", "_search_body", "(", "search_type", ",", "search", ",", "start", ",", "max_items", ")", "headers", "=", "_get_header", "(", "'search'", ")", "response", "=", "_post", "(", "self", ".", "_url", ",", "headers", ",", "body", ",", "*", "*", "self", ".", "_http_vars", ")", "self", ".", "_check_for_errors", "(", "response", ")", "result_dom", "=", "XML", ".", "fromstring", "(", "response", ".", "text", ".", "encode", "(", "'utf-8'", ")", ")", "# Parse results", "search_result", "=", "result_dom", ".", "find", "(", "'.//'", "+", "_ns_tag", "(", "''", ",", "'searchResult'", ")", ")", "out", "=", "{", "'item_list'", ":", "[", "]", "}", "for", "element", "in", "[", "'index'", ",", "'count'", ",", "'total'", "]", ":", "out", "[", "element", "]", "=", "search_result", ".", "findtext", "(", "_ns_tag", "(", "''", ",", "element", ")", ")", "if", "search_type", "==", "'tracksearch'", ":", "item_name", "=", "'mediaMetadata'", "else", ":", "item_name", "=", "'mediaCollection'", "for", "element", "in", "search_result", ".", "findall", "(", "_ns_tag", "(", "''", ",", "item_name", ")", ")", ":", "out", "[", "'item_list'", "]", ".", "append", "(", "get_ms_item", "(", "element", ",", "self", ",", "parent_id", ")", ")", "return", "out" ]
41.734694
19.959184
def exchange(self, send_data, timeout): """Exchange data with an activated target (*send_data* is a command frame) or as an activated target (*send_data* is a response frame). Returns a target response frame (if data is send to an activated target) or a next command frame (if data is send from an activated target). Returns None if the communication link broke during exchange (if data is sent as a target). The timeout is the number of seconds to wait for data to return, if the timeout expires an nfc.clf.TimeoutException is raised. Other nfc.clf.CommunicationError exceptions may be raised if an error is detected during communication. """ with self.lock: if self.device is None: raise IOError(errno.ENODEV, os.strerror(errno.ENODEV)) log.debug(">>> %s timeout=%s", print_data(send_data), str(timeout)) if isinstance(self.target, RemoteTarget): exchange = self.device.send_cmd_recv_rsp elif isinstance(self.target, LocalTarget): exchange = self.device.send_rsp_recv_cmd else: log.error("no target for data exchange") return None send_time = time.time() rcvd_data = exchange(self.target, send_data, timeout) recv_time = time.time() - send_time log.debug("<<< %s %.3fs", print_data(rcvd_data), recv_time) return rcvd_data
[ "def", "exchange", "(", "self", ",", "send_data", ",", "timeout", ")", ":", "with", "self", ".", "lock", ":", "if", "self", ".", "device", "is", "None", ":", "raise", "IOError", "(", "errno", ".", "ENODEV", ",", "os", ".", "strerror", "(", "errno", ".", "ENODEV", ")", ")", "log", ".", "debug", "(", "\">>> %s timeout=%s\"", ",", "print_data", "(", "send_data", ")", ",", "str", "(", "timeout", ")", ")", "if", "isinstance", "(", "self", ".", "target", ",", "RemoteTarget", ")", ":", "exchange", "=", "self", ".", "device", ".", "send_cmd_recv_rsp", "elif", "isinstance", "(", "self", ".", "target", ",", "LocalTarget", ")", ":", "exchange", "=", "self", ".", "device", ".", "send_rsp_recv_cmd", "else", ":", "log", ".", "error", "(", "\"no target for data exchange\"", ")", "return", "None", "send_time", "=", "time", ".", "time", "(", ")", "rcvd_data", "=", "exchange", "(", "self", ".", "target", ",", "send_data", ",", "timeout", ")", "recv_time", "=", "time", ".", "time", "(", ")", "-", "send_time", "log", ".", "debug", "(", "\"<<< %s %.3fs\"", ",", "print_data", "(", "rcvd_data", ")", ",", "recv_time", ")", "return", "rcvd_data" ]
45.212121
21.757576
def decrop_image(cropped_image, full_image): """ The inverse function for `ants.crop_image` ANTsR function: `decropImage` Arguments --------- cropped_image : ANTsImage cropped image full_image : ANTsImage image in which the cropped image will be put back Returns ------- ANTsImage Example ------- >>> import ants >>> fi = ants.image_read(ants.get_ants_data('r16')) >>> mask = ants.get_mask(fi) >>> cropped = ants.crop_image(fi, mask, 1) >>> cropped = ants.smooth_image(cropped, 1) >>> decropped = ants.decrop_image(cropped, fi) """ inpixeltype = 'float' if cropped_image.pixeltype != 'float': inpixeltype= cropped_image.pixeltype cropped_image = cropped_image.clone('float') if full_image.pixeltype != 'float': full_image = full_image.clone('float') libfn = utils.get_lib_fn('cropImageF%i' % cropped_image.dimension) itkimage = libfn(cropped_image.pointer, full_image.pointer, 1, 1, [], []) ants_image = iio.ANTsImage(pixeltype='float', dimension=cropped_image.dimension, components=cropped_image.components, pointer=itkimage) if inpixeltype != 'float': ants_image = ants_image.clone(inpixeltype) return ants_image
[ "def", "decrop_image", "(", "cropped_image", ",", "full_image", ")", ":", "inpixeltype", "=", "'float'", "if", "cropped_image", ".", "pixeltype", "!=", "'float'", ":", "inpixeltype", "=", "cropped_image", ".", "pixeltype", "cropped_image", "=", "cropped_image", ".", "clone", "(", "'float'", ")", "if", "full_image", ".", "pixeltype", "!=", "'float'", ":", "full_image", "=", "full_image", ".", "clone", "(", "'float'", ")", "libfn", "=", "utils", ".", "get_lib_fn", "(", "'cropImageF%i'", "%", "cropped_image", ".", "dimension", ")", "itkimage", "=", "libfn", "(", "cropped_image", ".", "pointer", ",", "full_image", ".", "pointer", ",", "1", ",", "1", ",", "[", "]", ",", "[", "]", ")", "ants_image", "=", "iio", ".", "ANTsImage", "(", "pixeltype", "=", "'float'", ",", "dimension", "=", "cropped_image", ".", "dimension", ",", "components", "=", "cropped_image", ".", "components", ",", "pointer", "=", "itkimage", ")", "if", "inpixeltype", "!=", "'float'", ":", "ants_image", "=", "ants_image", ".", "clone", "(", "inpixeltype", ")", "return", "ants_image" ]
30.190476
20.047619
def log(self, message, level=None): """Log message, optionally providing a logging level It is compatible with StreamParse API. :type message: str :param message: the log message to send :type level: str :param level: the logging level, one of: trace (=debug), debug, info, warn or error (default: info) """ if level is None: _log_level = logging.INFO else: if level == "trace" or level == "debug": _log_level = logging.DEBUG elif level == "info": _log_level = logging.INFO elif level == "warn": _log_level = logging.WARNING elif level == "error": _log_level = logging.ERROR else: raise ValueError("%s is not supported as logging level" % str(level)) self.logger.log(_log_level, message)
[ "def", "log", "(", "self", ",", "message", ",", "level", "=", "None", ")", ":", "if", "level", "is", "None", ":", "_log_level", "=", "logging", ".", "INFO", "else", ":", "if", "level", "==", "\"trace\"", "or", "level", "==", "\"debug\"", ":", "_log_level", "=", "logging", ".", "DEBUG", "elif", "level", "==", "\"info\"", ":", "_log_level", "=", "logging", ".", "INFO", "elif", "level", "==", "\"warn\"", ":", "_log_level", "=", "logging", ".", "WARNING", "elif", "level", "==", "\"error\"", ":", "_log_level", "=", "logging", ".", "ERROR", "else", ":", "raise", "ValueError", "(", "\"%s is not supported as logging level\"", "%", "str", "(", "level", ")", ")", "self", ".", "logger", ".", "log", "(", "_log_level", ",", "message", ")" ]
30.730769
15.692308
def _offset_from_spaces(dom, ran): """Return index offset corresponding to given spaces.""" affected = np.not_equal(dom.shape, ran.shape) diff_l = np.abs(ran.grid.min() - dom.grid.min()) offset_float = diff_l / dom.cell_sides offset = np.around(offset_float).astype(int) for i in range(dom.ndim): if affected[i] and not np.isclose(offset[i], offset_float[i]): raise ValueError('in axis {}: range is shifted relative to domain ' 'by a non-multiple {} of cell_sides' ''.format(i, offset_float[i] - offset[i])) offset[~affected] = 0 return tuple(offset)
[ "def", "_offset_from_spaces", "(", "dom", ",", "ran", ")", ":", "affected", "=", "np", ".", "not_equal", "(", "dom", ".", "shape", ",", "ran", ".", "shape", ")", "diff_l", "=", "np", ".", "abs", "(", "ran", ".", "grid", ".", "min", "(", ")", "-", "dom", ".", "grid", ".", "min", "(", ")", ")", "offset_float", "=", "diff_l", "/", "dom", ".", "cell_sides", "offset", "=", "np", ".", "around", "(", "offset_float", ")", ".", "astype", "(", "int", ")", "for", "i", "in", "range", "(", "dom", ".", "ndim", ")", ":", "if", "affected", "[", "i", "]", "and", "not", "np", ".", "isclose", "(", "offset", "[", "i", "]", ",", "offset_float", "[", "i", "]", ")", ":", "raise", "ValueError", "(", "'in axis {}: range is shifted relative to domain '", "'by a non-multiple {} of cell_sides'", "''", ".", "format", "(", "i", ",", "offset_float", "[", "i", "]", "-", "offset", "[", "i", "]", ")", ")", "offset", "[", "~", "affected", "]", "=", "0", "return", "tuple", "(", "offset", ")" ]
49.846154
15.692308
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter): """ Draw the fold indicator/trigger (arrow). :param top: Top position :param mouse_over: Whether the mouse is over the indicator :param collapsed: Whether the trigger is collapsed or not. :param painter: QPainter """ rect = QRect(0, top, self.sizeHint().width(), self.sizeHint().height()) if self._native_icons: opt = QStyleOptionViewItem() opt.rect = rect opt.state = (QStyle.State_Active | QStyle.State_Item | QStyle.State_Children) if not collapsed: opt.state |= QStyle.State_Open if mouse_over: opt.state |= (QStyle.State_MouseOver | QStyle.State_Enabled | QStyle.State_Selected) opt.palette.setBrush(QPalette.Window, self.palette().highlight()) opt.rect.translate(-2, 0) self.style().drawPrimitive(QStyle.PE_IndicatorBranch, opt, painter, self) else: index = 0 if not collapsed: index = 2 if mouse_over: index += 1 ima.icon(self._indicators_icons[index]).paint(painter, rect)
[ "def", "_draw_fold_indicator", "(", "self", ",", "top", ",", "mouse_over", ",", "collapsed", ",", "painter", ")", ":", "rect", "=", "QRect", "(", "0", ",", "top", ",", "self", ".", "sizeHint", "(", ")", ".", "width", "(", ")", ",", "self", ".", "sizeHint", "(", ")", ".", "height", "(", ")", ")", "if", "self", ".", "_native_icons", ":", "opt", "=", "QStyleOptionViewItem", "(", ")", "opt", ".", "rect", "=", "rect", "opt", ".", "state", "=", "(", "QStyle", ".", "State_Active", "|", "QStyle", ".", "State_Item", "|", "QStyle", ".", "State_Children", ")", "if", "not", "collapsed", ":", "opt", ".", "state", "|=", "QStyle", ".", "State_Open", "if", "mouse_over", ":", "opt", ".", "state", "|=", "(", "QStyle", ".", "State_MouseOver", "|", "QStyle", ".", "State_Enabled", "|", "QStyle", ".", "State_Selected", ")", "opt", ".", "palette", ".", "setBrush", "(", "QPalette", ".", "Window", ",", "self", ".", "palette", "(", ")", ".", "highlight", "(", ")", ")", "opt", ".", "rect", ".", "translate", "(", "-", "2", ",", "0", ")", "self", ".", "style", "(", ")", ".", "drawPrimitive", "(", "QStyle", ".", "PE_IndicatorBranch", ",", "opt", ",", "painter", ",", "self", ")", "else", ":", "index", "=", "0", "if", "not", "collapsed", ":", "index", "=", "2", "if", "mouse_over", ":", "index", "+=", "1", "ima", ".", "icon", "(", "self", ".", "_indicators_icons", "[", "index", "]", ")", ".", "paint", "(", "painter", ",", "rect", ")" ]
39.5
14.833333
def filter_report(self, filt=None, analytes=None, savedir=None, nbin=5): """ Visualise effect of data filters. Parameters ---------- filt : str Exact or partial name of filter to plot. Supports partial matching. i.e. if 'cluster' is specified, all filters with 'cluster' in the name will be plotted. Defaults to all filters. analyte : str Name of analyte to plot. save : str file path to save the plot Returns ------- (fig, axes) """ return plot.filter_report(self, filt, analytes, savedir, nbin)
[ "def", "filter_report", "(", "self", ",", "filt", "=", "None", ",", "analytes", "=", "None", ",", "savedir", "=", "None", ",", "nbin", "=", "5", ")", ":", "return", "plot", ".", "filter_report", "(", "self", ",", "filt", ",", "analytes", ",", "savedir", ",", "nbin", ")" ]
30.761905
19.047619
def which_bin(exes): ''' Decorator wrapper for salt.utils.path.which_bin ''' def wrapper(function): def wrapped(*args, **kwargs): if salt.utils.path.which_bin(exes) is None: raise CommandNotFoundError( 'None of provided binaries({0}) was not found ' 'in $PATH.'.format( ['\'{0}\''.format(exe) for exe in exes] ) ) return function(*args, **kwargs) return identical_signature_wrapper(function, wrapped) return wrapper
[ "def", "which_bin", "(", "exes", ")", ":", "def", "wrapper", "(", "function", ")", ":", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "salt", ".", "utils", ".", "path", ".", "which_bin", "(", "exes", ")", "is", "None", ":", "raise", "CommandNotFoundError", "(", "'None of provided binaries({0}) was not found '", "'in $PATH.'", ".", "format", "(", "[", "'\\'{0}\\''", ".", "format", "(", "exe", ")", "for", "exe", "in", "exes", "]", ")", ")", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "identical_signature_wrapper", "(", "function", ",", "wrapped", ")", "return", "wrapper" ]
36
17
def parse_hashes(self): # type: () -> None """ Parse hashes from *self.line* and set them on the current object. :returns: Nothing :rtype: None """ line, hashes = self.split_hashes(self.line) self.hashes = hashes self.line = line
[ "def", "parse_hashes", "(", "self", ")", ":", "# type: () -> None", "line", ",", "hashes", "=", "self", ".", "split_hashes", "(", "self", ".", "line", ")", "self", ".", "hashes", "=", "hashes", "self", ".", "line", "=", "line" ]
26.545455
16.181818
def replace(self, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=None): """ Returns a new datetime.datetime or asn1crypto.util.extended_datetime object with the specified components replaced :return: A datetime.datetime or asn1crypto.util.extended_datetime object """ if year is None: year = self.year if month is None: month = self.month if day is None: day = self.day if hour is None: hour = self.hour if minute is None: minute = self.minute if second is None: second = self.second if microsecond is None: microsecond = self.microsecond if tzinfo is None: tzinfo = self.tzinfo if year > 0: cls = datetime else: cls = extended_datetime return cls( year, month, day, hour, minute, second, microsecond, tzinfo )
[ "def", "replace", "(", "self", ",", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "hour", "=", "None", ",", "minute", "=", "None", ",", "second", "=", "None", ",", "microsecond", "=", "None", ",", "tzinfo", "=", "None", ")", ":", "if", "year", "is", "None", ":", "year", "=", "self", ".", "year", "if", "month", "is", "None", ":", "month", "=", "self", ".", "month", "if", "day", "is", "None", ":", "day", "=", "self", ".", "day", "if", "hour", "is", "None", ":", "hour", "=", "self", ".", "hour", "if", "minute", "is", "None", ":", "minute", "=", "self", ".", "minute", "if", "second", "is", "None", ":", "second", "=", "self", ".", "second", "if", "microsecond", "is", "None", ":", "microsecond", "=", "self", ".", "microsecond", "if", "tzinfo", "is", "None", ":", "tzinfo", "=", "self", ".", "tzinfo", "if", "year", ">", "0", ":", "cls", "=", "datetime", "else", ":", "cls", "=", "extended_datetime", "return", "cls", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ",", "microsecond", ",", "tzinfo", ")" ]
26.214286
19.071429
def decode(vol, filename, content): """Decode content according to settings in a cloudvolume instance.""" bbox = Bbox.from_filename(filename) content_len = len(content) if content is not None else 0 if not content: if vol.fill_missing: content = '' else: raise EmptyVolumeException(filename) shape = list(bbox.size3()) + [ vol.num_channels ] try: return chunks.decode( content, encoding=vol.encoding, shape=shape, dtype=vol.dtype, block_size=vol.compressed_segmentation_block_size, ) except Exception as error: print(red('File Read Error: {} bytes, {}, {}, errors: {}'.format( content_len, bbox, filename, error))) raise
[ "def", "decode", "(", "vol", ",", "filename", ",", "content", ")", ":", "bbox", "=", "Bbox", ".", "from_filename", "(", "filename", ")", "content_len", "=", "len", "(", "content", ")", "if", "content", "is", "not", "None", "else", "0", "if", "not", "content", ":", "if", "vol", ".", "fill_missing", ":", "content", "=", "''", "else", ":", "raise", "EmptyVolumeException", "(", "filename", ")", "shape", "=", "list", "(", "bbox", ".", "size3", "(", ")", ")", "+", "[", "vol", ".", "num_channels", "]", "try", ":", "return", "chunks", ".", "decode", "(", "content", ",", "encoding", "=", "vol", ".", "encoding", ",", "shape", "=", "shape", ",", "dtype", "=", "vol", ".", "dtype", ",", "block_size", "=", "vol", ".", "compressed_segmentation_block_size", ",", ")", "except", "Exception", "as", "error", ":", "print", "(", "red", "(", "'File Read Error: {} bytes, {}, {}, errors: {}'", ".", "format", "(", "content_len", ",", "bbox", ",", "filename", ",", "error", ")", ")", ")", "raise" ]
27.64
20.08