text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def from_missing_values(cls, is_leap_year=False): """Initalize an EPW object with all data missing or empty. Note that this classmethod is intended for workflows where one plans to set all of the data within the EPW object. The EPW file written out from the use of this method is not simulate-abe or useful since all hourly data slots just possess the missing value for that data type. To obtain a EPW that is simulate-able in EnergyPlus, one must at least set the following properties: location dry_bulb_temperature dew_point_temperature relative_humidity atmospheric_station_pressure direct_normal_radiation diffuse_horizontal_radiation wind_direction wind_speed total_sky_cover opaque_sky_cover or horizontal_infrared_radiation_intensity Args: is_leap_year: A boolean to set whether the EPW object is for a leap year. Usage: from ladybug.epw import EPW from ladybug.location import Location epw = EPW.from_missing_values() epw.location = Location('Denver Golden','CO','USA',39.74,-105.18,-7.0,1829.0) epw.dry_bulb_temperature.values = [20] * 8760 """ # Initialize the class with all data missing epw_obj = cls(None) epw_obj._is_leap_year = is_leap_year epw_obj._location = Location() # create an annual analysis period analysis_period = AnalysisPeriod(is_leap_year=is_leap_year) # create headers and an empty list for each field in epw file headers = [] for field_number in xrange(epw_obj._num_of_fields): field = EPWFields.field_by_number(field_number) header = Header(data_type=field.name, unit=field.unit, analysis_period=analysis_period) headers.append(header) epw_obj._data.append([]) # fill in missing datetime values and uncertainty flags. 
uncertainty = '?9?9?9?9E0?9?9?9?9?9?9?9?9?9?9?9?9?9?9?9*9*9?9?9?9' for dt in analysis_period.datetimes: hr = dt.hour if dt.hour != 0 else 24 epw_obj._data[0].append(dt.year) epw_obj._data[1].append(dt.month) epw_obj._data[2].append(dt.day) epw_obj._data[3].append(hr) epw_obj._data[4].append(0) epw_obj._data[5].append(uncertainty) # generate missing hourly data calc_length = len(analysis_period.datetimes) for field_number in xrange(6, epw_obj._num_of_fields): field = EPWFields.field_by_number(field_number) mis_val = field.missing if field.missing is not None else 0 for dt in xrange(calc_length): epw_obj._data[field_number].append(mis_val) # finally, build the data collection objects from the headers and data for i in xrange(epw_obj._num_of_fields): epw_obj._data[i] = HourlyContinuousCollection(headers[i], epw_obj._data[i]) epw_obj._is_header_loaded = True epw_obj._is_data_loaded = True return epw_obj
[ "def", "from_missing_values", "(", "cls", ",", "is_leap_year", "=", "False", ")", ":", "# Initialize the class with all data missing", "epw_obj", "=", "cls", "(", "None", ")", "epw_obj", ".", "_is_leap_year", "=", "is_leap_year", "epw_obj", ".", "_location", "=", ...
42.797297
18.513514
def loadTFRecords(sc, input_dir, binary_features=[]): """Load TFRecords from disk into a Spark DataFrame. This will attempt to automatically convert the tf.train.Example features into Spark DataFrame columns of equivalent types. Note: TensorFlow represents both strings and binary types as tf.train.BytesList, and we need to disambiguate these types for Spark DataFrames DTypes (StringType and BinaryType), so we require a "hint" from the caller in the ``binary_features`` argument. Args: :sc: SparkContext :input_dir: location of TFRecords on disk. :binary_features: a list of tf.train.Example features which are expected to be binary/bytearrays. Returns: A Spark DataFrame mirroring the tf.train.Example schema. """ import tensorflow as tf tfr_rdd = sc.newAPIHadoopFile(input_dir, "org.tensorflow.hadoop.io.TFRecordFileInputFormat", keyClass="org.apache.hadoop.io.BytesWritable", valueClass="org.apache.hadoop.io.NullWritable") # infer Spark SQL types from tf.Example record = tfr_rdd.take(1)[0] example = tf.train.Example() example.ParseFromString(bytes(record[0])) schema = infer_schema(example, binary_features) # convert serialized protobuf to tf.Example to Row example_rdd = tfr_rdd.mapPartitions(lambda x: fromTFExample(x, binary_features)) # create a Spark DataFrame from RDD[Row] df = example_rdd.toDF(schema) # save reference of this dataframe loadedDF[df] = input_dir return df
[ "def", "loadTFRecords", "(", "sc", ",", "input_dir", ",", "binary_features", "=", "[", "]", ")", ":", "import", "tensorflow", "as", "tf", "tfr_rdd", "=", "sc", ".", "newAPIHadoopFile", "(", "input_dir", ",", "\"org.tensorflow.hadoop.io.TFRecordFileInputFormat\"", ...
39.078947
27.763158
def dump(self,indent='',depth=0): """Diagnostic method for listing out the contents of a C{ParseResults}. Accepts an optional C{indent} argument so that this string can be embedded in a nested display of other data.""" out = [] out.append( indent+_ustr(self.asList()) ) keys = self.items() keys.sort() for k,v in keys: if out: out.append('\n') out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) if isinstance(v,ParseResults): if v.keys(): out.append( v.dump(indent,depth+1) ) else: out.append(_ustr(v)) else: out.append(_ustr(v)) return "".join(out)
[ "def", "dump", "(", "self", ",", "indent", "=", "''", ",", "depth", "=", "0", ")", ":", "out", "=", "[", "]", "out", ".", "append", "(", "indent", "+", "_ustr", "(", "self", ".", "asList", "(", ")", ")", ")", "keys", "=", "self", ".", "items"...
39.1
13.65
def rotation_filename(self, default_name: str) -> str: """ Modify the filename of a log file when rotating. This is provided so that a custom filename can be provided. :param default_name: The default name for the log file. """ if self.namer is None: return default_name return self.namer(default_name)
[ "def", "rotation_filename", "(", "self", ",", "default_name", ":", "str", ")", "->", "str", ":", "if", "self", ".", "namer", "is", "None", ":", "return", "default_name", "return", "self", ".", "namer", "(", "default_name", ")" ]
30.166667
18.333333
def get_all(self, fields=list(), limit=None, order_by=list(), offset=None): """DEPRECATED - see get_multiple()""" warnings.warn("get_all() is deprecated, please use get_multiple() instead", DeprecationWarning) return self.get_multiple(fields, limit, order_by, offset)
[ "def", "get_all", "(", "self", ",", "fields", "=", "list", "(", ")", ",", "limit", "=", "None", ",", "order_by", "=", "list", "(", ")", ",", "offset", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"get_all() is deprecated, please use get_multiple(...
72
30.75
def findAll(self, strSeq) : """Same as find but returns a list of all occurences""" arr = self.encode(strSeq) lst = [] lst = self._kmp_find(arr[0], self, lst) return lst
[ "def", "findAll", "(", "self", ",", "strSeq", ")", ":", "arr", "=", "self", ".", "encode", "(", "strSeq", ")", "lst", "=", "[", "]", "lst", "=", "self", ".", "_kmp_find", "(", "arr", "[", "0", "]", ",", "self", ",", "lst", ")", "return", "lst" ...
31.333333
16.5
def _comparator_eq(filter_value, tested_value): """ Tests if the filter value is equal to the tested value """ if isinstance(tested_value, ITERABLES): # Convert the list items to strings for value in tested_value: # Try with the string conversion if not is_string(value): value = repr(value) if filter_value == value: # Match ! return True # Standard comparison elif not is_string(tested_value): # String vs string representation return filter_value == repr(tested_value) else: # String vs string return filter_value == tested_value return False
[ "def", "_comparator_eq", "(", "filter_value", ",", "tested_value", ")", ":", "if", "isinstance", "(", "tested_value", ",", "ITERABLES", ")", ":", "# Convert the list items to strings", "for", "value", "in", "tested_value", ":", "# Try with the string conversion", "if", ...
29.956522
11.347826
def get_date(date): """ Get the date from a value that could be a date object or a string. :param date: The date object or string. :returns: The date object. """ if type(date) is str: return datetime.strptime(date, '%Y-%m-%d').date() else: return date
[ "def", "get_date", "(", "date", ")", ":", "if", "type", "(", "date", ")", "is", "str", ":", "return", "datetime", ".", "strptime", "(", "date", ",", "'%Y-%m-%d'", ")", ".", "date", "(", ")", "else", ":", "return", "date" ]
23.833333
19
def _install_signal_handlers(self): """ Installs signal handlers for handling SIGINT and SIGTERM gracefully. """ def stop(signum, frame): """ Register scheduler's death and exit and remove previously acquired lock and exit. """ self.log.info('Shutting down RQ scheduler...') self.register_death() self.remove_lock() raise SystemExit() signal.signal(signal.SIGINT, stop) signal.signal(signal.SIGTERM, stop)
[ "def", "_install_signal_handlers", "(", "self", ")", ":", "def", "stop", "(", "signum", ",", "frame", ")", ":", "\"\"\"\n Register scheduler's death and exit\n and remove previously acquired lock and exit.\n \"\"\"", "self", ".", "log", ".", "in...
30.111111
11.777778
def is_inside_bounds(value, params): """Return ``True`` if ``value`` is contained in ``params``. This method supports broadcasting in the sense that for ``params.ndim >= 2``, if more than one value is given, the inputs are broadcast against each other. Parameters ---------- value : `array-like` Value(s) to be checked. For several inputs, the final bool tells whether all inputs pass the check or not. params : `IntervalProd` Set in which the value is / the values are supposed to lie. Returns ------- is_inside_bounds : bool ``True`` is all values lie in ``params``, ``False`` otherwise. Examples -------- Check a single point: >>> params = odl.IntervalProd([0, 0], [1, 2]) >>> is_inside_bounds([0, 0], params) True >>> is_inside_bounds([0, -1], params) False Using broadcasting: >>> pts_ax0 = np.array([0, 0, 1, 0, 1])[:, None] >>> pts_ax1 = np.array([2, 0, 1])[None, :] >>> is_inside_bounds([pts_ax0, pts_ax1], params) True >>> pts_ax1 = np.array([-2, 1])[None, :] >>> is_inside_bounds([pts_ax0, pts_ax1], params) False """ if value in params: # Single parameter return True else: if params.ndim == 1: return params.contains_all(np.ravel(value)) else: # Flesh out and flatten to check bounds bcast_value = np.broadcast_arrays(*value) stacked_value = np.vstack(bcast_value) flat_value = stacked_value.reshape(params.ndim, -1) return params.contains_all(flat_value)
[ "def", "is_inside_bounds", "(", "value", ",", "params", ")", ":", "if", "value", "in", "params", ":", "# Single parameter", "return", "True", "else", ":", "if", "params", ".", "ndim", "==", "1", ":", "return", "params", ".", "contains_all", "(", "np", "....
30.365385
20.365385
def shift(self, time: int) -> 'Interval': """Return a new interval shifted by `time` from self Args: time: time to be shifted Returns: Interval: interval shifted by `time` """ return Interval(self._begin + time, self._end + time)
[ "def", "shift", "(", "self", ",", "time", ":", "int", ")", "->", "'Interval'", ":", "return", "Interval", "(", "self", ".", "_begin", "+", "time", ",", "self", ".", "_end", "+", "time", ")" ]
28.6
16.5
def scons_subst_once(strSubst, env, key): """Perform single (non-recursive) substitution of a single construction variable keyword. This is used when setting a variable when copying or overriding values in an Environment. We want to capture (expand) the old value before we override it, so people can do things like: env2 = env.Clone(CCFLAGS = '$CCFLAGS -g') We do this with some straightforward, brute-force code here... """ if isinstance(strSubst, str) and strSubst.find('$') < 0: return strSubst matchlist = ['$' + key, '${' + key + '}'] val = env.get(key, '') def sub_match(match, val=val, matchlist=matchlist): a = match.group(1) if a in matchlist: a = val if is_Sequence(a): return ' '.join(map(str, a)) else: return str(a) if is_Sequence(strSubst): result = [] for arg in strSubst: if is_String(arg): if arg in matchlist: arg = val if is_Sequence(arg): result.extend(arg) else: result.append(arg) else: result.append(_dollar_exps.sub(sub_match, arg)) else: result.append(arg) return result elif is_String(strSubst): return _dollar_exps.sub(sub_match, strSubst) else: return strSubst
[ "def", "scons_subst_once", "(", "strSubst", ",", "env", ",", "key", ")", ":", "if", "isinstance", "(", "strSubst", ",", "str", ")", "and", "strSubst", ".", "find", "(", "'$'", ")", "<", "0", ":", "return", "strSubst", "matchlist", "=", "[", "'$'", "+...
31.8
16.577778
def insert_into_channel(api_key, api_secret, channel_key, video_key, **kwargs): """ Function which inserts video into a channel/playlist. :param api_key: <string> JWPlatform api-key :param api_secret: <string> JWPlatform shared-secret :param channel_key: <string> Key of the channel to which add a video. :param video_key: <string> Key of the video that should be added to the channel. :param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/create.html :return: <dict> Dict which represents the JSON response. """ jwplatform_client = jwplatform.Client(api_key, api_secret) logging.info("Inserting video into channel") try: response = jwplatform_client.channels.videos.create( channel_key=channel_key, video_key=video_key, **kwargs) except jwplatform.errors.JWPlatformError as e: logging.error("Encountered an error inserting {} into channel {}.\n{}".format(video_key, channel_key, e)) sys.exit(e.message) return response
[ "def", "insert_into_channel", "(", "api_key", ",", "api_secret", ",", "channel_key", ",", "video_key", ",", "*", "*", "kwargs", ")", ":", "jwplatform_client", "=", "jwplatform", ".", "Client", "(", "api_key", ",", "api_secret", ")", "logging", ".", "info", "...
49.5
24.954545
def from_user_config(cls): """ Initialize the :class:`TaskManager` from the YAML file 'manager.yaml'. Search first in the working directory and then in the AbiPy configuration directory. Raises: RuntimeError if file is not found. """ global _USER_CONFIG_TASKMANAGER if _USER_CONFIG_TASKMANAGER is not None: return _USER_CONFIG_TASKMANAGER # Try in the current directory then in user configuration directory. path = os.path.join(os.getcwd(), cls.YAML_FILE) if not os.path.exists(path): path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE) if not os.path.exists(path): raise RuntimeError(colored( "\nCannot locate %s neither in current directory nor in %s\n" "!!! PLEASE READ THIS: !!!\n" "To use AbiPy to run jobs this file must be present\n" "It provides a description of the cluster/computer you are running on\n" "Examples are provided in abipy/data/managers." % (cls.YAML_FILE, path), color="red")) _USER_CONFIG_TASKMANAGER = cls.from_file(path) return _USER_CONFIG_TASKMANAGER
[ "def", "from_user_config", "(", "cls", ")", ":", "global", "_USER_CONFIG_TASKMANAGER", "if", "_USER_CONFIG_TASKMANAGER", "is", "not", "None", ":", "return", "_USER_CONFIG_TASKMANAGER", "# Try in the current directory then in user configuration directory.", "path", "=", "os", ...
43.666667
21.37037
def resource_op_defaults_to(name, op_default, value, extra_args=None, cibname=None): ''' Ensure a resource operation default in the cluster is set to a given value Should be run on one cluster node only (there may be races) Can only be run on a node with a functional pacemaker/corosync name Irrelevant, not used (recommended: pcs_properties__resource_op_defaults_to_{{op_default}}) op_default name of the operation default resource property value value of the operation default resource property extra_args additional options for the pcs command cibname use a cached CIB-file named like cibname instead of the live CIB Example: .. code-block:: yaml pcs_properties__resource_op_defaults_to_monitor-interval: pcs.resource_op_defaults_to: - op_default: monitor-interval - value: 60s - cibname: cib_for_cluster_settings ''' return _item_present(name=name, item='resource', item_id='{0}={1}'.format(op_default, value), item_type=None, show=['op', 'defaults'], create=['op', 'defaults'], extra_args=extra_args, cibname=cibname)
[ "def", "resource_op_defaults_to", "(", "name", ",", "op_default", ",", "value", ",", "extra_args", "=", "None", ",", "cibname", "=", "None", ")", ":", "return", "_item_present", "(", "name", "=", "name", ",", "item", "=", "'resource'", ",", "item_id", "=",...
35.945946
22.27027
def metrics(self, *metrics): """ Add a list of Metric ingredients to the query. These can either be Metric objects or strings representing metrics on the shelf. The Metric expression will be added to the query's select statement. The metric value is a property of each row of the result. :param metrics: Metrics to add to the recipe. Metrics can either be keys on the ``shelf`` or Metric objects :type metrics: list """ for m in metrics: self._cauldron.use(self._shelf.find(m, Metric)) self.dirty = True return self
[ "def", "metrics", "(", "self", ",", "*", "metrics", ")", ":", "for", "m", "in", "metrics", ":", "self", ".", "_cauldron", ".", "use", "(", "self", ".", "_shelf", ".", "find", "(", "m", ",", "Metric", ")", ")", "self", ".", "dirty", "=", "True", ...
40.25
19.3125
def _getThread(self, given_thread_id=None, given_thread_type=None): """ Checks if thread ID is given, checks if default is set and returns correct values :raises ValueError: If thread ID is not given and there is no default :return: Thread ID and thread type :rtype: tuple """ if given_thread_id is None: if self._default_thread_id is not None: return self._default_thread_id, self._default_thread_type else: raise ValueError("Thread ID is not set") else: return given_thread_id, given_thread_type
[ "def", "_getThread", "(", "self", ",", "given_thread_id", "=", "None", ",", "given_thread_type", "=", "None", ")", ":", "if", "given_thread_id", "is", "None", ":", "if", "self", ".", "_default_thread_id", "is", "not", "None", ":", "return", "self", ".", "_...
41.066667
20.133333
def normaliseURL(url): """Normalising - strips and leading or trailing whitespace, - replaces HTML entities and character references, - removes any leading empty segments to avoid breaking urllib2. """ url = unicode_safe(url).strip() # XXX: brutal hack url = unescape(url) pu = list(urlparse(url)) segments = pu[2].split('/') while segments and segments[0] in ('', '..'): del segments[0] pu[2] = '/' + '/'.join(segments) # remove leading '&' from query if pu[4].startswith('&'): pu[4] = pu[4][1:] # remove anchor pu[5] = "" return urlunparse(pu)
[ "def", "normaliseURL", "(", "url", ")", ":", "url", "=", "unicode_safe", "(", "url", ")", ".", "strip", "(", ")", "# XXX: brutal hack", "url", "=", "unescape", "(", "url", ")", "pu", "=", "list", "(", "urlparse", "(", "url", ")", ")", "segments", "="...
29.047619
13.857143
def clone_git_repo(repo_url): """ input: repo_url output: path of the cloned repository steps: 1. clone the repo 2. parse 'site' into for templating assumptions: repo_url = "git@github.com:littleq0903/django-deployer-template-openshift-experiment.git" repo_local_location = "/tmp/djangodeployer-cache-xxxx" # xxxx here will be some short uuid for identify different downloads """ REPO_PREFIX = "djangodeployer-cache-" REPO_POSTFIX_UUID = str(uuid.uuid4()).split('-')[-1] REPO_CACHE_NAME = REPO_PREFIX + REPO_POSTFIX_UUID REPO_CACHE_LOCATION = '/tmp/%s' % REPO_CACHE_NAME repo = git.Repo.clone_from(repo_url, REPO_CACHE_LOCATION) return REPO_CACHE_LOCATION
[ "def", "clone_git_repo", "(", "repo_url", ")", ":", "REPO_PREFIX", "=", "\"djangodeployer-cache-\"", "REPO_POSTFIX_UUID", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", "REPO_CACHE_NAME", "=", "REPO...
37.842105
21.421053
def fetch_logs(self, max_rows=1024, orientation=None): """Mocked. Retrieve the logs produced by the execution of the query. Can be called multiple times to fetch the logs produced after the previous call. :returns: list<str> :raises: ``ProgrammingError`` when no query has been started .. note:: This is not a part of DB-API. """ from pyhive import hive from TCLIService import ttypes from thrift import Thrift orientation = orientation or ttypes.TFetchOrientation.FETCH_NEXT try: req = ttypes.TGetLogReq(operationHandle=self._operationHandle) logs = self._connection.client.GetLog(req).log return logs # raised if Hive is used except (ttypes.TApplicationException, Thrift.TApplicationException): if self._state == self._STATE_NONE: raise hive.ProgrammingError('No query yet') logs = [] while True: req = ttypes.TFetchResultsReq( operationHandle=self._operationHandle, orientation=ttypes.TFetchOrientation.FETCH_NEXT, maxRows=self.arraysize, fetchType=1, # 0: results, 1: logs ) response = self._connection.client.FetchResults(req) hive._check_status(response) assert not response.results.rows, \ 'expected data in columnar format' assert len(response.results.columns) == 1, response.results.columns new_logs = hive._unwrap_column(response.results.columns[0]) logs += new_logs if not new_logs: break return '\n'.join(logs)
[ "def", "fetch_logs", "(", "self", ",", "max_rows", "=", "1024", ",", "orientation", "=", "None", ")", ":", "from", "pyhive", "import", "hive", "from", "TCLIService", "import", "ttypes", "from", "thrift", "import", "Thrift", "orientation", "=", "orientation", ...
40.146341
14.658537
def highlightBlock(self, text): """ Highlights a block of text. Please do not override, this method. Instead you should implement :func:`spyder.utils.syntaxhighplighters.SyntaxHighlighter.highlight_block`. :param text: text to highlight. """ self.highlight_block(text) # Process blocks for fold detection current_block = self.currentBlock() previous_block = self._find_prev_non_blank_block(current_block) if self.editor: if self.fold_detector is not None: self.fold_detector._editor = weakref.ref(self.editor) self.fold_detector.process_block( current_block, previous_block, text)
[ "def", "highlightBlock", "(", "self", ",", "text", ")", ":", "self", ".", "highlight_block", "(", "text", ")", "# Process blocks for fold detection\r", "current_block", "=", "self", ".", "currentBlock", "(", ")", "previous_block", "=", "self", ".", "_find_prev_non...
40.777778
16.111111
def subtract(df, new_column, column_1, column_2): """ DEPRECATED - use `formula` instead """ return _basic_math_operation(df, new_column, column_1, column_2, op='sub')
[ "def", "subtract", "(", "df", ",", "new_column", ",", "column_1", ",", "column_2", ")", ":", "return", "_basic_math_operation", "(", "df", ",", "new_column", ",", "column_1", ",", "column_2", ",", "op", "=", "'sub'", ")" ]
36
9.6
def _write_plan(self, stream): """Write the plan line to the stream. If we have a plan and have not yet written it out, write it to the given stream. """ if self.plan is not None: if not self._plan_written: print("1..{0}".format(self.plan), file=stream) self._plan_written = True
[ "def", "_write_plan", "(", "self", ",", "stream", ")", ":", "if", "self", ".", "plan", "is", "not", "None", ":", "if", "not", "self", ".", "_plan_written", ":", "print", "(", "\"1..{0}\"", ".", "format", "(", "self", ".", "plan", ")", ",", "file", ...
35.1
12.9
def highlight_occurences(editor): """ Highlights given editor current line. :param editor: Document editor. :type editor: QWidget :return: Method success. :rtype: bool """ format = editor.language.theme.get("accelerator.occurence") if not format: return False extra_selections = editor.extraSelections() or [] if not editor.isReadOnly(): word = editor.get_word_under_cursor() if not word: return False block = editor.document().findBlock(0) cursor = editor.document().find(word, block.position(), QTextDocument.FindCaseSensitively | QTextDocument.FindWholeWords) while block.isValid() and cursor.position() != -1: selection = QTextEdit.ExtraSelection() selection.format.setBackground(format.background()) selection.cursor = cursor extra_selections.append(selection) cursor = editor.document().find(word, cursor.position(), QTextDocument.FindCaseSensitively | QTextDocument.FindWholeWords) block = block.next() editor.setExtraSelections(extra_selections) return True
[ "def", "highlight_occurences", "(", "editor", ")", ":", "format", "=", "editor", ".", "language", ".", "theme", ".", "get", "(", "\"accelerator.occurence\"", ")", "if", "not", "format", ":", "return", "False", "extra_selections", "=", "editor", ".", "extraSele...
36.771429
18.428571
def get_shape(bin_edges, sid): """ :returns: the shape of the disaggregation matrix for the given site, of form (#mags-1, #dists-1, #lons-1, #lats-1, #eps-1) """ mag_bins, dist_bins, lon_bins, lat_bins, eps_bins = bin_edges return (len(mag_bins) - 1, len(dist_bins) - 1, len(lon_bins[sid]) - 1, len(lat_bins[sid]) - 1, len(eps_bins) - 1)
[ "def", "get_shape", "(", "bin_edges", ",", "sid", ")", ":", "mag_bins", ",", "dist_bins", ",", "lon_bins", ",", "lat_bins", ",", "eps_bins", "=", "bin_edges", "return", "(", "len", "(", "mag_bins", ")", "-", "1", ",", "len", "(", "dist_bins", ")", "-",...
41.888889
17.444444
def create_post(self, path, **kw): """Create a new post.""" content = kw.pop('content', None) onefile = kw.pop('onefile', False) # is_page is not used by create_post as of now. kw.pop('is_page', False) metadata = {} metadata.update(self.default_metadata) metadata.update(kw) makedirs(os.path.dirname(path)) if not content.endswith('\n'): content += '\n' with io.open(path, "w+", encoding="utf8") as fd: if onefile: fd.write(write_metadata(metadata, comment_wrap=False, site=self.site, compiler=self)) fd.write(content)
[ "def", "create_post", "(", "self", ",", "path", ",", "*", "*", "kw", ")", ":", "content", "=", "kw", ".", "pop", "(", "'content'", ",", "None", ")", "onefile", "=", "kw", ".", "pop", "(", "'onefile'", ",", "False", ")", "# is_page is not used by create...
40.1875
11.9375
def view_meta_admonition(admonition_name, name=None): """List all found admonition from all the rst files found in directory. view_meta_admonition is called by the 'meta' url: /__XXXXXXX__ where XXXXXXX represents and admonition name, like: * todo * warning * danger * ... .. note:: this function may works for any docutils node, not only admonition Keyword Arguments: :admonition_name: (str) -- name of the admonition """ print("meta admo: %s - %s" % (admonition_name, name)) admonition = None if admonition_name == 'todo': admonition = todo elif admonition_name == 'done': admonition = done elif hasattr(nodes, admonition_name): admonition = getattr(nodes, admonition_name) else: return abort(404) doc2_content = "" doc2_output, doc2_pub = docutils.core.publish_programmatically( source_class=io.StringInput, source=doc2_content, source_path=None, destination_class=io.StringOutput, destination=None, destination_path=None, reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=AttowikiWriter(), writer_name=None, settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False) section1 = nodes.section("{0}_list_file".format(admonition_name)) doc2_pub.reader.document.append(section1) title1 = nodes.title("{0} LIST".format(admonition_name.upper()), "{0} LIST".format(admonition_name.upper())) doc2_pub.reader.document.append(title1) if name is None: rst_files = [filename[2:-4] for filename in sorted( glob.glob("./*.rst"))] rst_files.reverse() else: rst_files = [filename[2:-4] for filename in sorted(glob.glob("./{0}.rst".format(name)))] for file in rst_files: file_title = False file_handle = open(file + '.rst', 'r') file_content = file_handle.read() file_handle.close() file_content = file_content.decode('utf-8') output, pub = docutils.core.publish_programmatically( source_class=io.StringInput, source=file_content, source_path=None, destination_class=io.StringOutput, destination=None, 
destination_path=None, reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='html', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False) my_settings = pub.get_settings() parser = docutils.parsers.rst.Parser() document = docutils.utils.new_document('test', my_settings) parser.parse(file_content, document) for node in document.traverse(admonition): if not file_title: file_title = True # new section section2 = nodes.section(file) doc2_pub.reader.document.append(section2) # add link to the originating file paragraph = nodes.paragraph() file_target = nodes.target(ids=[file], names=[file], refuri="/" + file) file_ref = nodes.reference(file, file, name=file, refuri="/" + file) paragraph.append(nodes.Text("in ")) paragraph.append(file_ref) paragraph.append(file_target) paragraph.append(nodes.Text(":")) doc2_pub.reader.document.append(paragraph) # doc2_pub.reader.document.append(file_target) doc2_pub.reader.document.append(node) doc2_pub.apply_transforms() doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination) doc2_pub.writer.assemble_parts() if name is None: display_file_name = '__{0}__'.format(admonition_name) extended_name = None else: display_file_name = '{0}'.format(name) extended_name = '__{0}__'.format(admonition_name) return template('page', type="view", name=display_file_name, extended_name=extended_name, is_repo=check_repo(), history=[], gitref=None, content=doc2_pub.writer.parts['html_body'])
[ "def", "view_meta_admonition", "(", "admonition_name", ",", "name", "=", "None", ")", ":", "print", "(", "\"meta admo: %s - %s\"", "%", "(", "admonition_name", ",", "name", ")", ")", "admonition", "=", "None", "if", "admonition_name", "==", "'todo'", ":", "adm...
37.459016
16.065574
def dissect(self, data): """ Dissect the field. :param bytes data: The data to extract the field value from :return: The rest of the data not used to dissect the field value :rtype: bytes """ size = struct.calcsize("B") if len(data) < size: raise NotEnoughData( "Not enough data to decode field '%s' value" % self.name ) curve_type = struct.unpack("B", data[:size])[0] if curve_type == 0x03: self._value = ECParametersNamedCurveField("none") data = self._value.dissect(data) else: raise NotImplementedError( "Decoding of KeyExchange message for curve 0x%.2X not implemented" % curve_type ) return data
[ "def", "dissect", "(", "self", ",", "data", ")", ":", "size", "=", "struct", ".", "calcsize", "(", "\"B\"", ")", "if", "len", "(", "data", ")", "<", "size", ":", "raise", "NotEnoughData", "(", "\"Not enough data to decode field '%s' value\"", "%", "self", ...
32.541667
20.625
def filter(self, search): """ Add a ``post_filter`` to the search request narrowing the results based on the facet filters. """ if not self._filters: return search post_filter = MatchAll() for f in itervalues(self._filters): post_filter &= f return search.post_filter(post_filter)
[ "def", "filter", "(", "self", ",", "search", ")", ":", "if", "not", "self", ".", "_filters", ":", "return", "search", "post_filter", "=", "MatchAll", "(", ")", "for", "f", "in", "itervalues", "(", "self", ".", "_filters", ")", ":", "post_filter", "&=",...
29.833333
13.333333
def block_jids(self, jids_to_block): """ Add the JIDs in the sequence `jids_to_block` to the client's blocklist. """ yield from self._check_for_blocking() if not jids_to_block: return cmd = blocking_xso.BlockCommand(jids_to_block) iq = aioxmpp.IQ( type_=aioxmpp.IQType.SET, payload=cmd, ) yield from self.client.send(iq)
[ "def", "block_jids", "(", "self", ",", "jids_to_block", ")", ":", "yield", "from", "self", ".", "_check_for_blocking", "(", ")", "if", "not", "jids_to_block", ":", "return", "cmd", "=", "blocking_xso", ".", "BlockCommand", "(", "jids_to_block", ")", "iq", "=...
26.4375
15.8125
def filter_significance(diff, significance):
    """
    Prune any changes in the patch which are due to numeric changes
    less than this level of significance.
    """
    pruned = []
    for delta in diff['changed']:
        # Keep only the field changes that are individually significant.
        significant_fields = {
            field: change
            for field, change in delta['fields'].items()
            if _is_significant(change, significance)
        }
        # A key counts as changed only if significant changes remain.
        if significant_fields:
            pruned.append({'key': delta['key'], 'fields': significant_fields})

    result = diff.copy()
    result['changed'] = pruned
    return result
[ "def", "filter_significance", "(", "diff", ",", "significance", ")", ":", "changed", "=", "diff", "[", "'changed'", "]", "# remove individual field changes that are significant", "reduced", "=", "[", "{", "'key'", ":", "delta", "[", "'key'", "]", ",", "'fields'", ...
33.8
19.5
def get_markov_blanket(self, node):
    """
    Returns a markov blanket for a random variable. In the case of Bayesian
    Networks, the markov blanket is the set of node's parents, its children
    and its children's other parents.

    Returns
    -------
    list(blanket_nodes): List of nodes contained in Markov Blanket

    Parameters
    ----------
    node: string, int or any hashable python object.
        The node whose markov blanket would be returned.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG([('x', 'y'), ('z', 'y'), ('y', 'w'), ('y', 'v'), ('u', 'w'),
    ...          ('s', 'v'), ('w', 't'), ('w', 'm'), ('v', 'n'), ('v', 'q')])
    >>> sorted(G.get_markov_blanket('y'))
    ['s', 'u', 'v', 'w', 'x', 'z']
    """
    children = self.get_children(node)
    parents = self.get_parents(node)
    blanket_nodes = children + parents
    for child_node in children:
        blanket_nodes.extend(self.get_parents(child_node))
    blanket_nodes = set(blanket_nodes)
    # BUG FIX: was ``blanket_nodes.remove(node)``, which raised KeyError for
    # any node without children (e.g. leaf nodes): the node itself only ends
    # up in the set as a parent of its own children. ``discard`` is a no-op
    # when the node is absent.
    blanket_nodes.discard(node)
    return list(blanket_nodes)
[ "def", "get_markov_blanket", "(", "self", ",", "node", ")", ":", "children", "=", "self", ".", "get_children", "(", "node", ")", "parents", "=", "self", ".", "get_parents", "(", "node", ")", "blanket_nodes", "=", "children", "+", "parents", "for", "child_n...
37.9375
17.9375
def add_interceptor(self, *interceptors):
    """
    Adds one or multiple HTTP traffic interceptors to the current
    mocking engine.

    Interceptors are typically HTTP client specific wrapper classes that
    implements the pook interceptor interface.

    Arguments:
        interceptors (pook.interceptors.BaseInterceptor)
    """
    # Instantiate each interceptor class against this engine and register it.
    self.interceptors.extend(
        interceptor(self.engine) for interceptor in interceptors
    )
[ "def", "add_interceptor", "(", "self", ",", "*", "interceptors", ")", ":", "for", "interceptor", "in", "interceptors", ":", "self", ".", "interceptors", ".", "append", "(", "interceptor", "(", "self", ".", "engine", ")", ")" ]
35.461538
18.230769
def _convert_connected_app(self):
    """Convert Connected App to service"""
    if self.services and "connected_app" in self.services:
        # already a service
        return
    connected_app = self.get_connected_app()
    if not connected_app:
        # not configured
        return
    self.logger.warning(
        "Reading Connected App info from deprecated config."
        " Connected App should be changed to a service."
        " If using environment keychain, update the environment variable."
        " Otherwise, it has been handled automatically and you should not"
        " see this message again."
    )
    # Re-home the legacy credentials under the "connected_app" service.
    self.set_service(
        "connected_app",
        ServiceConfig(
            {
                "callback_url": connected_app.callback_url,
                "client_id": connected_app.client_id,
                "client_secret": connected_app.client_secret,
            }
        ),
    )
[ "def", "_convert_connected_app", "(", "self", ")", ":", "if", "self", ".", "services", "and", "\"connected_app\"", "in", "self", ".", "services", ":", "# already a service", "return", "connected_app", "=", "self", ".", "get_connected_app", "(", ")", "if", "not",...
40.083333
18.083333
def phonetic_i_umlaut(sound: Vowel) -> Vowel:
    """
    Apply the phonetic i-umlaut mapping to a vowel or diphthong.

    >>> umlaut_a = OldNorsePhonology.phonetic_i_umlaut(a)
    >>> umlaut_a.ipar
    'ɛ'

    >>> umlaut_au = OldNorsePhonology.phonetic_i_umlaut(DIPHTHONGS_IPA_class["au"])
    >>> umlaut_au.ipar
    'ɐy'

    :param sound: vowel to mutate
    :return: umlauted vowel (implicitly None when no mapping applies)
    """
    # Short/long pairs share the same mapping; check in the original order.
    for plain, umlauted in ((a, ee), (o, oee), (u, y)):
        if sound.is_equal(plain):
            return umlauted
        if sound.is_equal(plain.lengthen()):
            return umlauted.lengthen()
    if sound.is_equal(DIPHTHONGS_IPA_class["au"]):
        return DIPHTHONGS_IPA_class["ey"]
[ "def", "phonetic_i_umlaut", "(", "sound", ":", "Vowel", ")", "->", "Vowel", ":", "if", "sound", ".", "is_equal", "(", "a", ")", ":", "return", "ee", "elif", "sound", ".", "is_equal", "(", "a", ".", "lengthen", "(", ")", ")", ":", "return", "ee", "....
29.666667
15.444444
def content_type(self) -> ContentType:
    """Override superclass method."""
    if self._ctype:
        return self._ctype
    # No explicit type set: inherit configness from the parent node.
    if self.parent.config:
        return ContentType.config
    return ContentType.nonconfig
[ "def", "content_type", "(", "self", ")", "->", "ContentType", ":", "if", "self", ".", "_ctype", ":", "return", "self", ".", "_ctype", "return", "(", "ContentType", ".", "config", "if", "self", ".", "parent", ".", "config", "else", "ContentType", ".", "no...
38.5
8.666667
def getitem(self, index, context=None):
    """Return the inference of a subscript.

    This is basically looking up the method in the metaclass and calling it.

    :returns: The inferred value of a subscript to this class.
    :rtype: NodeNG

    :raises AstroidTypeError: If this class does not define a
        ``__getitem__`` method.
    """
    try:
        candidates = dunder_lookup.lookup(self, "__getitem__")
    except exceptions.AttributeInferenceError as exc:
        raise exceptions.AstroidTypeError(node=self, context=context) from exc

    dunder_method = candidates[0]

    # Provide ``index`` as the call argument through a fresh call context.
    call_context = contextmod.bind_context_to_node(context, self)
    call_context.callcontext = contextmod.CallContext(args=[index])

    try:
        return next(dunder_method.infer_call_result(self, call_context))
    except exceptions.InferenceError:
        return util.Uninferable
[ "def", "getitem", "(", "self", ",", "index", ",", "context", "=", "None", ")", ":", "try", ":", "methods", "=", "dunder_lookup", ".", "lookup", "(", "self", ",", "\"__getitem__\"", ")", "except", "exceptions", ".", "AttributeInferenceError", "as", "exc", "...
37.307692
24.153846
def get_bool(self, key, default=None):
    u"""
    Return the value for ``key`` converted to a boolean.
    """
    return self.get_converted(
        key, ConversionTypeEnum.BOOL, default=default)
[ "def", "get_bool", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "return", "self", ".", "get_converted", "(", "key", ",", "ConversionTypeEnum", ".", "BOOL", ",", "default", "=", "default", ")" ]
33.666667
5.833333
def QA_util_to_datetime(time):
    """Convert a date/datetime string to ``datetime.datetime``.

    :param time: str of either '2018-01-01' (length 10) or
        '2018-01-01 00:00:00' (length 19)
    :return: datetime.datetime
    :raises ValueError: if ``time`` matches neither supported format.
        (Previously this fell through and crashed with UnboundLocalError
        because ``_time`` was never assigned.)
    """
    text = str(time)
    if len(text) == 10:
        _time = '{} 00:00:00'.format(time)
    elif len(text) == 19:
        _time = text
    else:
        QA_util_log_info('WRONG DATETIME FORMAT {}'.format(time))
        # BUG FIX: the original continued to strptime with ``_time`` unbound.
        raise ValueError('WRONG DATETIME FORMAT {}'.format(time))
    return datetime.datetime.strptime(_time, '%Y-%m-%d %H:%M:%S')
[ "def", "QA_util_to_datetime", "(", "time", ")", ":", "if", "len", "(", "str", "(", "time", ")", ")", "==", "10", ":", "_time", "=", "'{} 00:00:00'", ".", "format", "(", "time", ")", "elif", "len", "(", "str", "(", "time", ")", ")", "==", "19", ":...
32.769231
11.538462
def sense_dep(self, target):
    """Sense for an active DEP Target is not supported. The device
    only supports passive activation via sense_tta/sense_ttf.
    """
    reason = "{device} does not support sense for active DEP Target"
    raise nfc.clf.UnsupportedTargetError(reason.format(device=self))
[ "def", "sense_dep", "(", "self", ",", "target", ")", ":", "message", "=", "\"{device} does not support sense for active DEP Target\"", "raise", "nfc", ".", "clf", ".", "UnsupportedTargetError", "(", "message", ".", "format", "(", "device", "=", "self", ")", ")" ]
45.714286
19.714286
def class_can_run_parallel(test_class: unittest.TestSuite) -> bool:
    """
    Checks if a given class of tests can be run in parallel or not

    :param test_class: the class to run
    :return: True if the class can be run in parallel, False otherwise
    """
    # Only the first test case is consulted: the __no_parallel__ flag is
    # assumed to be class-wide. An empty suite yields an implicit None.
    for case in test_class:
        return not getattr(case, "__no_parallel__", False)
[ "def", "class_can_run_parallel", "(", "test_class", ":", "unittest", ".", "TestSuite", ")", "->", "bool", ":", "for", "test_case", "in", "test_class", ":", "return", "not", "getattr", "(", "test_case", ",", "\"__no_parallel__\"", ",", "False", ")" ]
42
18.222222
def is_christmas_period():
    """Is this the christmas period?"""
    # The christmas period is defined as December 15th through 27th.
    today = datetime.date.today()
    return today.month == 12 and 15 <= today.day <= 27
[ "def", "is_christmas_period", "(", ")", ":", "now", "=", "datetime", ".", "date", ".", "today", "(", ")", "if", "now", ".", "month", "!=", "12", ":", "return", "False", "if", "now", ".", "day", "<", "15", ":", "return", "False", "if", "now", ".", ...
23.4
16.5
def wait(self):
    """
    Wait until all in progress and queued items are processed
    """
    self._wait_called = True
    # Poll until both the tracked and waiting coordinator queues drain.
    while (self.tracked_coordinator_count() > 0
            or self.waiting_coordinator_count() > 0):
        time.sleep(1)
    super(AsperaTransferCoordinatorController, self).wait()
    self._wait_called = False
[ "def", "wait", "(", "self", ")", ":", "self", ".", "_wait_called", "=", "True", "while", "self", ".", "tracked_coordinator_count", "(", ")", ">", "0", "or", "self", ".", "waiting_coordinator_count", "(", ")", ">", "0", ":", "time", ".", "sleep", "(", "...
44.125
13.75
def load_config_from_file(app, filepath):
    """Helper function to load config from a specified file"""
    try:
        app.config.from_pyfile(filepath)
    except IOError:
        # TODO: Can we print to sys.stderr in production? Should this go to
        # logs instead?
        print("Did not find settings file %s for additional settings, skipping it" % filepath,
              file=sys.stderr)
        return False
    return True
[ "def", "load_config_from_file", "(", "app", ",", "filepath", ")", ":", "try", ":", "app", ".", "config", ".", "from_pyfile", "(", "filepath", ")", "return", "True", "except", "IOError", ":", "# TODO: Can we print to sys.stderr in production? Should this go to", "# log...
41.8
21.8
def isPythonFile(filename):
    """Return True if filename points to a Python file."""
    if filename.endswith('.py'):
        return True

    # Avoid obvious Emacs backup files
    if filename.endswith("~"):
        return False

    # Otherwise sniff the first line for a Python shebang.
    max_bytes = 128
    try:
        with open(filename, 'rb') as handle:
            text = handle.read(max_bytes)
    except IOError:
        return False
    if not text:
        return False

    first_line = text.splitlines()[0]
    return PYTHON_SHEBANG_REGEX.match(first_line)
[ "def", "isPythonFile", "(", "filename", ")", ":", "if", "filename", ".", "endswith", "(", "'.py'", ")", ":", "return", "True", "# Avoid obvious Emacs backup files", "if", "filename", ".", "endswith", "(", "\"~\"", ")", ":", "return", "False", "max_bytes", "=",...
23.952381
17.761905
def _validate_customer_service(self):
    """
    Validate input parameters; if customer service is on, create the
    directory for tarball files with the correct permissions for user
    and group.
    """
    service_dir = self.customer_service_dir
    if not service_dir.exists:
        mode = 0o750
        print("Creating customer_service_dir %s with mode %s" % (service_dir, mode))
        service_dir.makedirs()
        os.chmod(service_dir.path, mode)

    # A customer-service directory is useless without a notification target.
    if self.mailto is None:
        raise RuntimeError("customer_service_dir requires mailto option in scheduler.yml")
[ "def", "_validate_customer_service", "(", "self", ")", ":", "direc", "=", "self", ".", "customer_service_dir", "if", "not", "direc", ".", "exists", ":", "mode", "=", "0o750", "print", "(", "\"Creating customer_service_dir %s with mode %s\"", "%", "(", "direc", ","...
41.142857
18.714286
def start(self):
    """Start listener in a background thread

    Returns:
      address of the Server as a tuple of (host, port)
    """
    server_sock = self.start_listening_socket()

    # hostname may not be resolvable but IP address probably will be
    host = self.get_server_ip()
    port = server_sock.getsockname()[1]
    addr = (host, port)
    logging.info("listening for reservations at {0}".format(addr))

    def _listen(self, sock):
        # Socket set watched by select(); starts with just the listener.
        CONNECTIONS = []
        CONNECTIONS.append(sock)

        while not self.done:
            # 60s timeout so the loop re-checks self.done periodically
            # even when no sockets are ready.
            read_socks, write_socks, err_socks = select.select(CONNECTIONS, [], [], 60)
            for sock in read_socks:
                if sock == server_sock:
                    # New inbound connection on the listener: accept and watch it.
                    client_sock, client_addr = sock.accept()
                    CONNECTIONS.append(client_sock)
                    logging.debug("client connected from {0}".format(client_addr))
                else:
                    # Data on an existing client connection: read and dispatch.
                    try:
                        msg = self.receive(sock)
                        self._handle_message(sock, msg)
                    except Exception as e:
                        # Any receive/handling failure drops the client.
                        logging.debug(e)
                        sock.close()
                        CONNECTIONS.remove(sock)

        server_sock.close()

    # Daemon thread so the listener never blocks interpreter shutdown.
    t = threading.Thread(target=_listen, args=(self, server_sock))
    t.daemon = True
    t.start()
    return addr
[ "def", "start", "(", "self", ")", ":", "server_sock", "=", "self", ".", "start_listening_socket", "(", ")", "# hostname may not be resolvable but IP address probably will be", "host", "=", "self", ".", "get_server_ip", "(", ")", "port", "=", "server_sock", ".", "get...
29.487805
19.463415
def dcshift(self, shift=0.0):
    '''Apply a DC shift to the audio.

    Parameters
    ----------
    shift : float
        Amount to shift audio between -2 and 2. (Audio is between -1 and 1)

    See Also
    --------
    highpass
    '''
    valid = is_number(shift) and -2 <= shift <= 2
    if not valid:
        raise ValueError('shift must be a number between -2 and 2.')

    self.effects.extend(['dcshift', '{:f}'.format(shift)])
    self.effects_log.append('dcshift')
    return self
[ "def", "dcshift", "(", "self", ",", "shift", "=", "0.0", ")", ":", "if", "not", "is_number", "(", "shift", ")", "or", "shift", "<", "-", "2", "or", "shift", ">", "2", ":", "raise", "ValueError", "(", "'shift must be a number between -2 and 2.'", ")", "ef...
26.285714
24
def values(self, axis):
    """
    Returns the values of the given axis from all the datasets
    within this chart.

    :param      axis | <str>

    :return     [<variant>, ..]
    """
    collected = []
    for dataset in self.datasets():
        collected.extend(dataset.values(axis))
    return collected
[ "def", "values", "(", "self", ",", "axis", ")", ":", "output", "=", "[", "]", "for", "dataset", "in", "self", ".", "datasets", "(", ")", ":", "output", "+=", "dataset", ".", "values", "(", "axis", ")", "return", "output" ]
25.857143
15.428571
def pick_sdf(filename, directory=None):
    """Returns a full path to the chosen SDF file.

    The supplied file is not expected to contain a recognised SDF extension,
    this is added automatically. If a file with the extension `.sdf.gz` or
    `.sdf` is found the path to it is returned. If this fails, `None` is
    returned.

    :param filename: The SDF file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory. If not provided it is
                      calculated automatically.
    :type directory: ``str``
    :return: The full path to the file, or None if it does not exist
    :rtype: ``str``
    """
    if directory is None:
        directory = utils.get_undecorated_calling_module()
    # If the 'cwd' is not '/output' (which indicates we're in a Container)
    # then remove the CWD and the anticipated '/'
    # from the front of the module
    if os.getcwd() not in ['/output']:
        directory = directory[len(os.getcwd()) + 1:]
    base_path = os.path.join(directory, filename)

    # Prefer the compressed variant, then the plain one.
    for extension in ('.sdf.gz', '.sdf'):
        candidate = base_path + extension
        if os.path.isfile(candidate):
            return candidate

    # Couldn't find a suitable SDF file
    return None
[ "def", "pick_sdf", "(", "filename", ",", "directory", "=", "None", ")", ":", "if", "directory", "is", "None", ":", "directory", "=", "utils", ".", "get_undecorated_calling_module", "(", ")", "# If the 'cwd' is not '/output' (which indicates we're in a Container)", "# th...
42.580645
15.83871
def send_identity(self):
    """
    Send the identity of the service.
    """
    payload = _json.dumps(
        {'service_name': self.messaging._service_name}
    ).encode('utf8')
    identify_frame = (b'', b'IDENT', _json.dumps([]).encode('utf8'), payload)

    # NOTE: Have to do this manually since we built the frame
    if self.messaging._run_control_loop:
        # pep8 alias
        send = self.messaging.command_socket.send_multipart
        self.messaging.add_callback(send, identify_frame)
    else:
        self.messaging.command_socket.send_multipart(identify_frame)

    self.logger.info(' Service Identity sent: %s',
                     self.messaging._service_name)
    if self.identity_callback:
        self.identity_callback()
[ "def", "send_identity", "(", "self", ")", ":", "service_name", "=", "{", "'service_name'", ":", "self", ".", "messaging", ".", "_service_name", "}", "service_name", "=", "_json", ".", "dumps", "(", "service_name", ")", ".", "encode", "(", "'utf8'", ")", "i...
36
17.92
def element_as_json(name):
    """
    Get specified element json data by name

    :param name: name of element
    :return: json data representing element, else None
    """
    if not name:
        return None
    element = fetch_json_by_name(name)
    if element.json:
        return element.json
[ "def", "element_as_json", "(", "name", ")", ":", "if", "name", ":", "element", "=", "fetch_json_by_name", "(", "name", ")", "if", "element", ".", "json", ":", "return", "element", ".", "json" ]
27.5
13.1
def parse_block(lines, header=False):
    # type: (List[str], bool) -> List[str]
    """Parse and return a single block, popping off the start of `lines`.

    If parsing a header block, we stop after we reach a line that is not a
    comment. Otherwise, we stop after reaching an empty line.

    :param lines: list of lines (mutated: consumed lines are popped)
    :param header: whether we are parsing a header block
    :return: list of lines that form the single block
    """
    block = []
    while lines:
        current = lines[0]
        if not current:
            break
        if header and not current.startswith('#'):
            break
        block.append(lines.pop(0))
    return block
[ "def", "parse_block", "(", "lines", ",", "header", "=", "False", ")", ":", "# type: (List[str], bool) -> List[str]", "block_lines", "=", "[", "]", "while", "lines", "and", "lines", "[", "0", "]", "and", "(", "not", "header", "or", "lines", "[", "0", "]", ...
42
20.142857
def add_file(self, f):
    """Add a partition identity as a child of a dataset identity."""
    # Lazily create the container on first use.
    self.files = self.files or set()
    self.files.add(f)
    self.locations.set(f.type_)
[ "def", "add_file", "(", "self", ",", "f", ")", ":", "if", "not", "self", ".", "files", ":", "self", ".", "files", "=", "set", "(", ")", "self", ".", "files", ".", "add", "(", "f", ")", "self", ".", "locations", ".", "set", "(", "f", ".", "typ...
23.333333
20.222222
def bulk_refresh(self):
    """
    Refreshes all refreshable tokens in the queryset.
    Deletes any tokens which fail to refresh.
    Deletes any tokens which are expired and cannot refresh.
    Excludes tokens for which the refresh was incomplete for other reasons.
    """
    session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID)
    auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID,
                                       app_settings.ESI_SSO_CLIENT_SECRET)
    incomplete = []
    for model in self.filter(refresh_token__isnull=False):
        try:
            model.refresh(session=session, auth=auth)
            # FIX: was ``logging.debug`` (root logger); use the module-level
            # ``logger`` consistently with the failure branch below.
            logger.debug("Successfully refreshed {0}".format(repr(model)))
        except TokenError:
            logger.info("Refresh failed for {0}. Deleting.".format(repr(model)))
            model.delete()
        except IncompleteResponseError:
            # Keep the token but exclude it from the result set for now.
            incomplete.append(model.pk)
    # Tokens without a refresh token can never be refreshed: drop the
    # expired ones.
    self.filter(refresh_token__isnull=True).get_expired().delete()
    return self.exclude(pk__in=incomplete)
[ "def", "bulk_refresh", "(", "self", ")", ":", "session", "=", "OAuth2Session", "(", "app_settings", ".", "ESI_SSO_CLIENT_ID", ")", "auth", "=", "requests", ".", "auth", ".", "HTTPBasicAuth", "(", "app_settings", ".", "ESI_SSO_CLIENT_ID", ",", "app_settings", "."...
50
20.190476
def get_implementation(cls, force: bool = False) -> AxesHandler:
    """
    Fetch and initialize configured handler implementation and memoize it
    to avoid reinitialization.

    This method is re-entrant and can be called multiple times from
    e.g. Django application loader.
    """
    needs_init = force or not cls.implementation
    if needs_init:
        # Instantiate the handler class named in settings.AXES_HANDLER.
        cls.implementation = import_string(settings.AXES_HANDLER)()
    return cls.implementation
[ "def", "get_implementation", "(", "cls", ",", "force", ":", "bool", "=", "False", ")", "->", "AxesHandler", ":", "if", "force", "or", "not", "cls", ".", "implementation", ":", "cls", ".", "implementation", "=", "import_string", "(", "settings", ".", "AXES_...
44
27.2
def createNetwork(dataSource):
    """Create and initialize a network."""
    with open(_PARAMS_PATH, "r") as f:
        modelParams = yaml.safe_load(f)["modelParams"]

    # Network that will hold all the regions.
    network = Network()

    # Sensor region feeds records into the network.
    network.addRegion("sensor", "py.RecordSensor", '{}')
    sensorRegion = network.regions["sensor"].getSelf()
    sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
    sensorRegion.dataSource = dataSource

    # The SP input width must match the sensor region output width.
    modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()

    # Spatial pooler and temporal memory regions.
    network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"]))
    network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"]))

    # Classifier region; its concrete type comes from the params file.
    clName = "py.%s" % modelParams["clParams"].pop("regionName")
    network.addRegion("classifier", clName, json.dumps(modelParams["clParams"]))

    # Wire everything together.
    createSensorToClassifierLinks(network, "sensor", "classifier")
    createDataOutLink(network, "sensor", "SP")
    createFeedForwardLink(network, "SP", "TM")
    createFeedForwardLink(network, "TM", "classifier")
    # Reset links are optional, since the sensor region does not send resets.
    createResetLink(network, "sensor", "SP")
    createResetLink(network, "sensor", "TM")

    # Make sure all objects are initialized.
    network.initialize()
    return network
[ "def", "createNetwork", "(", "dataSource", ")", ":", "with", "open", "(", "_PARAMS_PATH", ",", "\"r\"", ")", "as", "f", ":", "modelParams", "=", "yaml", ".", "safe_load", "(", "f", ")", "[", "\"modelParams\"", "]", "# Create a network that will hold the regions....
37.05
22.15
def adapt_logger(logger):
    """
    Adapt our custom logger.BaseLogger object into a standard
    logging.Logger object.

    Adaptations are:
        - NoOpLogger turns into a logger with a single NullHandler.
        - SimpleLogger turns into a logger with a StreamHandler and level.

    Args:
        logger: Possibly a logger.BaseLogger, or a standard python
                logging.Logger.

    Returns: a standard python logging.Logger.
    """
    # Already a standard logger: nothing to adapt.
    if isinstance(logger, logging.Logger):
        return logger

    # Our wrapper classes carry the standard logger they created.
    if isinstance(logger, (SimpleLogger, NoOpLogger)):
        return logger.logger

    # Unknown type: hand it back unchanged because we can't adapt it.
    return logger
[ "def", "adapt_logger", "(", "logger", ")", ":", "if", "isinstance", "(", "logger", ",", "logging", ".", "Logger", ")", ":", "return", "logger", "# Use the standard python logger created by these classes.", "if", "isinstance", "(", "logger", ",", "(", "SimpleLogger",...
29.26087
24.826087
def mmGetPlotStability(self, title="Stability", showReset=False, resetShading=0.25):
    """
    Returns plot of the overlap metric between union SDRs within a sequence.
    @param title an optional title for the figure
    @return (Plot) plot
    """
    plot = Plot(self, title)
    self._mmComputeSequenceRepresentationData()
    overlaps = self._mmData["stabilityConfusion"]

    # Top panel: overlap over time (sorted descending); bottom: distribution.
    plot.addGraph(sorted(overlaps, reverse=True),
                  position=211,
                  xlabel="Time steps", ylabel="Overlap")
    plot.addHistogram(overlaps,
                      position=212,
                      bins=100,
                      xlabel="Overlap", ylabel="# time steps")
    return plot
[ "def", "mmGetPlotStability", "(", "self", ",", "title", "=", "\"Stability\"", ",", "showReset", "=", "False", ",", "resetShading", "=", "0.25", ")", ":", "plot", "=", "Plot", "(", "self", ",", "title", ")", "self", ".", "_mmComputeSequenceRepresentationData", ...
38.444444
12.111111
def AIC(N, rho, k):
    r"""Akaike Information Criterion

    :param rho: rho at order k
    :param N: sample size
    :param k: AR order.

    If k is the AR order and N the size of the sample, then Akaike
    criterion is

    .. math:: AIC(k) = \log(\rho_k) + 2\frac{k+1}{N}

    ::

        AIC(64, [0.5,0.3,0.2], [1,2,3])

    :validation: double checked versus octave.
    """
    from numpy import array, log

    orders = array(k)
    # +1: convention check — agrees with octave.
    return N * log(array(rho)) + 2. * (orders + 1)
[ "def", "AIC", "(", "N", ",", "rho", ",", "k", ")", ":", "from", "numpy", "import", "log", ",", "array", "#k+1 #todo check convention. agrees with octave", "res", "=", "N", "*", "log", "(", "array", "(", "rho", ")", ")", "+", "2.", "*", "(", "array", ...
23.045455
22.181818
def get_json(self, force=False, silent=False, cache=True):
    """Parse :attr:`data` as JSON.

    If the mimetype does not indicate JSON
    (:mimetype:`application/json`, see :meth:`is_json`), this
    returns ``None``.

    If parsing fails, :meth:`on_json_loading_failed` is called and
    its return value is used as the return value.

    :param force: Ignore the mimetype and always try to parse JSON.
    :param silent: Silence parsing errors and return ``None``
        instead.
    :param cache: Store the parsed JSON to return for subsequent
        calls.
    """
    # _cached_json is a 2-tuple indexed by bool(silent): slot 0 caches the
    # non-silent result, slot 1 the silent result. Ellipsis marks "not
    # cached yet" (None is a legitimate cached value).
    if cache and self._cached_json[silent] is not Ellipsis:
        return self._cached_json[silent]

    if not (force or self.is_json):
        return None

    data = self._get_data_for_json(cache=cache)

    try:
        rv = self.json_module.loads(data)
    except ValueError as e:
        if silent:
            # Silent failure caches None in the silent slot only; the
            # non-silent slot keeps its previous state.
            rv = None
            if cache:
                normal_rv, _ = self._cached_json
                self._cached_json = (normal_rv, rv)
        else:
            # Non-silent failure delegates to the error hook; its result is
            # cached in the non-silent slot only.
            rv = self.on_json_loading_failed(e)
            if cache:
                _, silent_rv = self._cached_json
                self._cached_json = (rv, silent_rv)
    else:
        # Success populates both slots: the parsed value is valid
        # regardless of the silent flag.
        if cache:
            self._cached_json = (rv, rv)

    return rv
[ "def", "get_json", "(", "self", ",", "force", "=", "False", ",", "silent", "=", "False", ",", "cache", "=", "True", ")", ":", "if", "cache", "and", "self", ".", "_cached_json", "[", "silent", "]", "is", "not", "Ellipsis", ":", "return", "self", ".", ...
31.590909
20.795455
def _cfg(key, default=None):
    '''
    Return the requested value from the aws_kms key in salt configuration.

    If it's not set, return the default.
    '''
    # Prefer the loaded config.get execution module; fall back to raw opts.
    get_config = __salt__.get('config.get', __opts__.get)
    return get_config('aws_kms', {}).get(key, default)
[ "def", "_cfg", "(", "key", ",", "default", "=", "None", ")", ":", "root_cfg", "=", "__salt__", ".", "get", "(", "'config.get'", ",", "__opts__", ".", "get", ")", "kms_cfg", "=", "root_cfg", "(", "'aws_kms'", ",", "{", "}", ")", "return", "kms_cfg", "...
31.555556
19.333333
def cli_default_perms(self, *args):
    """Show default permissions for all schemata"""
    for key, item in schemastore.items():
        # self.log(item, pretty=True)
        schema = item['schema']
        if schema.get('no_perms', False):
            self.log('Schema without permissions:', key)
            continue
        try:
            perms = schema['properties']['perms']['properties']
            if perms == {}:
                self.log('Schema:', item, pretty=True)
            self.log(
                'Schema:', key,
                'read', perms['read']['default'],
                'write', perms['write']['default'],
                'list', perms['list']['default'],
                'create', schema['roles_create']
            )
        except KeyError as e:
            # Schema is missing one of the expected permission keys.
            self.log('Fishy schema found:', key, e, lvl=error)
            self.log(item, pretty=True)
[ "def", "cli_default_perms", "(", "self", ",", "*", "args", ")", ":", "for", "key", ",", "item", "in", "schemastore", ".", "items", "(", ")", ":", "# self.log(item, pretty=True)", "if", "item", "[", "'schema'", "]", ".", "get", "(", "'no_perms'", ",", "Fa...
40.565217
15.913043
def camelise(text, capital_first=True):
    """Convert lower_underscore to CamelCase."""
    def camelcase():
        # First piece stays lowercase when capital_first is False;
        # all subsequent pieces are capitalised.
        if not capital_first:
            yield str.lower
        while True:
            yield str.capitalize

    if istype(text, 'unicode'):
        text = text.encode('utf8')

    transform = camelcase()
    # Empty pieces (double underscores) are kept as literal underscores.
    return "".join(
        next(transform)(piece) if piece else '_'
        for piece in text.split("_")
    )
[ "def", "camelise", "(", "text", ",", "capital_first", "=", "True", ")", ":", "def", "camelcase", "(", ")", ":", "if", "not", "capital_first", ":", "yield", "str", ".", "lower", "while", "True", ":", "yield", "str", ".", "capitalize", "if", "istype", "(...
28.230769
16.846154
def acquisition_function(self, x):
    """
    Takes an acquisition and weights it so the domain and cost are
    taken into account.
    """
    raw_acqu = self._compute_acq(x)
    cost_x, _ = self.cost_withGradients(x)
    # Zero out constraint-violating points, then normalise by cost.
    constrained = raw_acqu * self.space.indicator_constraints(x)
    return -constrained / cost_x
[ "def", "acquisition_function", "(", "self", ",", "x", ")", ":", "f_acqu", "=", "self", ".", "_compute_acq", "(", "x", ")", "cost_x", ",", "_", "=", "self", ".", "cost_withGradients", "(", "x", ")", "return", "-", "(", "f_acqu", "*", "self", ".", "spa...
42.142857
13.285714
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
    """Convert the job description to init params that can be handled by the
    class constructor

    Args:
        job_details: the returned job details from a describe_training_job
            API call.
        model_channel_name (str): Name of the channel where pre-trained
            model data will be downloaded.

    Returns:
        dictionary: The transformed init_params
    """
    init_params = super(RLEstimator, cls)\
        ._prepare_init_params_from_job_description(job_details, model_channel_name)

    image_name = init_params.pop('image')
    framework, _, tag, _ = fw_utils.framework_name_from_image(image_name)

    if not framework:
        # Not one of our officially supported images; just keep the raw
        # image reference in the init params.
        init_params['image_name'] = image_name
        return init_params

    toolkit, toolkit_version = cls._toolkit_and_version_from_tag(tag)

    if not cls._is_combination_supported(toolkit, toolkit_version, framework):
        training_job_name = init_params['base_job_name']
        raise ValueError(
            "Training job: {} didn't use image for requested framework".format(
                training_job_name)
        )

    init_params['toolkit'] = RLToolkit(toolkit)
    init_params['toolkit_version'] = toolkit_version
    init_params['framework'] = RLFramework(framework)
    return init_params
[ "def", "_prepare_init_params_from_job_description", "(", "cls", ",", "job_details", ",", "model_channel_name", "=", "None", ")", ":", "init_params", "=", "super", "(", "RLEstimator", ",", "cls", ")", ".", "_prepare_init_params_from_job_description", "(", "job_details", ...
43.378378
27.621622
def references(self):
    """ list: External links, or references, listed anywhere on the \
    MediaWiki page

    Note:
        Not settable

    Note:
        May include external links within page that are not \
        technically cited anywhere """
    if self._references is None:
        # Lazily populate on first access.
        self._references = []
        self.__pull_combined_properties()
    return self._references
[ "def", "references", "(", "self", ")", ":", "if", "self", ".", "_references", "is", "None", ":", "self", ".", "_references", "=", "list", "(", ")", "self", ".", "__pull_combined_properties", "(", ")", "return", "self", ".", "_references" ]
37.583333
11.333333
def trim_data(xmin, xmax, xdata, *args):
    """
    Removes all the data except that in which xdata is between xmin and
    xmax. This does not mutilate the input arrays, and additional arrays
    can be supplied via args (provided they match xdata in shape)

    xmin and xmax can be None
    """
    xdata = _n.asarray(xdata)

    # Open-ended bounds default to the data extremes.
    if xmin is None:
        xmin = min(xdata)
    if xmax is None:
        xmax = max(xdata)

    # Indices of the points satisfying the trim condition.
    keep = _n.argwhere((xdata >= xmin) & (xdata <= xmax)).transpose()[0]

    trimmed = [xdata[keep]]
    for extra in args:
        trimmed.append(_n.asarray(extra)[keep])
    return trimmed
[ "def", "trim_data", "(", "xmin", ",", "xmax", ",", "xdata", ",", "*", "args", ")", ":", "# make sure it's a numpy array", "if", "not", "isinstance", "(", "xdata", ",", "_n", ".", "ndarray", ")", ":", "xdata", "=", "_n", ".", "array", "(", "xdata", ")",...
29.1
20.5
def read_binary_matrix(filename): """Reads and returns binary formatted matrix stored in filename. The file format is described on the data set page: https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/ Args: filename: String with path to the file. Returns: Numpy array contained in the file. """ with tf.io.gfile.GFile(filename, "rb") as f: s = f.read() # Data is stored in little-endian byte order. int32_dtype = np.dtype("int32").newbyteorder("<") # The first 4 bytes contain a magic code that specifies the data type. magic = int(np.frombuffer(s, dtype=int32_dtype, count=1)) if magic == 507333717: data_dtype = np.dtype("uint8") # uint8 does not have a byte order. elif magic == 507333716: data_dtype = np.dtype("int32").newbyteorder("<") else: raise ValueError("Invalid magic value for data type!") # The second 4 bytes contain an int32 with the number of dimensions of the # stored array. ndim = int(np.frombuffer(s, dtype=int32_dtype, count=1, offset=4)) # The next ndim x 4 bytes contain the shape of the array in int32. dims = np.frombuffer(s, dtype=int32_dtype, count=ndim, offset=8) # If the array has less than three dimensions, three int32 are still used to # save the shape info (remaining int32 are simply set to 1). The shape info # hence uses max(3, ndim) bytes. bytes_used_for_shape_info = max(3, ndim) * 4 # The remaining bytes are the array. data = np.frombuffer( s, dtype=data_dtype, offset=8 + bytes_used_for_shape_info) return data.reshape(tuple(dims))
[ "def", "read_binary_matrix", "(", "filename", ")", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "s", "=", "f", ".", "read", "(", ")", "# Data is stored in little-endian byte order.", "int32_...
36.418605
22.627907
def _step5(self, word): """step5() removes a final -e if m() > 1, and changes -ll to -l if m() > 1. """ if word[-1] == 'e': a = self._m(word, len(word)-1) if a > 1 or (a == 1 and not self._cvc(word, len(word)-2)): word = word[:-1] if word.endswith('ll') and self._m(word, len(word)-1) > 1: word = word[:-1] return word
[ "def", "_step5", "(", "self", ",", "word", ")", ":", "if", "word", "[", "-", "1", "]", "==", "'e'", ":", "a", "=", "self", ".", "_m", "(", "word", ",", "len", "(", "word", ")", "-", "1", ")", "if", "a", ">", "1", "or", "(", "a", "==", "...
34
16.083333
def get_reqv(self): """ :returns: an instance of class:`RjbEquivalent` if reqv_hdf5 is set """ if 'reqv' not in self.inputs: return return {key: valid.RjbEquivalent(value) for key, value in self.inputs['reqv'].items()}
[ "def", "get_reqv", "(", "self", ")", ":", "if", "'reqv'", "not", "in", "self", ".", "inputs", ":", "return", "return", "{", "key", ":", "valid", ".", "RjbEquivalent", "(", "value", ")", "for", "key", ",", "value", "in", "self", ".", "inputs", "[", ...
34.875
13.625
def _set_show_mpls_te_path(self, v, load=False): """ Setter method for show_mpls_te_path, mapped from YANG variable /brocade_mpls_rpc/show_mpls_te_path (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_mpls_te_path is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_mpls_te_path() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_mpls_te_path.show_mpls_te_path, is_leaf=True, yang_name="show-mpls-te-path", rest_name="show-mpls-te-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsTePath'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """show_mpls_te_path must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_mpls_te_path.show_mpls_te_path, is_leaf=True, yang_name="show-mpls-te-path", rest_name="show-mpls-te-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsTePath'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", }) self.__show_mpls_te_path = t if hasattr(self, '_set'): self._set()
[ "def", "_set_show_mpls_te_path", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",",...
75
35.227273
def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction)
[ "def", "handle_pi", "(", "self", ",", "text", ")", ":", "if", "text", "[", ":", "3", "]", "==", "\"xml\"", ":", "text", "=", "u\"xml version='1.0' encoding='%SOUP-ENCODING%'\"", "self", ".", "_toStringSubclass", "(", "text", ",", "ProcessingInstruction", ")" ]
51.571429
14.285714
def open_url(url, headers=None): """ Opens a URL. If headers are passed as argument, no check is performed and the URL will be opened. @param url: the URL to open @type url: string @param headers: the headers to use @type headers: dictionary @return: a file-like object as returned by urllib2.urlopen. """ request = urllib2.Request(url) if headers: for key, value in headers.items(): request.add_header(key, value) return URL_OPENER.open(request)
[ "def", "open_url", "(", "url", ",", "headers", "=", "None", ")", ":", "request", "=", "urllib2", ".", "Request", "(", "url", ")", "if", "headers", ":", "for", "key", ",", "value", "in", "headers", ".", "items", "(", ")", ":", "request", ".", "add_h...
27.777778
15.666667
def clip(self, min=None, max=None): """ Clip values above and below. Parameters ---------- min : scalar or array-like Minimum value. If array, will be broadcasted max : scalar or array-like Maximum value. If array, will be broadcasted. """ return self._constructor( self.values.clip(min=min, max=max)).__finalize__(self)
[ "def", "clip", "(", "self", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "values", ".", "clip", "(", "min", "=", "min", ",", "max", "=", "max", ")", ")", ".", "__finalize__",...
29.214286
15.071429
def unpackSample(self, rawData): """ unpacks a single sample of data (where sample length is based on the currently enabled sensors). :param rawData: the data to convert :return: a converted data set. """ length = len(rawData) # TODO error if not multiple of 2 # logger.debug(">> unpacking sample %d length %d", self._sampleIdx, length) unpacked = struct.unpack(">" + ('h' * (length // 2)), memoryview(bytearray(rawData)).tobytes()) # store the data in a dictionary mpu6050 = collections.OrderedDict() mpu6050[SAMPLE_TIME] = self._sampleIdx / self.fs sensorIdx = 0 if self.isAccelerometerEnabled(): mpu6050[ACCEL_X] = unpacked[sensorIdx] * self._accelerationFactor sensorIdx += 1 mpu6050[ACCEL_Y] = unpacked[sensorIdx] * self._accelerationFactor sensorIdx += 1 mpu6050[ACCEL_Z] = unpacked[sensorIdx] * self._accelerationFactor sensorIdx += 1 if self.isTemperatureEnabled(): mpu6050[TEMP] = unpacked[sensorIdx] * self._temperatureGain + self._temperatureOffset sensorIdx += 1 if self.isGyroEnabled(): mpu6050[GYRO_X] = unpacked[sensorIdx] * self._gyroFactor sensorIdx += 1 mpu6050[GYRO_Y] = unpacked[sensorIdx] * self._gyroFactor sensorIdx += 1 mpu6050[GYRO_Z] = unpacked[sensorIdx] * self._gyroFactor sensorIdx += 1 # TODO should we send as a dict so the keys are available? output = list(mpu6050.values()) self._sampleIdx += 1 # logger.debug("<< unpacked sample length %d into vals size %d", length, len(output)) return output
[ "def", "unpackSample", "(", "self", ",", "rawData", ")", ":", "length", "=", "len", "(", "rawData", ")", "# TODO error if not multiple of 2", "# logger.debug(\">> unpacking sample %d length %d\", self._sampleIdx, length)", "unpacked", "=", "struct", ".", "unpack", "(", "\...
45.473684
20.631579
def _poly(self, clade, merge_compressed): """ Function to resolve polytomies for a given parent node. If the number of the direct decendants is less than three (not a polytomy), does nothing. Otherwise, for each pair of nodes, assess the possible LH increase which could be gained by merging the two nodes. The increase in the LH is basically the tradeoff between the gain of the LH due to the changing the branch lenghts towards the optimal values and the decrease due to the introduction of the new branch with zero optimal length. """ from .branch_len_interpolator import BranchLenInterpolator zero_branch_slope = self.gtr.mu*self.seq_len def _c_gain(t, n1, n2, parent): """ cost gain if nodes n1, n2 are joined and their parent is placed at time t cost gain = (LH loss now) - (LH loss when placed at time t) """ cg2 = n2.branch_length_interpolator(parent.time_before_present - n2.time_before_present) - n2.branch_length_interpolator(t - n2.time_before_present) cg1 = n1.branch_length_interpolator(parent.time_before_present - n1.time_before_present) - n1.branch_length_interpolator(t - n1.time_before_present) cg_new = - zero_branch_slope * (parent.time_before_present - t) # loss in LH due to the new branch return -(cg2+cg1+cg_new) def cost_gain(n1, n2, parent): """ cost gained if the two nodes would have been connected. 
""" try: cg = sciopt.minimize_scalar(_c_gain, bounds=[max(n1.time_before_present,n2.time_before_present), parent.time_before_present], method='Bounded',args=(n1,n2, parent)) return cg['x'], - cg['fun'] except: self.logger("TreeTime._poly.cost_gain: optimization of gain failed", 3, warn=True) return parent.time_before_present, 0.0 def merge_nodes(source_arr, isall=False): mergers = np.array([[cost_gain(n1,n2, clade) if i1<i2 else (0.0,-1.0) for i1,n1 in enumerate(source_arr)] for i2, n2 in enumerate(source_arr)]) LH = 0 while len(source_arr) > 1 + int(isall): # max possible gains of the cost when connecting the nodes: # this is only a rough approximation because it assumes the new node positions # to be optimal new_positions = mergers[:,:,0] cost_gains = mergers[:,:,1] # set zero to large negative value and find optimal pair np.fill_diagonal(cost_gains, -1e11) idxs = np.unravel_index(cost_gains.argmax(),cost_gains.shape) if (idxs[0] == idxs[1]) or cost_gains.max()<0: self.logger("TreeTime._poly.merge_nodes: node is not fully resolved "+clade.name,4) return LH n1, n2 = source_arr[idxs[0]], source_arr[idxs[1]] LH += cost_gains[idxs] new_node = Phylo.BaseTree.Clade() # fix positions and branch lengths new_node.time_before_present = new_positions[idxs] new_node.branch_length = clade.time_before_present - new_node.time_before_present new_node.clades = [n1,n2] n1.branch_length = new_node.time_before_present - n1.time_before_present n2.branch_length = new_node.time_before_present - n2.time_before_present # set parameters for the new node new_node.up = clade n1.up = new_node n2.up = new_node if hasattr(clade, "cseq"): new_node.cseq = clade.cseq self._store_compressed_sequence_to_node(new_node) new_node.mutations = [] new_node.mutation_length = 0.0 new_node.branch_length_interpolator = BranchLenInterpolator(new_node, self.gtr, one_mutation=self.one_mutation, branch_length_mode = self.branch_length_mode) clade.clades.remove(n1) clade.clades.remove(n2) 
clade.clades.append(new_node) self.logger('TreeTime._poly.merge_nodes: creating new node as child of '+clade.name,3) self.logger("TreeTime._poly.merge_nodes: Delta-LH = " + str(cost_gains[idxs].round(3)), 3) # and modify source_arr array for the next loop if len(source_arr)>2: # if more than 3 nodes in polytomy, replace row/column for ii in np.sort(idxs)[::-1]: tmp_ind = np.arange(mergers.shape[0])!=ii mergers = mergers[tmp_ind].swapaxes(0,1) mergers = mergers[tmp_ind].swapaxes(0,1) source_arr.remove(n1) source_arr.remove(n2) new_gains = np.array([[cost_gain(n1,new_node, clade) for n1 in source_arr]]) mergers = np.vstack((mergers, new_gains)).swapaxes(0,1) source_arr.append(new_node) new_gains = np.array([[cost_gain(n1,new_node, clade) for n1 in source_arr]]) mergers = np.vstack((mergers, new_gains)).swapaxes(0,1) else: # otherwise just recalculate matrix source_arr.remove(n1) source_arr.remove(n2) source_arr.append(new_node) mergers = np.array([[cost_gain(n1,n2, clade) for n1 in source_arr] for n2 in source_arr]) return LH stretched = [c for c in clade.clades if c.mutation_length < c.clock_length] compressed = [c for c in clade.clades if c not in stretched] if len(stretched)==1 and merge_compressed is False: return 0.0 LH = merge_nodes(stretched, isall=len(stretched)==len(clade.clades)) if merge_compressed and len(compressed)>1: LH += merge_nodes(compressed, isall=len(compressed)==len(clade.clades)) return LH
[ "def", "_poly", "(", "self", ",", "clade", ",", "merge_compressed", ")", ":", "from", ".", "branch_len_interpolator", "import", "BranchLenInterpolator", "zero_branch_slope", "=", "self", ".", "gtr", ".", "mu", "*", "self", ".", "seq_len", "def", "_c_gain", "("...
50.788618
28.00813
def get_max_instances_of_usb_controller_type(self, chipset, type_p): """Returns the maximum number of USB controller instances which can be configured for each VM. This corresponds to the number of USB controllers one can have. Value may depend on chipset type used. in chipset of type :class:`ChipsetType` The chipset type to get the value for. in type_p of type :class:`USBControllerType` The USB controller type to get the value for. return max_instances of type int The maximum number of instances for the given USB controller type. """ if not isinstance(chipset, ChipsetType): raise TypeError("chipset can only be an instance of type ChipsetType") if not isinstance(type_p, USBControllerType): raise TypeError("type_p can only be an instance of type USBControllerType") max_instances = self._call("getMaxInstancesOfUSBControllerType", in_p=[chipset, type_p]) return max_instances
[ "def", "get_max_instances_of_usb_controller_type", "(", "self", ",", "chipset", ",", "type_p", ")", ":", "if", "not", "isinstance", "(", "chipset", ",", "ChipsetType", ")", ":", "raise", "TypeError", "(", "\"chipset can only be an instance of type ChipsetType\"", ")", ...
45.347826
22.565217
def setChatPhoto(self, chat_id, photo): """ See: https://core.telegram.org/bots/api#setchatphoto """ p = _strip(locals(), more=['photo']) return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo)
[ "def", "setChatPhoto", "(", "self", ",", "chat_id", ",", "photo", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ",", "more", "=", "[", "'photo'", "]", ")", "return", "self", ".", "_api_request_with_file", "(", "'setChatPhoto'", ",", "_rectify", ...
59.5
13
def create_index(self, fields, no_term_offsets=False, no_field_flags=False, stopwords = None): """ Create the search index. The index must not already exist. ### Parameters: - **fields**: a list of TextField or NumericField objects - **no_term_offsets**: If true, we will not save term offsets in the index - **no_field_flags**: If true, we will not save field flags that allow searching in specific fields - **stopwords**: If not None, we create the index with this custom stopword list. The list can be empty """ args = [self.CREATE_CMD, self.index_name] if no_term_offsets: args.append(self.NOOFFSETS) if no_field_flags: args.append(self.NOFIELDS) if stopwords is not None and isinstance(stopwords, (list, tuple, set)): args += [self.STOPWORDS, len(stopwords)] if len(stopwords) > 0: args += list(stopwords) args.append('SCHEMA') args += list(itertools.chain(*(f.redis_args() for f in fields))) return self.redis.execute_command(*args)
[ "def", "create_index", "(", "self", ",", "fields", ",", "no_term_offsets", "=", "False", ",", "no_field_flags", "=", "False", ",", "stopwords", "=", "None", ")", ":", "args", "=", "[", "self", ".", "CREATE_CMD", ",", "self", ".", "index_name", "]", "if",...
40.214286
23.785714
def normalize_choices(db_values, field_name, app=DEFAULT_APP, model_name='', human_readable=True, none_value='Null', blank_value='Unknown', missing_value='Unknown DB Code'): '''Output the human-readable strings associated with the list of database values for a model field. Uses the translation dictionary `CHOICES_<FIELD_NAME>` attribute for the given `model_name`. In addition, translate `None` into 'Null', or whatever string is indicated by `none_value`. ''' if app and isinstance(app, basestring): app = get_app(app) if not db_values: return try: db_values = dict(db_values) except: raise NotImplemented("This function can only handle objects that can be converted to a dict, not lists or querysets returned by django `.values().aggregate()`.") if not field_name in db_values: return db_values if human_readable: for i, db_value in enumerate(db_values[field_name]): if db_value in (None, 'None') or app in (None, 'None'): db_values[field_name][i] = none_value continue if isinstance(db_value, basestring): normalized_code = str(db_value).strip().upper() # the app is actually the models.py module, NOT the app_name package # so don't look in app.models, you'll only find django.db.models there (app_name.models.models) choices = getattr(app, 'CHOICES_%s' % field_name.upper(), []) normalized_name = None if choices: normalized_name = str(choices.get(normalized_code, missing_value)).strip() elif normalized_code: normalized_name = 'DB Code: "%s"' % normalized_code db_values[field_name][i] = normalized_name or blank_value else: raise NotImplemented("This function can only convert database choices to human-readable strings.") return db_values
[ "def", "normalize_choices", "(", "db_values", ",", "field_name", ",", "app", "=", "DEFAULT_APP", ",", "model_name", "=", "''", ",", "human_readable", "=", "True", ",", "none_value", "=", "'Null'", ",", "blank_value", "=", "'Unknown'", ",", "missing_value", "="...
52.162162
32.378378
def get_default_config(self): """ Returns the default collector settings """ config = super(SidekiqWebCollector, self).get_default_config() config.update({ 'host': 'localhost', 'port': 9999, 'byte_unit': ['byte'], }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "SidekiqWebCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'host'", ":", "'localhost'", ",", "'port'", ":", "9999", ",",...
28.363636
12.909091
def get(app, name): '''Get a backend given its name''' backend = get_all(app).get(name) if not backend: msg = 'Harvest backend "{0}" is not registered'.format(name) raise EntrypointError(msg) return backend
[ "def", "get", "(", "app", ",", "name", ")", ":", "backend", "=", "get_all", "(", "app", ")", ".", "get", "(", "name", ")", "if", "not", "backend", ":", "msg", "=", "'Harvest backend \"{0}\" is not registered'", ".", "format", "(", "name", ")", "raise", ...
33.142857
14.857143
def defaults(self): """ Return a nested dicionary of all registered factory defaults. """ return { key: get_defaults(value) for key, value in self.all.items() }
[ "def", "defaults", "(", "self", ")", ":", "return", "{", "key", ":", "get_defaults", "(", "value", ")", "for", "key", ",", "value", "in", "self", ".", "all", ".", "items", "(", ")", "}" ]
24.111111
17.222222
def seconds_remaining(self, ttl): """Return number of seconds left before Imgur API needs to be queried for this instance. :param int ttl: Number of seconds before this is considered out of date. :return: Seconds left before this is expired. 0 indicated update needed (no negatives). :rtype: int """ return max(0, ttl - (int(time.time()) - self.mod_time))
[ "def", "seconds_remaining", "(", "self", ",", "ttl", ")", ":", "return", "max", "(", "0", ",", "ttl", "-", "(", "int", "(", "time", ".", "time", "(", ")", ")", "-", "self", ".", "mod_time", ")", ")" ]
44.111111
25.111111
def unhold(name=None, pkgs=None, **kwargs): ''' Remove specified package lock. root operate on a different root directory. CLI Example: .. code-block:: bash salt '*' pkg.remove_lock <package name> salt '*' pkg.remove_lock <package1>,<package2>,<package3> salt '*' pkg.remove_lock pkgs='["foo", "bar"]' ''' ret = {} root = kwargs.get('root') if (not name and not pkgs) or (name and pkgs): raise CommandExecutionError('Name or packages must be specified.') elif name: pkgs = [name] locks = list_locks(root) try: pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys()) except MinionError as exc: raise CommandExecutionError(exc) removed = [] missing = [] for pkg in pkgs: if locks.get(pkg): removed.append(pkg) ret[pkg]['comment'] = 'Package {0} is no longer held.'.format(pkg) else: missing.append(pkg) ret[pkg]['comment'] = 'Package {0} unable to be unheld.'.format(pkg) if removed: __zypper__(root=root).call('rl', *removed) return ret
[ "def", "unhold", "(", "name", "=", "None", ",", "pkgs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "root", "=", "kwargs", ".", "get", "(", "'root'", ")", "if", "(", "not", "name", "and", "not", "pkgs", ")", "or", "(...
26.714286
23.857143
async def _create_rev_reg(self, rr_id: str, rr_size: int = None) -> None: """ Create revocation registry and new tails file (and association to corresponding revocation registry definition via symbolic link) for input revocation registry identifier. :param rr_id: revocation registry identifier :param rr_size: revocation registry size (defaults to 256) """ LOGGER.debug('Issuer._create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size) rr_size = rr_size or 256 (cd_id, tag) = rev_reg_id2cred_def_id__tag(rr_id) LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id) tails_writer_handle = await blob_storage.open_writer( 'default', json.dumps({ 'base_dir': Tails.dir(self._dir_tails, rr_id), 'uri_pattern': '' })) apriori = Tails.unlinked(self._dir_tails) (rr_id, rrd_json, rre_json) = await anoncreds.issuer_create_and_store_revoc_reg( self.wallet.handle, self.did, 'CL_ACCUM', tag, cd_id, json.dumps({ 'max_cred_num': rr_size, 'issuance_type': 'ISSUANCE_ON_DEMAND' }), tails_writer_handle) delta = Tails.unlinked(self._dir_tails) - apriori if len(delta) != 1: LOGGER.debug( 'Issuer._create_rev_reg: <!< Could not create tails file for rev reg id: %s', rr_id) raise CorruptTails('Could not create tails file for rev reg id {}'.format(rr_id)) tails_hash = basename(delta.pop()) Tails.associate(self._dir_tails, rr_id, tails_hash) with REVO_CACHE.lock: rrd_req_json = await ledger.build_revoc_reg_def_request(self.did, rrd_json) await self._sign_submit(rrd_req_json) await self._get_rev_reg_def(rr_id) # add to cache en passant rre_req_json = await ledger.build_revoc_reg_entry_request(self.did, rr_id, 'CL_ACCUM', rre_json) await self._sign_submit(rre_req_json) LOGGER.debug('Issuer._create_rev_reg <<<')
[ "async", "def", "_create_rev_reg", "(", "self", ",", "rr_id", ":", "str", ",", "rr_size", ":", "int", "=", "None", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'Issuer._create_rev_reg >>> rr_id: %s, rr_size: %s'", ",", "rr_id", ",", "rr_size", ")", ...
42.156863
24.235294
def _cas_1(self): '''1 - The desired structure is entirely contained into one image.''' lonc = self._format_lon(self.lonm) latc = self._format_lat(self.latm) img = self._format_name_map(lonc, latc) img_map = BinaryTable(img, self.path_pdsfiles) return img_map.extract_grid(self.lonm, self.lonM, self.latm, self.latM)
[ "def", "_cas_1", "(", "self", ")", ":", "lonc", "=", "self", ".", "_format_lon", "(", "self", ".", "lonm", ")", "latc", "=", "self", ".", "_format_lat", "(", "self", ".", "latm", ")", "img", "=", "self", ".", "_format_name_map", "(", "lonc", ",", "...
39.777778
22.666667
def iter_insert_items(tree): """ Iterate over the items to insert from an INSERT statement """ if tree.list_values: keys = tree.attrs for values in tree.list_values: if len(keys) != len(values): raise SyntaxError( "Values '%s' do not match attributes " "'%s'" % (values, keys) ) yield dict(zip(keys, map(resolve, values))) elif tree.map_values: for item in tree.map_values: data = {} for (key, val) in item: data[key] = resolve(val) yield data else: raise SyntaxError("No insert data found")
[ "def", "iter_insert_items", "(", "tree", ")", ":", "if", "tree", ".", "list_values", ":", "keys", "=", "tree", ".", "attrs", "for", "values", "in", "tree", ".", "list_values", ":", "if", "len", "(", "keys", ")", "!=", "len", "(", "values", ")", ":", ...
36.111111
12.833333
def upload_template_and_reload(name): """ Uploads a template only if it has changed, and if so, reload the related service. """ template = get_templates()[name] local_path = template["local_path"] if not os.path.exists(local_path): project_root = os.path.dirname(os.path.abspath(__file__)) local_path = os.path.join(project_root, local_path) remote_path = template["remote_path"] reload_command = template.get("reload_command") owner = template.get("owner") mode = template.get("mode") remote_data = "" if exists(remote_path): with hide("stdout"): remote_data = sudo("cat %s" % remote_path, show=False) with open(local_path, "r") as f: local_data = f.read() # Escape all non-string-formatting-placeholder occurrences of '%': local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data) if "%(db_pass)s" in local_data: env.db_pass = db_pass() local_data %= env clean = lambda s: s.replace("\n", "").replace("\r", "").strip() if clean(remote_data) == clean(local_data): return upload_template(local_path, remote_path, env, use_sudo=True, backup=False) if owner: sudo("chown %s %s" % (owner, remote_path)) if mode: sudo("chmod %s %s" % (mode, remote_path)) if reload_command: sudo(reload_command)
[ "def", "upload_template_and_reload", "(", "name", ")", ":", "template", "=", "get_templates", "(", ")", "[", "name", "]", "local_path", "=", "template", "[", "\"local_path\"", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "local_path", ")", ":",...
38.685714
14.171429
def set_slats_level(self, slatsLevel=0.0, shutterLevel=None): """ sets the slats and shutter level Args: slatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed, shutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed, None = use the current value Returns: the result of the _restCall """ if shutterLevel is None: shutterLevel = self.shutterLevel data = { "channelIndex": 1, "deviceId": self.id, "slatsLevel": slatsLevel, "shutterLevel": shutterLevel, } return self._restCall("device/control/setSlatsLevel", json.dumps(data))
[ "def", "set_slats_level", "(", "self", ",", "slatsLevel", "=", "0.0", ",", "shutterLevel", "=", "None", ")", ":", "if", "shutterLevel", "is", "None", ":", "shutterLevel", "=", "self", ".", "shutterLevel", "data", "=", "{", "\"channelIndex\"", ":", "1", ","...
39.166667
20.111111
def plot(self, x, y, panel='top', xlabel=None, **kws): """plot after clearing current plot """ panel = self.get_panel(panel) panel.plot(x, y, **kws) if xlabel is not None: self.xlabel = xlabel if self.xlabel is not None: self.panel_bot.set_xlabel(self.xlabel)
[ "def", "plot", "(", "self", ",", "x", ",", "y", ",", "panel", "=", "'top'", ",", "xlabel", "=", "None", ",", "*", "*", "kws", ")", ":", "panel", "=", "self", ".", "get_panel", "(", "panel", ")", "panel", ".", "plot", "(", "x", ",", "y", ",", ...
39.5
7.375
def adduser(name, username, root=None): ''' Add a user in the group. name Name of the group to modify username Username to add to the group root Directory to chroot into CLI Example: .. code-block:: bash salt '*' group.adduser foo bar Verifies if a valid username 'bar' as a member of an existing group 'foo', if not then adds it. ''' on_redhat_5 = __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '5' on_suse_11 = __grains__.get('os_family') == 'Suse' and __grains__.get('osmajorrelease') == '11' if __grains__['kernel'] == 'Linux': if on_redhat_5: cmd = ['gpasswd', '-a', username, name] elif on_suse_11: cmd = ['usermod', '-A', name, username] else: cmd = ['gpasswd', '--add', username, name] if root is not None: cmd.extend(('--root', root)) else: cmd = ['usermod', '-G', name, username] if root is not None: cmd.extend(('-R', root)) retcode = __salt__['cmd.retcode'](cmd, python_shell=False) return not retcode
[ "def", "adduser", "(", "name", ",", "username", ",", "root", "=", "None", ")", ":", "on_redhat_5", "=", "__grains__", ".", "get", "(", "'os_family'", ")", "==", "'RedHat'", "and", "__grains__", ".", "get", "(", "'osmajorrelease'", ")", "==", "'5'", "on_s...
26.666667
23.952381
def get_supported(
        versions=None,  # type: Optional[List[str]]
        noarch=False,  # type: bool
        platform=None,  # type: Optional[str]
        impl=None,  # type: Optional[str]
        abi=None  # type: Optional[str]
):
    # type: (...) -> List[Pep425Tag]
    """Return a list of supported tags for each version specified in
    `versions`.

    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    :param platform: specify the exact platform you want valid
        tags for, or None. If None, use the local system platform.
    :param impl: specify the exact implementation you want valid
        tags for, or None. If None, use the local interpreter impl.
    :param abi: specify the exact abi you want valid
        tags for, or None. If None, use the local interpreter abi.
    """
    supported = []

    # Versions must be given with respect to the preference
    if versions is None:
        version_info = get_impl_version_info()
        versions = get_all_minor_versions_as_strings(version_info)

    impl = impl or get_abbr_impl()

    abis = []  # type: List[str]

    abi = abi or get_abi_tag()
    if abi:
        # the explicit/local ABI goes first, so it takes preference
        abis[0:0] = [abi]

    # collect stable-ABI tags (e.g. 'abi3') from the extension suffixes
    abi3s = set()
    for suffix in get_extension_suffixes():
        if suffix.startswith('.abi'):
            abi3s.add(suffix.split('.', 2)[1])

    abis.extend(sorted(list(abi3s)))

    abis.append('none')

    if not noarch:
        arch = platform or get_platform()
        arch_prefix, arch_sep, arch_suffix = arch.partition('_')
        if arch.startswith('macosx'):
            # support macosx-10.6-intel on macosx-10.9-x86_64
            match = _osx_arch_pat.match(arch)
            if match:
                name, major, minor, actual_arch = match.groups()
                tpl = '{}_{}_%i_%s'.format(name, major)
                arches = []
                # every earlier minor release of the same major is compatible
                for m in reversed(range(int(minor) + 1)):
                    for a in get_darwin_arches(int(major), m, actual_arch):
                        arches.append(tpl % (m, a))
            else:
                # arch pattern didn't match (?!)
                arches = [arch]
        elif arch_prefix == 'manylinux2010':
            # manylinux1 wheels run on most manylinux2010 systems with the
            # exception of wheels depending on ncurses. PEP 571 states
            # manylinux1 wheels should be considered manylinux2010 wheels:
            # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
            arches = [arch, 'manylinux1' + arch_sep + arch_suffix]
        elif platform is None:
            arches = []
            if is_manylinux2010_compatible():
                arches.append('manylinux2010' + arch_sep + arch_suffix)
            if is_manylinux1_compatible():
                arches.append('manylinux1' + arch_sep + arch_suffix)
            arches.append(arch)
        else:
            arches = [arch]

        # Current version, current API (built specifically for our Python):
        for abi in abis:
            for arch in arches:
                supported.append(('%s%s' % (impl, versions[0]), abi, arch))

        # abi3 modules compatible with older version of Python
        for version in versions[1:]:
            # abi3 was introduced in Python 3.2
            if version in {'31', '30'}:
                break
            for abi in abi3s:  # empty set if not Python 3
                for arch in arches:
                    supported.append(("%s%s" % (impl, version), abi, arch))

        # Has binaries, does not use the Python API:
        for arch in arches:
            supported.append(('py%s' % (versions[0][0]), 'none', arch))

    # No abi / arch, but requires our implementation:
    supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
    # Tagged specifically as being cross-version compatible
    # (with just the major version specified)
    supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))

    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))

    return supported
[ "def", "get_supported", "(", "versions", "=", "None", ",", "# type: Optional[List[str]]", "noarch", "=", "False", ",", "# type: bool", "platform", "=", "None", ",", "# type: Optional[str]", "impl", "=", "None", ",", "# type: Optional[str]", "abi", "=", "None", "# ...
38.915888
20.233645
def urlretrieve(self, url, filename, data=None):
    """
    Similar to urllib.urlretrieve or urllib.request.urlretrieve only that
    *filname* is required.

    :param url: URL to download.
    :param filename: Filename to save the content to.
    :param data: Valid URL-encoded data.
    :return: Tuple containing path and headers.
    """
    logger.info("saving: '%s' to '%s'", url, filename)
    if not _is_py3:
        # legacy urllib opener exposes retrieve() directly
        return self.opener2.retrieve(url, filename, data=data)
    return _urlretrieve_with_opener(self.opener, url, filename, data=data)
[ "def", "urlretrieve", "(", "self", ",", "url", ",", "filename", ",", "data", "=", "None", ")", ":", "logger", ".", "info", "(", "'saving: \\'%s\\' to \\'%s\\''", ",", "url", ",", "filename", ")", "if", "_is_py3", ":", "return", "_urlretrieve_with_opener", "(...
36.875
18.625
def get_alt_date_bug_totals(self, startday, endday, bug_ids, threshold=150):
    """Use previously fetched bug_ids to find bugs whose total failures in
    the [startday, endday] window meet the alternate-date threshold.

    Aggregates ``BugJobMap`` failure rows per bug and keeps only those with
    at least *threshold* failures (default 150, matching the original
    "150 failures in 21 days" policy; the window itself is set by the
    caller via *startday*/*endday*).

    :param startday: start of the date window passed to ``by_date``.
    :param endday: end of the date window.
    :param bug_ids: iterable of bug ids to restrict the aggregation to.
    :param threshold: minimum total failures for a bug to be included
        (parameterized from the previously hard-coded 150).
    :return: dict mapping bug_id -> total failure count.
    """
    bugs = (BugJobMap.failures.by_date(startday, endday)
            .filter(bug_id__in=bug_ids)
            .values('bug_id')
            .annotate(total=Count('id'))
            .values('bug_id', 'total'))

    return {bug['bug_id']: bug['total']
            for bug in bugs if bug['total'] >= threshold}
[ "def", "get_alt_date_bug_totals", "(", "self", ",", "startday", ",", "endday", ",", "bug_ids", ")", ":", "bugs", "=", "(", "BugJobMap", ".", "failures", ".", "by_date", "(", "startday", ",", "endday", ")", ".", "filter", "(", "bug_id__in", "=", "bug_ids", ...
54.6
19.9
def start(self, datas):
    """
    Starts the pipeline by connecting the input ``Pipers`` of the pipeline
    to the input data, connecting the pipeline and starting the ``NuMap``
    instances.

    The order of items in the "datas" argument sequence should correspond
    to the order of the input ``Pipers`` defined by ``Dagger._cmp`` and
    ``Piper.ornament``.

    Arguments:

        - datas(sequence) A sequence of external input data in the form of
          sequences or iterators.

    Raises ``PlumberError`` if the pipeline is already started, running or
    pausing.
    """
    # only a fully idle pipeline may be started
    if not self._started.isSet() and \
       not self._running.isSet() and \
       not self._pausing.isSet():

        # Plumber statistics
        self.stats = {}
        self.stats['start_time'] = None
        self.stats['run_time'] = None

        # connects input pipers to external data
        self.connect_inputs(datas)
        # connects pipers within the pipeline
        self.connect()

        # make pointers to results collected for pipers by imaps
        self.stats['pipers_tracked'] = {}
        for piper in self.postorder():
            # only track pipers whose imap exposes _tasks_tracked and that
            # opted in via piper.track
            if hasattr(piper.imap, '_tasks_tracked') and piper.track:
                self.stats['pipers_tracked'][piper] = \
                    [piper.imap._tasks_tracked[t.task] for t in piper.imap_tasks]

        self.stats['start_time'] = time()
        # starts the Dagger
        # this starts Pipers and NuMaps
        super(Plumber, self).start()
        # transitioning to started state
        self._started.set()
        self._finished.clear()
    else:
        raise PlumberError
[ "def", "start", "(", "self", ",", "datas", ")", ":", "if", "not", "self", ".", "_started", ".", "isSet", "(", ")", "and", "not", "self", ".", "_running", ".", "isSet", "(", ")", "and", "not", "self", ".", "_pausing", ".", "isSet", "(", ")", ":", ...
38.465116
16.744186
def is_valid_method_view(endpoint):
    """ Return True if endpoint's view class is a MethodView subclass """
    view_cls = endpoint.__dict__.get('view_class', None)
    try:
        return issubclass(view_cls, MethodView)
    except TypeError:
        # view_class missing (None) or not a class at all
        return False
[ "def", "is_valid_method_view", "(", "endpoint", ")", ":", "klass", "=", "endpoint", ".", "__dict__", ".", "get", "(", "'view_class'", ",", "None", ")", "try", ":", "return", "issubclass", "(", "klass", ",", "MethodView", ")", "except", "TypeError", ":", "r...
25.666667
10.777778
def _check_transition_target(self, transition): """Checks the validity of a transition target Checks whether the transition target is valid. :param rafcon.core.transition.Transition transition: The transition to be checked :return bool validity, str message: validity is True, when the transition is valid, False else. message gives more information especially if the transition is not valid """ to_state_id = transition.to_state to_outcome_id = transition.to_outcome if to_state_id == self.state_id: if to_outcome_id not in self.outcomes: return False, "to_outcome is not existing" else: if to_state_id not in self.states: return False, "to_state is not existing" if to_outcome_id is not None: return False, "to_outcome must be None as transition goes to child state" return True, "valid"
[ "def", "_check_transition_target", "(", "self", ",", "transition", ")", ":", "to_state_id", "=", "transition", ".", "to_state", "to_outcome_id", "=", "transition", ".", "to_outcome", "if", "to_state_id", "==", "self", ".", "state_id", ":", "if", "to_outcome_id", ...
41.217391
22.695652
def compress_pruned(table):
    """Compress table based on pruning mask.

    Only the rows/cols in which all of the elements are masked need to be
    pruned.
    """
    if not isinstance(table, np.ma.core.MaskedArray):
        # nothing to prune on an unmasked object
        return table
    if table.ndim == 0:
        return table.data
    if table.ndim == 1:
        return np.ma.compressed(table)

    # drop rows/columns that are masked in their entirety
    # (the [rows, :][:, cols] indexing assumes a 2-D table)
    keep_rows = ~table.mask.all(axis=1)
    keep_cols = ~table.mask.all(axis=0)
    pruned = table[keep_rows, :][:, keep_cols]

    # any remaining masked float entries become NaN
    if pruned.dtype == float and pruned.mask.any():
        pruned[pruned.mask] = np.nan
    return pruned
[ "def", "compress_pruned", "(", "table", ")", ":", "if", "not", "isinstance", "(", "table", ",", "np", ".", "ma", ".", "core", ".", "MaskedArray", ")", ":", "return", "table", "if", "table", ".", "ndim", "==", "0", ":", "return", "table", ".", "data",...
26.761905
17.380952