text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def load_builtin_slots(): ''' Helper function to load builtin slots from the data location ''' builtin_slots = {} for index, line in enumerate(open(BUILTIN_SLOTS_LOCATION)): o = line.strip().split('\t') builtin_slots[index] = {'name' : o[0], 'description' : o[1] } return builtin_slots
[ "def", "load_builtin_slots", "(", ")", ":", "builtin_slots", "=", "{", "}", "for", "index", ",", "line", "in", "enumerate", "(", "open", "(", "BUILTIN_SLOTS_LOCATION", ")", ")", ":", "o", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'",...
35
18.6
def init(args=None): """Initialize the rabit library with arguments""" if args is None: args = [] arr = (ctypes.c_char_p * len(args))() arr[:] = args _LIB.RabitInit(len(arr), arr)
[ "def", "init", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "[", "]", "arr", "=", "(", "ctypes", ".", "c_char_p", "*", "len", "(", "args", ")", ")", "(", ")", "arr", "[", ":", "]", "=", "args", "_LIB", "...
28.714286
13.428571
def get_core(self): """ Get an unsatisfiable core if the formula was previously unsatisfied. """ if self.maplesat and self.status == False: return pysolvers.maplecm_core(self.maplesat)
[ "def", "get_core", "(", "self", ")", ":", "if", "self", ".", "maplesat", "and", "self", ".", "status", "==", "False", ":", "return", "pysolvers", ".", "maplecm_core", "(", "self", ".", "maplesat", ")" ]
29.75
16.25
def check_subdomain(fqn): """ Verify that the given fqn is a subdomain >>> check_subdomain('a.b.c') True >>> check_subdomain(123) False >>> check_subdomain('a.b.c.d') False >>> check_subdomain('A.b.c') False >>> check_subdomain('abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a.b') True >>> check_subdomain('a.abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a') False >>> check_subdomain('a.b.cdabcdabcdabcdabcdabcdabcdabcdabcd') False >>> check_subdomain('a.b') False """ if type(fqn) not in [str, unicode]: return False if not is_subdomain(fqn): return False return True
[ "def", "check_subdomain", "(", "fqn", ")", ":", "if", "type", "(", "fqn", ")", "not", "in", "[", "str", ",", "unicode", "]", ":", "return", "False", "if", "not", "is_subdomain", "(", "fqn", ")", ":", "return", "False", "return", "True" ]
23.107143
21.607143
def get_authenticate_header(self, request): """ If a request is unauthenticated, determine the WWW-Authenticate header to use for 401 responses, if any. """ authenticators = self.get_authenticators() if authenticators: return authenticators[0].authenticate_header(request)
[ "def", "get_authenticate_header", "(", "self", ",", "request", ")", ":", "authenticators", "=", "self", ".", "get_authenticators", "(", ")", "if", "authenticators", ":", "return", "authenticators", "[", "0", "]", ".", "authenticate_header", "(", "request", ")" ]
40.625
11.375
def _isrc_long(name=None): """ Creates the grammar for a short ISRC code. ISRC stands for International Standard Recording Code, which is the standard ISO 3901. This stores information identifying a particular recording. This variant contain no separator for the parts, and follows the pattern: CCXXXYYNNNNN Where each code means: - CC: country code - XXX: registrant - YY: year - NNNNN: work id :param name: name for the field :return: grammar for an ISRC field """ config = CWRTables() if name is None: name = 'ISRC Field' country = config.get_data('isrc_country_code') # registrant = basic.alphanum(3) # year = pp.Regex('[0-9]{2}') # work_id = pp.Regex('[0-9]{5}') country_regex = '' for c in country: if len(country_regex) > 0: country_regex += '|' country_regex += c country_regex = '(' + country_regex + ')' field = pp.Regex(country_regex + '.{3}[0-9]{2}[0-9]{5}') # country.setName('ISO-2 Country Code') # registrant.setName('Registrant') # year.setName('Year') # work_id.setName('Work ID') field.setName(name) return field.setResultsName('isrc')
[ "def", "_isrc_long", "(", "name", "=", "None", ")", ":", "config", "=", "CWRTables", "(", ")", "if", "name", "is", "None", ":", "name", "=", "'ISRC Field'", "country", "=", "config", ".", "get_data", "(", "'isrc_country_code'", ")", "# registrant = basic.alp...
24.541667
20.041667
def _process_children_elems(elem, dic, subdic, container=dict, children="@children", **options): """ :param elem: ET Element object or None :param dic: <container> (dict[-like]) object converted from elem :param subdic: Sub <container> object converted from elem :param container: callble to make a container object :param children: Tag for children nodes :param options: Keyword options, see the description of :func:`elem_to_container` for more details. :return: None but updating dic and subdic as side effects """ cdics = [elem_to_container(c, container=container, **options) for c in elem] merge_attrs = options.get("merge_attrs", False) sdics = [container(elem.attrib) if merge_attrs else subdic] + cdics if _dicts_have_unique_keys(sdics): # ex. <a><b>1</b><c>c</c></a> dic[elem.tag] = _merge_dicts(sdics, container) elif not subdic: # There are no attrs nor text and only these children. dic[elem.tag] = cdics else: subdic[children] = cdics
[ "def", "_process_children_elems", "(", "elem", ",", "dic", ",", "subdic", ",", "container", "=", "dict", ",", "children", "=", "\"@children\"", ",", "*", "*", "options", ")", ":", "cdics", "=", "[", "elem_to_container", "(", "c", ",", "container", "=", "...
42.72
20
def delete_firewall_rule(self, datacenter_id, server_id, nic_id, firewall_rule_id): """ Removes a firewall rule from the NIC. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` :param nic_id: The unique ID of the NIC. :type nic_id: ``str`` :param firewall_rule_id: The unique ID of the firewall rule. :type firewall_rule_id: ``str`` """ response = self._perform_request( url='/datacenters/%s/servers/%s/nics/%s/firewallrules/%s' % ( datacenter_id, server_id, nic_id, firewall_rule_id), method='DELETE') return response
[ "def", "delete_firewall_rule", "(", "self", ",", "datacenter_id", ",", "server_id", ",", "nic_id", ",", "firewall_rule_id", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "url", "=", "'/datacenters/%s/servers/%s/nics/%s/firewallrules/%s'", "%", "(", ...
32.185185
18.259259
def play_track(self, track_id=DEFAULT_TRACK_ID, position=0): """Plays a track at the given position.""" self.publish( action='playTrack', resource='audioPlayback/player', publish_response=False, properties={'trackId': track_id, 'position': position} )
[ "def", "play_track", "(", "self", ",", "track_id", "=", "DEFAULT_TRACK_ID", ",", "position", "=", "0", ")", ":", "self", ".", "publish", "(", "action", "=", "'playTrack'", ",", "resource", "=", "'audioPlayback/player'", ",", "publish_response", "=", "False", ...
39.5
14.25
def list_to_json(source_list): """ Serialise all the items in source_list to json """ result = [] for item in source_list: result.append(item.to_json()) return result
[ "def", "list_to_json", "(", "source_list", ")", ":", "result", "=", "[", "]", "for", "item", "in", "source_list", ":", "result", ".", "append", "(", "item", ".", "to_json", "(", ")", ")", "return", "result" ]
23.875
10.375
def discover_filename(label, scopes=None): ''' Check the filesystem for the existence of a .plist file matching the job label. Optionally specify one or more scopes to search (default all). :param label: string :param scope: tuple or list or oneOf(USER, USER_ADMIN, DAEMON_ADMIN, USER_OS, DAEMON_OS) ''' if scopes is None: scopes = [k for k in PLIST_LOCATIONS] elif not isinstance(scopes, (list, tuple)): scopes = (scopes, ) for thisscope in scopes: plistfilename = compute_filename(label, thisscope) if os.path.isfile(plistfilename): return plistfilename return None
[ "def", "discover_filename", "(", "label", ",", "scopes", "=", "None", ")", ":", "if", "scopes", "is", "None", ":", "scopes", "=", "[", "k", "for", "k", "in", "PLIST_LOCATIONS", "]", "elif", "not", "isinstance", "(", "scopes", ",", "(", "list", ",", "...
37.411765
20.705882
def is_field_remote(model, field_name): """Check whether a given model field is a remote field. A remote field is the inverse of a one-to-many or a many-to-many relationship. Arguments: model: a Django model field_name: the name of a field Returns: True if `field_name` is a remote field, False otherwise. """ if not hasattr(model, '_meta'): # ephemeral model with no metaclass return False model_field = get_model_field(model, field_name) return isinstance(model_field, (ManyToManyField, RelatedObject))
[ "def", "is_field_remote", "(", "model", ",", "field_name", ")", ":", "if", "not", "hasattr", "(", "model", ",", "'_meta'", ")", ":", "# ephemeral model with no metaclass", "return", "False", "model_field", "=", "get_model_field", "(", "model", ",", "field_name", ...
29.789474
18.105263
def _setable_set_(name, self, func): "Used to set the attribute a single time using the given function." setattr(self._attr_data_, name, func()) if hasattr(self._attr_func_, name): delattr(self._attr_func_, name) setattr( type(self), name, property( functools.partial(self._simple_get_, name) ) )
[ "def", "_setable_set_", "(", "name", ",", "self", ",", "func", ")", ":", "setattr", "(", "self", ".", "_attr_data_", ",", "name", ",", "func", "(", ")", ")", "if", "hasattr", "(", "self", ".", "_attr_func_", ",", "name", ")", ":", "delattr", "(", "...
26.733333
22.066667
def _permute_aux_specs(self): """Generate all permutations of the non-core specifications.""" # Convert to attr names that Calc is expecting. calc_aux_mapping = self._NAMES_SUITE_TO_CALC.copy() # Special case: manually add 'library' to mapping calc_aux_mapping[_OBJ_LIB_STR] = None [calc_aux_mapping.pop(core) for core in self._CORE_SPEC_NAMES] specs = self._get_aux_specs() for suite_name, calc_name in calc_aux_mapping.items(): specs[calc_name] = specs.pop(suite_name) return _permuted_dicts_of_specs(specs)
[ "def", "_permute_aux_specs", "(", "self", ")", ":", "# Convert to attr names that Calc is expecting.", "calc_aux_mapping", "=", "self", ".", "_NAMES_SUITE_TO_CALC", ".", "copy", "(", ")", "# Special case: manually add 'library' to mapping", "calc_aux_mapping", "[", "_OBJ_LIB_ST...
48.583333
15
def to_world(self, shape, dst_crs=None): """Return the shape (provided in pixel coordinates) in world coordinates, as GeoVector.""" if dst_crs is None: dst_crs = self.crs shp = transform(shape, self.crs, dst_crs, dst_affine=self.affine) return GeoVector(shp, dst_crs)
[ "def", "to_world", "(", "self", ",", "shape", ",", "dst_crs", "=", "None", ")", ":", "if", "dst_crs", "is", "None", ":", "dst_crs", "=", "self", ".", "crs", "shp", "=", "transform", "(", "shape", ",", "self", ".", "crs", ",", "dst_crs", ",", "dst_a...
51
9.666667
def get_user_modified_lines(self): """ Output: {file_path: [(line_a_start, line_a_end), (line_b_start, line_b_end)]} Lines ranges are sorted and not overlapping """ # I assume that git diff: # - doesn't mix diffs from different files, # - diffs are not overlapping # - diffs are sorted based on line numbers output = {} FILE_NAME_RE = r'^\+\+\+ (.+)$' CHANGED_LINES_RE = r'^@@ -[0-9,]+ \+([0-9]+)(?:,([0-9]+))? @@' current_file_name = None for line in self.git_wrapper.get_min_diff(self.remote_sha1, self.local_sha1).split('\n'): file_name_match = re.match(FILE_NAME_RE, line) if file_name_match: current_file_name, = file_name_match.groups() output[current_file_name] = [] continue line_number_match = re.match(CHANGED_LINES_RE, line) if line_number_match: assert current_file_name if current_file_name == '/dev/null': continue line_start, diff_len = line_number_match.groups() line_start, diff_len = int(line_start), int(diff_len or 0) output[current_file_name].append(LinesRange(line_start, line_start + diff_len)) continue return output
[ "def", "get_user_modified_lines", "(", "self", ")", ":", "# I assume that git diff:", "# - doesn't mix diffs from different files,", "# - diffs are not overlapping", "# - diffs are sorted based on line numbers", "output", "=", "{", "}", "FILE_NAME_RE", "=", "r'^\\+\\+\\+ (.+)$'", "...
39.294118
20.117647
def setPalette(self, palette): """ Sets the palette for this node to the inputed palette. If None is provided, then the scene's palette will be used for this node. :param palette | <XNodePalette> || None """ self._palette = XNodePalette(palette) if palette is not None else None self.setDirty()
[ "def", "setPalette", "(", "self", ",", "palette", ")", ":", "self", ".", "_palette", "=", "XNodePalette", "(", "palette", ")", "if", "palette", "is", "not", "None", "else", "None", "self", ".", "setDirty", "(", ")" ]
39.666667
19.222222
def find_interior_point( distribution, parameters=None, cache=None, iterations=1000, retall=False, seed=None, ): """ Find interior point of the distribution where forward evaluation is guarantied to be both ``distribution.fwd(xloc) > 0`` and ``distribution.fwd(xloc) < 1``. Args: distribution (Dist): Distribution to find interior on. parameters (Optional[Dict[Dist, numpy.ndarray]]): Parameters for the distribution. cache (Optional[Dict[Dist, numpy.ndarray]]): Memory cache for the location in the evaluation so far. iterations (int): The number of iterations allowed to be performed retall (bool): If provided, lower and upper bound which guaranties that ``distribution.fwd(lower) == 0`` and ``distribution.fwd(upper) == 1`` is returned as well. seed (Optional[int]): Fix random seed. Returns: numpy.ndarray: An input array with shape ``(len(distribution),)`` which is guarantied to be on the interior of the probability distribution. Example: >>> distribution = chaospy.MvNormal([1, 2, 3], numpy.eye(3)+.03) >>> midpoint, lower, upper = find_interior_point( ... distribution, retall=True, seed=1234) >>> print(lower.T) [[-64. -64. -64.]] >>> print(numpy.around(midpoint, 4).T) [[ 0.6784 -33.7687 -19.0182]] >>> print(upper.T) [[16. 16. 16.]] >>> distribution = chaospy.Uniform(1000, 1010) >>> midpoint, lower, upper = find_interior_point( ... 
distribution, retall=True, seed=1234) >>> print(numpy.around(lower, 4)) [[-1.]] >>> print(numpy.around(midpoint, 4)) [[1009.8873]] >>> print(numpy.around(upper, 4)) [[1024.]] """ random_state = numpy.random.get_state() numpy.random.seed(seed) forward = partial(evaluation.evaluate_forward, cache=cache, distribution=distribution, parameters=parameters) dim = len(distribution) upper = numpy.ones((dim, 1)) for _ in range(100): indices = forward(x_data=upper) < 1 if not numpy.any(indices): break upper[indices] *= 2 lower = -numpy.ones((dim, 1)) for _ in range(100): indices = forward(x_data=lower) > 0 if not numpy.any(indices): break lower[indices] *= 2 for _ in range(iterations): rand = numpy.random.random(dim) proposal = (rand*lower.T + (1-rand)*upper.T).T evals = forward(x_data=proposal) indices0 = evals > 0 indices1 = evals < 1 range_ = numpy.random.choice(dim, size=dim, replace=False) upper_ = numpy.where(indices1, upper, evals) for idx in range_: if upper.flatten()[idx] == upper_.flatten()[idx]: continue if numpy.all(forward(x_data=upper_) == 1): upper = upper_ break upper_[idx] = upper[idx] lower_ = numpy.where(indices0, lower, evals) for idx in range_: if lower.flatten()[idx] == lower_.flatten()[idx]: continue if numpy.all(forward(x_data=lower_) == 0): lower = lower_ break lower_[idx] = lower[idx] if numpy.all(indices0 & indices1): break else: if retall: return proposal, lower, upper return proposal raise evaluation.DependencyError( "Too many iterations required to find interior point.") numpy.random.set_state(random_state) if retall: return proposal, lower, upper return proposal
[ "def", "find_interior_point", "(", "distribution", ",", "parameters", "=", "None", ",", "cache", "=", "None", ",", "iterations", "=", "1000", ",", "retall", "=", "False", ",", "seed", "=", "None", ",", ")", ":", "random_state", "=", "numpy", ".", "random...
32.219298
19.149123
def write_name (self, url_data): """Write url_data.name.""" args = (self.part("name"), cgi.escape(url_data.name)) self.writeln(u"<tr><td>%s</td><td>`%s'</td></tr>" % args)
[ "def", "write_name", "(", "self", ",", "url_data", ")", ":", "args", "=", "(", "self", ".", "part", "(", "\"name\"", ")", ",", "cgi", ".", "escape", "(", "url_data", ".", "name", ")", ")", "self", ".", "writeln", "(", "u\"<tr><td>%s</td><td>`%s'</td></tr...
48
13.5
def removeSubscribers(self, emails_list): """Remove subscribers from this workitem If the subscribers have not been added, no more actions will be performed. :param emails_list: a :class:`list`/:class:`tuple`/:class:`set` contains the the subscribers' emails """ if not hasattr(emails_list, "__iter__"): error_msg = "Input parameter 'emails_list' is not iterable" self.log.error(error_msg) raise exception.BadValue(error_msg) # overall flag missing_flags = True headers, raw_data = self._perform_subscribe() for email in emails_list: missing_flag, raw_data = self._remove_subscriber(email, raw_data) missing_flags = missing_flags and missing_flag if missing_flags: return self._update_subscribe(headers, raw_data) self.log.info("Successfully remove subscribers: %s for <Workitem %s>", emails_list, self)
[ "def", "removeSubscribers", "(", "self", ",", "emails_list", ")", ":", "if", "not", "hasattr", "(", "emails_list", ",", "\"__iter__\"", ")", ":", "error_msg", "=", "\"Input parameter 'emails_list' is not iterable\"", "self", ".", "log", ".", "error", "(", "error_m...
34.206897
21.068966
def ReadAllClientGraphSeries( self, client_label, report_type, time_range = None, cursor=None): """Reads graph series for the given label and report-type from the DB.""" query = """ SELECT UNIX_TIMESTAMP(timestamp), graph_series FROM client_report_graphs WHERE client_label = %s AND report_type = %s """ args = [client_label, report_type.SerializeToDataStore()] if time_range is not None: query += "AND `timestamp` BETWEEN FROM_UNIXTIME(%s) AND FROM_UNIXTIME(%s)" args += [ mysql_utils.RDFDatetimeToTimestamp(time_range.start), mysql_utils.RDFDatetimeToTimestamp(time_range.end) ] cursor.execute(query, args) results = {} for timestamp, raw_series in cursor.fetchall(): # TODO(hanuszczak): pytype does not seem to understand overloads, so it is # not possible to correctly annotate `TimestampToRDFDatetime`. timestamp = cast(rdfvalue.RDFDatetime, mysql_utils.TimestampToRDFDatetime(timestamp)) series = rdf_stats.ClientGraphSeries.FromSerializedString(raw_series) results[timestamp] = series return results
[ "def", "ReadAllClientGraphSeries", "(", "self", ",", "client_label", ",", "report_type", ",", "time_range", "=", "None", ",", "cursor", "=", "None", ")", ":", "query", "=", "\"\"\"\n SELECT UNIX_TIMESTAMP(timestamp), graph_series\n FROM client_report_graphs\n W...
36.03125
20.375
def count_items(self): """Counts Items in full_soup and soup. For debugging""" soup_items = self.soup.findAll('item') full_soup_items = self.full_soup.findAll('item') return len(soup_items), len(full_soup_items)
[ "def", "count_items", "(", "self", ")", ":", "soup_items", "=", "self", ".", "soup", ".", "findAll", "(", "'item'", ")", "full_soup_items", "=", "self", ".", "full_soup", ".", "findAll", "(", "'item'", ")", "return", "len", "(", "soup_items", ")", ",", ...
47.8
10.4
def wait_for_fresh_games(self, poll_interval=15.0): """Block caller until required new games have been played. Args: poll_interval: number of seconds to wait between checks If the cell `table_state=metadata:wait_for_game_number` exists, then block the caller, checking every `poll_interval` seconds, until `table_state=metadata:game_counter is at least the value in that cell. """ wait_until_game = self.read_wait_cell() if not wait_until_game: return latest_game = self.latest_game_number last_latest = latest_game while latest_game < wait_until_game: utils.dbg('Latest game {} not yet at required game {} ' '(+{}, {:0.3f} games/sec)'.format( latest_game, wait_until_game, latest_game - last_latest, (latest_game - last_latest) / poll_interval )) time.sleep(poll_interval) last_latest = latest_game latest_game = self.latest_game_number
[ "def", "wait_for_fresh_games", "(", "self", ",", "poll_interval", "=", "15.0", ")", ":", "wait_until_game", "=", "self", ".", "read_wait_cell", "(", ")", "if", "not", "wait_until_game", ":", "return", "latest_game", "=", "self", ".", "latest_game_number", "last_...
41.777778
15.814815
def add_glitch(psr, epoch, amp): """ Like pulsar term BWM event, but now differently parameterized: just an amplitude (not log-amp) parameter, and an epoch. [source: piccard] :param psr: pulsar object :param epoch: TOA time (MJD) the burst hits the earth :param amp: amplitude of the glitch """ # Define the heaviside function heaviside = lambda x: 0.5 * (N.sign(x) + 1) # Glitches are spontaneous spin-up events. # Thus TOAs will be advanced, and resiudals will be negative. psr.stoas[:] -= amp * heaviside(psr.toas() - epoch) * \ (psr.toas() - epoch)*86400.0
[ "def", "add_glitch", "(", "psr", ",", "epoch", ",", "amp", ")", ":", "# Define the heaviside function", "heaviside", "=", "lambda", "x", ":", "0.5", "*", "(", "N", ".", "sign", "(", "x", ")", "+", "1", ")", "# Glitches are spontaneous spin-up events.", "# Th...
34.055556
17.611111
def broadcast_info(team_id, date=datetime.now()): """Return BroadcastInfo object that containts information about the television and radio broadcasts for the team_id and year""" data = mlbgame.info.broadcast_info(team_id, date) return [mlbgame.info.BroadcastInfo(x) for x in data]
[ "def", "broadcast_info", "(", "team_id", ",", "date", "=", "datetime", ".", "now", "(", ")", ")", ":", "data", "=", "mlbgame", ".", "info", ".", "broadcast_info", "(", "team_id", ",", "date", ")", "return", "[", "mlbgame", ".", "info", ".", "BroadcastI...
49.166667
9.833333
def meff_lh_110(self, **kwargs): ''' Returns the light-hole band effective mass in the [110] direction, meff_lh_110, in units of electron mass. ''' return 2. / (2 * self.luttinger1(**kwargs) + self.luttinger2(**kwargs) + 3 * self.luttinger3(**kwargs))
[ "def", "meff_lh_110", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "2.", "/", "(", "2", "*", "self", ".", "luttinger1", "(", "*", "*", "kwargs", ")", "+", "self", ".", "luttinger2", "(", "*", "*", "kwargs", ")", "+", "3", "*", "sel...
43
21.857143
def build_csv_transforming_training_input_fn(schema, features, stats, analysis_output_dir, raw_data_file_pattern, training_batch_size, num_epochs=None, randomize_input=False, min_after_dequeue=1, reader_num_threads=1, allow_smaller_final_batch=True): """Creates training input_fn that reads raw csv data and applies transforms. Args: schema: schema list features: features dict stats: stats dict analysis_output_dir: output folder from analysis raw_data_file_pattern: file path, or list of files training_batch_size: An int specifying the batch size to use. num_epochs: numer of epochs to read from the files. Use None to read forever. randomize_input: If true, the input rows are read out of order. This randomness is limited by the min_after_dequeue value. min_after_dequeue: Minimum number elements in the reading queue after a dequeue, used to ensure a level of mixing of elements. Only used if randomize_input is True. reader_num_threads: The number of threads enqueuing data. allow_smaller_final_batch: If false, fractional batches at the end of training or evaluation are not used. Returns: An input_fn suitable for training that reads raw csv training data and applies transforms. 
""" def raw_training_input_fn(): """Training input function that reads raw data and applies transforms.""" if isinstance(raw_data_file_pattern, six.string_types): filepath_list = [raw_data_file_pattern] else: filepath_list = raw_data_file_pattern files = [] for path in filepath_list: files.extend(file_io.get_matching_files(path)) filename_queue = tf.train.string_input_producer( files, num_epochs=num_epochs, shuffle=randomize_input) csv_id, csv_lines = tf.TextLineReader().read_up_to(filename_queue, training_batch_size) queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue if randomize_input: _, batch_csv_lines = tf.train.shuffle_batch( tensors=[csv_id, csv_lines], batch_size=training_batch_size, capacity=queue_capacity, min_after_dequeue=min_after_dequeue, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch) else: _, batch_csv_lines = tf.train.batch( tensors=[csv_id, csv_lines], batch_size=training_batch_size, capacity=queue_capacity, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch) csv_header, record_defaults = csv_header_and_defaults(features, schema, stats, keep_target=True) parsed_tensors = tf.decode_csv(batch_csv_lines, record_defaults, name='csv_to_tensors') raw_features = dict(zip(csv_header, parsed_tensors)) transform_fn = make_preprocessing_fn(analysis_output_dir, features, keep_target=True) transformed_tensors = transform_fn(raw_features) # Expand the dims of non-sparse tensors. This is needed by tf.learn. transformed_features = {} for k, v in six.iteritems(transformed_tensors): if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1: transformed_features[k] = tf.expand_dims(v, -1) else: transformed_features[k] = v # image_feature_engineering does not need to be called as images are not # supported in raw csv for training. 
# Remove the target tensor, and return it directly target_name = get_target_name(features) if not target_name or target_name not in transformed_features: raise ValueError('Cannot find target transform in features') transformed_target = transformed_features.pop(target_name) return transformed_features, transformed_target return raw_training_input_fn
[ "def", "build_csv_transforming_training_input_fn", "(", "schema", ",", "features", ",", "stats", ",", "analysis_output_dir", ",", "raw_data_file_pattern", ",", "training_batch_size", ",", "num_epochs", "=", "None", ",", "randomize_input", "=", "False", ",", "min_after_d...
41.623762
22.554455
def derive_link_fields(self, context): """ Used to derive which fields should be linked. This should return a set() containing the names of those fields which should be linkable. """ if self.link_fields is not None: return self.link_fields else: link_fields = set() if self.fields: for field in self.fields: if field != 'is_active': link_fields.add(field) break return link_fields
[ "def", "derive_link_fields", "(", "self", ",", "context", ")", ":", "if", "self", ".", "link_fields", "is", "not", "None", ":", "return", "self", ".", "link_fields", "else", ":", "link_fields", "=", "set", "(", ")", "if", "self", ".", "fields", ":", "f...
31.941176
14.294118
def file_cmd(context, tags, archive, bundle_name, path): """Add a file to a bundle.""" bundle_obj = context.obj['db'].bundle(bundle_name) if bundle_obj is None: click.echo(click.style(f"unknown bundle: {bundle_name}", fg='red')) context.abort() version_obj = bundle_obj.versions[0] new_file = context.obj['db'].new_file( path=str(Path(path).absolute()), to_archive=archive, tags=[context.obj['db'].tag(tag_name) if context.obj['db'].tag(tag_name) else context.obj['db'].new_tag(tag_name) for tag_name in tags] ) new_file.version = version_obj context.obj['db'].add_commit(new_file) click.echo(click.style(f"new file added: {new_file.path} ({new_file.id})", fg='green'))
[ "def", "file_cmd", "(", "context", ",", "tags", ",", "archive", ",", "bundle_name", ",", "path", ")", ":", "bundle_obj", "=", "context", ".", "obj", "[", "'db'", "]", ".", "bundle", "(", "bundle_name", ")", "if", "bundle_obj", "is", "None", ":", "click...
46.5
17.5625
def print_horiz_table(self, data): """Print a horizontal pretty table from data.""" # Build list of returned objects return_objects = list() fields = self.job_args.get('fields') if not fields: fields = set() for item_dict in data: for field_item in item_dict.keys(): fields.add(field_item) fields = sorted(fields) for obj in data: item_struct = dict() for item in fields: item_struct[item] = obj.get(item) else: return_objects.append(item_struct) table = prettytable.PrettyTable(fields) for obj in return_objects: table.add_row([obj.get(i) for i in fields]) for tbl in table.align.keys(): table.align[tbl] = 'l' sort_key = self.job_args.get('sort_by') if sort_key: table.sortby = sort_key self.printer(table)
[ "def", "print_horiz_table", "(", "self", ",", "data", ")", ":", "# Build list of returned objects", "return_objects", "=", "list", "(", ")", "fields", "=", "self", ".", "job_args", ".", "get", "(", "'fields'", ")", "if", "not", "fields", ":", "fields", "=", ...
29.84375
14.71875
def datapt_to_system(self, datapt, system=None, coords='data', naxispath=None): """ Map points to given coordinate system. Parameters ---------- datapt : array-like Pixel coordinates in the format of ``[[x0, y0, ...], [x1, y1, ...], ..., [xn, yn, ...]]``. system : str or None, optional, default to 'icrs' Coordinate system name. coords : 'data' or None, optional, default to 'data' Expresses whether the data coordinate is indexed from zero naxispath : list-like or None, optional, defaults to None A sequence defining the pixel indexes > 2D, if any Returns ------- coord : SkyCoord """ if self.coordsys == 'raw': raise common.WCSError("No usable WCS") if system is None: system = 'icrs' wcspt = self.datapt_to_wcspt(datapt, coords=coords, naxispath=naxispath) frame_class = coordinates.frame_transform_graph.lookup_name( self.coordsys) ra_deg = wcspt[:, 0] dec_deg = wcspt[:, 1] coord = frame_class(ra_deg * units.degree, dec_deg * units.degree) to_class = coordinates.frame_transform_graph.lookup_name(system) # Skip if input and output is the same (no realize_frame # call in astropy) if to_class != frame_class: coord = coord.transform_to(to_class) return coord
[ "def", "datapt_to_system", "(", "self", ",", "datapt", ",", "system", "=", "None", ",", "coords", "=", "'data'", ",", "naxispath", "=", "None", ")", ":", "if", "self", ".", "coordsys", "==", "'raw'", ":", "raise", "common", ".", "WCSError", "(", "\"No ...
32.456522
21.413043
def add_recipients(self, id, recipients): """ Add recipients. Add recipients to an existing group conversation. Response is similar to the GET/show action, except that only includes the latest message (e.g. "joe was added to the conversation by bob") """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - recipients """An array of recipient ids. These may be user ids or course/group ids prefixed with "course_" or "group_" respectively, e.g. recipients[]=1&recipients[]=2&recipients[]=course_3""" data["recipients"] = recipients self.logger.debug("POST /api/v1/conversations/{id}/add_recipients with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/conversations/{id}/add_recipients".format(**path), data=data, params=params, no_data=True)
[ "def", "add_recipients", "(", "self", ",", "id", ",", "recipients", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "id", "# REQUIRED - recipien...
42.625
25.708333
def localize(self, dt, is_dst=False): '''Convert naive time to local time. This method should be used to construct localtimes, rather than passing a tzinfo argument to a datetime constructor. is_dst is used to determine the correct timezone in the ambigous period at the end of daylight saving time. >>> from pytz import timezone >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> amdam = timezone('Europe/Amsterdam') >>> dt = datetime(2004, 10, 31, 2, 0, 0) >>> loc_dt1 = amdam.localize(dt, is_dst=True) >>> loc_dt2 = amdam.localize(dt, is_dst=False) >>> loc_dt1.strftime(fmt) '2004-10-31 02:00:00 CEST (+0200)' >>> loc_dt2.strftime(fmt) '2004-10-31 02:00:00 CET (+0100)' >>> str(loc_dt2 - loc_dt1) '1:00:00' Use is_dst=None to raise an AmbiguousTimeError for ambiguous times at the end of daylight saving time >>> try: ... loc_dt1 = amdam.localize(dt, is_dst=None) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous is_dst defaults to False >>> amdam.localize(dt) == amdam.localize(dt, False) True is_dst is also used to determine the correct timezone in the wallclock times jumped over at the start of daylight saving time. >>> pacific = timezone('US/Pacific') >>> dt = datetime(2008, 3, 9, 2, 0, 0) >>> ploc_dt1 = pacific.localize(dt, is_dst=True) >>> ploc_dt2 = pacific.localize(dt, is_dst=False) >>> ploc_dt1.strftime(fmt) '2008-03-09 02:00:00 PDT (-0700)' >>> ploc_dt2.strftime(fmt) '2008-03-09 02:00:00 PST (-0800)' >>> str(ploc_dt2 - ploc_dt1) '1:00:00' Use is_dst=None to raise a NonExistentTimeError for these skipped times. >>> try: ... loc_dt1 = pacific.localize(dt, is_dst=None) ... except NonExistentTimeError: ... print('Non-existent') Non-existent ''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') # Find the two best possibilities. 
possible_loc_dt = set() for delta in [timedelta(days=-1), timedelta(days=1)]: loc_dt = dt + delta idx = max(0, bisect_right( self._utc_transition_times, loc_dt) - 1) inf = self._transition_info[idx] tzinfo = self._tzinfos[inf] loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo)) if loc_dt.replace(tzinfo=None) == dt: possible_loc_dt.add(loc_dt) if len(possible_loc_dt) == 1: return possible_loc_dt.pop() # If there are no possibly correct timezones, we are attempting # to convert a time that never happened - the time period jumped # during the start-of-DST transition period. if len(possible_loc_dt) == 0: # If we refuse to guess, raise an exception. if is_dst is None: raise NonExistentTimeError(dt) # If we are forcing the pre-DST side of the DST transition, we # obtain the correct timezone by winding the clock forward a few # hours. elif is_dst: return self.localize( dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6) # If we are forcing the post-DST side of the DST transition, we # obtain the correct timezone by winding the clock back. else: return self.localize( dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6) # If we get this far, we have multiple possible timezones - this # is an ambiguous case occuring during the end-of-DST transition. # If told to be strict, raise an exception since we have an # ambiguous case if is_dst is None: raise AmbiguousTimeError(dt) # Filter out the possiblilities that don't match the requested # is_dst filtered_possible_loc_dt = [ p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst ] # Hopefully we only have one possibility left. Return it. if len(filtered_possible_loc_dt) == 1: return filtered_possible_loc_dt[0] if len(filtered_possible_loc_dt) == 0: filtered_possible_loc_dt = list(possible_loc_dt) # If we get this far, we have in a wierd timezone transition # where the clocks have been wound back but is_dst is the same # in both (eg. Europe/Warsaw 1915 when they switched to CET). 
# At this point, we just have to guess unless we allow more # hints to be passed in (such as the UTC offset or abbreviation), # but that is just getting silly. # # Choose the earliest (by UTC) applicable timezone if is_dst=True # Choose the latest (by UTC) applicable timezone if is_dst=False # i.e., behave like end-of-DST transition dates = {} # utc -> local for local_dt in filtered_possible_loc_dt: utc_time = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset assert utc_time not in dates dates[utc_time] = local_dt return dates[[min, max][not is_dst](dates)]
[ "def", "localize", "(", "self", ",", "dt", ",", "is_dst", "=", "False", ")", ":", "if", "dt", ".", "tzinfo", "is", "not", "None", ":", "raise", "ValueError", "(", "'Not naive datetime (tzinfo is already set)'", ")", "# Find the two best possibilities.", "possible_...
38.817518
19.79562
def listfolder(p): """ generator of list folder in the path. folders only """ for entry in scandir.scandir(p): if entry.is_dir(): yield entry.name
[ "def", "listfolder", "(", "p", ")", ":", "for", "entry", "in", "scandir", ".", "scandir", "(", "p", ")", ":", "if", "entry", ".", "is_dir", "(", ")", ":", "yield", "entry", ".", "name" ]
22.375
9.625
def sort_elements_by_child_values(obj_pyxb, child_name_list): """In-place sort simple or complex elements in a PyXB object by values they contain in child elements. Args: obj_pyxb: PyXB object child_name_list: list of str List of element names that are direct children of the PyXB object. """ obj_pyxb.sort(key=lambda x: [get_auto(getattr(x, n)) for n in child_name_list])
[ "def", "sort_elements_by_child_values", "(", "obj_pyxb", ",", "child_name_list", ")", ":", "obj_pyxb", ".", "sort", "(", "key", "=", "lambda", "x", ":", "[", "get_auto", "(", "getattr", "(", "x", ",", "n", ")", ")", "for", "n", "in", "child_name_list", "...
33.666667
23.833333
def create_o3d_asset(self, manip=None, small_ov_set=None, large_ov_set=None, display_name='', description=''): """stub""" if manip and not isinstance(manip, ABCDataInputStream): raise InvalidArgument('Manipulatable object must be an ' + 'osid.transport.DataInputStream object') if small_ov_set and not isinstance(small_ov_set, ABCDataInputStream): raise InvalidArgument('Small OV Set object must be an ' + 'osid.transport.DataInputStream object') if large_ov_set and not isinstance(large_ov_set, ABCDataInputStream): raise InvalidArgument('Large OV Set object must be an ' + 'osid.transport.DataInputStream object') asset_id, asset_content_id = self.create_asset(asset_type=O3D_ASSET_TYPE, display_name=display_name, description=description) if manip is not None: self.add_content_to_asset(asset_id=asset_id, asset_data=manip, asset_content_type=MANIP_ASSET_CONTENT_TYPE, asset_label='3d manipulatable') if small_ov_set is not None: self.add_content_to_asset(asset_id=asset_id, asset_data=small_ov_set, asset_content_type=OV_SET_SMALL_ASSET_CONTENT_TYPE, asset_label='small orthoviewset') if large_ov_set is not None: self.add_content_to_asset(asset_id=asset_id, asset_data=large_ov_set, asset_content_type=OV_SET_LARGE_ASSET_CONTENT_TYPE, asset_label='large orthoviewset') return asset_id
[ "def", "create_o3d_asset", "(", "self", ",", "manip", "=", "None", ",", "small_ov_set", "=", "None", ",", "large_ov_set", "=", "None", ",", "display_name", "=", "''", ",", "description", "=", "''", ")", ":", "if", "manip", "and", "not", "isinstance", "("...
59.371429
23.085714
def process_files(): """ Process files with a single progress bar """ with enlighten.Counter(total=100, desc='Simple', unit='ticks') as pbar: for num in range(100): # pylint: disable=unused-variable time.sleep(0.05) pbar.update()
[ "def", "process_files", "(", ")", ":", "with", "enlighten", ".", "Counter", "(", "total", "=", "100", ",", "desc", "=", "'Simple'", ",", "unit", "=", "'ticks'", ")", "as", "pbar", ":", "for", "num", "in", "range", "(", "100", ")", ":", "# pylint: dis...
30.111111
16.777778
def report(self, simulation, state): """Generate a report. Parameters ---------- simulation : Simulation The Simulation to generate a report for state : State The current state of the simulation """ if not self._initialized: self._initialized = True self._steps[0] += self.interval positions = state.getPositions() # Serialize self._out.write(b''.join([b'\nSTARTOFCHUNK\n', pickle.dumps([self._steps[0], positions._value]), b'\nENDOFCHUNK\n'])) if hasattr(self._out, 'flush'): self._out.flush()
[ "def", "report", "(", "self", ",", "simulation", ",", "state", ")", ":", "if", "not", "self", ".", "_initialized", ":", "self", ".", "_initialized", "=", "True", "self", ".", "_steps", "[", "0", "]", "+=", "self", ".", "interval", "positions", "=", "...
31.272727
15
def buildcontent(self): """build HTML content only, no header or body tags""" self.buildcontainer() self.option = json.dumps(self.options, cls = HighchartsEncoder) self.setoption = json.dumps(self.setOptions, cls = HighchartsEncoder) self.data = json.dumps(self.data_temp, cls = HighchartsEncoder) # DEM 2017/04/25: Make 'data' available as an array # ... this permits jinja2 array access to each data definition # ... which is useful for looping over multiple data sources self.data_list = [json.dumps(x, cls = HighchartsEncoder) for x in self.data_temp] if self.navi_seri_flag: self.navi_seri = json.dumps(self.navi_seri_temp, cls = HighchartsEncoder) self._htmlcontent = self.template_content_highcharts.render(chart=self).encode('utf-8')
[ "def", "buildcontent", "(", "self", ")", ":", "self", ".", "buildcontainer", "(", ")", "self", ".", "option", "=", "json", ".", "dumps", "(", "self", ".", "options", ",", "cls", "=", "HighchartsEncoder", ")", "self", ".", "setoption", "=", "json", ".",...
49.764706
29.823529
def _clone_repo(cls, repo, url, path, name, **kwargs): """:return: Repo instance of newly cloned repository :param repo: our parent repository :param url: url to clone from :param path: repository-relative path to the submodule checkout location :param name: canonical of the submodule :param kwrags: additinoal arguments given to git.clone""" module_abspath = cls._module_abspath(repo, path, name) module_checkout_path = module_abspath if cls._need_gitfile_submodules(repo.git): kwargs['separate_git_dir'] = module_abspath module_abspath_dir = osp.dirname(module_abspath) if not osp.isdir(module_abspath_dir): os.makedirs(module_abspath_dir) module_checkout_path = osp.join(repo.working_tree_dir, path) # end clone = git.Repo.clone_from(url, module_checkout_path, **kwargs) if cls._need_gitfile_submodules(repo.git): cls._write_git_file_and_module_config(module_checkout_path, module_abspath) # end return clone
[ "def", "_clone_repo", "(", "cls", ",", "repo", ",", "url", ",", "path", ",", "name", ",", "*", "*", "kwargs", ")", ":", "module_abspath", "=", "cls", ".", "_module_abspath", "(", "repo", ",", "path", ",", "name", ")", "module_checkout_path", "=", "modu...
49.090909
17.681818
def build_tree(self, rebuild=False): """ method builds tree by iterating from the synergy_start_timeperiod to the current time and inserting corresponding nodes """ time_qualifier = self.process_hierarchy.bottom_process.time_qualifier process_name = self.process_hierarchy.bottom_process.process_name if rebuild or self.build_timeperiod is None: timeperiod = settings.settings['synergy_start_timeperiod'] else: timeperiod = self.build_timeperiod timeperiod = cast_to_time_qualifier(time_qualifier, timeperiod) actual_timeperiod = time_helper.actual_timeperiod(time_qualifier) while actual_timeperiod >= timeperiod: self.get_node(process_name, timeperiod) timeperiod = time_helper.increment_timeperiod(time_qualifier, timeperiod) self.build_timeperiod = actual_timeperiod
[ "def", "build_tree", "(", "self", ",", "rebuild", "=", "False", ")", ":", "time_qualifier", "=", "self", ".", "process_hierarchy", ".", "bottom_process", ".", "time_qualifier", "process_name", "=", "self", ".", "process_hierarchy", ".", "bottom_process", ".", "p...
46.736842
23.368421
def get_model(model:nn.Module): "Return the model maybe wrapped inside `model`." return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model
[ "def", "get_model", "(", "model", ":", "nn", ".", "Module", ")", ":", "return", "model", ".", "module", "if", "isinstance", "(", "model", ",", "(", "DistributedDataParallel", ",", "nn", ".", "DataParallel", ")", ")", "else", "model" ]
60.666667
26.666667
def copy_image_to_context(self, context, position, rotation=0, zoom=None): """Draw a cached image on the context :param context: The Cairo context to draw on :param position: The position od the image """ if not zoom: zoom = self.__zoom zoom_multiplicator = zoom * self.multiplicator context.save() context.scale(1. / zoom_multiplicator, 1. / zoom_multiplicator) image_position = round(position[0] * zoom_multiplicator), round(position[1] * zoom_multiplicator) context.translate(*image_position) context.rotate(rotation) context.set_source_surface(self.__image, 0, 0) context.paint() context.restore()
[ "def", "copy_image_to_context", "(", "self", ",", "context", ",", "position", ",", "rotation", "=", "0", ",", "zoom", "=", "None", ")", ":", "if", "not", "zoom", ":", "zoom", "=", "self", ".", "__zoom", "zoom_multiplicator", "=", "zoom", "*", "self", "...
37.368421
20.526316
def endpoint_list(profile=None, **connection_args): ''' Return a list of available endpoints (keystone endpoints-list) CLI Example: .. code-block:: bash salt '*' keystone.endpoint_list ''' kstone = auth(profile, **connection_args) ret = {} for endpoint in kstone.endpoints.list(): ret[endpoint.id] = dict((value, getattr(endpoint, value)) for value in dir(endpoint) if not value.startswith('_') and isinstance(getattr(endpoint, value), (six.string_types, dict, bool))) return ret
[ "def", "endpoint_list", "(", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "ret", "=", "{", "}", "for", "endpoint", "in", "kstone", ".", "endpoints", "....
32.333333
28
def _init_auth(self, config): """ Init authentication @dict: configuration of ldapcherry """ self.auth_mode = self._get_param('auth', 'auth.mode', config) if self.auth_mode in ['and', 'or', 'none']: pass elif self.auth_mode == 'custom': # load custom auth module auth_module = self._get_param('auth', 'auth.module', config) auth = __import__(auth_module, globals(), locals(), ['Auth'], 0) self.auth = auth.Auth(config['auth'], cherrypy.log) else: raise WrongParamValue( 'auth.mode', 'auth', ['and', 'or', 'none', 'custom'], ) self.roles_file = self._get_param('roles', 'roles.file', config) cherrypy.log.error( msg="loading roles file '%(file)s'" % {'file': self.roles_file}, severity=logging.DEBUG ) self.roles = Roles(self.roles_file)
[ "def", "_init_auth", "(", "self", ",", "config", ")", ":", "self", ".", "auth_mode", "=", "self", ".", "_get_param", "(", "'auth'", ",", "'auth.mode'", ",", "config", ")", "if", "self", ".", "auth_mode", "in", "[", "'and'", ",", "'or'", ",", "'none'", ...
38.44
17
def _get_price(self, package): """Returns valid price for ordering a dedicated host.""" for price in package['prices']: if not price.get('locationGroupId'): return price['id'] raise SoftLayer.SoftLayerError("Could not find valid price")
[ "def", "_get_price", "(", "self", ",", "package", ")", ":", "for", "price", "in", "package", "[", "'prices'", "]", ":", "if", "not", "price", ".", "get", "(", "'locationGroupId'", ")", ":", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", "."...
35.375
16.625
def countSeries(requestContext, *seriesLists): """ Draws a horizontal line representing the number of nodes found in the seriesList. Example:: &target=countSeries(carbon.agents.*.*) """ if not seriesLists or not any(seriesLists): series = constantLine(requestContext, 0).pop() series.pathExpression = "countSeries()" else: seriesList, start, end, step = normalize(seriesLists) name = "countSeries(%s)" % formatPathExpressions(seriesList) values = (int(len(row)) for row in zip_longest(*seriesList)) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series]
[ "def", "countSeries", "(", "requestContext", ",", "*", "seriesLists", ")", ":", "if", "not", "seriesLists", "or", "not", "any", "(", "seriesLists", ")", ":", "series", "=", "constantLine", "(", "requestContext", ",", "0", ")", ".", "pop", "(", ")", "seri...
32.142857
20.809524
def __normalize_grades(self): """ Adjust the grades list. If a grade has been set, set All to false """ if 'grades' in self and self['grades']['All'] is True: for grade in self['grades']: if grade != 'All' and self['grades'][grade] is True: self['grades']['All'] = False break
[ "def", "__normalize_grades", "(", "self", ")", ":", "if", "'grades'", "in", "self", "and", "self", "[", "'grades'", "]", "[", "'All'", "]", "is", "True", ":", "for", "grade", "in", "self", "[", "'grades'", "]", ":", "if", "grade", "!=", "'All'", "and...
34.090909
13
def cellpar_to_cell(cellpar, ab_normal=(0,0,1), a_direction=None): """Return a 3x3 cell matrix from `cellpar` = [a, b, c, alpha, beta, gamma]. The returned cell is orientated such that a and b are normal to `ab_normal` and a is parallel to the projection of `a_direction` in the a-b plane. Default `a_direction` is (1,0,0), unless this is parallel to `ab_normal`, in which case default `a_direction` is (0,0,1). The returned cell has the vectors va, vb and vc along the rows. The cell will be oriented such that va and vb are normal to `ab_normal` and va will be along the projection of `a_direction` onto the a-b plane. Example: >>> cell = cellpar_to_cell([1, 2, 4, 10, 20, 30], (0,1,1), (1,2,3)) >>> np.round(cell, 3) array([[ 0.816, -0.408, 0.408], [ 1.992, -0.13 , 0.13 ], [ 3.859, -0.745, 0.745]]) """ if a_direction is None: if np.linalg.norm(np.cross(ab_normal, (1,0,0))) < 1e-5: a_direction = (0,0,1) else: a_direction = (1,0,0) # Define rotated X,Y,Z-system, with Z along ab_normal and X along # the projection of a_direction onto the normal plane of Z. ad = np.array(a_direction) Z = unit_vector(ab_normal) X = unit_vector(ad - dot(ad, Z)*Z) Y = np.cross(Z, X) # Express va, vb and vc in the X,Y,Z-system alpha, beta, gamma = 90., 90., 90. if isinstance(cellpar, (int, float)): a = b = c = cellpar elif len(cellpar) == 1: a = b = c = cellpar[0] elif len(cellpar) == 3: a, b, c = cellpar alpha, beta, gamma = 90., 90., 90. else: a, b, c, alpha, beta, gamma = cellpar alpha *= pi/180.0 beta *= pi/180.0 gamma *= pi/180.0 va = a * np.array([1, 0, 0]) vb = b * np.array([cos(gamma), sin(gamma), 0]) cx = cos(beta) cy = (cos(alpha) - cos(beta)*cos(gamma))/sin(gamma) cz = sqrt(1. - cx*cx - cy*cy) vc = c * np.array([cx, cy, cz]) # Convert to the Cartesian x,y,z-system abc = np.vstack((va, vb, vc)) T = np.vstack((X, Y, Z)) cell = dot(abc, T) return cell
[ "def", "cellpar_to_cell", "(", "cellpar", ",", "ab_normal", "=", "(", "0", ",", "0", ",", "1", ")", ",", "a_direction", "=", "None", ")", ":", "if", "a_direction", "is", "None", ":", "if", "np", ".", "linalg", ".", "norm", "(", "np", ".", "cross", ...
33.580645
18.290323
def grad(self, X, mean=None, lenscale=None): r""" Get the gradients of this basis w.r.t.\ the mean and length scales. Parameters ---------- x: ndarray (n, d) array of observations where n is the number of samples, and d is the dimensionality of x. mean: ndarray, optional array of shape (d,) frequency means (one for each dimension of X). If not input, this uses the value of the initial mean. lenscale: ndarray, optional array of shape (d,) length scales (one for each dimension of X). If not input, this uses the value of the initial length scale. Returns ------- ndarray: shape (n, 4*nbases) where nbases is number of random rbf bases, again to the nearest larger two power. This is :math:`\partial \phi(\mathbf{x}) / \partial \boldsymbol\mu` ndarray: shape (n, 4*nbases) where nbases is number of random rbf bases, again to the nearest larger two power. This is :math:`\partial \phi(\mathbf{x}) / \partial \mathbf{l}` """ d = X.shape[1] mean = self._check_dim(d, mean, paramind=0) lenscale = self._check_dim(d, lenscale, paramind=1) VX = self._makeVX(X / lenscale) mX = X.dot(mean)[:, np.newaxis] sinVXpmX = - np.sin(VX + mX) sinVXmmX = - np.sin(VX - mX) cosVXpmX = np.cos(VX + mX) cosVXmmX = np.cos(VX - mX) dPhi_len = [] dPhi_mean = [] for i, l in enumerate(lenscale): # Means dmX = X[:, [i]] dPhi_mean.append(np.hstack((dmX * sinVXpmX, dmX * cosVXpmX, -dmX * sinVXmmX, -dmX * cosVXmmX)) / np.sqrt(2 * self.n)) # Lenscales indlen = np.zeros(d) indlen[i] = 1. / l**2 dVX = - self._makeVX(X * indlen) # FIXME make this more efficient? dPhi_len.append(np.hstack((dVX * sinVXpmX, dVX * cosVXpmX, dVX * sinVXmmX, dVX * cosVXmmX)) / np.sqrt(2 * self.n)) dPhi_mean = np.dstack(dPhi_mean) if d != 1 else dPhi_mean[0] dPhi_len = np.dstack(dPhi_len) if d != 1 else dPhi_len[0] return dPhi_mean, dPhi_len
[ "def", "grad", "(", "self", ",", "X", ",", "mean", "=", "None", ",", "lenscale", "=", "None", ")", ":", "d", "=", "X", ".", "shape", "[", "1", "]", "mean", "=", "self", ".", "_check_dim", "(", "d", ",", "mean", ",", "paramind", "=", "0", ")",...
39.016667
21.533333
def parse(s): """Parse a string representation back into the Vector. >>> Vectors.parse('[2,1,2 ]') DenseVector([2.0, 1.0, 2.0]) >>> Vectors.parse(' ( 100, [0], [2])') SparseVector(100, {0: 2.0}) """ if s.find('(') == -1 and s.find('[') != -1: return DenseVector.parse(s) elif s.find('(') != -1: return SparseVector.parse(s) else: raise ValueError( "Cannot find tokens '[' or '(' from the input string.")
[ "def", "parse", "(", "s", ")", ":", "if", "s", ".", "find", "(", "'('", ")", "==", "-", "1", "and", "s", ".", "find", "(", "'['", ")", "!=", "-", "1", ":", "return", "DenseVector", ".", "parse", "(", "s", ")", "elif", "s", ".", "find", "(",...
34.4
11.8
def check_field_cohesion(self, rec_write_fields, sig_write_fields): """ Check the cohesion of fields used to write the header """ # If there are no signal specification fields, there is nothing to check. if self.n_sig>0: # The length of all signal specification fields must match n_sig # even if some of its elements are None. for f in sig_write_fields: if len(getattr(self, f)) != self.n_sig: raise ValueError('The length of field: '+f+' must match field n_sig.') # Each file_name must correspond to only one fmt, (and only one byte offset if defined). datfmts = {} for ch in range(self.n_sig): if self.file_name[ch] not in datfmts: datfmts[self.file_name[ch]] = self.fmt[ch] else: if datfmts[self.file_name[ch]] != self.fmt[ch]: raise ValueError('Each file_name (dat file) specified must have the same fmt') datoffsets = {} if self.byte_offset is not None: # At least one byte offset value exists for ch in range(self.n_sig): if self.byte_offset[ch] is None: continue if self.file_name[ch] not in datoffsets: datoffsets[self.file_name[ch]] = self.byte_offset[ch] else: if datoffsets[self.file_name[ch]] != self.byte_offset[ch]: raise ValueError('Each file_name (dat file) specified must have the same byte offset')
[ "def", "check_field_cohesion", "(", "self", ",", "rec_write_fields", ",", "sig_write_fields", ")", ":", "# If there are no signal specification fields, there is nothing to check.", "if", "self", ".", "n_sig", ">", "0", ":", "# The length of all signal specification fields must ma...
48.441176
24.794118
def op_at_on(operation: ops.Operation, time: Timestamp, device: Device): """Creates a scheduled operation with a device-determined duration.""" return ScheduledOperation(time, device.duration_of(operation), operation)
[ "def", "op_at_on", "(", "operation", ":", "ops", ".", "Operation", ",", "time", ":", "Timestamp", ",", "device", ":", "Device", ")", ":", "return", "ScheduledOperation", "(", "time", ",", "device", ".", "duration_of", "(", "operation", ")", ",", "operation...
47
6.428571
def compile_search(pattern, flags=0, **kwargs): """Compile with extended search references.""" return _regex.compile(_apply_search_backrefs(pattern, flags), flags, **kwargs)
[ "def", "compile_search", "(", "pattern", ",", "flags", "=", "0", ",", "*", "*", "kwargs", ")", ":", "return", "_regex", ".", "compile", "(", "_apply_search_backrefs", "(", "pattern", ",", "flags", ")", ",", "flags", ",", "*", "*", "kwargs", ")" ]
44.75
22.25
def color_text(text, color): r""" SeeAlso: highlight_text lexer_shortnames = sorted(ut.flatten(ut.take_column(pygments.lexers.LEXERS.values(), 2))) """ import utool as ut if color is None or not ENABLE_COLORS: return text elif color == 'python': return highlight_text(text, color) elif color == 'sql': return highlight_text(text, 'sql') try: import pygments import pygments.console # if color == 'guess': # import linguist # NOQA # pygments.lexers.guess_lexer(text) # return highlight_text(text, color) ansi_text = pygments.console.colorize(color, text) if ut.WIN32: import colorama ansi_reset = (colorama.Style.RESET_ALL) else: ansi_reset = pygments.console.colorize('reset', '') ansi_text = ansi_text + ansi_reset return ansi_text except ImportError: return text
[ "def", "color_text", "(", "text", ",", "color", ")", ":", "import", "utool", "as", "ut", "if", "color", "is", "None", "or", "not", "ENABLE_COLORS", ":", "return", "text", "elif", "color", "==", "'python'", ":", "return", "highlight_text", "(", "text", ",...
31.866667
14.766667
def from_dict(data, ctx): """ Instantiate a new Instrument from a dict (generally from loading a JSON response). The data used to instantiate the Instrument is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('minimumTradeSize') is not None: data['minimumTradeSize'] = ctx.convert_decimal_number( data.get('minimumTradeSize') ) if data.get('maximumTrailingStopDistance') is not None: data['maximumTrailingStopDistance'] = ctx.convert_decimal_number( data.get('maximumTrailingStopDistance') ) if data.get('minimumTrailingStopDistance') is not None: data['minimumTrailingStopDistance'] = ctx.convert_decimal_number( data.get('minimumTrailingStopDistance') ) if data.get('maximumPositionSize') is not None: data['maximumPositionSize'] = ctx.convert_decimal_number( data.get('maximumPositionSize') ) if data.get('maximumOrderUnits') is not None: data['maximumOrderUnits'] = ctx.convert_decimal_number( data.get('maximumOrderUnits') ) if data.get('marginRate') is not None: data['marginRate'] = ctx.convert_decimal_number( data.get('marginRate') ) if data.get('commission') is not None: data['commission'] = \ ctx.primitives.InstrumentCommission.from_dict( data['commission'], ctx ) return Instrument(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'minimumTradeSize'", ")", "is", "not", "None", ":", "data", "[", "'minimumTradeSize'", "]", "=", "ctx", ".", "convert_d...
35.553191
22.617021
def extract_included(cls, fields, resource, resource_instance, included_resources, included_cache): """ Adds related data to the top level included key when the request includes ?include=example,example_field2 """ # this function may be called with an empty record (example: Browsable Interface) if not resource_instance: return current_serializer = fields.serializer context = current_serializer.context included_serializers = utils.get_included_serializers(current_serializer) included_resources = copy.copy(included_resources) included_resources = [inflection.underscore(value) for value in included_resources] for field_name, field in six.iteritems(fields): # Skip URL field if field_name == api_settings.URL_FIELD_NAME: continue # Skip fields without relations or serialized data if not isinstance( field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer) ): continue try: included_resources.remove(field_name) except ValueError: # Skip fields not in requested included resources # If no child field, directly continue with the next field if field_name not in [node.split('.')[0] for node in included_resources]: continue relation_instance = cls.extract_relation_instance( field_name, field, resource_instance, current_serializer ) if isinstance(relation_instance, Manager): relation_instance = relation_instance.all() serializer_data = resource.get(field_name) if isinstance(field, relations.ManyRelatedField): serializer_class = included_serializers[field_name] field = serializer_class(relation_instance, many=True, context=context) serializer_data = field.data if isinstance(field, relations.RelatedField): if relation_instance is None or not serializer_data: continue many = field._kwargs.get('child_relation', None) is not None if isinstance(field, ResourceRelatedField) and not many: already_included = serializer_data['type'] in included_cache and \ serializer_data['id'] in included_cache[serializer_data['type']] if already_included: continue serializer_class = 
included_serializers[field_name] field = serializer_class(relation_instance, many=many, context=context) serializer_data = field.data new_included_resources = [key.replace('%s.' % field_name, '', 1) for key in included_resources if field_name == key.split('.')[0]] if isinstance(field, ListSerializer): serializer = field.child relation_type = utils.get_resource_type_from_serializer(serializer) relation_queryset = list(relation_instance) if serializer_data: for position in range(len(serializer_data)): serializer_resource = serializer_data[position] nested_resource_instance = relation_queryset[position] resource_type = ( relation_type or utils.get_resource_type_from_instance(nested_resource_instance) ) serializer_fields = utils.get_serializer_fields( serializer.__class__( nested_resource_instance, context=serializer.context ) ) new_item = cls.build_json_resource_obj( serializer_fields, serializer_resource, nested_resource_instance, resource_type, getattr(serializer, '_poly_force_type_resolution', False) ) included_cache[new_item['type']][new_item['id']] = \ utils._format_object(new_item) cls.extract_included( serializer_fields, serializer_resource, nested_resource_instance, new_included_resources, included_cache, ) if isinstance(field, Serializer): relation_type = utils.get_resource_type_from_serializer(field) # Get the serializer fields serializer_fields = utils.get_serializer_fields(field) if serializer_data: new_item = cls.build_json_resource_obj( serializer_fields, serializer_data, relation_instance, relation_type, getattr(field, '_poly_force_type_resolution', False) ) included_cache[new_item['type']][new_item['id']] = utils._format_object( new_item ) cls.extract_included( serializer_fields, serializer_data, relation_instance, new_included_resources, included_cache, )
[ "def", "extract_included", "(", "cls", ",", "fields", ",", "resource", ",", "resource_instance", ",", "included_resources", ",", "included_cache", ")", ":", "# this function may be called with an empty record (example: Browsable Interface)", "if", "not", "resource_instance", ...
45.314961
22.023622
def deactivate_mfa_device(self, user_name, serial_number): """Deactivate and detach MFA Device from user if device exists.""" user = self.get_user(user_name) if serial_number not in user.mfa_devices: raise IAMNotFoundException( "Device {0} not found".format(serial_number) ) user.deactivate_mfa_device(serial_number)
[ "def", "deactivate_mfa_device", "(", "self", ",", "user_name", ",", "serial_number", ")", ":", "user", "=", "self", ".", "get_user", "(", "user_name", ")", "if", "serial_number", "not", "in", "user", ".", "mfa_devices", ":", "raise", "IAMNotFoundException", "(...
42.333333
13.888889
def extract_bs(self, cutoff, ligcentroid, resis): """Return list of ids from residues belonging to the binding site""" return [obres.GetIdx() for obres in resis if self.res_belongs_to_bs(obres, cutoff, ligcentroid)]
[ "def", "extract_bs", "(", "self", ",", "cutoff", ",", "ligcentroid", ",", "resis", ")", ":", "return", "[", "obres", ".", "GetIdx", "(", ")", "for", "obres", "in", "resis", "if", "self", ".", "res_belongs_to_bs", "(", "obres", ",", "cutoff", ",", "ligc...
76.333333
24.333333
def _init_metadata(self):
    """stub"""
    def timestamp_metadata(element_name, label):
        # Both timestamp fields share identical integer constraints; only
        # the element id, label and instructions differ, so build them from
        # one template instead of duplicating the whole dict literal.
        return {
            'element_id': Id(self.my_osid_object_form._authority,
                             self.my_osid_object_form._namespace,
                             element_name),
            'element_label': '%s timestamp' % label,
            'instructions': 'enter an integer number of seconds for the %s time' % label,
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'syntax': 'INTEGER',
            'minimum_integer': 0,
            'maximum_integer': None,
            'integer_set': [],
            'default_integer_values': [0]
        }

    self._start_timestamp_metadata = timestamp_metadata('start_timestamp', 'start')
    self._end_timestamp_metadata = timestamp_metadata('end_timestamp', 'end')
[ "def", "_init_metadata", "(", "self", ")", ":", "self", ".", "_start_timestamp_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'start_ti...
39.147059
12.911765
def _get_registry(self, registry_path_or_url):
    '''dict: Return the registry as dict with profiles keyed by id.'''
    # Choose the loader from the path scheme, then fetch the profile list.
    loader = (self._load_json_url
              if registry_path_or_url.startswith('http')
              else self._load_json_file)
    profiles = loader(registry_path_or_url)
    try:
        return {profile['id']: profile for profile in profiles}
    except KeyError as e:
        msg = (
            'Registry at "{path}" has no "id" column.'
        ).format(path=registry_path_or_url)
        six.raise_from(ValueError(msg), e)
[ "def", "_get_registry", "(", "self", ",", "registry_path_or_url", ")", ":", "if", "registry_path_or_url", ".", "startswith", "(", "'http'", ")", ":", "profiles", "=", "self", ".", "_load_json_url", "(", "registry_path_or_url", ")", "else", ":", "profiles", "=", ...
41.3125
16.1875
def write_stats_as_csv(gtfs, path_to_csv, re_write=False):
    """
    Writes data from get_stats to csv file

    Parameters
    ----------
    gtfs: GTFS
    path_to_csv: str
        filepath to the csv file to be generated
    re_write:
        instead of appending, create a new one.
    """
    stats_dict = get_stats(gtfs)
    # Only remove an existing file; a bare os.remove would raise
    # FileNotFoundError when re_write is requested on a fresh path.
    if re_write and os.path.exists(path_to_csv):
        os.remove(path_to_csv)

    # A header row is needed exactly when the target file is missing or empty.
    if os.path.exists(path_to_csv):
        with open(path_to_csv) as csvfile:
            is_new = csvfile.read(1) == ''
    else:
        is_new = True

    with open(path_to_csv, 'a') as csvfile:
        # csv.writer wants a text delimiter on py3 and bytes on py2.
        if (sys.version_info > (3, 0)):
            delimiter = u","
        else:
            delimiter = b","
        statswriter = csv.writer(csvfile, delimiter=delimiter)
        # write column names if the file is new/empty
        if is_new:
            statswriter.writerow([key for key in sorted(stats_dict.keys())])
        # write stats row sorted by column name
        statswriter.writerow([stats_dict[key] for key in sorted(stats_dict.keys())])
[ "def", "write_stats_as_csv", "(", "gtfs", ",", "path_to_csv", ",", "re_write", "=", "False", ")", ":", "stats_dict", "=", "get_stats", "(", "gtfs", ")", "# check if file exist", "if", "re_write", ":", "os", ".", "remove", "(", "path_to_csv", ")", "#if not os.p...
27.695652
16.782609
def delete_tracking_beacon(self, tracking_beacons_id, **data):
    """
    DELETE /tracking_beacons/:tracking_beacons_id/
    Delete the :format:`tracking_beacons` with the specified :tracking_beacons_id.
    """
    endpoint = "/tracking_beacons/{0}/".format(tracking_beacons_id)
    return self.delete(endpoint, data=data)
[ "def", "delete_tracking_beacon", "(", "self", ",", "tracking_beacons_id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "delete", "(", "\"/tracking_beacons/{0}/\"", ".", "format", "(", "tracking_beacons_id", ")", ",", "data", "=", "data", ")" ]
46.142857
23.571429
def get_value(self):
    """
    Return dictionary with values of subsettings.

    Returns:
        dict: values of subsettings.
    """
    try:
        self.raw_value
    except (AttributeError, KeyError) as err:
        # Setting is undefined: re-raise when it is required, otherwise
        # fall back to the default value (transformed if configured).
        self._reraise_if_required(err)
        default_value = self.default_value
        if not self.transform_default:
            return default_value
        return self.transform(default_value)
    # Setting is defined: collect the value of every subsetting.
    return {key: subsetting.get_value()
            for key, subsetting in self.settings.items()}
[ "def", "get_value", "(", "self", ")", ":", "try", ":", "self", ".", "raw_value", "except", "(", "AttributeError", ",", "KeyError", ")", "as", "err", ":", "self", ".", "_reraise_if_required", "(", "err", ")", "default_value", "=", "self", ".", "default_valu...
32.52381
14.047619
def comp(seq):
    """Return the complement of seq, preserving little n's for splitters."""
    # Map each upper-case base to its lower-case partner, protect the
    # little n's behind a sentinel, upper-case everything, then restore n.
    out = seq.replace("A", 't')
    out = out.replace('T', 'a')
    out = out.replace('C', 'g')
    out = out.replace('G', 'c')
    out = out.replace('n', 'Z')
    return out.upper().replace("Z", "n")
[ "def", "comp", "(", "seq", ")", ":", "## makes base to its small complement then makes upper", "return", "seq", ".", "replace", "(", "\"A\"", ",", "'t'", ")", ".", "replace", "(", "'T'", ",", "'a'", ")", ".", "replace", "(", "'C'", ",", "'g'", ")", ".", ...
36.9
10.4
def help_for_command(command):
    """Get the help text (signature + docstring) for a command (function)."""
    rendered = pydoc.text.document(command)
    # pydoc renders bold text as "char + backspace + char"; strip each
    # backspace together with the character that precedes it.
    cleaned, _ = re.subn('.\\x08', '', rendered)
    return cleaned
[ "def", "help_for_command", "(", "command", ")", ":", "help_text", "=", "pydoc", ".", "text", ".", "document", "(", "command", ")", "# remove backspaces", "return", "re", ".", "subn", "(", "'.\\\\x08'", ",", "''", ",", "help_text", ")", "[", "0", "]" ]
44
7.4
def list_keys(self, secret=False):
    """List the keys currently in the keyring.

    The GnuPG option '--show-photos', according to the GnuPG manual, "does
    not work with --with-colons", but since we can't rely on all versions
    of GnuPG to explicitly handle this correctly, we should probably include
    it in the args.

    >>> import shutil
    >>> shutil.rmtree("doctests")
    >>> gpg = GPG(homedir="doctests")
    >>> input = gpg.gen_key_input()
    >>> result = gpg.gen_key(input)
    >>> print1 = result.fingerprint
    >>> result = gpg.gen_key(input)
    >>> print2 = result.fingerprint
    >>> pubkeys = gpg.list_keys()
    >>> assert print1 in pubkeys.fingerprints
    >>> assert print2 in pubkeys.fingerprints
    """

    which = 'public-keys'
    if secret:
        which = 'secret-keys'
    args = []
    args.append("--fixed-list-mode")
    args.append("--fingerprint")
    args.append("--with-colons")
    args.append("--list-options no-show-photos")
    args.append("--list-%s" % (which))
    p = self._open_subprocess(args)

    # there might be some status thingumy here I should handle... (amk)
    # ...nope, unless you care about expired sigs or keys (stevegt)

    # Get the response information
    result = self._result_map['list'](self)
    self._collect_output(p, result, stdin=p.stdin)
    # NOTE: the previous implementation decoded result.data into a local
    # `lines` variable that was never used; that dead statement (and its
    # redundant decode pass) has been removed. _parse_keys() consumes the
    # collected output directly.
    self._parse_keys(result)
    return result
[ "def", "list_keys", "(", "self", ",", "secret", "=", "False", ")", ":", "which", "=", "'public-keys'", "if", "secret", ":", "which", "=", "'secret-keys'", "args", "=", "[", "]", "args", ".", "append", "(", "\"--fixed-list-mode\"", ")", "args", ".", "appe...
36.837209
15.046512
def run(self):
    """Continously scan for BLE advertisements."""
    self.socket = self.bluez.hci_open_dev(self.bt_device_id)

    # Accept every HCI event on this socket.
    filtr = self.bluez.hci_filter_new()
    self.bluez.hci_filter_all_events(filtr)
    self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
    self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)

    self.set_scan_parameters()
    self.toggle_scan(True)

    try:
        while self.keep_going:
            pkt = self.socket.recv(255)
            event = to_int(pkt[1])
            subevent = to_int(pkt[3])
            if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
                # we have an BLE advertisement
                self.process_packet(pkt)
    finally:
        # Always release the HCI socket, even if recv() or packet
        # processing raises; previously an exception leaked the socket.
        self.socket.close()
[ "def", "run", "(", "self", ")", ":", "self", ".", "socket", "=", "self", ".", "bluez", ".", "hci_open_dev", "(", "self", ".", "bt_device_id", ")", "filtr", "=", "self", ".", "bluez", ".", "hci_filter_new", "(", ")", "self", ".", "bluez", ".", "hci_fi...
38.55
17.35
def record_affiliations(self, key, value):
    """Populate the ``record_affiliations`` key."""
    institution_record = get_record_ref(value.get('z'), 'institutions')
    result = {
        'record': institution_record,
        'curated_relation': institution_record is not None,
        'value': value.get('a'),
    }
    return result
[ "def", "record_affiliations", "(", "self", ",", "key", ",", "value", ")", ":", "record", "=", "get_record_ref", "(", "value", ".", "get", "(", "'z'", ")", ",", "'institutions'", ")", "return", "{", "'curated_relation'", ":", "record", "is", "not", "None", ...
30.333333
17.111111
def init_app(self, app):
    """Flask application initialization."""
    self.init_config(app)
    self.cache = Cache(app)
    callback_name = app.config['CACHE_IS_AUTHENTICATED_CALLBACK']
    self.is_authenticated_callback = _callback_factory(callback_name)
    # Register this extension instance on the application.
    app.extensions['invenio-cache'] = self
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "init_config", "(", "app", ")", "self", ".", "cache", "=", "Cache", "(", "app", ")", "self", ".", "is_authenticated_callback", "=", "_callback_factory", "(", "app", ".", "config", "[", ...
42
11.285714
def get_token_from_offset(self, offset):
    """
    Returns the token containing the given character offset (0-based position
    in source text), or the preceeding token if the position is between tokens.
    """
    # bisect finds the first start-offset greater than `offset`; the token
    # containing `offset` is the one just before that insertion point.
    index = bisect.bisect(self._token_offsets, offset) - 1
    return self._tokens[index]
[ "def", "get_token_from_offset", "(", "self", ",", "offset", ")", ":", "return", "self", ".", "_tokens", "[", "bisect", ".", "bisect", "(", "self", ".", "_token_offsets", ",", "offset", ")", "-", "1", "]" ]
46.833333
17.833333
def rse(label, pred):
    """computes the root relative squared error (condensed using standard deviation formula)"""
    rmse = np.sqrt(np.mean(np.square(label - pred), axis=None))
    label_std = np.std(label, axis=None)
    return rmse / label_std
[ "def", "rse", "(", "label", ",", "pred", ")", ":", "numerator", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "np", ".", "square", "(", "label", "-", "pred", ")", ",", "axis", "=", "None", ")", ")", "denominator", "=", "np", ".", "std", ...
52.8
11.8
def f_add_result(self, *args, **kwargs):
    """Adds a result under the current node.

    There are two ways to add a new result either by adding a result instance:

    >>> new_result = Result('group1.group2.myresult', 1666, x=3, y=4, comment='Example!')
    >>> traj.f_add_result(new_result)

    Or by passing the values directly to the function, with the name being the
    first (non-keyword!) argument:

    >>> traj.f_add_result('group1.group2.myresult', 1666, x=3, y=3,comment='Example!')

    If you want to create a different result than the standard result, you can
    give the constructor as the first (non-keyword!) argument followed by the
    name (non-keyword!):

    >>> traj.f_add_result(PickleResult,'group1.group2.myresult', 1666, x=3, y=3, comment='Example!')

    Additional arguments (here `1666`) or keyword arguments (here `x=3, y=3`)
    are passed onto the constructor of the result.

    Adds the full name of the current node as prefix to the name of the result.
    If current node is a single run (root) adds the prefix `'results.runs.run_08%d%'`
    to the full name where `'08%d'` is replaced by the index of the current run.
    """
    # Pure delegation: the natural-naming interface performs the actual
    # construction and insertion under the RESULT group type.
    return self._nn_interface._add_generic(self, type_name=RESULT,
                                           group_type_name=RESULT_GROUP,
                                           args=args, kwargs=kwargs)
[ "def", "f_add_result", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_nn_interface", ".", "_add_generic", "(", "self", ",", "type_name", "=", "RESULT", ",", "group_type_name", "=", "RESULT_GROUP", ",", "args", ...
44.65625
34.375
def do_db_auth(host, connection, db_name):
    """
    Attempts to authenticate against the mongo instance.

    Tries:
      - Auth'ing against admin as 'admin' ; credentials: <host>/arctic/admin/admin
      - Auth'ing against db_name (which may be None if auth'ing against admin above)

    returns True if authentication succeeded.
    """
    admin_creds = get_auth(host, 'admin', 'admin')
    user_creds = get_auth(host, 'arctic', db_name)

    # Attempt to authenticate the connection
    # Try at 'admin level' first as this allows us to enableSharding, which we want
    if admin_creds is None:
        # Get ordinary credentials for authenticating against the DB
        if user_creds is None:
            logger.error("You need credentials for db '%s' on '%s', or admin credentials" % (db_name, host))
            return False
        if not authenticate(connection[db_name], user_creds.user, user_creds.password):
            logger.error("Failed to authenticate to db '%s' on '%s', using user credentials" % (db_name, host))
            return False
        return True
    elif not authenticate(connection.admin, admin_creds.user, admin_creds.password):
        logger.error("Failed to authenticate to '%s' as Admin. Giving up." % (host))
        return False
    # Ensure we attempt to auth against the user DB, for non-priviledged users to get access
    # BUGFIX: guard against user_creds being None — admin auth alone is
    # sufficient, and dereferencing user_creds.user here used to raise
    # AttributeError when no 'arctic' credentials were configured.
    if user_creds is not None:
        authenticate(connection[db_name], user_creds.user, user_creds.password)
    return True
[ "def", "do_db_auth", "(", "host", ",", "connection", ",", "db_name", ")", ":", "admin_creds", "=", "get_auth", "(", "host", ",", "'admin'", ",", "'admin'", ")", "user_creds", "=", "get_auth", "(", "host", ",", "'arctic'", ",", "db_name", ")", "# Attempt to...
47.6
27.866667
def _connect(self):
    """
    Connect to the statsd server
    """
    if not statsd:
        return

    if hasattr(statsd, 'StatsClient'):
        # Newer python-statsd API: use a pipelined client.
        client = statsd.StatsClient(host=self.host, port=self.port)
        self.connection = client.pipeline()
        return

    # Create socket
    self.connection = statsd.Connection(
        host=self.host,
        port=self.port,
        sample_rate=1.0,
    )
[ "def", "_connect", "(", "self", ")", ":", "if", "not", "statsd", ":", "return", "if", "hasattr", "(", "statsd", ",", "'StatsClient'", ")", ":", "self", ".", "connection", "=", "statsd", ".", "StatsClient", "(", "host", "=", "self", ".", "host", ",", ...
25.631579
13.315789
def join(left, right, key=None, lkey=None, rkey=None, presorted=False,
         buffersize=None, tempdir=None, cache=True, lprefix=None,
         rprefix=None):
    """
    Perform an equi-join on the given tables. E.g.::

        >>> import petl as etl
        >>> table1 = [['id', 'colour'],
        ...           [1, 'blue'],
        ...           [2, 'red'],
        ...           [3, 'purple']]
        >>> table2 = [['id', 'shape'],
        ...           [1, 'circle'],
        ...           [3, 'square'],
        ...           [4, 'ellipse']]
        >>> table3 = etl.join(table1, table2, key='id')
        >>> table3
        +----+----------+----------+
        | id | colour   | shape    |
        +====+==========+==========+
        |  1 | 'blue'   | 'circle' |
        +----+----------+----------+
        |  3 | 'purple' | 'square' |
        +----+----------+----------+

    If no key is given, a natural join is tried. If the key is not unique in
    either or both tables, matching rows are paired within each key group
    (a local cross-product). Compound keys are supported by passing a list
    of field names as `key`.

    If `presorted` is True, it is assumed that the data are already sorted by
    the given key, and the `buffersize`, `tempdir` and `cache` arguments are
    ignored. Otherwise, the data are sorted, see also the discussion of the
    `buffersize`, `tempdir` and `cache` arguments under the
    :func:`petl.transform.sorts.sort` function.

    Left and right tables with different key fields can be handled via the
    `lkey` and `rkey` arguments.

    """

    # TODO don't read data twice (occurs if using natural key)
    lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
    sort_opts = dict(presorted=presorted, buffersize=buffersize,
                     tempdir=tempdir, cache=cache)
    return JoinView(left, right, lkey=lkey, rkey=rkey,
                    lprefix=lprefix, rprefix=rprefix, **sort_opts)
[ "def", "join", "(", "left", ",", "right", ",", "key", "=", "None", ",", "lkey", "=", "None", ",", "rkey", "=", "None", ",", "presorted", "=", "False", ",", "buffersize", "=", "None", ",", "tempdir", "=", "None", ",", "cache", "=", "True", ",", "l...
38.063158
11.852632
def _get_movielens_path():
    """
    Get path to the movielens dataset file.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'movielens.zip')
[ "def", "_get_movielens_path", "(", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "'movielens.zip'", ")" ]
27.142857
12
def uri(self, value, strip_iri=True):
    """ Converts py_uri or ttl uri to a http://... full uri format

    Args:
        value: the string to convert

    Returns:
        full uri of an abbreivated uri
    """
    full_uri = self.convert_to_uri(value, strip_iri=strip_iri)
    return full_uri
[ "def", "uri", "(", "self", ",", "value", ",", "strip_iri", "=", "True", ")", ":", "return", "self", ".", "convert_to_uri", "(", "value", ",", "strip_iri", "=", "strip_iri", ")" ]
26.454545
18
def save(self, filename):
    '''save rally points to a file'''
    # Use a context manager so the file is closed even if a write fails
    # (the previous open()/close() pair leaked the handle on error).
    with open(filename, mode='w') as f:
        for p in self.rally_points:
            # lat/lng are stored as 1e7-scaled integers; convert to degrees.
            f.write("RALLY %f\t%f\t%f\t%f\t%f\t%d\n" % (p.lat * 1e-7, p.lng * 1e-7, p.alt,
                                                        p.break_alt, p.land_dir, p.flags))
[ "def", "save", "(", "self", ",", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "mode", "=", "'w'", ")", "for", "p", "in", "self", ".", "rally_points", ":", "f", ".", "write", "(", "\"RALLY %f\\t%f\\t%f\\t%f\\t%f\\t%d\\n\"", "%", "(", "p...
47.714286
21.142857
def interrupt_kernel(self):
    """ Attempts to interrupt the running kernel.

    Also unsets _reading flag, to avoid runtime errors
    if raw_input is called again.
    """
    if self.custom_interrupt:
        self._reading = False
        self.custom_interrupt_requested.emit()
        return
    if self.kernel_manager.has_kernel:
        self._reading = False
        self.kernel_manager.interrupt_kernel()
        return
    self._append_plain_text('Kernel process is either remote or '
                            'unspecified. Cannot interrupt.\n')
[ "def", "interrupt_kernel", "(", "self", ")", ":", "if", "self", ".", "custom_interrupt", ":", "self", ".", "_reading", "=", "False", "self", ".", "custom_interrupt_requested", ".", "emit", "(", ")", "elif", "self", ".", "kernel_manager", ".", "has_kernel", "...
39.6
13.466667
def write(self, output='', flush=True, position=0):
    """
    Args:
        output(str): Output string
        flush(bool): Flush the output stream after writing
        position(int): Position relative to the bottom of the screen to write output

    Write to stream at a given position
    """

    if self.enabled:
        term = self.term
        stream = self.stream

        try:
            # Position the cursor `position` rows above the bottom of the screen.
            term.move_to(0, term.height - position)
            # Include \r and term call to cover most conditions
            if NEEDS_UNICODE_HELP:  # pragma: no cover (Version dependent 2.6)
                # Old streams cannot write unicode directly; encode with the
                # stream's declared encoding, falling back to UTF-8.
                encoding = stream.encoding or 'UTF-8'
                stream.write(('\r' + term.clear_eol + output).encode(encoding))
            else:  # pragma: no cover (Version dependent >= 2.7)
                stream.write('\r' + term.clear_eol + output)

        finally:
            # Reset position and scrolling
            self._set_scroll_area()

        if flush:
            stream.flush()
[ "def", "write", "(", "self", ",", "output", "=", "''", ",", "flush", "=", "True", ",", "position", "=", "0", ")", ":", "if", "self", ".", "enabled", ":", "term", "=", "self", ".", "term", "stream", "=", "self", ".", "stream", "try", ":", "term", ...
36.413793
21.310345
def add_item(self, item):
    """Updates the list of items in the current transaction"""
    # Keys are 1-based: item_1, item_2, ...
    next_index = len(self.items) + 1
    self.items.update({"item_" + str(next_index): item})
[ "def", "add_item", "(", "self", ",", "item", ")", ":", "_idx", "=", "len", "(", "self", ".", "items", ")", "self", ".", "items", ".", "update", "(", "{", "\"item_\"", "+", "str", "(", "_idx", "+", "1", ")", ":", "item", "}", ")" ]
44.75
10.75
def short_doc(self):
    """Gets the "short" documentation of a command.

    By default, this is the :attr:`brief` attribute.
    If that lookup leads to an empty string then the first line of the
    :attr:`help` attribute is used instead.
    """
    if self.brief is not None:
        return self.brief
    help_text = self.help
    if help_text is not None:
        first_line = help_text.split('\n', 1)[0]
        return first_line
    return ''
[ "def", "short_doc", "(", "self", ")", ":", "if", "self", ".", "brief", "is", "not", "None", ":", "return", "self", ".", "brief", "if", "self", ".", "help", "is", "not", "None", ":", "return", "self", ".", "help", ".", "split", "(", "'\\n'", ",", ...
35.166667
14.166667
def fetch_friend_ids(self, user):
    """
    Fetches friend ids from twitter

    Return:
        collection of friend ids
    """
    return [friend.id for friend in self.fetch_friends(user)]
[ "def", "fetch_friend_ids", "(", "self", ",", "user", ")", ":", "friends", "=", "self", ".", "fetch_friends", "(", "user", ")", "friend_ids", "=", "[", "]", "for", "friend", "in", "friends", ":", "friend_ids", ".", "append", "(", "friend", ".", "id", ")...
25.5
10
def get_template_loader(self, app, subdir='templates', create=False): ''' Returns a template loader object for the given app name in the given subdir. For example, get_template_loader('homepage', 'styles') will return a loader for the styles/ directory in the homepage app. The app parameter can be either an app name or an AppConfig instance. The subdir parameter is normally 'templates', 'scripts', or 'styles', but it can be any subdirectory name of the given app. Normally, you should not have to call this method. Django automatically generates two shortcut functions for every DMP-registered apps, and these shortcut functions are the preferred way to render templates. This method is useful when you want a custom template loader to a directory that does not conform to the app_dir/templates/* pattern. If the loader is not found in the DMP cache, one of two things occur: 1. If create=True, it is created automatically and returned. This overrides the need to register the app as a DMP app. 2. If create=False, a TemplateDoesNotExist is raised. This is the default behavior. ''' # ensure we have an AppConfig if app is None: raise TemplateDoesNotExist("Cannot locate loader when app is None") if not isinstance(app, AppConfig): app = apps.get_app_config(app) # get the loader with the path of this app+subdir path = os.path.join(app.path, subdir) # if create=False, the loader must already exist in the cache if not create: dmp = apps.get_app_config('django_mako_plus') if not dmp.is_registered_app(app): raise ValueError("{} is not registered with DMP [hint: check urls.py for include('django_mako_plus.urls')].".format(app)) # return the template by path return self.get_template_loader_for_path(path, use_cache=True)
[ "def", "get_template_loader", "(", "self", ",", "app", ",", "subdir", "=", "'templates'", ",", "create", "=", "False", ")", ":", "# ensure we have an AppConfig", "if", "app", "is", "None", ":", "raise", "TemplateDoesNotExist", "(", "\"Cannot locate loader when app i...
49.8
29.65
def set_provider_links(self, resource_ids=None):
    """Sets a provider chain in order from the most recent source to
    the originating source.

    :param resource_ids: the new source
    :type resource_ids: ``osid.id.Id[]``
    :raise: ``InvalidArgument`` -- ``resource_ids`` is invalid
    :raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
    :raise: ``NullArgument`` -- ``resource_ids`` is ``null``

    *compliance: mandatory -- This method must be implemented.*
    """
    if resource_ids is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['provider_link_ids'])
    if metadata.is_read_only():
        raise NoAccess()
    if not self._is_valid_input(resource_ids, metadata, array=True):
        raise InvalidArgument()
    # Persist the ids in string form.
    self._my_map['providerLinkIds'] = [str(i) for i in resource_ids]
[ "def", "set_provider_links", "(", "self", ",", "resource_ids", "=", "None", ")", ":", "if", "resource_ids", "is", "None", ":", "raise", "NullArgument", "(", ")", "metadata", "=", "Metadata", "(", "*", "*", "settings", ".", "METADATA", "[", "'provider_link_id...
40.458333
17
def InsertData(self, table_id, fd, schema, job_id):
    """Insert data into a bigquery table.

    If the table specified doesn't exist, it will be created with the specified
    schema.

    Args:
      table_id: string table id
      fd: open file descriptor containing the newline separated JSON
      schema: BigQuery schema dict
      job_id: string job id

    Returns:
      API response object on success, None on failure
    """
    # Load-job configuration: create/append the destination table from
    # newline-delimited JSON, using the caller-supplied schema.
    configuration = {
        "schema": {
            "fields": schema
        },
        "destinationTable": {
            "projectId": self.project_id,
            "tableId": table_id,
            "datasetId": self.dataset_id
        },
        "sourceFormat": "NEWLINE_DELIMITED_JSON",
    }
    body = {
        "configuration": {
            "load": configuration
        },
        "jobReference": {
            "projectId": self.project_id,
            "jobId": job_id
        }
    }
    # File content can be gzipped for bandwidth efficiency. The server handles
    # it correctly without any changes to the request.
    mediafile = http.MediaFileUpload(
        fd.name, mimetype="application/octet-stream")
    job = self.service.jobs().insert(
        projectId=self.project_id, body=body, media_body=mediafile)
    try:
        response = job.execute()
        return response
    except errors.HttpError as e:
        # A failed insert may simply mean the dataset is missing (first ever
        # export); create it in that case, then retry the upload either way.
        if self.GetDataset(self.dataset_id):
            logging.exception("Error with job: %s", job_id)
        else:
            # If this is our first export ever, we need to create the dataset.
            logging.info("Attempting to create dataset: %s", self.dataset_id)
            self.CreateDataset()
        return self.RetryUpload(job, job_id, e)
[ "def", "InsertData", "(", "self", ",", "table_id", ",", "fd", ",", "schema", ",", "job_id", ")", ":", "configuration", "=", "{", "\"schema\"", ":", "{", "\"fields\"", ":", "schema", "}", ",", "\"destinationTable\"", ":", "{", "\"projectId\"", ":", "self", ...
30.388889
19.5
def update_property(self, name, old_value, new_value):
    # type: (str, Any, Any) -> None
    """
    Handles a property changed event

    :param name: The changed property name
    :param old_value: The previous property value
    :param new_value: The new property value
    """
    event_args = ("on_property_change", name, old_value, new_value)
    with self._lock:
        self.__safe_handlers_callback(*event_args)
[ "def", "update_property", "(", "self", ",", "name", ",", "old_value", ",", "new_value", ")", ":", "# type: (str, Any, Any) -> None", "with", "self", ".", "_lock", ":", "self", ".", "__safe_handlers_callback", "(", "\"on_property_change\"", ",", "name", ",", "old_v...
34.230769
11.615385
def get_code(self):
    """Opens the link and returns the response's content."""
    # Lazily fetch and memoize so the URL is downloaded at most once.
    cached = self.code
    if cached is None:
        cached = urlopen(self.url).read()
        self.code = cached
    return cached
[ "def", "get_code", "(", "self", ")", ":", "if", "self", ".", "code", "is", "None", ":", "self", ".", "code", "=", "urlopen", "(", "self", ".", "url", ")", ".", "read", "(", ")", "return", "self", ".", "code" ]
36.8
11.2
def _get_ll_pointer_type(self, target_data, context=None):
    """
    Convert this type object to an LLVM type.
    """
    from . import Module, GlobalVariable
    from ..binding import parse_assembly

    # Build a throwaway module holding one global of this type, round-trip
    # it through the binding layer, and read the resulting LLVM type back.
    mod = Module() if context is None else Module(context=context)
    foo = GlobalVariable(mod, self, name="foo")
    with parse_assembly(str(mod)) as llmod:
        return llmod.get_global_variable(foo.name).type
[ "def", "_get_ll_pointer_type", "(", "self", ",", "target_data", ",", "context", "=", "None", ")", ":", "from", ".", "import", "Module", ",", "GlobalVariable", "from", ".", ".", "binding", "import", "parse_assembly", "if", "context", "is", "None", ":", "m", ...
33.785714
11.785714
def commit(self): """ Insert the text at the current cursor position. """ # Backup and remove the currently selected text (may be none). tc = self.qteWidget.textCursor() self.selText = tc.selection().toHtml() self.selStart = tc.selectionStart() self.selEnd = tc.selectionEnd() tc.removeSelectedText() # Move to the start of the (just deleted) text block and insert # the characters there. tc.setPosition(self.selStart) # If the MIME data contained an image then create a new HTML # resource for it and insert it with the HTML syntax for adding # an image. On the other hand, if the resource was simply a string, # then just add it. if self.isImage: imgName = "pastedImage_{}".format(str(self.pasteCnt)) document = self.qteWidget.document() document.addResource(QtGui.QTextDocument.ImageResource, QtCore.QUrl(imgName), self.data) self.qteWidget.setDocument(document) tc.insertHtml('<img src={}>'.format(imgName)) else: tc.insertText(self.data) # Update the text cursor in the document. self.qteWidget.setTextCursor(tc)
[ "def", "commit", "(", "self", ")", ":", "# Backup and remove the currently selected text (may be none).", "tc", "=", "self", ".", "qteWidget", ".", "textCursor", "(", ")", "self", ".", "selText", "=", "tc", ".", "selection", "(", ")", ".", "toHtml", "(", ")", ...
39.21875
17.59375
def GetRunlevelsLSB(states):
    """Accepts a string and returns a list of strings of numeric LSB runlevels."""
    if not states:
        return set()
    # Only runlevels 0-6 are meaningful; anything else is logged and dropped.
    valid = {"0", "1", "2", "3", "4", "5", "6"}
    _LogInvalidRunLevels(states, valid)
    return valid & set(states.split())
[ "def", "GetRunlevelsLSB", "(", "states", ")", ":", "if", "not", "states", ":", "return", "set", "(", ")", "valid", "=", "set", "(", "[", "\"0\"", ",", "\"1\"", ",", "\"2\"", ",", "\"3\"", ",", "\"4\"", ",", "\"5\"", ",", "\"6\"", "]", ")", "_LogInv...
39.285714
11.571429
def to_string(input_):
    """Format an input for representation as text

    This method is just a convenience that handles default LaTeX formatting
    """
    usetex = rcParams['text.usetex']
    if isinstance(input_, units.UnitBase):
        return input_.to_string('latex_inline')
    if usetex:
        # Numbers get numeric LaTeX formatting; everything else is escaped
        # as a label.
        if isinstance(input_, (float, int)):
            return tex.float_to_latex(input_)
        return tex.label_to_latex(input_)
    return str(input_)
[ "def", "to_string", "(", "input_", ")", ":", "usetex", "=", "rcParams", "[", "'text.usetex'", "]", "if", "isinstance", "(", "input_", ",", "units", ".", "UnitBase", ")", ":", "return", "input_", ".", "to_string", "(", "'latex_inline'", ")", "if", "isinstan...
34.384615
12.538462
def lml(self):
    """Log of the restricted marginal likelihood.

    Let 𝐲 = vec(Y), M = A⊗X, and H = MᵀK⁻¹M. The restricted log of the
    marginal likelihood is given by [R07]_::

        2⋅log(p(𝐲)) = -(n⋅p - c⋅p) log(2π) + log(|MᵀM|) - log(|K|)
                    - log(|H|) - (𝐲-𝐦)ᵀ K⁻¹ (𝐲-𝐦),

    where 𝐦 = M𝛃 for 𝛃 = H⁻¹MᵀK⁻¹𝐲. The quadratic forms 𝐲ᵀK⁻¹𝐲, 𝐦ᵀK⁻¹𝐲
    and 𝐦ᵀK⁻¹𝐦 as well as the log-determinants are taken from the cached
    ``self._terms`` / ``self._logdet*`` quantities computed elsewhere.

    Returns
    -------
    lml : float
        Log of the marginal likelihood.

    References
    ----------
    .. [R07] LaMotte, L. R. (2007). A direct derivation of the REML
       likelihood function. Statistical Papers, 48(2), 321-327.
    """
    cached = self._terms
    # Assemble 2*log(p(y)) term by term, then halve at the end.
    value = -self._df * log2pi + self._logdet_MM - self._logdetK
    value -= self._logdetH
    value += -cached["yKiy"] - cached["mKim"] + 2 * cached["mKiy"]
    return value / 2
[ "def", "lml", "(", "self", ")", ":", "terms", "=", "self", ".", "_terms", "yKiy", "=", "terms", "[", "\"yKiy\"", "]", "mKiy", "=", "terms", "[", "\"mKiy\"", "]", "mKim", "=", "terms", "[", "\"mKim\"", "]", "lml", "=", "-", "self", ".", "_df", "*"...
29.569231
22.8
def show_address_scope(self, address_scope, **_params):
    """Fetch details of a single address scope.

    :param address_scope: identifier of the address scope to look up.
    :param _params: optional query parameters forwarded to the GET call.
    :returns: the result of ``self.get`` for the address-scope endpoint.
    """
    path = self.address_scope_path % address_scope
    return self.get(path, params=_params)
[ "def", "show_address_scope", "(", "self", ",", "address_scope", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "self", ".", "address_scope_path", "%", "(", "address_scope", ")", ",", "params", "=", "_params", ")" ]
55.25
10.5
def get_locations(self):
    """Retrieve every location tracked for this group from the service.

    Performs a GET against ``<endpoint>/locations`` filtered by
    ``self.group_name``. When the response carries a ``locations`` field,
    the result is cached on ``self.locations``.

    :return: dictionary of locations keyed by location id (the previous
        cached value is returned unchanged if the response lacks a
        ``locations`` field).
    """
    import requests

    response = requests.get(
        self.endpoint + '/locations',
        params={'group': self.group_name},
    )
    payload = response.json()
    # Only overwrite the cache when the service actually returned data.
    if 'locations' in payload.keys():
        self.locations = payload['locations']
    return self.locations
[ "def", "get_locations", "(", "self", ")", ":", "import", "requests", "url", "=", "self", ".", "endpoint", "+", "'/locations'", "params", "=", "{", "'group'", ":", "self", ".", "group_name", "}", "response", "=", "requests", ".", "get", "(", "url", ",", ...
26.222222
21.407407
def notebook_mode(m):
    """Toggle Jupyter-notebook-aware progress reporting.

    Selects which tqdm range factory this module uses for indicating
    progress during long measurement sequences: the notebook widget
    variant when *m* is truthy, the plain terminal variant otherwise.

    :param bool m: If True, assume to be in notebook.
    :return: None
    :rtype: NoneType
    """
    global NOTEBOOK_MODE
    global TRANGE
    NOTEBOOK_MODE = m
    TRANGE = tqdm.tnrange if NOTEBOOK_MODE else tqdm.trange
[ "def", "notebook_mode", "(", "m", ")", ":", "global", "NOTEBOOK_MODE", "global", "TRANGE", "NOTEBOOK_MODE", "=", "m", "if", "NOTEBOOK_MODE", ":", "TRANGE", "=", "tqdm", ".", "tnrange", "else", ":", "TRANGE", "=", "tqdm", ".", "trange" ]
27.882353
22.235294
def get_user_activities(self, username, tournament=1):
    """Get user activities (works for all users!).

    Args:
        username (str): name of the user
        tournament (int): ID of the tournament (optional, defaults to 1)

    Returns:
        list: list of user activities (`dict`)

        Each activity in the list has the following structure:

            * resolved (`bool`)
            * roundNumber (`int`)
            * tournament (`int`)
            * submission (`dict`)
             * concordance (`bool`)
             * consistency (`float`)
             * date (`datetime`)
             * liveLogloss (`float`)
             * liveAuroc (`float`)
             * validationLogloss (`float`)
             * validationAuroc (`float`)
            * stake (`dict`)
             * confidence (`decimal.Decimal`)
             * date (`datetime`)
             * nmrEarned (`decimal.Decimal`)
             * staked (`bool`)
             * usdEarned (`decimal.Decimal`)
            * burned (`bool`)

    Example:
        >>> NumerAPI().get_user_activities("slyfox", 5)
        [{'tournament': 5,
          'submission': {
             'validationLogloss': 0.6928141372700635,
             'validationAuroc': 0.52,
             'liveLogloss': None,
             'liveAuroc': None,
             'date': datetime.datetime(2018, 7, 14, 17, 5, 27, 206042,
                                       tzinfo=tzutc()),
             'consistency': 83.33333333333334,
             'concordance': True},
          'stake': {'value': Decimal('0.10'),
             'usdEarned': None,
             'staked': True,
             'nmrEarned': None,
             'date': datetime.datetime(2018, 7, 14, 17, 7, 7, 877845,
                                       tzinfo=tzutc()),
             'confidence': Decimal('0.100000000000000000')},
          'burned': False,
          'roundNumber': 116,
          'resolved': False},
         {'tournament': 5,
          'submission': {'validationLogloss': 0.6928141372700635,
         ...
        ]
    """
    # GraphQL query: pull all round activities (submission + stake) for
    # one user in one tournament.
    query = '''
      query($tournament: Int!
            $username: String!) {
        userActivities(tournament: $tournament
                       username: $username) {
          resolved
          roundNumber
          tournament
          submission {
            concordance
            consistency
            date
            liveLogloss
            liveAuroc
            validationLogloss
            validationAuroc
          }
          stake {
            confidence
            date
            nmrEarned
            staked
            usdEarned
            value
            burned
          }
        }
      }
    '''
    arguments = {'tournament': tournament, 'username': username}
    data = self.raw_query(query, arguments)['data']['userActivities']
    # filter rounds with no activity (a missing submission date means the
    # user did not participate in that round)
    data = [item for item in data
            if item['submission']['date'] is not None]
    for item in data:
        # remove stakes with all values set to None
        if item['stake']['date'] is None:
            del item['stake']
        # parse stake fields: date string -> datetime, numeric strings
        # -> float/Decimal via the utils helpers
        else:
            utils.replace(item['stake'], "date",
                          utils.parse_datetime_string)
            for col in ['confidence', 'value', 'nmrEarned', 'usdEarned']:
                utils.replace(item['stake'], col,
                              utils.parse_float_string)
    # parse submission date strings into datetime objects
    for item in data:
        utils.replace(item['submission'], "date",
                      utils.parse_datetime_string)
    return data
[ "def", "get_user_activities", "(", "self", ",", "username", ",", "tournament", "=", "1", ")", ":", "query", "=", "'''\n query($tournament: Int!\n $username: String!) {\n userActivities(tournament: $tournament\n username: $usern...
35.027778
15.388889
def get_codec(path):
    """Pick the codec implementation matching *path*'s file ending.

    Falls back to the base ``Codec`` when the path has no usable
    extension, and to ``NoCodec`` when no registered ending matches.
    """
    # No usable extension: either there is no dot at all, or the last dot
    # belongs to a directory component rather than the filename.
    if '.' not in path or path.rfind('/') > path.rfind('.'):
        return Codec
    for endings, codec_class in FILE_ENDINGS:
        matched = any(path.endswith(ending) for ending in endings)
        if matched:
            log.debug('Using {0} codec: {1}'.format(endings, path))
            return codec_class
    return NoCodec
[ "def", "get_codec", "(", "path", ")", ":", "if", "'.'", "not", "in", "path", "or", "path", ".", "rfind", "(", "'/'", ")", ">", "path", ".", "rfind", "(", "'.'", ")", ":", "return", "Codec", "for", "endings", ",", "codec_class", "in", "FILE_ENDINGS", ...
33.090909
19.454545
def visualize(self, show_ports=False):
    """Visualize the Compound using nglview.

    Allows for visualization of a Compound within a Jupyter Notebook.
    The compound is written out as a temporary mol2 file which nglview
    then renders as a ball-and-stick widget, with per-element sphere
    sizes derived from atomic radii.

    Parameters
    ----------
    show_ports : bool, optional, default=False
        Visualize Ports in addition to Particles

    Returns
    -------
    nglview.NGLWidget
        The configured viewer widget.

    Raises
    ------
    RuntimeError
        If not running inside an IPython/Jupyter environment.
    """
    nglview = import_('nglview')
    from mdtraj.geometry.sasa import _ATOMIC_RADII
    if run_from_ipython():
        # Strip digits from particle names (keeping underscores) and
        # upper-case them so they map onto element symbols.
        # NOTE(review): this mutates particle names in place on the
        # compound itself, not on a copy -- confirm that is intended.
        remove_digits = lambda x: ''.join(i for i in x
                                          if not i.isdigit() or i == '_')
        for particle in self.particles():
            particle.name = remove_digits(particle.name).upper()
            if not particle.name:
                particle.name = 'UNK'
        # Write the compound to a temp mol2 file for nglview to load.
        # NOTE(review): tmp_dir is never removed, so each call leaks a
        # temporary directory -- consider cleaning it up.
        tmp_dir = tempfile.mkdtemp()
        self.save(os.path.join(tmp_dir, 'tmp.mol2'),
                  show_ports=show_ports,
                  overwrite=True)
        widget = nglview.show_file(os.path.join(tmp_dir, 'tmp.mol2'))
        widget.clear()
        widget.add_ball_and_stick(cylinderOnly=True)
        elements = set([particle.name for particle in self.particles()])
        scale = 50.0  # empirical scaling factor for sphere aspect ratios
        for element in elements:
            try:
                # Known element: size spheres by its atomic radius.
                widget.add_ball_and_stick('_{}'.format(
                    element.upper()),
                    aspect_ratio=_ATOMIC_RADII[element.title()]**1.5 * scale)
            except KeyError:
                # Unknown element symbol: select those particles by index
                # and draw them grey with a default radius.
                ids = [str(i) for i, particle in enumerate(self.particles())
                       if particle.name == element]
                widget.add_ball_and_stick(
                    '@{}'.format(
                        ','.join(ids)),
                    aspect_ratio=0.17**1.5 * scale,
                    color='grey')
        if show_ports:
            # Ports were saved with the '_VS' (virtual site) name.
            widget.add_ball_and_stick('_VS',
                                      aspect_ratio=1.0,
                                      color='#991f00')
        return widget
    else:
        raise RuntimeError('Visualization is only supported in Jupyter '
                           'Notebooks.')
[ "def", "visualize", "(", "self", ",", "show_ports", "=", "False", ")", ":", "nglview", "=", "import_", "(", "'nglview'", ")", "from", "mdtraj", ".", "geometry", ".", "sasa", "import", "_ATOMIC_RADII", "if", "run_from_ipython", "(", ")", ":", "remove_digits",...
43.604167
16.958333