text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def calc_qdgz1_qdgz2_v1(self): """Seperate total direct flow into a small and a fast component. Required control parameters: |A1| |A2| Required flux sequence: |QDGZ| Calculated state sequences: |QDGZ1| |QDGZ2| Basic equation: :math:`QDGZ2 = \\frac{(QDGZ-A2)^2}{QDGZ+A1-A2}` :math:`QDGZ1 = QDGZ - QDGZ1` Examples: The formula for calculating the amount of the fast component of direct flow is borrowed from the famous curve number approach. Parameter |A2| would be the initial loss and parameter |A1| the maximum storage, but one should not take this analogy too serious. Instead, with the value of parameter |A1| set to zero, parameter |A2| just defines the maximum amount of "slow" direct runoff per time step: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> a1(0.0) Let us set the value of |A2| to 4 mm/d, which is 2 mm/12h with respect to the selected simulation step size: >>> a2(4.0) >>> a2 a2(4.0) >>> a2.value 2.0 Define a test function and let it calculate |QDGZ1| and |QDGZ1| for values of |QDGZ| ranging from -10 to 100 mm/12h: >>> from hydpy import UnitTest >>> test = UnitTest(model, ... model.calc_qdgz1_qdgz2_v1, ... last_example=6, ... parseqs=(fluxes.qdgz, ... states.qdgz1, ... states.qdgz2)) >>> test.nexts.qdgz = -10.0, 0.0, 1.0, 2.0, 3.0, 100.0 >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 2.0 | 0.0 | | 5 | 3.0 | 2.0 | 1.0 | | 6 | 100.0 | 2.0 | 98.0 | Setting |A2| to zero and |A1| to 4 mm/d (or 2 mm/12h) results in a smoother transition: >>> a2(0.0) >>> a1(4.0) >>> test() | ex. 
| qdgz | qdgz1 | qdgz2 | -------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 0.666667 | 0.333333 | | 4 | 2.0 | 1.0 | 1.0 | | 5 | 3.0 | 1.2 | 1.8 | | 6 | 100.0 | 1.960784 | 98.039216 | Alternatively, one can mix these two configurations by setting the values of both parameters to 2 mm/h: >>> a2(2.0) >>> a1(2.0) >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 1.5 | 0.5 | | 5 | 3.0 | 1.666667 | 1.333333 | | 6 | 100.0 | 1.99 | 98.01 | Note the similarity of the results for very high values of total direct flow |QDGZ| in all three examples, which converge to the sum of the values of parameter |A1| and |A2|, representing the maximum value of `slow` direct flow generation per simulation step """ con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess if flu.qdgz > con.a2: sta.qdgz2 = (flu.qdgz-con.a2)**2/(flu.qdgz+con.a1-con.a2) sta.qdgz1 = flu.qdgz-sta.qdgz2 else: sta.qdgz2 = 0. sta.qdgz1 = flu.qdgz
[ "def", "calc_qdgz1_qdgz2_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "sta", "=", "self", ".", "sequences", ".", "states", "....
34.158879
18.672897
def get_env_macros(self, data): """Get all environment macros from data For each object in data :: * Fetch all macros in object.__class__.macros * Fetch all customs macros in o.custom :param data: data to get macro :type data: :return: dict with macro name as key and macro value as value :rtype: dict """ env = {} for obj in data: cls = obj.__class__ macros = cls.macros for macro in macros: if macro.startswith("USER"): continue prop = macros[macro] value = self._get_value_from_element(obj, prop) env['%s%s' % (self.env_prefix, macro)] = value if hasattr(obj, 'customs'): # make NAGIOS__HOSTMACADDR from _MACADDR for cmacro in obj.customs: new_env_name = '%s_%s%s' % (self.env_prefix, obj.__class__.__name__.upper(), cmacro[1:].upper()) env[new_env_name] = obj.customs[cmacro] return env
[ "def", "get_env_macros", "(", "self", ",", "data", ")", ":", "env", "=", "{", "}", "for", "obj", "in", "data", ":", "cls", "=", "obj", ".", "__class__", "macros", "=", "cls", ".", "macros", "for", "macro", "in", "macros", ":", "if", "macro", ".", ...
35.121212
17.787879
def count_list(the_list): """ Generates a count of the number of times each unique item appears in a list """ count = the_list.count result = [(item, count(item)) for item in set(the_list)] result.sort() return result
[ "def", "count_list", "(", "the_list", ")", ":", "count", "=", "the_list", ".", "count", "result", "=", "[", "(", "item", ",", "count", "(", "item", ")", ")", "for", "item", "in", "set", "(", "the_list", ")", "]", "result", ".", "sort", "(", ")", ...
29.75
16.75
def send(self, cmd): """ Send a command to the bridge. :param cmd: List of command bytes. """ self._bridge.send(cmd, wait=self.wait, reps=self.reps)
[ "def", "send", "(", "self", ",", "cmd", ")", ":", "self", ".", "_bridge", ".", "send", "(", "cmd", ",", "wait", "=", "self", ".", "wait", ",", "reps", "=", "self", ".", "reps", ")" ]
29.333333
14
def _copy_selection(self, *event): """Copies the current selection to the clipboard. """ if react_to_event(self.view, self.view.editor, event): logger.debug("copy selection") global_clipboard.copy(self.model.selection) return True
[ "def", "_copy_selection", "(", "self", ",", "*", "event", ")", ":", "if", "react_to_event", "(", "self", ".", "view", ",", "self", ".", "view", ".", "editor", ",", "event", ")", ":", "logger", ".", "debug", "(", "\"copy selection\"", ")", "global_clipboa...
40.571429
8.857143
def when_children_replaced ( self, object, listener, remove ): """ Sets up or removes a listener for children being replaced on a specified object. """ object.on_trait_change( listener, "subgraphs", remove = remove, dispatch = "fast_ui" ) object.on_trait_change( listener, "clusters", remove = remove, dispatch = "fast_ui" ) object.on_trait_change( listener, "nodes", remove = remove, dispatch = "fast_ui" ) object.on_trait_change( listener, "edges", remove = remove, dispatch = "fast_ui" )
[ "def", "when_children_replaced", "(", "self", ",", "object", ",", "listener", ",", "remove", ")", ":", "object", ".", "on_trait_change", "(", "listener", ",", "\"subgraphs\"", ",", "remove", "=", "remove", ",", "dispatch", "=", "\"fast_ui\"", ")", "object", ...
55.583333
17
def show_periodical_tree_by_issn(issn): """ Render tree using ISSN. """ trees = tree_handler().trees_by_issn(issn) if not trees: abort(404, "Dokument s ISSN '%s' není dostupný." % issn) return render_trees( trees, partial(web_tools.compose_tree_path, issn=True) )
[ "def", "show_periodical_tree_by_issn", "(", "issn", ")", ":", "trees", "=", "tree_handler", "(", ")", ".", "trees_by_issn", "(", "issn", ")", "if", "not", "trees", ":", "abort", "(", "404", ",", "\"Dokument s ISSN '%s' není dostupný.\" %", "i", "sn)", "", "ret...
23.461538
18.384615
def _split(self, split_cls, editor_buffer=None): """ Split horizontal or vertical. (when editor_buffer is None, show the current buffer there as well.) """ if editor_buffer is None: editor_buffer = self.active_window.editor_buffer active_split = self._get_active_split() index = active_split.index(self.active_window) new_window = Window(editor_buffer) if isinstance(active_split, split_cls): # Add new window to active split. active_split.insert(index, new_window) else: # Split in the other direction. active_split[index] = split_cls([active_split[index], new_window]) # Focus new window. self.active_window = new_window
[ "def", "_split", "(", "self", ",", "split_cls", ",", "editor_buffer", "=", "None", ")", ":", "if", "editor_buffer", "is", "None", ":", "editor_buffer", "=", "self", ".", "active_window", ".", "editor_buffer", "active_split", "=", "self", ".", "_get_active_spli...
36.238095
15.285714
def get_composition_search_session(self, proxy): """Gets a composition search session. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSearchSession) - a CompositionSearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_search() is false compliance: optional - This method must be implemented if supports_composition_search() is true. """ if not self.supports_composition_search(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionSearchSession(proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_composition_search_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_composition_search", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":...
38.791667
15.958333
def setSample(self, sample): """Set sample points from the population. Should be a RDD""" if not isinstance(sample, RDD): raise TypeError("samples should be a RDD, received %s" % type(sample)) self._sample = sample
[ "def", "setSample", "(", "self", ",", "sample", ")", ":", "if", "not", "isinstance", "(", "sample", ",", "RDD", ")", ":", "raise", "TypeError", "(", "\"samples should be a RDD, received %s\"", "%", "type", "(", "sample", ")", ")", "self", ".", "_sample", "...
49.2
13.2
def novatel_diag_send(self, timeStatus, receiverStatus, solStatus, posType, velType, posSolAge, csFails, force_mavlink1=False): ''' Transmits the diagnostics data from the Novatel OEMStar GPS timeStatus : The Time Status. See Table 8 page 27 Novatel OEMStar Manual (uint8_t) receiverStatus : Status Bitfield. See table 69 page 350 Novatel OEMstar Manual (uint32_t) solStatus : solution Status. See table 44 page 197 (uint8_t) posType : position type. See table 43 page 196 (uint8_t) velType : velocity type. See table 43 page 196 (uint8_t) posSolAge : Age of the position solution in seconds (float) csFails : Times the CRC has failed since boot (uint16_t) ''' return self.send(self.novatel_diag_encode(timeStatus, receiverStatus, solStatus, posType, velType, posSolAge, csFails), force_mavlink1=force_mavlink1)
[ "def", "novatel_diag_send", "(", "self", ",", "timeStatus", ",", "receiverStatus", ",", "solStatus", ",", "posType", ",", "velType", ",", "posSolAge", ",", "csFails", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ...
77.714286
55.142857
def get_column_cursor_position(self, column): """ Return the relative cursor position for this column at the current line. (It will stay between the boundaries of the line in case of a larger number.) """ line_length = len(self.current_line) current_column = self.cursor_position_col column = max(0, min(line_length, column)) return column - current_column
[ "def", "get_column_cursor_position", "(", "self", ",", "column", ")", ":", "line_length", "=", "len", "(", "self", ".", "current_line", ")", "current_column", "=", "self", ".", "cursor_position_col", "column", "=", "max", "(", "0", ",", "min", "(", "line_len...
38.090909
14.090909
def contains_point( self, x, y ): """Is the point (x,y) on this curve?""" return ( y * y - ( x * x * x + self.__a * x + self.__b ) ) % self.__p == 0
[ "def", "contains_point", "(", "self", ",", "x", ",", "y", ")", ":", "return", "(", "y", "*", "y", "-", "(", "x", "*", "x", "*", "x", "+", "self", ".", "__a", "*", "x", "+", "self", ".", "__b", ")", ")", "%", "self", ".", "__p", "==", "0" ...
51.333333
15
def to_json(self): """Returns an input shard state for the remaining inputs. Returns: A JSON serializable version of the remaining input to read. """ params = dict(self.__params) # Shallow copy. if self._PROTOTYPE_REQUEST_PARAM in params: prototype_request = params[self._PROTOTYPE_REQUEST_PARAM] params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request.Encode() if self._OFFSET_PARAM in params: params[self._OFFSET_PARAM] = base64.b64encode(params[self._OFFSET_PARAM]) return params
[ "def", "to_json", "(", "self", ")", ":", "params", "=", "dict", "(", "self", ".", "__params", ")", "# Shallow copy.", "if", "self", ".", "_PROTOTYPE_REQUEST_PARAM", "in", "params", ":", "prototype_request", "=", "params", "[", "self", ".", "_PROTOTYPE_REQUEST_...
37.571429
20.857143
def do_chan_log_all(self, line): """Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all""" self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS)) print('Channel log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS))
[ "def", "do_chan_log_all", "(", "self", ",", "line", ")", ":", "self", ".", "application", ".", "channel", ".", "SetLogFilters", "(", "openpal", ".", "LogFilters", "(", "opendnp3", ".", "levels", ".", "ALL_COMMS", ")", ")", "print", "(", "'Channel log filteri...
75
27.75
def evolved_transformer_big_tpu(): """Big parameters for Evolved Transformer model on TPU.""" hparams = add_evolved_transformer_hparams(transformer.transformer_big_tpu()) hparams.learning_rate_constant = 1 / hparams.learning_rate_warmup_steps ** 0.5 hparams.learning_rate_schedule = ( "constant*single_cycle_cos_decay") return hparams
[ "def", "evolved_transformer_big_tpu", "(", ")", ":", "hparams", "=", "add_evolved_transformer_hparams", "(", "transformer", ".", "transformer_big_tpu", "(", ")", ")", "hparams", ".", "learning_rate_constant", "=", "1", "/", "hparams", ".", "learning_rate_warmup_steps", ...
49.142857
16
def acquire(self, blocking=True, delay=DELAY_INCREMENT, max_delay=MAX_DELAY, timeout=None): """Attempt to acquire the given lock. :param blocking: whether to wait forever to try to acquire the lock :type blocking: bool :param delay: when blocking this is the delay time in seconds that will be added after each failed acquisition :type delay: int/float :param max_delay: the maximum delay to have (this limits the accumulated delay(s) added after each failed acquisition) :type max_delay: int/float :param timeout: an optional timeout (limits how long blocking will occur for) :type timeout: int/float :returns: whether or not the acquisition succeeded :rtype: bool """ if delay < 0: raise ValueError("Delay must be greater than or equal to zero") if timeout is not None and timeout < 0: raise ValueError("Timeout must be greater than or equal to zero") if delay >= max_delay: max_delay = delay self._do_open() watch = _utils.StopWatch(duration=timeout) r = _utils.Retry(delay, max_delay, sleep_func=self.sleep_func, watch=watch) with watch: gotten = r(self._try_acquire, blocking, watch) if not gotten: self.acquired = False return False else: self.acquired = True self.logger.log(_utils.BLATHER, "Acquired file lock `%s` after waiting %0.3fs [%s" " attempts were required]", self.path, watch.elapsed(), r.attempts) return True
[ "def", "acquire", "(", "self", ",", "blocking", "=", "True", ",", "delay", "=", "DELAY_INCREMENT", ",", "max_delay", "=", "MAX_DELAY", ",", "timeout", "=", "None", ")", ":", "if", "delay", "<", "0", ":", "raise", "ValueError", "(", "\"Delay must be greater...
42.928571
17.214286
def expire_at(self, key, _time): """ Sets the expiration time of @key to @_time @_time: absolute Unix timestamp (seconds since January 1, 1970) """ return self._client.expireat(self.get_key(key), round(_time))
[ "def", "expire_at", "(", "self", ",", "key", ",", "_time", ")", ":", "return", "self", ".", "_client", ".", "expireat", "(", "self", ".", "get_key", "(", "key", ")", ",", "round", "(", "_time", ")", ")" ]
48.2
14.4
def merge_versioned(releases, schema=None, merge_rules=None): """ Merges a list of releases into a versionedRelease. """ if not merge_rules: merge_rules = get_merge_rules(schema) merged = OrderedDict() for release in sorted(releases, key=lambda release: release['date']): release = release.copy() # Don't version the OCID. ocid = release.pop('ocid') merged[('ocid',)] = ocid releaseID = release['id'] date = release['date'] # Prior to OCDS 1.1.4, `tag` didn't set "omitWhenMerged": true. tag = release.pop('tag', None) flat = flatten(release, merge_rules) processed = process_flattened(flat) for key, value in processed.items(): # If value is unchanged, don't add to history. if key in merged and value == merged[key][-1]['value']: continue if key not in merged: merged[key] = [] merged[key].append(OrderedDict([ ('releaseID', releaseID), ('releaseDate', date), ('releaseTag', tag), ('value', value), ])) return unflatten(merged, merge_rules)
[ "def", "merge_versioned", "(", "releases", ",", "schema", "=", "None", ",", "merge_rules", "=", "None", ")", ":", "if", "not", "merge_rules", ":", "merge_rules", "=", "get_merge_rules", "(", "schema", ")", "merged", "=", "OrderedDict", "(", ")", "for", "re...
30.564103
16.25641
def resize(self, size:Union[int,TensorImageSize])->'Image': "Resize the image to `size`, size can be a single int." assert self._flow is None if isinstance(size, int): size=(self.shape[0], size, size) if tuple(size)==tuple(self.shape): return self self.flow = _affine_grid(size) return self
[ "def", "resize", "(", "self", ",", "size", ":", "Union", "[", "int", ",", "TensorImageSize", "]", ")", "->", "'Image'", ":", "assert", "self", ".", "_flow", "is", "None", "if", "isinstance", "(", "size", ",", "int", ")", ":", "size", "=", "(", "sel...
47.428571
16
def _check_roSet(orb,kwargs,funcName): """Function to check whether ro is set, because it's required for funcName""" if not orb._roSet and kwargs.get('ro',None) is None: warnings.warn("Method %s(.) requires ro to be given at Orbit initialization or at method evaluation; using default ro which is %f kpc" % (funcName,orb._ro), galpyWarning)
[ "def", "_check_roSet", "(", "orb", ",", "kwargs", ",", "funcName", ")", ":", "if", "not", "orb", ".", "_roSet", "and", "kwargs", ".", "get", "(", "'ro'", ",", "None", ")", "is", "None", ":", "warnings", ".", "warn", "(", "\"Method %s(.) requires ro to be...
74.8
29.4
def wasb_read(self, remote_log_location, return_error=False): """ Returns the log found at the remote_log_location. Returns '' if no logs are found or there is an error. :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param return_error: if True, returns a string error message if an error occurs. Otherwise returns '' when an error occurs. :type return_error: bool """ try: return self.hook.read_file(self.wasb_container, remote_log_location) except AzureHttpError: msg = 'Could not read logs from {}'.format(remote_log_location) self.log.exception(msg) # return error if needed if return_error: return msg
[ "def", "wasb_read", "(", "self", ",", "remote_log_location", ",", "return_error", "=", "False", ")", ":", "try", ":", "return", "self", ".", "hook", ".", "read_file", "(", "self", ".", "wasb_container", ",", "remote_log_location", ")", "except", "AzureHttpErro...
45.222222
17.444444
def cross_validation(learner, dataset, k=10, trials=1): """Do k-fold cross_validate and return their mean. That is, keep out 1/k of the examples for testing on each of k runs. Shuffle the examples first; If trials>1, average over several shuffles.""" if k is None: k = len(dataset.examples) if trials > 1: return mean([cross_validation(learner, dataset, k, trials=1) for t in range(trials)]) else: n = len(dataset.examples) random.shuffle(dataset.examples) return mean([train_and_test(learner, dataset, i*(n/k), (i+1)*(n/k)) for i in range(k)])
[ "def", "cross_validation", "(", "learner", ",", "dataset", ",", "k", "=", "10", ",", "trials", "=", "1", ")", ":", "if", "k", "is", "None", ":", "k", "=", "len", "(", "dataset", ".", "examples", ")", "if", "trials", ">", "1", ":", "return", "mean...
45.5
14.642857
async def uvarint(self, elem): """ Uvarint untouched :param elem: :return: """ if self.writing: return await dump_varint(self.iobj, elem) else: return await load_varint(self.iobj)
[ "async", "def", "uvarint", "(", "self", ",", "elem", ")", ":", "if", "self", ".", "writing", ":", "return", "await", "dump_varint", "(", "self", ".", "iobj", ",", "elem", ")", "else", ":", "return", "await", "load_varint", "(", "self", ".", "iobj", "...
25
13.2
def build_upstream_edge_predicate(nodes: Iterable[BaseEntity]) -> EdgePredicate: """Build an edge predicate that pass for relations for which one of the given nodes is the object.""" nodes = set(nodes) def upstream_filter(graph: BELGraph, u: BaseEntity, v: BaseEntity, k: str) -> bool: """Pass for relations for which one of the given nodes is the object.""" return v in nodes and graph[u][v][k][RELATION] in CAUSAL_RELATIONS return upstream_filter
[ "def", "build_upstream_edge_predicate", "(", "nodes", ":", "Iterable", "[", "BaseEntity", "]", ")", "->", "EdgePredicate", ":", "nodes", "=", "set", "(", "nodes", ")", "def", "upstream_filter", "(", "graph", ":", "BELGraph", ",", "u", ":", "BaseEntity", ",",...
52.666667
25.888889
def spline_interpolate(x_axis, y_axis, x_new_axis): """Interpolate a y = f(x) function using Spline interpolation algorithm, x_new_axis has to be in range of x_axis. `Spline interpolation <https://en.wikipedia.org/wiki/Spline_interpolation>`_ is a popular interpolation method. Way more accurate than linear interpolate in average """ f = interp1d(x_axis, y_axis, kind="cubic") return f(x_new_axis)
[ "def", "spline_interpolate", "(", "x_axis", ",", "y_axis", ",", "x_new_axis", ")", ":", "f", "=", "interp1d", "(", "x_axis", ",", "y_axis", ",", "kind", "=", "\"cubic\"", ")", "return", "f", "(", "x_new_axis", ")" ]
42.3
18.3
def main(argString=None): """The main function of this module. :param argString: the options. :type argString: list These are the steps: 1. Runs a plate bias analysis using Plink (:py:func:`executePlateBiasAnalysis`). 2. Extracts the list of significant markers after plate bias analysis (:py:func:`extractSignificantSNPs`). 3. Computes the frequency of all significant markers after plate bias analysis (:py:func:`computeFrequencyOfSignificantSNPs`). """ # Getting and checking the options args = parseArgs(argString) checkArgs(args) logger.info("Options used:") for key, value in vars(args).iteritems(): logger.info(" --{} {}".format(key.replace("_", "-"), value)) # Run plink logger.info("Running Plink to check the plate bias") executePlateBiasAnalysis(args) # Extract significant SNPs logger.info("Extracting significant SNPs") assocResults = extractSignificantSNPs(args.out) # Remove significant SNPs using plink logger.info("Computing frequency of significant SNPs") maf = computeFrequencyOfSignificantSNPs(args) # Create the final summary file logger.info("Creating the summary file") createSummaryFile(assocResults, maf, args.out)
[ "def", "main", "(", "argString", "=", "None", ")", ":", "# Getting and checking the options", "args", "=", "parseArgs", "(", "argString", ")", "checkArgs", "(", "args", ")", "logger", ".", "info", "(", "\"Options used:\"", ")", "for", "key", ",", "value", "i...
31
18.725
def rem_active_module(module): '''Remove a module from CPENV_ACTIVE_MODULES environment variable''' modules = set(get_active_modules()) modules.discard(module) new_modules_path = os.pathsep.join([m.path for m in modules]) os.environ['CPENV_ACTIVE_MODULES'] = str(new_modules_path)
[ "def", "rem_active_module", "(", "module", ")", ":", "modules", "=", "set", "(", "get_active_modules", "(", ")", ")", "modules", ".", "discard", "(", "module", ")", "new_modules_path", "=", "os", ".", "pathsep", ".", "join", "(", "[", "m", ".", "path", ...
42.142857
20.428571
def img_from_vgg(x): '''Decondition an image from the VGG16 model.''' x = x.transpose((1, 2, 0)) x[:, :, 0] += 103.939 x[:, :, 1] += 116.779 x[:, :, 2] += 123.68 x = x[:,:,::-1] # to RGB return x
[ "def", "img_from_vgg", "(", "x", ")", ":", "x", "=", "x", ".", "transpose", "(", "(", "1", ",", "2", ",", "0", ")", ")", "x", "[", ":", ",", ":", ",", "0", "]", "+=", "103.939", "x", "[", ":", ",", ":", ",", "1", "]", "+=", "116.779", "...
27.125
15.875
def get_library_mapping(logger, prod_folder, token, host): """ returns a pair of library mappings, the first mapping library uri to a library name for all libraries in the production folder, and the second mapping library name to info for libraries in the production folder with parsable versions Parameters ---------- logger: logging object configured in cli_commands.py prod_folder: string name of folder in Databricks UI containing production libraries token: string Databricks API key host: string Databricks account url (e.g. https://fake-organization.cloud.databricks.com) Returns ------- dictionary mapping a library uri to a library name dictionary mapping library UI path to base name, major version, minor version, and id number """ res = requests.get( host + '/api/1.2/libraries/list', auth=('token', token), ) if res.status_code == 200: library_list = res.json() library_map = {} id_nums = {} for library in library_list: status_res = ( requests .get( host + '/api/1.2/libraries/status?libraryId={}' .format(library['id']), auth=('token', token), ) ) if status_res.status_code == 200: library_info = status_res.json() # only do any of this for libraries in the production folder if library_info['folder'] != prod_folder: logger.debug( 'excluded folder: {} in {}, not prod folder ({})' .format( library_info['name'], library_info['folder'], prod_folder, ) ) continue if library_info['libType'] == 'python-egg': full_name = library_info['name'] + '.egg' elif library_info['libType'] == 'java-jar': full_name = library_info['name'] + '.jar' else: logger.debug( 'excluded library type: {} is of libType {}, ' 'not jar or egg' .format( library_info['name'], library_info['libType'], ) ) continue try: name_match = FileNameMatch(full_name) # map uri to name match object library_map[library_info['files'][0]] = name_match # map name to name match object and id number # we'll need the id number to clean up old libraries id_nums[library_info['name']] = { 'name_match': name_match, 'id_num': 
library_info['id'], } except FileNameError: logger.debug( 'FileNameError: {} file name is not parsable' .format(full_name) ) pass else: raise APIError(status_res) return library_map, id_nums else: raise APIError(res)
[ "def", "get_library_mapping", "(", "logger", ",", "prod_folder", ",", "token", ",", "host", ")", ":", "res", "=", "requests", ".", "get", "(", "host", "+", "'/api/1.2/libraries/list'", ",", "auth", "=", "(", "'token'", ",", "token", ")", ",", ")", "if", ...
37.611111
16.833333
def render_impl(self, template, context, **opts): """ Render given template file and return the result. :param template: Template file path :param context: A dict or dict-like object to instantiate given template file :param opts: Options such as: - at_paths: Template search paths - at_encoding: Template encoding - other keyword options passed to jinja2.Envrionment. Please note that 'loader' option is not supported because anytemplate does not support to load template except for files :return: Rendered string """ return self._render(os.path.basename(template), context, True, **opts)
[ "def", "render_impl", "(", "self", ",", "template", ",", "context", ",", "*", "*", "opts", ")", ":", "return", "self", ".", "_render", "(", "os", ".", "path", ".", "basename", "(", "template", ")", ",", "context", ",", "True", ",", "*", "*", "opts"...
42.058824
17.941176
def web(port, debug=False, theme="modern", ssh_config=None): """Starts the web UI.""" from storm import web as _web _web.run(port, debug, theme, ssh_config)
[ "def", "web", "(", "port", ",", "debug", "=", "False", ",", "theme", "=", "\"modern\"", ",", "ssh_config", "=", "None", ")", ":", "from", "storm", "import", "web", "as", "_web", "_web", ".", "run", "(", "port", ",", "debug", ",", "theme", ",", "ssh...
41.25
7.75
def apply_optimization(self, update_embedding_with, grad, **kwargs): """ Calculating (Obtaining) the learning rate (eta) and apply optimizations on the embedding states by the specified method. Parameters ---------- update_embedding_with : function Function used to update the state of RiemannianRelaxation class (Y or S). grad : (n x s) array Gradients used in updating the embedding. calc_loss : function (used by its child function) Function used to calculated the loss from the temperary state of RiemannianRelaxation instance. (YT or ST) loss : float (used by its child function) Loss of the current state of RiemannianRelaxation instance. """ if self.linesearch: return self._apply_linesearch_optimzation(update_embedding_with, grad, **kwargs) else: return self._apply_fixed_optimization(update_embedding_with, grad, **kwargs)
[ "def", "apply_optimization", "(", "self", ",", "update_embedding_with", ",", "grad", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "linesearch", ":", "return", "self", ".", "_apply_linesearch_optimzation", "(", "update_embedding_with", ",", "grad", ",", ...
40.962963
23.037037
def write(url, content, **args): """Put an object into a ftp URL.""" relay = urlparse.urlparse(args.pop('relay', 'lmtp://localhot')) try: smtplib_SMTPS = functools.partial(smtplib.SMTP_SSL, keyfile=args.pop('keyfile', None), certfile=args.pop('certfile', None)) except AttributeError: def smtplib_SMTPS(): raise ValueError(relay.geturl()) filename = args.pop('filename', '') content_type, encoding = mimetypes.guess_type(filename) content_type = args.pop('content_type', content_type) encoding = args.pop('content_encoding', encoding) maintype, subtype = content_type.split('/') content = content_types.get(content_types).format(content, **args) content = content_encodings.get(encoding).encode(content) message = { 'application': application.MIMEApplication, 'text': text.MIMEText}[maintype](content, subtype) if filename: message.set_param('filename', ('UTF-8', '', filename.decode('UTF-8'))) if encoding: message['Content-Encoding'] = encoding message['To'] = urllib.unquote(url.path) for name, value in urlparse.parse_qsl(url.query): message[name.replace('_', '-')] = value if message['From'] is None: username = os.environ.get('USERNAME') username = os.environ.get('LOGNAME', username) username = os.environ.get('USER', username) message['From'] = '{}@{}'.format(username, socket.getfqdn()) # ``mailto`` scheme allow for a body param. We don't. del message['body'] # Send the email. client = {'smtp': smtplib.SMTP, 'lmtp': smtplib.LMTP, 'smtps': smtplib_SMTPS}[relay.scheme]() client.connect(''.join([relay.hostname, relay.path]), relay.port) if relay.username and relay.password: client.login(relay.username, relay.password) client.sendmail(message['From'], [message['To']], message.as_string()) client.quit()
[ "def", "write", "(", "url", ",", "content", ",", "*", "*", "args", ")", ":", "relay", "=", "urlparse", ".", "urlparse", "(", "args", ".", "pop", "(", "'relay'", ",", "'lmtp://localhot'", ")", ")", "try", ":", "smtplib_SMTPS", "=", "functools", ".", "...
44.288889
17.555556
def _get_item(self, question_id): """we need a middle-man method to convert the unique "assessment-session" authority question_ids into "real" itemIds BUT this also has to return the "magic" item, so we can't rely on question = self.get_question(question_id) ils = self._get_item_lookup_session() return ils.get_item(Id(question._my_map['itemId'])) """ question_map = self._get_question_map(question_id) # Throws NotFound() real_question_id = Id(question_map['questionId']) return self._get_item_lookup_session().get_item(real_question_id)
[ "def", "_get_item", "(", "self", ",", "question_id", ")", ":", "question_map", "=", "self", ".", "_get_question_map", "(", "question_id", ")", "# Throws NotFound()", "real_question_id", "=", "Id", "(", "question_map", "[", "'questionId'", "]", ")", "return", "se...
53.75
18.916667
def bulk_copy(self, ids):
    """Bulk copy a set of results.

    :param ids: Int list of result IDs.
    :return: :class:`results.Result <results.Result>` list
    """
    return self.service.bulk_copy(
        self.base, self.RESOURCE, ids, ResultSchema()
    )
[ "def", "bulk_copy", "(", "self", ",", "ids", ")", ":", "schema", "=", "ResultSchema", "(", ")", "return", "self", ".", "service", ".", "bulk_copy", "(", "self", ".", "base", ",", "self", ".", "RESOURCE", ",", "ids", ",", "schema", ")" ]
35.75
15.625
def is_in_virtualenv():
    """
    Check virtualenv membership dynamically

    :return: True or false depending on whether we are in a regular
        virtualenv or not
    :rtype: bool
    """
    active = os.environ.get("PIPENV_ACTIVE", False)
    ignoring = bool(os.environ.get("PIPENV_IGNORE_VIRTUALENVS", False))
    venv, use_system = None, False
    # Only consult VIRTUAL_ENV when pipenv isn't already active and the
    # user hasn't asked to ignore virtualenvs.
    if not (active or ignoring):
        venv = os.environ.get("VIRTUAL_ENV")
        use_system = bool(venv)
    return (use_system or venv) and not (active or ignoring)
[ "def", "is_in_virtualenv", "(", ")", ":", "pipenv_active", "=", "os", ".", "environ", ".", "get", "(", "\"PIPENV_ACTIVE\"", ",", "False", ")", "virtual_env", "=", "None", "use_system", "=", "False", "ignore_virtualenvs", "=", "bool", "(", "os", ".", "environ...
34.647059
21.941176
def GetStoredHostname(self):
    """Retrieves the stored hostname.

    The hostname is determined based on the preprocessing information
    that is stored inside the storage file.

    Returns:
      str: hostname.
    """
    hostnames = self._hostnames
    # Use the number of stored hostnames as the lookup key into the map.
    lookup_key = len(hostnames)
    return hostnames.get(lookup_key, None)
[ "def", "GetStoredHostname", "(", "self", ")", ":", "store_number", "=", "len", "(", "self", ".", "_hostnames", ")", "return", "self", ".", "_hostnames", ".", "get", "(", "store_number", ",", "None", ")" ]
27.727273
16.636364
def floor(x, context=None):
    """
    Return the next lower or equal integer to x.

    If the result is not exactly representable, it will be rounded
    according to the current context.  Note that it's possible for the
    result to be larger than ``x`` (see the documentation of the
    :func:`ceil` function for more information).

    .. note::

       This function corresponds to the MPFR function ``mpfr_rint_floor``,
       not to ``mpfr_floor``.
    """
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_rint_floor, (operand,), context
    )
[ "def", "floor", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_rint_floor", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ...
27.545455
23.272727
def renames(from_path, to_path, user=None):
    """
    Rename ``from_path`` to ``to_path``, creating parents as needed.
    """
    parent = path.dirname(to_path)
    # Create the destination directory first; a bare filename has no parent.
    if parent:
        mkdir(parent, user=user)
    rename(from_path, to_path, user=user)
[ "def", "renames", "(", "from_path", ",", "to_path", ",", "user", "=", "None", ")", ":", "to_dir", "=", "path", ".", "dirname", "(", "to_path", ")", "if", "to_dir", ":", "mkdir", "(", "to_dir", ",", "user", "=", "user", ")", "rename", "(", "from_path"...
30.75
9
def to_bqm(self, model):
    """Given a pysmt model, return a bqm.

    Adds the values of the biases as determined by the SMT solver
    to a bqm.

    Args:
        model: A pysmt model.

    Returns:
        :obj:`dimod.BinaryQuadraticModel`

    """
    # Materialize the solver-assigned bias values as plain floats.
    linear = [(v, float(model.get_py_value(bias)))
              for v, bias in self.linear.items()]
    quadratic = [(u, v, float(model.get_py_value(bias)))
                 for (u, v), bias in self.quadratic.items()]
    offset = float(model.get_py_value(self.offset))
    return dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.SPIN)
[ "def", "to_bqm", "(", "self", ",", "model", ")", ":", "linear", "=", "(", "(", "v", ",", "float", "(", "model", ".", "get_py_value", "(", "bias", ")", ")", ")", "for", "v", ",", "bias", "in", "self", ".", "linear", ".", "items", "(", ")", ")", ...
33.263158
23.368421
def fai_from_bam(ref_file, bam_file, out_file, data):
    """Create a fai index with only contigs in the input BAM file.

    :param ref_file: reference FASTA whose ``.fai`` index is filtered
    :param bam_file: aligned BAM providing the set of contigs to keep
    :param out_file: path of the filtered ``.fai`` output
    :param data: run metadata dict; ``data["config"]`` is passed through
        to the index lookup helpers
    :return: path to ``out_file``
    """
    # Contigs that actually have reads, as reported by idxstats on the BAM.
    contigs = set([x.contig for x in idxstats(bam_file, data)])
    # Skip the rewrite when out_file is already newer than the BAM.
    if not utils.file_uptodate(out_file, bam_file):
        with open(ref.fasta_idx(ref_file, data["config"])) as in_handle:
            # file_transaction writes to a temp path, moved in place on success.
            with file_transaction(data, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    # Copy only non-blank .fai lines whose first column
                    # (the contig name) appears in the BAM.
                    for line in (l for l in in_handle if l.strip()):
                        if line.split()[0] in contigs:
                            out_handle.write(line)
    return out_file
[ "def", "fai_from_bam", "(", "ref_file", ",", "bam_file", ",", "out_file", ",", "data", ")", ":", "contigs", "=", "set", "(", "[", "x", ".", "contig", "for", "x", "in", "idxstats", "(", "bam_file", ",", "data", ")", "]", ")", "if", "not", "utils", "...
52.166667
16.25
def add_group(data_api, data_setters, group_index):
    """Add the data for a whole group.

    :param data_api the data api from where to get the data
    :param data_setters the class to push the data to
    :param group_index the index for this group
    :return the number of atoms in this group"""
    # The group type indexes into group_list, which holds per-type
    # templates (atom names, elements, formal charges, bonds).
    group_type_ind = data_api.group_type_list[group_index]
    atom_count = len(data_api.group_list[group_type_ind]["atomNameList"])
    insertion_code = data_api.ins_code_list[group_index]
    data_setters.set_group_info(data_api.group_list[group_type_ind]["groupName"],
                                data_api.group_id_list[group_index],
                                insertion_code,
                                data_api.group_list[group_type_ind]["chemCompType"],
                                atom_count,
                                data_api.num_bonds,
                                data_api.group_list[group_type_ind]["singleLetterCode"],
                                data_api.sequence_index_list[group_index],
                                data_api.sec_struct_list[group_index])
    # Push each atom of the group, keeping the running atom counter in sync
    # so add_atom_data can locate per-atom coordinate/B-factor data.
    for group_atom_ind in range(atom_count):
        add_atom_data(data_api, data_setters,
                      data_api.group_list[group_type_ind]["atomNameList"],
                      data_api.group_list[group_type_ind]["elementList"],
                      data_api.group_list[group_type_ind]["formalChargeList"],
                      group_atom_ind)
        data_api.atom_counter +=1
    # Intra-group bonds come from the group template, after all atoms exist.
    add_group_bonds(data_setters,
                    data_api.group_list[group_type_ind]["bondAtomList"],
                    data_api.group_list[group_type_ind]["bondOrderList"])
    return atom_count
[ "def", "add_group", "(", "data_api", ",", "data_setters", ",", "group_index", ")", ":", "group_type_ind", "=", "data_api", ".", "group_type_list", "[", "group_index", "]", "atom_count", "=", "len", "(", "data_api", ".", "group_list", "[", "group_type_ind", "]", ...
60.192308
22.653846
def _split_cell(cell, module): """ Split a hybrid %%sql cell into the Python code and the queries. Populates a module with the queries. Args: cell: the contents of the %%sql cell. module: the module that the contents will populate. Returns: The default (last) query for the module. """ lines = cell.split('\n') code = None last_def = -1 name = None define_wild_re = re.compile('^DEFINE\s+.*$', re.IGNORECASE) define_re = re.compile('^DEFINE\s+QUERY\s+([A-Z]\w*)\s*?(.*)$', re.IGNORECASE) select_re = re.compile('^SELECT\s*.*$', re.IGNORECASE) standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\s*.*$', re.IGNORECASE) # TODO(gram): a potential issue with this code is if we have leading Python code followed # by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see # if we can address this. for i, line in enumerate(lines): define_match = define_re.match(line) select_match = select_re.match(line) standard_sql_match = standard_sql_re.match(line) if i: prior_content = ''.join(lines[:i]).strip() if select_match: # Avoid matching if previous token was '(' or if Standard SQL is found # TODO: handle the possibility of comments immediately preceding SELECT select_match = len(prior_content) == 0 or \ (prior_content[-1] != '(' and not standard_sql_re.match(prior_content)) if standard_sql_match: standard_sql_match = len(prior_content) == 0 or not standard_sql_re.match(prior_content) if define_match or select_match or standard_sql_match: # If this is the first query, get the preceding Python code. if code is None: code = ('\n'.join(lines[:i])).strip() if len(code): code += '\n' elif last_def >= 0: # This is not the first query, so gather the previous query text. query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip() if select_match and name != datalab.data._utils._SQL_MODULE_MAIN and len(query) == 0: # Avoid DEFINE query name\nSELECT ... 
being seen as an empty DEFINE followed by SELECT continue # Save the query statement = datalab.data.SqlStatement(query, module) module.__dict__[name] = statement # And set the 'last' query to be this too module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement # Get the query name and strip off our syntactic sugar if appropriate. if define_match: name = define_match.group(1) lines[i] = define_match.group(2) else: name = datalab.data._utils._SQL_MODULE_MAIN # Save the starting line index of the new query last_def = i else: define_wild_match = define_wild_re.match(line) if define_wild_match: raise Exception('Expected "DEFINE QUERY <name>"') if last_def >= 0: # We were in a query so save this tail query. query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip() statement = datalab.data.SqlStatement(query, module) module.__dict__[name] = statement module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement if code is None: code = '' module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module) return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None)
[ "def", "_split_cell", "(", "cell", ",", "module", ")", ":", "lines", "=", "cell", ".", "split", "(", "'\\n'", ")", "code", "=", "None", "last_def", "=", "-", "1", "name", "=", "None", "define_wild_re", "=", "re", ".", "compile", "(", "'^DEFINE\\s+.*$'"...
39.809524
24.261905
def _redis_notifier(state):
    """Notify of configuration update through redis.

    Arguments:
        state (_WaffleState): Object that contains reference to app
            and its configstore.
    """
    now = time.time()
    state._tstamp = now
    channel = state.app.config.get('WAFFLE_REDIS_CHANNEL', 'waffleconf')
    # Publish the refresh timestamp so other processes reload their config.
    redis.client.StrictRedis().publish(channel, now)
[ "def", "_redis_notifier", "(", "state", ")", ":", "tstamp", "=", "time", ".", "time", "(", ")", "state", ".", "_tstamp", "=", "tstamp", "conf", "=", "state", ".", "app", ".", "config", "# Notify timestamp", "r", "=", "redis", ".", "client", ".", "Stric...
28.642857
19
def main(command_line=True, **kwargs): """ NAME jr6_jr6_magic.py DESCRIPTION converts JR6 .jr6 format files to magic_measurements format files SYNTAX jr6_jr6_magic.py [command line options] OPTIONS -h: prints the help message and quits. -f FILE: specify input file, or -F FILE: specify output file, default is magic_measurements.txt -Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet) -spc NUM : specify number of characters to designate a specimen, default = 1 -loc LOCNAME : specify location/study name -A: don't average replicate measurements -ncn NCON: specify sample naming convention (6 and 7 not yet implemented) -mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented -JR IODP samples measured on the JOIDES RESOLUTION -v NUM : specify the volume in cc of the sample, default 2.5^3cc Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY NB: all others you will have to customize your self or e-mail ltauxe@ucsd.edu for help. INPUT JR6 .jr6 format file """ # initialize some stuff noave=0 #volume=2.5**3 #default volume is a 2.5cm cube volume = 2.5 * 1e-6 #default volume is a 2.5 cm cube, translated to meters cubed inst="" samp_con,Z='1',"" missing=1 demag="N" er_location_name="unknown" citation='This study' args=sys.argv meth_code="LP-NO" specnum=1 version_num=pmag.get_version() Samps=[] # keeps track of sample orientations user="" mag_file="" dir_path='.' 
MagRecs=[] ErSamps=[] SampOuts=[] samp_file = 'er_samples.txt' meas_file = 'magic_measurements.txt' tmp_file= "fixed.jr6" meth_code,JR="",0 # # get command line arguments # if command_line: if '-WD' in sys.argv: ind = sys.argv.index('-WD') dir_path=sys.argv[ind+1] if '-ID' in sys.argv: ind = sys.argv.index('-ID') input_dir_path = sys.argv[ind+1] else: input_dir_path = dir_path output_dir_path = dir_path if "-h" in args: print(main.__doc__) return False if '-F' in args: ind=args.index("-F") meas_file = args[ind+1] if '-Fsa' in args: ind = args.index("-Fsa") samp_file = args[ind+1] #try: # open(samp_file,'r') # ErSamps,file_type=pmag.magic_read(samp_file) # print 'sample information will be appended to ', samp_file #except: # print samp_file,' not found: sample information will be stored in new er_samples.txt file' # samp_file = output_dir_path+'/er_samples.txt' if '-f' in args: ind = args.index("-f") mag_file= args[ind+1] if "-spc" in args: ind = args.index("-spc") specnum = int(args[ind+1]) if "-ncn" in args: ind=args.index("-ncn") samp_con=sys.argv[ind+1] if "-loc" in args: ind=args.index("-loc") er_location_name=args[ind+1] if "-A" in args: noave=1 if "-mcd" in args: ind=args.index("-mcd") meth_code=args[ind+1] if "-JR" in args: meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V" meth_code=meth_code.strip(":") JR=1 samp_con='5' if "-v" in args: ind=args.index("-v") volume=float(args[ind+1])*1e-6 # enter volume in cc, convert to m^3 if not command_line: dir_path = kwargs.get('dir_path', '.') input_dir_path = kwargs.get('input_dir_path', dir_path) output_dir_path = dir_path meas_file = kwargs.get('meas_file', 'magic_measurements.txt') mag_file = kwargs.get('mag_file') samp_file = kwargs.get('samp_file', 'er_samples.txt') specnum = kwargs.get('specnum', 1) samp_con = kwargs.get('samp_con', '1') er_location_name = kwargs.get('er_location_name', '') noave = kwargs.get('noave', 0) # default (0) means DO average meth_code = kwargs.get('meth_code', "LP-NO") volume = 
float(kwargs.get('volume', 0)) if not volume: volume = 2.5 * 1e-6 #default volume is a 2.5 cm cube, translated to meters cubed else: #convert cm^3 to m^3 volume *= 1e-6 JR = kwargs.get('JR', 0) if JR: if meth_code == "LP-NO": meth_code = "" meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V" meth_code=meth_code.strip(":") samp_con='5' # format variables mag_file = input_dir_path+"/" + mag_file meas_file = output_dir_path+"/" + meas_file samp_file = output_dir_path+"/" + samp_file tmp_file = output_dir_path+"/" + tmp_file if specnum!=0: specnum=-specnum if "4" in samp_con: if "-" not in samp_con: print("option [4] must be in form 4-Z where Z is an integer") return False, "option [4] must be in form 4-Z where Z is an integer" else: Z=samp_con.split("-")[1] samp_con="4" if "7" in samp_con: if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "option [7] must be in form 7-Z where Z is an integer" else: Z=samp_con.split("-")[1] samp_con="7" ErSampRec,ErSiteRec={},{} # parse data # fix .jr6 file so that there are spaces between all the columns. 
pre_data=open(mag_file, 'r') tmp_data=open(tmp_file, 'w') line=pre_data.readline() while line !='': line=line.replace('-',' -') #print "line=", line tmp_data.write(line) line=pre_data.readline() tmp_data.close() pre_data.close() data=pd.read_csv(tmp_file, delim_whitespace=True,header=None) if JR==0: # data.columns=['er_specimen_name','step','x','y','z','expon','sample_azimuth','sample_dip', 'sample_bed_dip_direction','sample_bed_dip','bed_dip_dir2','bed_dip2','param1','param2','param3','param4','measurement_csd'] cart=np.array([data['x'],data['y'],data['z']]).transpose() else: # measured on the Joides Resolution JR6 data.columns=['er_specimen_name','step','negz','y','x','expon','sample_azimuth','sample_dip', 'sample_bed_dip_direction','sample_bed_dip','bed_dip_dir2','bed_dip2','param1','param2','param3','param4','measurement_csd'] cart=np.array([data['x'],data['y'],-data['negz']]).transpose() dir= pmag.cart2dir(cart).transpose() data['measurement_dec']=dir[0] data['measurement_inc']=dir[1] data['measurement_magn_moment']=dir[2]*(10.0**data['expon'])*volume # the data are in A/m - this converts to Am^2 data['measurement_magn_volume']=dir[2]*(10.0**data['expon']) # A/m - data in A/m data['sample_dip']=-data['sample_dip'] DGEOs,IGEOs=[],[] for ind in range(len(data)): dgeo,igeo=pmag.dogeo(data.iloc[ind]['measurement_dec'],data.iloc[ind]['measurement_inc'],data.iloc[ind]['sample_azimuth'],data.iloc[ind]['sample_dip']) DGEOs.append(dgeo) IGEOs.append(igeo) data['specimen_dec']=DGEOs data['specimen_inc']=IGEOs data['specimen_tilt']='1' if specnum!=0: data['er_sample_name']=data['er_specimen_name'][:specnum] else: data['er_sample_name']=data['er_specimen_name'] if int(samp_con) in [1, 2, 3, 4, 5, 7]: data['er_site_name']=pmag.parse_site(data['er_sample_name'],samp_con,Z) # else: # if 'er_site_name' in ErSampRec.keys():er_site_name=ErSampRec['er_site_name'] # if 'er_location_name' in ErSampRec.keys():er_location_name=ErSampRec['er_location_name'] # Configure the er_sample 
table for rowNum, row in data.iterrows(): sampleFlag=0 for sampRec in SampOuts: if sampRec['er_sample_name'] == row['er_sample_name']: sampleFlag=1 break if sampleFlag == 0: ErSampRec['er_sample_name']=row['er_sample_name'] ErSampRec['sample_azimuth']=str(row['sample_azimuth']) ErSampRec['sample_dip']=str(row['sample_dip']) ErSampRec['magic_method_codes']=meth_code ErSampRec['er_location_name']=er_location_name ErSampRec['er_site_name']=row['er_site_name'] ErSampRec['er_citation_names']='This study' SampOuts.append(ErSampRec.copy()) # Configure the magic_measurements table for rowNum, row in data.iterrows(): MagRec={} # MagRec['measurement_description']='Date: '+date MagRec["er_citation_names"]="This study" MagRec['er_location_name']=er_location_name MagRec['er_site_name']=row['er_site_name'] MagRec['er_sample_name']=row['er_sample_name'] MagRec['magic_software_packages']=version_num MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin MagRec["measurement_flag"]='g' MagRec["measurement_standard"]='u' MagRec["measurement_number"]='1' MagRec["er_specimen_name"]=row['er_specimen_name'] MagRec["treatment_ac_field"]='0' if row['step'] == 'NRM': meas_type="LT-NO" elif row['step'][0:2] == 'AD': meas_type="LT-AF-Z" treat=float(row['step'][2:]) MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'TD': meas_type="LT-T-Z" treat=float(row['step'][2:]) MagRec["treatment_temp"]='%8.3e' % (treat+273.) 
# temp in kelvin else: # need to add IRM, and ARM options print("measurement type unknown", row['step']) return False, "measurement type unknown" MagRec["measurement_magn_moment"]=str(row['measurement_magn_moment']) MagRec["measurement_magn_volume"]=str(row['measurement_magn_volume']) MagRec["measurement_dec"]=str(row['measurement_dec']) MagRec["measurement_inc"]=str(row['measurement_inc']) MagRec['magic_method_codes']=meas_type MagRecs.append(MagRec.copy()) pmag.magic_write(samp_file,SampOuts,'er_samples') print("sample orientations put in ",samp_file) MagOuts=pmag.measurements_methods(MagRecs,noave) pmag.magic_write(meas_file,MagOuts,'magic_measurements') print("results put in ",meas_file) print("exit!") return True, meas_file
[ "def", "main", "(", "command_line", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# initialize some stuff", "noave", "=", "0", "#volume=2.5**3 #default volume is a 2.5cm cube", "volume", "=", "2.5", "*", "1e-6", "#default volume is a 2.5 cm cube, translated to meters c...
40.420863
20.471223
def sql_datetime_literal(dt: DateTimeLikeType, subsecond: bool = False) -> str:
    """
    Convert a ``datetime.datetime``-like object into an ANSI SQL literal
    string such as ``'2000-12-31 23:59:59'``.  With ``subsecond=True``,
    produce the (non-ANSI) form ``'2000-12-31 23:59:59.123456'`` instead.
    """
    # ANSI SQL: http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt
    # <timestamp string>
    # ... the subsecond part is non-ANSI
    if subsecond:
        return dt.strftime("'%Y-%m-%d %H:%M:%S.%f'")
    return dt.strftime("'%Y-%m-%d %H:%M:%S'")
[ "def", "sql_datetime_literal", "(", "dt", ":", "DateTimeLikeType", ",", "subsecond", ":", "bool", "=", "False", ")", "->", "str", ":", "# ANSI SQL: http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt", "# <timestamp string>", "# ... the subsecond part is non-ANSI", "fmt",...
45.692308
15.230769
def _compute_edges(self):
    """Compute the edges of the current surface.

    Returns:
        Tuple[~curve.Curve, ~curve.Curve, ~curve.Curve]: The edges of
        the surface.
    """
    edge_nodes = _surface_helpers.compute_edge_nodes(
        self._nodes, self._degree
    )
    # _copy=False: the helper already returns fresh node arrays.
    return tuple(
        _curve_mod.Curve(nodes, self._degree, _copy=False)
        for nodes in edge_nodes
    )
[ "def", "_compute_edges", "(", "self", ")", ":", "nodes1", ",", "nodes2", ",", "nodes3", "=", "_surface_helpers", ".", "compute_edge_nodes", "(", "self", ".", "_nodes", ",", "self", ".", "_degree", ")", "edge1", "=", "_curve_mod", ".", "Curve", "(", "nodes1...
39.357143
19.857143
def coverage(args): """ %prog coverage fastafile ctg bedfile1 bedfile2 .. Plot coverage from a set of BED files that contain the read mappings. The paired read span will be converted to a new bedfile that contain the happy mates. ctg is the chr/scf/ctg that you want to plot the histogram on. If the bedfiles already contain the clone spans, turn on --spans. """ from jcvi.formats.bed import mates, bedpe p = OptionParser(coverage.__doc__) p.add_option("--ymax", default=None, type="int", help="Limit ymax [default: %default]") p.add_option("--spans", default=False, action="store_true", help="BED files already contain clone spans [default: %default]") opts, args, iopts = p.set_image_options(args, figsize="8x5") if len(args) < 3: sys.exit(not p.print_help()) fastafile, ctg = args[0:2] bedfiles = args[2:] sizes = Sizes(fastafile) size = sizes.mapping[ctg] plt.figure(1, (iopts.w, iopts.h)) ax = plt.gca() bins = 100 # smooth the curve lines = [] legends = [] not_covered = [] yy = .9 for bedfile, c in zip(bedfiles, "rgbcky"): if not opts.spans: pf = bedfile.rsplit(".", 1)[0] matesfile = pf + ".mates" if need_update(bedfile, matesfile): matesfile, matesbedfile = mates([bedfile, "--lib"]) bedspanfile = pf + ".spans.bed" if need_update(matesfile, bedspanfile): bedpefile, bedspanfile = bedpe([bedfile, "--span", "--mates={0}".format(matesfile)]) bedfile = bedspanfile bedsum = Bed(bedfile).sum(seqid=ctg) notcoveredbases = size - bedsum legend = bedfile.split(".")[0] msg = "{0}: {1} bp not covered".format(legend, thousands(notcoveredbases)) not_covered.append(msg) print(msg, file=sys.stderr) ax.text(.1, yy, msg, color=c, size=9, transform=ax.transAxes) yy -= .08 cov = Coverage(bedfile, sizes.filename) x, y = cov.get_plot_data(ctg, bins=bins) line, = ax.plot(x, y, '-', color=c, lw=2, alpha=.5) lines.append(line) legends.append(legend) leg = ax.legend(lines, legends, shadow=True, fancybox=True) leg.get_frame().set_alpha(.5) ylabel = "Average depth per {0}Kb".format(size / bins / 1000) 
ax.set_xlim(0, size) ax.set_ylim(0, opts.ymax) ax.set_xlabel(ctg) ax.set_ylabel(ylabel) set_human_base_axis(ax) figname ="{0}.{1}.pdf".format(fastafile, ctg) savefig(figname, dpi=iopts.dpi, iopts=iopts)
[ "def", "coverage", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "bed", "import", "mates", ",", "bedpe", "p", "=", "OptionParser", "(", "coverage", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--ymax\"", ",", "default", "=", "None...
33.246753
19.922078
def access_token(self):
    """Return an OAuth 2.0 Bearer access token if one can be found."""
    # Precedence: Authorization header, then query string, then body.
    token = self.get_auth_bearer()
    if token:
        return token
    token = self.query_kwargs.get('access_token', '')
    if token:
        return token
    return self.body_kwargs.get('access_token', '')
[ "def", "access_token", "(", "self", ")", ":", "access_token", "=", "self", ".", "get_auth_bearer", "(", ")", "if", "not", "access_token", ":", "access_token", "=", "self", ".", "query_kwargs", ".", "get", "(", "'access_token'", ",", "''", ")", "if", "not",...
40.666667
17.111111
def get_property(host=None, admin_username=None, admin_password=None, property=None):
    '''
    .. versionadded:: Fluorine

    Return a specific property from the chassis.

    host
        The chassis host.

    admin_username
        The username used to access the chassis.

    admin_password
        The password used to access the chassis.

    property:
        The property which should be get.

    CLI Example:

    .. code-block:: bash

        salt dell dracr.get_property property=System.ServerOS.HostName
    '''
    if property is None:
        raise SaltException('No property specified!')
    command = 'get \'{0}\''.format(property)
    return __execute_ret(command, host=host,
                         admin_username=admin_username,
                         admin_password=admin_password)
[ "def", "get_property", "(", "host", "=", "None", ",", "admin_username", "=", "None", ",", "admin_password", "=", "None", ",", "property", "=", "None", ")", ":", "if", "property", "is", "None", ":", "raise", "SaltException", "(", "'No property specified!'", "...
25.033333
25.566667
def fit_pRF_radius(ctx, retinotopy=Ellipsis, mask=None, weight=Ellipsis, slope_only=False):
    '''
    fit_pRF_radius(ctx) fits a line, m*eccen + b, to the pRF radius and yields the tuple (m, b).

    The following options may be given:
      * retinotopy (default: Ellipsis) specifies the prefix for the retinotopy (passed to
        retinotopy_data() to find the retinotopic dataset).
      * mask (default: None) specifies the mask over which to perform the calculation. This is
        passed to the to_mask() function. In the case that mask is a set or frozenset, then it is
        treated as a conjunction (intersection) of masks.
      * weight (default: None) specifies that a weight should be used; if this is True or Ellipsis,
        will use the variance_explained if it is part of the retinotopy dataset; if this is False
        or None, uses no weight; otherwise, this must be a weight property or property name.
      * slope_only (default: False) may be set to True to instead fit radius = m*eccen and return
        only m.
    '''
    rdat = retinotopy_data(ctx, retinotopy)
    if 'radius' not in rdat:
        raise ValueError('No pRF radius found in dataset %s' % retinotopy)
    rad = rdat['radius']
    # Only eccentricity is used for the fit; polar angle is discarded.
    (ang, ecc) = as_retinotopy(rdat, 'visual')
    if isinstance(mask, (set, frozenset)):
        # A set of masks means their conjunction: intersect the index arrays.
        mask = reduce(np.intersect1d, [ctx.mask(m, indices=True) for m in mask])
    else:
        mask = ctx.mask(mask, indices=True)
    # get a weight if provided:
    if weight in [False, None]:
        wgt = np.ones(rad.shape)
    elif weight in [True, Ellipsis]:
        # Prefer variance_explained when the dataset carries it; otherwise unweighted.
        if 'variance_explained' in rdat:
            wgt = rdat['variance_explained']
        else:
            wgt = np.ones(rad.shape)
    else:
        # Anything else is treated as a property (or property name) on ctx.
        wgt = ctx.property(weight)
    # get the relevant eccen and radius values
    (ecc, rad, wgt) = [x[mask] for x in (ecc, rad, wgt)]
    # fit a line...
    if slope_only:
        # Weighted least squares through the origin: radius = m * eccen.
        # NOTE(review): both sides are multiplied by wgt here (not sqrt(wgt)),
        # so the effective weighting differs from np.polyfit's w= -- confirm intended.
        ecc = np.reshape(ecc * wgt, (len(ecc), 1))
        rad = np.reshape(rad * wgt, (len(rad), 1))
        return np.linalg.lstsq(ecc, rad)[0]
    else:
        return tuple(np.polyfit(ecc, rad, 1, w=wgt))
[ "def", "fit_pRF_radius", "(", "ctx", ",", "retinotopy", "=", "Ellipsis", ",", "mask", "=", "None", ",", "weight", "=", "Ellipsis", ",", "slope_only", "=", "False", ")", ":", "rdat", "=", "retinotopy_data", "(", "ctx", ",", "retinotopy", ")", "if", "'radi...
53.131579
25.815789
def save(self, refreshing=None, next_action=None, json_last_refresh=None, data_blob=None):
    """
    save or update the component on the Ariane server cache
    :param refreshing: the new refreshing value - default None and ignored
    :param next_action: the new next action - default None and ignored
    :param json_last_refresh: the new json last refresh - default the date of this call
    :param data_blob: the new data blob of this component - default None and ignored
    :return: True on success, False if the remote cache push failed
    """
    LOGGER.debug("InjectorCachedComponent.save")
    ret = True
    # Only overwrite fields the caller actually supplied.
    if refreshing is not None:
        self.refreshing = refreshing
    if next_action is not None:
        self.next_action = next_action
    if json_last_refresh is not None:
        try:
            # datetime-like values are serialized; anything without strftime
            # (e.g. an already-formatted string) is stored as-is.
            self.json_last_refresh = json_last_refresh.strftime("%Y-%m-%d %H:%M:%S.%f")
        except AttributeError:
            self.json_last_refresh = json_last_refresh
    if data_blob is not None:
        self.blob = data_blob
    # Lazily create and start the refresh-on-demand service before pushing.
    if self.service is None:
        self.service = InjectorCachedComponentService.make_refresh_on_demand_service(self)
    if self.service is not None and not self.service.is_started:
        self.service.start()
    # Single quotes are swapped for double quotes so the payload is valid JSON.
    args = {'properties': {'OPERATION': 'PUSH_COMPONENT_IN_CACHE',
                           'REMOTE_COMPONENT': str(self.injector_component_2_json(properties_only=True)).replace("'", '"'),
                           'CACHE_ID': InjectorCachedComponentService.cache_id},
            'body': self.blob}
    # Blocking call: .get() waits for the remote cache's reply.
    result = InjectorCachedComponentService.requester.call(args).get()
    if result.rc != 0:
        # Non-zero return code: log the failure and report it to the caller.
        err_msg = 'InjectorCachedComponent.save - Problem while saving component ( id : ' + self.id + \
                  'Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \
                  " (" + str(result.rc) + ")"
        LOGGER.warning(err_msg)
        ret = False
    return ret
[ "def", "save", "(", "self", ",", "refreshing", "=", "None", ",", "next_action", "=", "None", ",", "json_last_refresh", "=", "None", ",", "data_blob", "=", "None", ")", ":", "LOGGER", ".", "debug", "(", "\"InjectorCachedComponent.save\"", ")", "ret", "=", "...
46.75
25.386364
def csiszar_vimco(f, p_log_prob, q, num_draws, num_batch_draws=1, seed=None, name=None):
  """Use VIMCO to lower the variance of gradient[csiszar_function(Avg(logu))].

  This function generalizes VIMCO [(Mnih and Rezende, 2016)][1] to Csiszar
  f-Divergences.

  Note: if `q.reparameterization_type = tfd.FULLY_REPARAMETERIZED`,
  consider using `monte_carlo_csiszar_f_divergence`.

  The VIMCO loss is:

  ```none
  vimco = f(Avg{logu[i] : i=0,...,m-1})
  where,
    logu[i] = log( p(x, h[i]) / q(h[i] | x) )
    h[i] iid~ q(H | x)
  ```

  Interestingly, the VIMCO gradient is not the naive gradient of `vimco`.
  Rather, it is characterized by:

  ```none
  grad[vimco] - variance_reducing_term
  where,
    variance_reducing_term = Sum{ grad[log q(h[i] | x)] *
                                    (vimco - f(log Avg{h[j;i] : j=0,...,m-1}))
                                 : i=0, ..., m-1 }
    h[j;i] = { u[j]                             j!=i
             { GeometricAverage{ u[k] : k!=i}   j==i
  ```

  (We omitted `stop_gradient` for brevity. See implementation for more details.)

  The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th
  element has been replaced by the leave-`i`-out Geometric-average.

  This implementation prefers numerical precision over efficiency, i.e.,
  `O(num_draws * num_batch_draws * prod(batch_shape) * prod(event_shape))`.
  (The constant may be fairly large, perhaps around 12.)

  Args:
    f: Python `callable` representing a Csiszar-function in log-space.
    p_log_prob: Python `callable` representing the natural-log of the
      probability under distribution `p`. (In variational inference `p` is the
      joint distribution.)
    q: `tf.Distribution`-like instance; must implement: `sample(n, seed)`, and
      `log_prob(x)`. (In variational inference `q` is the approximate posterior
      distribution.)
    num_draws: Integer scalar number of draws used to approximate the
      f-Divergence expectation.
    num_batch_draws: Integer scalar number of draws used to approximate the
      f-Divergence expectation.
    seed: Python `int` seed for `q.sample`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    vimco: The Csiszar f-Divergence generalized VIMCO objective.

  Raises:
    ValueError: if `num_draws < 2`.

  #### References

  [1]: Andriy Mnih and Danilo Rezende. Variational Inference for Monte Carlo
       objectives. In _International Conference on Machine Learning_, 2016.
       https://arxiv.org/abs/1602.06725
  """
  with tf.compat.v1.name_scope(name, "csiszar_vimco", [num_draws, num_batch_draws]):
    if num_draws < 2:
      raise ValueError("Must specify num_draws > 1.")
    stop = tf.stop_gradient  # For readability.
    # Samples are treated as constants w.r.t. differentiation.
    x = stop(q.sample(sample_shape=[num_draws, num_batch_draws], seed=seed))
    logqx = q.log_prob(x)
    logu = p_log_prob(x) - logqx
    # csiszar_vimco_helper yields (log-average, leave-one-out swap average);
    # f is applied to both.
    f_log_avg_u, f_log_sooavg_u = [f(r) for r in csiszar_vimco_helper(logu)]
    dotprod = tf.reduce_sum(
        input_tensor=logqx * stop(f_log_avg_u - f_log_sooavg_u),
        axis=0)  # Sum over iid samples.
    # We now rewrite f_log_avg_u so that:
    #   `grad[f_log_avg_u] := grad[f_log_avg_u + dotprod]`.
    # To achieve this, we use a trick that
    #   `f(x) - stop(f(x)) == zeros_like(f(x))`
    # but its gradient is grad[f(x)].
    # Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence
    # this trick loses no precision. For more discussion regarding the relevant
    # portions of the IEEE754 standard, see the StackOverflow question,
    # "Is there a floating point value of x, for which x-x == 0 is false?"
    # http://stackoverflow.com/q/2686644
    f_log_avg_u += dotprod - stop(dotprod)  # Add zeros_like(dot_prod).
    return tf.reduce_mean(input_tensor=f_log_avg_u, axis=0)
[ "def", "csiszar_vimco", "(", "f", ",", "p_log_prob", ",", "q", ",", "num_draws", ",", "num_batch_draws", "=", "1", ",", "seed", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ...
39.55102
23.132653
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """Build and return a Neo4j Bolt driver.

    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    auth_token = basic_auth(username, password)
    return GraphDatabase.driver(
        uri,
        auth=auth_token,
        encrypted=encrypted,
        max_pool_size=max_pool_size,
        trust=trust,
    )
[ "def", "get_db_driver", "(", "uri", ",", "username", "=", "None", ",", "password", "=", "None", ",", "encrypted", "=", "True", ",", "max_pool_size", "=", "50", ",", "trust", "=", "0", ")", ":", "return", "GraphDatabase", ".", "driver", "(", "uri", ",",...
39.157895
18.315789
def QAM_bb(N_symb,Ns,mod_type='16qam',pulse='rect',alpha=0.35):
    """
    Complex baseband QAM transmitter.

    x,b,tx_data = QAM_bb(K,Ns,M)

    Parameters
    ----------
    N_symb : number of symbols to process
    Ns : number of samples per symbol
    mod_type : modulation type: qpsk, 16qam, 64qam, or 256qam
    pulse : pulse shape: 'rect', 'rc', or 'src'
    alpha : excess bandwidth factor for the (square-root) raised cosine
            pulse. For DOCSIS alpha = 0.12 to 0.18; in general 0 < alpha < 1.

    Returns
    -------
    x : complex baseband digital modulation
    b : transmitter shaping filter, rectangle or SRC
    tx_data : xI + 1j*xQ, the inphase plus quadrature symbol sequences

    Mark Wickert November 2014
    """
    # Shaping filter: (square-root) raised cosine designed over 12 symbols,
    # or a simple rectangular pulse.
    pulse_key = pulse.lower()
    if pulse_key == 'src':
        b = sqrt_rc_imp(Ns,alpha,6)
    elif pulse_key == 'rc':
        b = rc_imp(Ns,alpha,6)
    elif pulse_key == 'rect':
        b = np.ones(int(Ns))  # alt. rect. pulse shape
    else:
        raise ValueError('pulse shape must be src, rc, or rect')
    # Levels per dimension for each constellation
    level_map = {'qpsk': 2, '16qam': 4, '64qam': 8, '256qam': 16}
    try:
        M = level_map[mod_type.lower()]
    except KeyError:
        raise ValueError('Unknown mod_type')
    # Random odd-integer symbol values for the I & Q channels
    xI = 2*np.random.randint(0,M,N_symb) - (M-1)
    xQ = 2*np.random.randint(0,M,N_symb) - (M-1)
    # Zero-pad (interpolate by Ns) so arbitrary pulse shaping can be applied
    pad = np.zeros((N_symb,int(Ns)-1))
    symbI = np.hstack((xI.reshape(N_symb,1),pad)).flatten()
    symbQ = np.hstack((xQ.reshape(N_symb,1),pad)).flatten()
    symb = symbI + 1j*symbQ
    if M > 2:
        symb /= (M-1)
    # Pulse-shape the impulse train
    x = signal.lfilter(b,1,symb).flatten()
    # Scale shaping filter to have unity DC gain
    b = b/sum(b)
    return x, b, xI+1j*xQ
[ "def", "QAM_bb", "(", "N_symb", ",", "Ns", ",", "mod_type", "=", "'16qam'", ",", "pulse", "=", "'rect'", ",", "alpha", "=", "0.35", ")", ":", "# Filter the impulse train waveform with a square root raised", "# cosine pulse shape designed as follows:", "# Design the filter...
37.520548
16.643836
def roster(self, year):
    """Returns the roster table for the given year.

    :year: The year for which we want the roster; defaults to current year.
    :returns: A DataFrame containing roster information for that year.
    """
    doc = self.get_year_doc('{}_roster'.format(year))
    roster_table = doc('table#games_played_team')
    df = sportsref.utils.parse_table(roster_table)
    starter_table = doc('table#starters')
    # NOTE(review): `.empty` implies doc(...) yields a pandas-like object here;
    # confirm against get_year_doc's return type.
    if not starter_table.empty:
        start_df = sportsref.utils.parse_table(starter_table)
        # drop starter rows with no listed position before indexing by it
        start_df = start_df.dropna(axis=0, subset=['position'])
        starters = start_df.set_index('position').player_id
        # annotate the roster with starter status and starting position
        df['is_starter'] = df.player_id.isin(starters)
        df['starting_pos'] = df.player_id.map(
            lambda pid: (starters[starters == pid].index[0]
                         if pid in starters.values else None)
        )
    return df
[ "def", "roster", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "'{}_roster'", ".", "format", "(", "year", ")", ")", "roster_table", "=", "doc", "(", "'table#games_played_team'", ")", "df", "=", "sportsref", ".", "utils...
47.35
19.25
def _uminumaxvmin(self,*args,**kwargs):
    """
    NAME:
       _uminumaxvmin
    PURPOSE:
       evaluate u_min, u_max, and v_min
    INPUT:
       Either:
          a) R,vR,vT,z,vz
          b) Orbit instance: initial condition used if that's it, orbit(t)
             if there is a time given as well
       c= True/False; overrides the object's c= keyword to use C or not
    OUTPUT:
       (umin,umax,vmin)
    HISTORY:
       2017-12-12 - Written - Bovy (UofT)
    """
    delta= kwargs.pop('delta',self._delta)
    # Use the C backend when the object was built with c=True (and the caller
    # did not pass c=False), or when the caller forces c=True and the C
    # extension is loaded -- but only if the potential has a C implementation.
    if ((self._c and not ('c' in kwargs and not kwargs['c']))\
            or (ext_loaded and (('c' in kwargs and kwargs['c'])))) \
            and _check_c(self._pot):
        if len(args) == 5: #R,vR.vT, z, vz
            R,vR,vT, z, vz= args
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            # Orbit-instance input: extract phase-space coordinates
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
        # The C routine expects arrays; promote scalar input
        if isinstance(R,float):
            R= nu.array([R])
            vR= nu.array([vR])
            vT= nu.array([vT])
            z= nu.array([z])
            vz= nu.array([vz])
        Lz= R*vT
        if self._useu0:
            #First calculate u0
            if 'u0' in kwargs:
                u0= nu.asarray(kwargs['u0'])
            else:
                # Energy per point, needed to compute u0 in C
                E= nu.array([_evaluatePotentials(self._pot,R[ii],z[ii])
                             +vR[ii]**2./2.+vz[ii]**2./2.+vT[ii]**2./2.
                             for ii in range(len(R))])
                u0= actionAngleStaeckel_c.actionAngleStaeckel_calcu0(\
                    E,Lz,self._pot,delta)[0]
            kwargs.pop('u0',None)
        else:
            u0= None
        umin, umax, vmin, err= \
            actionAngleStaeckel_c.actionAngleUminUmaxVminStaeckel_c(\
                self._pot,delta,R,vR,vT,z,vz,u0=u0)
        if err == 0:
            return (umin,umax,vmin)
        else: #pragma: no cover
            raise RuntimeError("C-code for calculation actions failed; try with c=False")
    else:
        # Pure-Python fallback
        if 'c' in kwargs and kwargs['c'] and not self._c: #pragma: no cover
            warnings.warn("C module not used because potential does not have a C implementation",galpyWarning)
        kwargs.pop('c',None)
        if (len(args) == 5 or len(args) == 6) \
                and isinstance(args[0],nu.ndarray):
            # Array input: recurse point by point
            oumin= nu.zeros((len(args[0])))
            oumax= nu.zeros((len(args[0])))
            ovmin= nu.zeros((len(args[0])))
            for ii in range(len(args[0])):
                if len(args) == 5:
                    targs= (args[0][ii],args[1][ii],args[2][ii],
                            args[3][ii],args[4][ii])
                elif len(args) == 6:
                    targs= (args[0][ii],args[1][ii],args[2][ii],
                            args[3][ii],args[4][ii],args[5][ii])
                tkwargs= copy.copy(kwargs)
                # delta may be per-point (array) or a single scalar
                try:
                    tkwargs['delta']= delta[ii]
                except TypeError:
                    tkwargs['delta']= delta
                tumin,tumax,tvmin= self._uminumaxvmin(\
                    *targs,**tkwargs)
                oumin[ii]= tumin
                oumax[ii]= tumax
                ovmin[ii]= tvmin
            return (oumin,oumax,ovmin)
        else:
            #Set up the actionAngleStaeckelSingle object
            aASingle= actionAngleStaeckelSingle(*args,pot=self._pot,
                                                delta=delta)
            umin, umax= aASingle.calcUminUmax()
            vmin= aASingle.calcVmin()
            return (umin,umax,vmin)
[ "def", "_uminumaxvmin", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "delta", "=", "kwargs", ".", "pop", "(", "'delta'", ",", "self", ".", "_delta", ")", "if", "(", "(", "self", ".", "_c", "and", "not", "(", "'c'", "in", "k...
42.695652
14.565217
def get_configured_provider():
    '''
    Return the first configured instance.

    Checks the ``auth``-based configuration first and falls back to the
    ``cloud``-based one.
    '''
    active = __active_provider_name__ or __virtualname__
    provider = config.is_provider_configured(
        __opts__,
        active,
        ('auth', 'region_name'),
        log_message=False,
    )
    if provider:
        return provider
    return config.is_provider_configured(
        __opts__,
        active,
        ('cloud', 'region_name')
    )
[ "def", "get_configured_provider", "(", ")", ":", "return", "config", ".", "is_provider_configured", "(", "__opts__", ",", "__active_provider_name__", "or", "__virtualname__", ",", "(", "'auth'", ",", "'region_name'", ")", ",", "log_message", "=", "False", ",", ")"...
34.272727
16.090909
def start_monitor(self):
    """Start the monitor process and record it in ``all_processes``."""
    out_log, err_log = self.new_log_files("monitor")
    info = ray.services.start_monitor(
        self._redis_address,
        stdout_file=out_log,
        stderr_file=err_log,
        autoscaling_config=self._ray_params.autoscaling_config,
        redis_password=self._ray_params.redis_password)
    # Exactly one monitor may be registered per node.
    assert ray_constants.PROCESS_TYPE_MONITOR not in self.all_processes
    self.all_processes[ray_constants.PROCESS_TYPE_MONITOR] = [info]
[ "def", "start_monitor", "(", "self", ")", ":", "stdout_file", ",", "stderr_file", "=", "self", ".", "new_log_files", "(", "\"monitor\"", ")", "process_info", "=", "ray", ".", "services", ".", "start_monitor", "(", "self", ".", "_redis_address", ",", "stdout_fi...
50.363636
16.909091
def _get_hashed_path(self, path): """Returns an md5 hash for the specified file path.""" return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())
[ "def", "_get_hashed_path", "(", "self", ",", "path", ")", ":", "return", "self", ".", "_get_path", "(", "'%s.pkl'", "%", "hashlib", ".", "md5", "(", "path", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")", ")" ]
60.666667
18
def new_result(self, data_mode='value', time_mode='framewise'):
    '''
    Create a new result

    Attributes
    ----------
    data_object : MetadataObject
    id_metadata : MetadataObject
    audio_metadata : MetadataObject
    frame_metadata : MetadataObject
    label_metadata : MetadataObject
    parameters : dict
    '''
    from datetime import datetime

    result = AnalyzerResult(data_mode=data_mode, time_mode=time_mode)

    # Automatically write known metadata
    # Identification metadata of this analyzer (timestamp truncated to seconds)
    result.id_metadata.date = datetime.now().replace(
        microsecond=0).isoformat(' ')
    result.id_metadata.version = timeside.core.__version__
    result.id_metadata.author = 'TimeSide'
    result.id_metadata.id = self.id()
    result.id_metadata.name = self.name()
    result.id_metadata.description = self.description()
    result.id_metadata.unit = self.unit()
    result.id_metadata.proc_uuid = self.uuid()

    # Audio metadata copied from the processed media
    result.audio_metadata.uri = self.mediainfo()['uri']
    result.audio_metadata.sha1 = self.mediainfo()['sha1']
    result.audio_metadata.start = self.mediainfo()['start']
    result.audio_metadata.duration = self.mediainfo()['duration']
    result.audio_metadata.is_segment = self.mediainfo()['is_segment']
    result.audio_metadata.channels = self.channels()

    result.parameters = Parameters(self.get_parameters())

    # Framewise results also carry the framing parameters
    if time_mode == 'framewise':
        result.data_object.frame_metadata.samplerate = self.result_samplerate
        result.data_object.frame_metadata.blocksize = self.result_blocksize
        result.data_object.frame_metadata.stepsize = self.result_stepsize

    return result
[ "def", "new_result", "(", "self", ",", "data_mode", "=", "'value'", ",", "time_mode", "=", "'framewise'", ")", ":", "from", "datetime", "import", "datetime", "result", "=", "AnalyzerResult", "(", "data_mode", "=", "data_mode", ",", "time_mode", "=", "time_mode...
37.586957
20.5
def write_pkg_file(self, file):
    """Write the PKG-INFO format data to a file object.
    """
    version = self.get_metadata_version()

    # Python 2 needs field values encoded before writing
    if six.PY2:
        def write_field(key, value):
            file.write("%s: %s\n" % (key, self._encode_field(value)))
    else:
        def write_field(key, value):
            file.write("%s: %s\n" % (key, value))

    write_field('Metadata-Version', str(version))
    write_field('Name', self.get_name())
    write_field('Version', self.get_version())
    write_field('Summary', self.get_description())
    write_field('Home-page', self.get_url())

    if version < StrictVersion('1.2'):
        # Metadata 1.1 and earlier: single combined contact fields
        write_field('Author', self.get_contact())
        write_field('Author-email', self.get_contact_email())
    else:
        # Metadata 1.2+: author/maintainer fields are separate and optional
        optional_fields = (
            ('Author', 'author'),
            ('Author-email', 'author_email'),
            ('Maintainer', 'maintainer'),
            ('Maintainer-email', 'maintainer_email'),
        )

        for field, attr in optional_fields:
            attr_val = getattr(self, attr)
            if attr_val is not None:
                write_field(field, attr_val)

    write_field('License', self.get_license())
    if self.download_url:
        write_field('Download-URL', self.download_url)
    for project_url in self.project_urls.items():
        write_field('Project-URL', '%s, %s' % project_url)

    long_desc = rfc822_escape(self.get_long_description())
    write_field('Description', long_desc)

    keywords = ','.join(self.get_keywords())
    if keywords:
        write_field('Keywords', keywords)

    if version >= StrictVersion('1.2'):
        # One Platform field per platform in 1.2+
        for platform in self.get_platforms():
            write_field('Platform', platform)
    else:
        self._write_list(file, 'Platform', self.get_platforms())

    self._write_list(file, 'Classifier', self.get_classifiers())

    # PEP 314
    self._write_list(file, 'Requires', self.get_requires())
    self._write_list(file, 'Provides', self.get_provides())
    self._write_list(file, 'Obsoletes', self.get_obsoletes())

    # Setuptools specific for PEP 345
    if hasattr(self, 'python_requires'):
        write_field('Requires-Python', self.python_requires)

    # PEP 566
    if self.long_description_content_type:
        write_field(
            'Description-Content-Type',
            self.long_description_content_type
        )
    if self.provides_extras:
        for extra in self.provides_extras:
            write_field('Provides-Extra', extra)
[ "def", "write_pkg_file", "(", "self", ",", "file", ")", ":", "version", "=", "self", ".", "get_metadata_version", "(", ")", "if", "six", ".", "PY2", ":", "def", "write_field", "(", "key", ",", "value", ")", ":", "file", ".", "write", "(", "\"%s: %s\\n\...
32.72973
16.567568
def _convert_nominal_form( analysis ):
    ''' Converts nominal categories of the input analysis in place.

        Performs one-to-one substring conversions on the FORM field,
        applying every matching rule from _noun_conversion_rules.

        :param analysis: dict that must contain the FORM key
        :return: the (mutated) analysis dict
    '''
    assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.'
    # The loop index from the original enumerate() was never used; iterate
    # the rule pairs directly.
    for pattern_str, replacement in _noun_conversion_rules:
        if pattern_str in analysis[FORM]:
            analysis[FORM] = analysis[FORM].replace( pattern_str, replacement )
    return analysis
[ "def", "_convert_nominal_form", "(", "analysis", ")", ":", "assert", "FORM", "in", "analysis", ",", "'(!) The input analysis does not contain \"'", "+", "FORM", "+", "'\" key.'", "for", "idx", ",", "pattern_items", "in", "enumerate", "(", "_noun_conversion_rules", ")"...
53.555556
18.666667
def run(self, conf, arg, err):
    """
    Run the compiled Weld module and return its result.

    WeldContext is currently hidden from the Python API. We create a new
    context per Weld run and give ownership of it to the resulting value.

    NOTE: This can leak the context if the result of the Weld run is an error.

    :param conf: configuration wrapper exposing a raw handle as ``conf.conf``
    :param arg: input value wrapper exposing a raw handle as ``arg.val``
    :param err: error wrapper exposing a raw handle as ``err.error``
    :return: a WeldValue owning both the run result and the new context
    """
    # Declare the ctypes signature of weld_context_new before calling it
    weld_context_new = weld.weld_context_new
    weld_context_new.argtypes = [c_weld_conf]
    weld_context_new.restype = c_weld_context
    ctx = weld_context_new(conf.conf)
    weld_module_run = weld.weld_module_run
    # module, context, arg, &err
    weld_module_run.argtypes = [
        c_weld_module, c_weld_context, c_weld_value, c_weld_err]
    weld_module_run.restype = c_weld_value
    ret = weld_module_run(self.module, ctx, arg.val, err.error)
    # The returned WeldValue keeps the context alive (and frees it later)
    return WeldValue(ret, assign=True, _ctx=ctx)
[ "def", "run", "(", "self", ",", "conf", ",", "arg", ",", "err", ")", ":", "weld_context_new", "=", "weld", ".", "weld_context_new", "weld_context_new", ".", "argtypes", "=", "[", "c_weld_conf", "]", "weld_context_new", ".", "restype", "=", "c_weld_context", ...
41.6
16.9
def staticMovingAverage2(x, N=3, mode='reflect'):
    """
    Moving average filter for 1d arrays.

    Averages a forward and a backward cumulative-sum pass over the
    boundary-padded signal; the output has the same length as ``x``.

    Parameters
    ----------
    x : 1d array
    N : int
        window size, must be > 1
    mode : str
        boundary handling, 'reflect' or 'nearest'

    Raises
    ------
    NotImplementedError
        for any other ``mode``
    """
    assert N > 1
    # padded working copy: `start` samples before x, `end` samples after
    x2 = np.empty(shape=x.shape[0] + N, dtype=x.dtype)
    start = N - 2
    if N == 2:
        start = 1
    end = N - start
    x2[start:-end] = x
    # fill boundaries
    if mode == 'reflect':
        # mirror values around the first/last sample
        x2[:start] = x[0] + x[0] - x[start - 1::-1]
        x2[-end:] = x[-1] + x[-1] - x[-2:-end - 2:-1]
    elif mode == 'nearest':
        x2[:start] = x[0]
        x2[-end:] = x[-1]
    else:
        raise NotImplementedError("mode='%s' not supported" % mode)
    # forward and backward windowed sums via cumsum differences
    a1 = np.cumsum(x2)
    a1 = (a1[N:] - a1[:-N]) / N
    a2 = np.cumsum(x2[::-1])
    a2 = (a2[N:] - a2[:-N]) / N
    # average of the two passes; unreachable dead code after the original
    # return (a second, plain cumsum variant) has been removed
    return 0.5 * (a1 + a2[::-1])
[ "def", "staticMovingAverage2", "(", "x", ",", "N", "=", "3", ",", "mode", "=", "'reflect'", ")", ":", "assert", "N", ">", "1", "x2", "=", "np", ".", "empty", "(", "shape", "=", "x", ".", "shape", "[", "0", "]", "+", "N", ",", "dtype", "=", "x...
25.382353
18.558824
def repair(source, validate_archive=False):
    """Use auditwheel (https://github.com/pypa/auditwheel) to attempt and repair
    all wheels in a wagon.

    The repair process will:
    1. Extract the wagon and its metadata
    2. Repair all wheels
    3. Update the metadata with the new wheel names and platform
    4. Repack the wagon

    :param source: path or URL of the wagon archive to repair
    :param validate_archive: when True, validate the repacked wagon
    :return: absolute path of the newly created wagon archive
    """
    # fail fast if the auditwheel executable is missing
    _assert_auditwheel_exists()
    logger.info('Repairing: %s', source)
    processed_source = get_source(source)
    metadata = _get_metadata(processed_source)

    # repairing may rename wheels and change the supported platform tag
    new_metadata = _repair_wheels(processed_source, metadata)
    archive_name = _set_archive_name(
        new_metadata['package_name'],
        new_metadata['package_version'],
        new_metadata['supported_python_versions'],
        new_metadata['supported_platform'],
        new_metadata['build_tag'])
    # rewrite the metadata file to reflect the repaired wheels
    _generate_metadata_file(
        processed_source,
        archive_name,
        new_metadata['supported_platform'],
        new_metadata['supported_python_versions'],
        new_metadata['package_name'],
        new_metadata['package_version'],
        new_metadata['build_tag'],
        new_metadata['package_source'],
        new_metadata['wheels'])

    # repack into the current working directory
    archive_path = os.path.join(os.getcwd(), archive_name)
    _create_wagon_archive(processed_source, archive_path)
    if validate_archive:
        validate(archive_path)
    logger.info('Wagon created successfully at: %s', archive_path)
    return archive_path
[ "def", "repair", "(", "source", ",", "validate_archive", "=", "False", ")", ":", "_assert_auditwheel_exists", "(", ")", "logger", ".", "info", "(", "'Repairing: %s'", ",", "source", ")", "processed_source", "=", "get_source", "(", "source", ")", "metadata", "=...
33.380952
13.452381
async def teardown_conn(self, context):
    """Teardown a connection from a client."""
    client_id = context.user_data
    self._logger.info("Tearing down client connection: %s", client_id)
    # Remove the client if registered; otherwise just note the anomaly.
    if client_id in self.clients:
        del self.clients[client_id]
    else:
        self._logger.warning("client_id %s did not exist in teardown_conn", client_id)
[ "async", "def", "teardown_conn", "(", "self", ",", "context", ")", ":", "client_id", "=", "context", ".", "user_data", "self", ".", "_logger", ".", "info", "(", "\"Tearing down client connection: %s\"", ",", "client_id", ")", "if", "client_id", "not", "in", "s...
38.3
19.7
def load_dict(self, source, namespace=''):
    """ Load values from a dictionary structure. Nesting can be used to
        represent namespaces.

        Example: ``c.load_dict({'some': {'namespace': {'key': 'value'}}})``
        stores ``{'some.namespace.key': 'value'}``.

        Raises TypeError when a key is not a string. Returns self.
    """
    for key, value in source.items():
        # guard clause: only string keys can form namespace paths
        if not isinstance(key, str):
            raise TypeError('Key has type %r (not a string)' % type(key))
        nskey = (namespace + '.' + key).strip('.')
        if isinstance(value, dict):
            # recurse into nested namespace
            self.load_dict(value, namespace=nskey)
        else:
            self[nskey] = value
    return self
[ "def", "load_dict", "(", "self", ",", "source", ",", "namespace", "=", "''", ")", ":", "for", "key", ",", "value", "in", "source", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "str", ")", ":", "nskey", "=", "(", "namespace", ...
39.833333
13.166667
def _psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
    """
    Compute the psi statistics (psi0, psi1, psi2) on the GPU.

    Z - MxQ
    mu - NxQ
    S - NxQ

    Returns (psi0, psi1, psi2) -- GPU arrays when self.GPU_direct is set,
    otherwise host (numpy) copies. With return_psi2_n=True the per-data-point
    psi2 (psi2n) is returned instead of the summed psi2.
    """
    variance, lengthscale = kern.variance, kern.lengthscale

    N,M,Q = self.get_dimensions(Z, variational_posterior)
    self._initGPUCache(N,M,Q)
    # upload kernel/posterior parameters to device buffers
    self.sync_params(lengthscale, Z, variational_posterior.mean, variational_posterior.variance)

    psi1_gpu = self.gpuCache['psi1_gpu']
    psi2_gpu = self.gpuCache['psi2_gpu']
    psi2n_gpu = self.gpuCache['psi2n_gpu']
    l_gpu = self.gpuCache['l_gpu']
    Z_gpu = self.gpuCache['Z_gpu']
    mu_gpu = self.gpuCache['mu_gpu']
    S_gpu = self.gpuCache['S_gpu']
    log_denom1_gpu = self.gpuCache['log_denom1_gpu']
    log_denom2_gpu = self.gpuCache['log_denom2_gpu']

    # psi0 is constant: the kernel variance for every data point
    psi0 = np.empty((N,))
    psi0[:] = variance

    # launch the pre-prepared CUDA kernels for psi1 and psi2/psi2n
    self.g_psi1computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi1_gpu.gpudata, log_denom1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))
    self.g_psi2computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi2_gpu.gpudata, psi2n_gpu.gpudata, log_denom2_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))

    if self.GPU_direct:
        # caller works with device arrays directly; skip the host copy
        return psi0, psi1_gpu, psi2_gpu
    else:
        if return_psi2_n:
            return psi0, psi1_gpu.get(), psi2n_gpu.get()
        else:
            return psi0, psi1_gpu.get(), psi2_gpu.get()
[ "def", "_psicomputations", "(", "self", ",", "kern", ",", "Z", ",", "variational_posterior", ",", "return_psi2_n", "=", "False", ")", ":", "variance", ",", "lengthscale", "=", "kern", ".", "variance", ",", "kern", ".", "lengthscale", "N", ",", "M", ",", ...
58.567568
35.756757
def parse_code(self):
    """
    Read the source code and return all the import statements.

    Returns:
        list of dict: the import statements, or [] when the source
        cannot be parsed.
    """
    # use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it)
    with open(self.path, encoding='utf-8') as fh:
        code = fh.read()
    try:
        body = ast.parse(code).body
    except SyntaxError:
        try:
            # retry on the encoded bytes -- presumably to cope with sources
            # carrying an explicit encoding declaration; confirm intent
            code = code.encode('utf-8')
            body = ast.parse(code).body
        except SyntaxError:
            return []
    return self.get_imports(body)
[ "def", "parse_code", "(", "self", ")", ":", "code", "=", "open", "(", "self", ".", "path", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "try", ":", "body", "=", "ast", ".", "parse", "(", "code", ")", ".", "body", "except", "Syntax...
29.470588
13.588235
def is_multifile_object_without_children(self, location: str) -> bool:
    """
    Return True if an item with this location is present as a multifile
    object without children. For this implementation, this means that there
    is a file with the appropriate name but without extension.

    :param location:
    :return:
    """
    if isdir(location):
        # special case: location is the root folder where all the files
        # live -- it qualifies only when it has no multifile children
        return len(self.find_multifile_object_children(location)) == 0
    # a plain existing file without extension counts as a childless
    # multifile object
    return exists(location)
[ "def", "is_multifile_object_without_children", "(", "self", ",", "location", ":", "str", ")", "->", "bool", ":", "# (1) Find the base directory and base name", "if", "isdir", "(", "location", ")", ":", "# special case: parent location is the root folder where all the files are....
48.888889
30.222222
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
    """Generate a DownloadJob that actually triggers a file download.

    :param entry: assembly summary row; its 'ftp_path' field locates the files
    :param directory: local directory the file will be written into
    :param checksums: parsed checksum listing used to pick the file and its hash
    :param filetype: logical file type, mapped to a filename pattern
    :param symlink_path: optional directory in which to create a symlink
    :return: a DownloadJob describing url, local path, checksum and symlink
    """
    pattern = NgdConfig.get_fileending(filetype)
    filename, expected_checksum = get_name_and_checksum(checksums, pattern)
    # NCBI serves the same tree over https; convert the ftp:// url first
    base_url = convert_ftp_url(entry['ftp_path'])
    full_url = '{}/{}'.format(base_url, filename)
    local_file = os.path.join(directory, filename)
    full_symlink = None
    if symlink_path is not None:
        full_symlink = os.path.join(symlink_path, filename)
    # Keep metadata around
    mtable = metadata.get()
    mtable.add(entry, local_file)
    return DownloadJob(full_url, local_file, expected_checksum, full_symlink)
[ "def", "download_file_job", "(", "entry", ",", "directory", ",", "checksums", ",", "filetype", "=", "'genbank'", ",", "symlink_path", "=", "None", ")", ":", "pattern", "=", "NgdConfig", ".", "get_fileending", "(", "filetype", ")", "filename", ",", "expected_ch...
44.375
19.75
def random_uniform(attrs, inputs, proto_obj):
    """Draw random samples from a uniform distribution."""
    # onnx is an optional dependency; fail with a helpful hint if missing
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          "Instructions to install - https://github.com/onnx/onnx")
    new_attrs = translation_utils._remove_attributes(attrs, ['seed'])
    # map the ONNX tensor-type index (default 1 = float32) to a numpy dtype
    dtype_index = int(new_attrs.get('dtype', 1))
    new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[dtype_index]
    return 'random_uniform', new_attrs, inputs
[ "def", "random_uniform", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "try", ":", "from", "onnx", ".", "mapping", "import", "TENSOR_TYPE_TO_NP_TYPE", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Onnx and protobuf need to be installed. \"",...
53.4
21.4
def fact(name, puppet=False):
    '''
    Run facter for a specific fact

    CLI Example:

    .. code-block:: bash

        salt '*' puppet.fact kernel
    '''
    puppet_flag = '--puppet' if puppet else ''
    ret = __salt__['cmd.run_all'](
        'facter {0} {1}'.format(puppet_flag, name),
        python_shell=False)
    # a non-zero exit from facter is an execution error
    if ret['retcode'] != 0:
        raise CommandExecutionError(ret['stderr'])
    # empty stdout maps to the empty string
    return ret['stdout'] if ret['stdout'] else ''
[ "def", "fact", "(", "name", ",", "puppet", "=", "False", ")", ":", "opt_puppet", "=", "'--puppet'", "if", "puppet", "else", "''", "ret", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'facter {0} {1}'", ".", "format", "(", "opt_puppet", ",", "name", "...
21.857143
20.904762
def get_project(self, project_id):
    """
    Get project info.

    :param project_id: identifier sent to the ``/getproject/`` endpoint
    :return: a TildaProject built from the response, or an empty list
        when the request fails with NetworkError
    """
    try:
        result = self._request('/getproject/',
                               {'projectid': project_id})
        return TildaProject(**result)
    except NetworkError:
        # NOTE(review): returning [] here is inconsistent with the
        # TildaProject success value; callers must handle both types
        return []
[ "def", "get_project", "(", "self", ",", "project_id", ")", ":", "try", ":", "result", "=", "self", ".", "_request", "(", "'/getproject/'", ",", "{", "'projectid'", ":", "project_id", "}", ")", "return", "TildaProject", "(", "*", "*", "result", ")", "exce...
34.875
12.125
def jr6_jr6(mag_file, dir_path=".", input_dir_path="", meas_file="measurements.txt", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", specnum=1, samp_con='1', location='unknown', lat='', lon='', noave=False, meth_code="LP-NO", volume=12, JR=False, user=""): """ Convert JR6 .jr6 files to MagIC file(s) Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '1', see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) meth_code : str colon-delimited method codes, default "LP-NO" volume : float volume in ccs, default 12 JR : bool IODP samples were measured on the JOIDES RESOLUTION, default False user : str user name, default "" Returns --------- Tuple : (True or False indicating if conversion was sucessful, meas_file name written) Info -------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. 
[default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY """ version_num = pmag.get_version() input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) specnum = - int(specnum) samp_con = str(samp_con) volume = float(volume) * 1e-6 # need to add these meas_file = pmag.resolve_file_name(meas_file, output_dir_path) spec_file = pmag.resolve_file_name(spec_file, output_dir_path) samp_file = pmag.resolve_file_name(samp_file, output_dir_path) site_file = pmag.resolve_file_name(site_file, output_dir_path) loc_file = pmag.resolve_file_name(loc_file, output_dir_path) mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if JR: if meth_code == "LP-NO": meth_code = "" meth_code = meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V" meth_code = meth_code.strip(":") samp_con = '5' # format variables tmp_file = mag_file.split(os.extsep)[0]+os.extsep+'tmp' mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if samp_con.startswith("4"): if "-" not in samp_con: print("option [4] must be in form 4-Z where Z is an integer") return False, "naming convention option [4] must be in form 4-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "4" elif samp_con.startswith("7"): if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "naming convention option [7] must be in form 7-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "7" else: Z = 1 # parse data # fix .jr6 file so that there are spaces between all the columns. 
pre_data = open(mag_file, 'r') tmp_data = open(tmp_file, 'w') if samp_con != '2': fixed_data = pre_data.read().replace('-', ' -') else: fixed_data = "" for line in pre_data.readlines(): entries = line.split() if len(entries) < 2: continue fixed_line = entries[0] + ' ' + reduce( lambda x, y: x+' '+y, [x.replace('-', ' -') for x in entries[1:]]) fixed_data += fixed_line+os.linesep tmp_data.write(fixed_data) tmp_data.close() pre_data.close() if not JR: column_names = ['specimen', 'step', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] else: # measured on the Joides Resolution JR6 column_names = ['specimen', 'step', 'negz', 'y', 'x', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if isinstance(data['x'][0], str): column_names = ['specimen', 'step', 'step_unit', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if JR: data['z'] = -data['negz'] cart = np.array([data['x'], data['y'], data['z']]).transpose() dir_dat = pmag.cart2dir(cart).transpose() data['dir_dec'] = dir_dat[0] data['dir_inc'] = dir_dat[1] # the data are in A/m - this converts to Am^2 data['magn_moment'] = dir_dat[2]*(10.0**data['expon'])*volume data['magn_volume'] = dir_dat[2] * \ (10.0**data['expon']) # A/m - data in A/m data['dip'] = -data['dip'] data['specimen'] # put data into magic tables MagRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], [] for rowNum, row in data.iterrows(): MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {} specimen = row['specimen'] if specnum != 0: sample = specimen[:specnum] else: 
sample = specimen site = pmag.parse_site(sample, samp_con, Z) if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]: SpecRec['specimen'] = specimen SpecRec['sample'] = sample SpecRec["citations"] = "This study" SpecRec["analysts"] = user SpecRec['volume'] = volume SpecRecs.append(SpecRec) if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]: SampRec['sample'] = sample SampRec['site'] = site SampRec["citations"] = "This study" SampRec["analysts"] = user SampRec['azimuth'] = row['azimuth'] SampRec['dip'] = row['dip'] SampRec['bed_dip_direction'] = row['bed_dip_direction'] SampRec['bed_dip'] = row['bed_dip'] SampRec['method_codes'] = meth_code SampRecs.append(SampRec) if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]: SiteRec['site'] = site SiteRec['location'] = location SiteRec["citations"] = "This study" SiteRec["analysts"] = user SiteRec['lat'] = lat SiteRec['lon'] = lon SiteRecs.append(SiteRec) if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]: LocRec['location'] = location LocRec["citations"] = "This study" LocRec["analysts"] = user LocRec['lat_n'] = lat LocRec['lon_e'] = lon LocRec['lat_s'] = lat LocRec['lon_w'] = lon LocRecs.append(LocRec) MeasRec["citations"] = "This study" MeasRec["analysts"] = user MeasRec["specimen"] = specimen MeasRec['software_packages'] = version_num MeasRec["treat_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["quality"] = 'g' MeasRec["standard"] = 'u' MeasRec["treat_step_num"] = 0 MeasRec["treat_ac_field"] = '0' if row['step'] == 'NRM': meas_type = "LT-NO" elif 'step_unit' in row and row['step_unit'] == 'C': meas_type = "LT-T-Z" treat = float(row['step']) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) 
# temp in kelvin elif row['step'][0:2] == 'AD': meas_type = "LT-AF-Z" treat = float(row['step'][2:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'A': meas_type = "LT-AF-Z" treat = float(row['step'][1:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'TD': meas_type = "LT-T-Z" treat = float(row['step'][2:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin elif row['step'][0] == 'T': meas_type = "LT-T-Z" treat = float(row['step'][1:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin else: # need to add IRM, and ARM options print("measurement type unknown", row['step']) return False, "measurement type unknown" MeasRec["magn_moment"] = str(row['magn_moment']) MeasRec["magn_volume"] = str(row['magn_volume']) MeasRec["dir_dec"] = str(row['dir_dec']) MeasRec["dir_inc"] = str(row['dir_inc']) MeasRec['method_codes'] = meas_type MagRecs.append(MeasRec) con = cb.Contribution(output_dir_path, read_tables=[]) con.add_magic_table_from_data(dtype='specimens', data=SpecRecs) con.add_magic_table_from_data(dtype='samples', data=SampRecs) con.add_magic_table_from_data(dtype='sites', data=SiteRecs) con.add_magic_table_from_data(dtype='locations', data=LocRecs) MeasOuts = pmag.measurements_methods3(MagRecs, noave) con.add_magic_table_from_data(dtype='measurements', data=MeasOuts) con.tables['specimens'].write_magic_file(custom_name=spec_file) con.tables['samples'].write_magic_file(custom_name=samp_file) con.tables['sites'].write_magic_file(custom_name=site_file) con.tables['locations'].write_magic_file(custom_name=loc_file) con.tables['measurements'].write_magic_file(custom_name=meas_file) try: os.remove(tmp_file) except (OSError, IOError) as e: print("couldn't remove temperary fixed JR6 file %s" % tmp_file) return True, meas_file
[ "def", "jr6_jr6", "(", "mag_file", ",", "dir_path", "=", "\".\"", ",", "input_dir_path", "=", "\"\"", ",", "meas_file", "=", "\"measurements.txt\"", ",", "spec_file", "=", "\"specimens.txt\"", ",", "samp_file", "=", "\"samples.txt\"", ",", "site_file", "=", "\"s...
42.581749
20.087452
def community_topic_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/topics#show-topic" api_path = "/api/v2/community/topics/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "community_topic_show", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/community/topics/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", ...
53.6
13.6
def init_state_from_encoder(self, encoder_outputs, encoder_valid_length=None): """Initialize the state from the encoder outputs. Parameters ---------- encoder_outputs : list encoder_valid_length : NDArray or None Returns ------- decoder_states : list The decoder states, includes: - rnn_states : NDArray - attention_vec : NDArray - mem_value : NDArray - mem_masks : NDArray, optional """ mem_value, rnn_states = encoder_outputs batch_size, _, mem_size = mem_value.shape attention_vec = mx.nd.zeros(shape=(batch_size, mem_size), ctx=mem_value.context) decoder_states = [rnn_states, attention_vec, mem_value] mem_length = mem_value.shape[1] if encoder_valid_length is not None: mem_masks = mx.nd.broadcast_lesser( mx.nd.arange(mem_length, ctx=encoder_valid_length.context).reshape((1, -1)), encoder_valid_length.reshape((-1, 1))) decoder_states.append(mem_masks) return decoder_states
[ "def", "init_state_from_encoder", "(", "self", ",", "encoder_outputs", ",", "encoder_valid_length", "=", "None", ")", ":", "mem_value", ",", "rnn_states", "=", "encoder_outputs", "batch_size", ",", "_", ",", "mem_size", "=", "mem_value", ".", "shape", "attention_v...
37.965517
16.517241
def targeted_dropout(inputs, k, keep_prob, targeting_fn, is_training, do_prune=False): """Applies targeted dropout. Applies dropout at a rate of `1 - keep_prob` to only those elements of `inputs` marked by `targeting_fn`. See below and paper for more detail: "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang, Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton. Args: inputs: Tensor, inputs to apply targeted dropout to. k: Scalar Tensor or python scalar, sets the number of elements to target in `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with second argument of `targeting_fn`. keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument. targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a boolean mask the same shape as `inputs` where True indicates an element will be dropped, and False not. is_training: bool, indicates whether currently training. do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)` elements of `inputs` expected to be dropped each forwards pass. Returns: Tensor, same shape and dtype as `inputs`. """ if not is_training and do_prune: k = tf.round(to_float(k) * to_float(1. - keep_prob)) mask = targeting_fn(inputs, k) mask = tf.cast(mask, inputs.dtype) if is_training: return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask elif do_prune: return inputs * (1 - mask) else: return inputs
[ "def", "targeted_dropout", "(", "inputs", ",", "k", ",", "keep_prob", ",", "targeting_fn", ",", "is_training", ",", "do_prune", "=", "False", ")", ":", "if", "not", "is_training", "and", "do_prune", ":", "k", "=", "tf", ".", "round", "(", "to_float", "("...
37.619048
22.071429
def update_roles(self, roles='view'): """ Updates the roles of this permission :return: Success / Failure :rtype: bool """ if not self.object_id: return False url = self.build_url(self._endpoints.get('permission').format( driveitem_id=self.driveitem_id, id=self.object_id)) if roles in {'view', 'read'}: data = {'roles': ['read']} elif roles == {'edit', 'write'}: data = {'roles': ['write']} else: raise ValueError('"{}" is not a valid share_type'.format(roles)) response = self.con.patch(url, data=data) if not response: return False self.roles = data.get('roles', []) return True
[ "def", "update_roles", "(", "self", ",", "roles", "=", "'view'", ")", ":", "if", "not", "self", ".", "object_id", ":", "return", "False", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'permission'", ")", ".", ...
29.52
17.64
def getComponentByName(self, name, default=noValue, instantiate=True): """Returns |ASN.1| type component by name. Equivalent to Python :class:`dict` subscription operation (e.g. `[]`). Parameters ---------- name: :class:`str` |ASN.1| type component name Keyword Args ------------ default: :class:`object` If set and requested component is a schema object, return the `default` object instead of the requested component. instantiate: :class:`bool` If `True` (default), inner component will be automatically instantiated. If 'False' either existing component or the `noValue` object will be returned. Returns ------- : :py:class:`~pyasn1.type.base.PyAsn1Item` Instantiate |ASN.1| component type or return existing component value """ if self._componentTypeLen: idx = self.componentType.getPositionByName(name) else: try: idx = self._dynamicNames.getPositionByName(name) except KeyError: raise error.PyAsn1Error('Name %s not found' % (name,)) return self.getComponentByPosition(idx, default=default, instantiate=instantiate)
[ "def", "getComponentByName", "(", "self", ",", "name", ",", "default", "=", "noValue", ",", "instantiate", "=", "True", ")", ":", "if", "self", ".", "_componentTypeLen", ":", "idx", "=", "self", ".", "componentType", ".", "getPositionByName", "(", "name", ...
35.361111
25.388889
def copy(self, **replacements): """Returns a clone of this JarDependency with the given replacements kwargs overlaid.""" cls = type(self) kwargs = self._asdict() for key, val in replacements.items(): if key == 'excludes': val = JarDependency._prepare_excludes(val) kwargs[key] = val org = kwargs.pop('org') base_name = kwargs.pop('base_name') return cls(org, base_name, **kwargs)
[ "def", "copy", "(", "self", ",", "*", "*", "replacements", ")", ":", "cls", "=", "type", "(", "self", ")", "kwargs", "=", "self", ".", "_asdict", "(", ")", "for", "key", ",", "val", "in", "replacements", ".", "items", "(", ")", ":", "if", "key", ...
37.909091
8.818182
def wns_send_message( uri, message=None, xml_data=None, raw_data=None, application_id=None, **kwargs ): """ Sends a notification request to WNS. There are four notification types that WNS can send: toast, tile, badge and raw. Toast, tile, and badge can all be customized to use different templates/icons/sounds/launch params/etc. See docs for more information: https://msdn.microsoft.com/en-us/library/windows/apps/br212853.aspx There are multiple ways to input notification data: 1. The simplest and least custom notification to send is to just pass a string to `message`. This will create a toast notification with one text element. e.g.: "This is my notification title" 2. You can also pass a dictionary to `message`: it can only contain one or both keys: ["text", "image"]. The value of each key must be a list with the text and src respectively. e.g.: { "text": ["text1", "text2"], "image": ["src1", "src2"], } 3. Passing a dictionary to `xml_data` will create one of three types of notifications depending on the dictionary data (toast, tile, badge). See `dict_to_xml_schema` docs for more information on dictionary formatting. 4. Passing a value to `raw_data` will create a `raw` notification and send the input data as is. :param uri: str: The device's unique notification uri. :param message: str|dict: The notification data to be sent. :param xml_data: dict: A dictionary containing data to be converted to an xml tree. :param raw_data: str: Data to be sent via a `raw` notification. 
""" # Create a simple toast notification if message: wns_type = "wns/toast" if isinstance(message, str): message = { "text": [message, ], } prepared_data = _wns_prepare_toast(data=message, **kwargs) # Create a toast/tile/badge notification from a dictionary elif xml_data: xml = dict_to_xml_schema(xml_data) wns_type = "wns/%s" % xml.tag prepared_data = ET.tostring(xml) # Create a raw notification elif raw_data: wns_type = "wns/raw" prepared_data = raw_data else: raise TypeError( "At least one of the following parameters must be set:" "`message`, `xml_data`, `raw_data`" ) return _wns_send( uri=uri, data=prepared_data, wns_type=wns_type, application_id=application_id )
[ "def", "wns_send_message", "(", "uri", ",", "message", "=", "None", ",", "xml_data", "=", "None", ",", "raw_data", "=", "None", ",", "application_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Create a simple toast notification", "if", "message", ":...
34.857143
24.31746
def page(self, status=values.unset, phone_number=values.unset, incoming_phone_number_sid=values.unset, friendly_name=values.unset, unique_name=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of HostedNumberOrderInstance records from the API. Request is executed immediately :param HostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode phone_number: An E164 formatted phone number. :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid. :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of HostedNumberOrderInstance :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderPage """ params = values.of({ 'Status': status, 'PhoneNumber': phone_number, 'IncomingPhoneNumberSid': incoming_phone_number_sid, 'FriendlyName': friendly_name, 'UniqueName': unique_name, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return HostedNumberOrderPage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "status", "=", "values", ".", "unset", ",", "phone_number", "=", "values", ".", "unset", ",", "incoming_phone_number_sid", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "unique_name", ...
45.421053
23.842105
def get_yaml_decorators(rq): """ Returns the yaml decorator metadata only (this is needed by triple pattern fragments) """ # glogger.debug('Guessing decorators for query {}'.format(rq)) if not rq: return None if isinstance(rq, dict) and 'grlc' in rq: # json query (sparql transformer) yaml_string = rq['grlc'] query_string = rq else: # classic query yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')]) query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')]) query_metadata = None if type(yaml_string) == dict: query_metadata = yaml_string elif type(yaml_string) == str: try: # Invalid YAMLs will produce empty metadata query_metadata = yaml.load(yaml_string) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e: try: query_metadata = json.loads(yaml_string) except json.JSONDecodeError: glogger.warning("Query decorators could not be parsed; check your YAML syntax") # If there is no YAML string if query_metadata is None: query_metadata = {} query_metadata['query'] = query_string # glogger.debug("Parsed query decorators: {}".format(query_metadata)) return query_metadata
[ "def", "get_yaml_decorators", "(", "rq", ")", ":", "# glogger.debug('Guessing decorators for query {}'.format(rq))", "if", "not", "rq", ":", "return", "None", "if", "isinstance", "(", "rq", ",", "dict", ")", "and", "'grlc'", "in", "rq", ":", "# json query (sparql tr...
38.057143
22.685714
def open_tablebase(directory: PathLike, *, load_wdl: bool = True, load_dtz: bool = True, max_fds: Optional[int] = 128, VariantBoard: Type[chess.Board] = chess.Board) -> Tablebase: """ Opens a collection of tables for probing. See :class:`~chess.syzygy.Tablebase`. .. note:: Generally probing requires tablebase files for the specific material composition, **as well as** tablebase files with less pieces. This is important because 6-piece and 5-piece files are often distributed seperately, but are both required for 6-piece positions. Use :func:`~chess.syzygy.Tablebase.add_directory()` to load tables from additional directories. """ tables = Tablebase(max_fds=max_fds, VariantBoard=VariantBoard) tables.add_directory(directory, load_wdl=load_wdl, load_dtz=load_dtz) return tables
[ "def", "open_tablebase", "(", "directory", ":", "PathLike", ",", "*", ",", "load_wdl", ":", "bool", "=", "True", ",", "load_dtz", ":", "bool", "=", "True", ",", "max_fds", ":", "Optional", "[", "int", "]", "=", "128", ",", "VariantBoard", ":", "Type", ...
49.882353
29.411765
def register(cls, namespace, name): """Class decorator""" def func(kind): cls._FOREIGN[(namespace, name)] = kind() return kind return func
[ "def", "register", "(", "cls", ",", "namespace", ",", "name", ")", ":", "def", "func", "(", "kind", ")", ":", "cls", ".", "_FOREIGN", "[", "(", "namespace", ",", "name", ")", "]", "=", "kind", "(", ")", "return", "kind", "return", "func" ]
25.857143
16
def is_unitary(self, atol=None, rtol=None): """Return True if QuantumChannel is a unitary channel.""" try: op = self.to_operator() return op.is_unitary(atol=atol, rtol=rtol) except QiskitError: return False
[ "def", "is_unitary", "(", "self", ",", "atol", "=", "None", ",", "rtol", "=", "None", ")", ":", "try", ":", "op", "=", "self", ".", "to_operator", "(", ")", "return", "op", ".", "is_unitary", "(", "atol", "=", "atol", ",", "rtol", "=", "rtol", ")...
37.142857
11.285714
def add_line(psr,f,A,offset=0.5): """ Add a line of frequency `f` [Hz] and amplitude `A` [s], with origin at a fraction `offset` through the dataset. """ t = psr.toas() t0 = offset * (N.max(t) - N.min(t)) sine = A * N.cos(2 * math.pi * f * day * (t - t0)) psr.stoas[:] += sine / day
[ "def", "add_line", "(", "psr", ",", "f", ",", "A", ",", "offset", "=", "0.5", ")", ":", "t", "=", "psr", ".", "toas", "(", ")", "t0", "=", "offset", "*", "(", "N", ".", "max", "(", "t", ")", "-", "N", ".", "min", "(", "t", ")", ")", "si...
28.181818
15.272727
def parse_sgf_to_examples(sgf_path): """Return supervised examples from positions NOTE: last move is not played because no p.next_move after. """ return zip(*[(p.position, p.next_move, p.result) for p in sgf_wrapper.replay_sgf_file(sgf_path)])
[ "def", "parse_sgf_to_examples", "(", "sgf_path", ")", ":", "return", "zip", "(", "*", "[", "(", "p", ".", "position", ",", "p", ".", "next_move", ",", "p", ".", "result", ")", "for", "p", "in", "sgf_wrapper", ".", "replay_sgf_file", "(", "sgf_path", ")...
33.875
18
def release_address(self, address): """ Release a previously allocated address returned by C{allocate_address}. @return: C{True} if the operation succeeded. """ query = self.query_factory( action="ReleaseAddress", creds=self.creds, endpoint=self.endpoint, other_params={"PublicIp": address}) d = query.submit() return d.addCallback(self.parser.truth_return)
[ "def", "release_address", "(", "self", ",", "address", ")", ":", "query", "=", "self", ".", "query_factory", "(", "action", "=", "\"ReleaseAddress\"", ",", "creds", "=", "self", ".", "creds", ",", "endpoint", "=", "self", ".", "endpoint", ",", "other_param...
38.909091
15.818182
def _initstr(modname, imports, from_imports, inject_execstr, withheader=True): """ Calls the other string makers """ header = _make_module_header() if withheader else '' import_str = _make_imports_str(imports, modname) fromimport_str = _make_fromimport_str(from_imports, modname) initstr = '\n'.join([str_ for str_ in [ header, import_str, fromimport_str, inject_execstr, ] if len(str_) > 0]) return initstr
[ "def", "_initstr", "(", "modname", ",", "imports", ",", "from_imports", ",", "inject_execstr", ",", "withheader", "=", "True", ")", ":", "header", "=", "_make_module_header", "(", ")", "if", "withheader", "else", "''", "import_str", "=", "_make_imports_str", "...
39
18.583333
def close(self): """close() Disconnects the object from the bus. """ os.close(self._fd) self._fd = -1 self._addr = -1 self._pec = 0
[ "def", "close", "(", "self", ")", ":", "os", ".", "close", "(", "self", ".", "_fd", ")", "self", ".", "_fd", "=", "-", "1", "self", ".", "_addr", "=", "-", "1", "self", ".", "_pec", "=", "0" ]
20
15.222222
def name(self): """ Similar to :attr:`complete`, but return the whole word, for example:: subrout would return `subroutine`. """ if hasattr(self._element, "name"): return self._element.name else: return str(self._element)
[ "def", "name", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "_element", ",", "\"name\"", ")", ":", "return", "self", ".", "_element", ".", "name", "else", ":", "return", "str", "(", "self", ".", "_element", ")" ]
23.307692
16.692308
def load_reconstruction(folder, slice_start=0, slice_end=-1): """Load a volume from folder, also returns the corresponding partition. Parameters ---------- folder : str Path to the folder where the DICOM files are stored. slice_start : int Index of the first slice to use. Used for subsampling. slice_end : int Index of the final slice to use. Returns ------- partition : `odl.RectPartition` Partition describing the geometric positioning of the voxels. data : `numpy.ndarray` Volumetric data. Scaled such that data = 1 for water (0 HU). Notes ----- DICOM data is highly non trivial. Typically, each slice has been computed with a slice tickness (e.g. 3mm) but the slice spacing might be different from that. Further, the coordinates in DICOM is typically the *middle* of the pixel, not the corners as in ODL. This function should handle all of these peculiarities and give a volume with the correct coordinate system attached. """ file_names = sorted([f for f in os.listdir(folder) if f.endswith(".IMA")]) if len(file_names) == 0: raise ValueError('No DICOM files found in {}'.format(folder)) volumes = [] datasets = [] file_names = file_names[slice_start:slice_end] for file_name in tqdm.tqdm(file_names, 'loading volume data'): # read the file dataset = dicom.read_file(folder + '/' + file_name) # Get parameters pixel_size = np.array(dataset.PixelSpacing) pixel_thickness = float(dataset.SliceThickness) rows = dataset.Rows cols = dataset.Columns # Get data array and convert to correct coordinates data_array = np.array(np.frombuffer(dataset.PixelData, 'H'), dtype='float32') data_array = data_array.reshape([cols, rows], order='C') data_array = np.rot90(data_array, -1) # Convert from storage type to densities # TODO: Optimize these computations hu_values = (dataset.RescaleSlope * data_array + dataset.RescaleIntercept) densities = (hu_values + 1000) / 1000 # Store results volumes.append(densities) datasets.append(dataset) voxel_size = np.array(list(pixel_size) + [pixel_thickness]) shape 
= np.array([rows, cols, len(volumes)]) # Compute geometry parameters mid_pt = (np.array(dataset.ReconstructionTargetCenterPatient) - np.array(dataset.DataCollectionCenterPatient)) reconstruction_size = (voxel_size * shape) min_pt = mid_pt - reconstruction_size / 2 max_pt = mid_pt + reconstruction_size / 2 # axis 1 has reversed convention min_pt[1], max_pt[1] = -max_pt[1], -min_pt[1] if len(datasets) > 1: slice_distance = np.abs( float(datasets[1].DataCollectionCenterPatient[2]) - float(datasets[0].DataCollectionCenterPatient[2])) else: # If we only have one slice, we must approximate the distance. slice_distance = pixel_thickness # The middle of the minimum/maximum slice can be computed from the # DICOM attribute "DataCollectionCenterPatient". Since ODL uses corner # points (e.g. edge of volume) we need to add half a voxel thickness to # both sides. min_pt[2] = -np.array(datasets[0].DataCollectionCenterPatient)[2] min_pt[2] -= 0.5 * slice_distance max_pt[2] = -np.array(datasets[-1].DataCollectionCenterPatient)[2] max_pt[2] += 0.5 * slice_distance partition = odl.uniform_partition(min_pt, max_pt, shape) volume = np.transpose(np.array(volumes), (1, 2, 0)) return partition, volume
[ "def", "load_reconstruction", "(", "folder", ",", "slice_start", "=", "0", ",", "slice_end", "=", "-", "1", ")", ":", "file_names", "=", "sorted", "(", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "folder", ")", "if", "f", ".", "endswith", ...
35.411765
22.333333
def list_receivers(self): """ Prints a list of all registered receivers. Including signal, plugin name and description. """ print("Receiver list") print("*************\n") for key, receiver in self.app.signals.receivers.items(): print("%s <-- %s (%s):\n %s\n" % (receiver.name, receiver.signal, receiver.plugin.name, receiver.description))
[ "def", "list_receivers", "(", "self", ")", ":", "print", "(", "\"Receiver list\"", ")", "print", "(", "\"*************\\n\"", ")", "for", "key", ",", "receiver", "in", "self", ".", "app", ".", "signals", ".", "receivers", ".", "items", "(", ")", ":", "pr...
48.272727
19.545455
def _negf(ins): ''' Changes sign of top of the stack (48 bits) ''' output = _float_oper(ins.quad[2]) output.append('call __NEGF') output.extend(_fpush()) REQUIRES.add('negf.asm') return output
[ "def", "_negf", "(", "ins", ")", ":", "output", "=", "_float_oper", "(", "ins", ".", "quad", "[", "2", "]", ")", "output", ".", "append", "(", "'call __NEGF'", ")", "output", ".", "extend", "(", "_fpush", "(", ")", ")", "REQUIRES", ".", "add", "(",...
26.625
15.875
def _check_seismogenic_depths(self, upper_depth, lower_depth): ''' Checks the seismic depths for physical consistency :param float upper_depth: Upper seismogenic depth (km) :param float lower_depth: Lower seismogenic depth (km) ''' # Simple check on depths if upper_depth: if upper_depth < 0.: raise ValueError('Upper seismogenic depth must be greater than' ' or equal to 0.0!') else: self.upper_depth = upper_depth else: self.upper_depth = 0.0 if not lower_depth: raise ValueError('Lower seismogenic depth must be defined for ' 'simple fault source!') if lower_depth < self.upper_depth: raise ValueError('Lower seismogenic depth must take a greater' ' value than upper seismogenic depth') self.lower_depth = lower_depth
[ "def", "_check_seismogenic_depths", "(", "self", ",", "upper_depth", ",", "lower_depth", ")", ":", "# Simple check on depths", "if", "upper_depth", ":", "if", "upper_depth", "<", "0.", ":", "raise", "ValueError", "(", "'Upper seismogenic depth must be greater than'", "'...
36.740741
18.666667
def get_cached_colour(element, orbital, colours=None, cache=None):
    """Get a colour for a particular elemental and orbital combination.

    If the element is not specified in the colours dictionary, the cache
    is checked. If this element-orbital combination has not been cached
    before, a new colour is drawn from the current matplotlib colour cycle
    and cached.

    The default cache is sumo.plotting.colour_cache. To reset this cache,
    use ``sumo.plotting.colour_cache.clear()``.

    Args:
        element (:obj:`str`): The element.
        orbital (:obj:`str`): The orbital.
        colours (:obj:`dict`, optional): Use custom colours for specific
            element and orbital combinations. Specified as a :obj:`dict` of
            :obj:`dict` of the colours. For example::

                {
                    'Sn': {'s': 'r', 'p': 'b'},
                    'O': {'s': '#000000'}
                }

            The colour can be a hex code, series of rgb value, or any other
            format supported by matplotlib.
        cache (:obj:`dict`, optional): Cache of colour values already
            assigned. The format is the same as the custom colours dict. If
            None, the module-level cache ``sumo.plotting.colour_cache`` is
            used.

    Returns:
        tuple: (colour, cache)

    Raises:
        TypeError: If ``colours`` is not a dict, ConfigParser or None.
    """
    if cache is None:
        cache = colour_cache

    def _get_colour_with_cache(element, orbital, cache, colour_series):
        """Return cached colour if available, or fetch and cache from cycle"""
        from itertools import chain
        if element in cache and orbital in cache[element]:
            return cache[element][orbital], cache
        else:
            # Iterate through colours to find one which is unused
            for colour in colour_series:
                # Iterate through cache to check if colour already used
                if colour not in chain(*[[col for _, col in orb.items()]
                                         for _, orb in cache.items()]):
                    break
            else:
                raise Exception('Not enough colours available for orbitals! '
                                'Try a different theme.')
            if element not in cache:
                cache[element] = {}
            cache[element].update({orbital: colour})
            return colour, cache

    colour_series = matplotlib.rcParams['axes.prop_cycle'].by_key()['color']

    if isinstance(colours, configparser.ConfigParser):
        try:
            return colours.get(element, orbital), cache
        except(configparser.NoSectionError, configparser.NoOptionError):
            return _get_colour_with_cache(element, orbital, cache,
                                          colour_series)
    elif isinstance(colours, dict):
        try:
            # BUGFIX: previously returned only the colour here, while every
            # other branch (and the documented contract) returns a
            # (colour, cache) tuple.
            return colours[element][orbital], cache
        except KeyError:
            return _get_colour_with_cache(element, orbital, cache,
                                          colour_series)
    elif colours is None:
        return _get_colour_with_cache(element, orbital, cache, colour_series)
    else:
        raise TypeError('Argument "colours" should be dict, '
                        'ConfigParser or None.')
[ "def", "get_cached_colour", "(", "element", ",", "orbital", ",", "colours", "=", "None", ",", "cache", "=", "None", ")", ":", "if", "cache", "is", "None", ":", "cache", "=", "colour_cache", "def", "_get_colour_with_cache", "(", "element", ",", "orbital", "...
39.936709
23.253165