code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def setPhysicalMinimum(self, edfsignal, physical_minimum):
    """Set the physical minimum of signal `edfsignal`.

    Parameters
    ----------
    edfsignal : int
        Zero-based signal (channel) index.
    physical_minimum : float
        The physical minimum to store for the signal.

    Raises
    ------
    ChannelDoesNotExist
        If `edfsignal` is not a valid channel index.
    """
    # The original check used `> self.n_channels`, which let the
    # out-of-range index == n_channels through to an IndexError below.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['physical_min'] = physical_minimum
    self.update_header()
Sets the physical_minimum of signal edfsignal. Parameters ---------- edfsignal: int signal number physical_minimum: float Sets the physical minimum Notes ----- This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
def scan(self):
    """Spawn one daemon thread per registered scan function."""
    self.logger.info(
        '{0} registered scan functions, starting {0} threads '
        'to scan candidate proxy lists...'.format(len(self.scan_funcs)))
    for func, kwargs in zip(self.scan_funcs, self.scan_kwargs):
        worker = threading.Thread(name=func.__name__, target=func, kwargs=kwargs)
        worker.daemon = True
        self.scan_threads.append(worker)
        worker.start()
Start a thread for each registered scan function to scan proxy lists
def _remove_trailing_new_line(l):
    """Strip a single trailing newline sequence from `l`, if present.

    Returns:
        bytestring
    """
    # Try the longest newline sequences first so e.g. "\r\n" wins over "\n".
    for candidate in sorted(new_lines_bytes, key=len, reverse=True):
        if l.endswith(candidate):
            return l[:-len(candidate)]
    return l
Remove a single instance of new line at the end of l if it exists. Returns: bytestring
def get_keyid(keyname):
    """Return the ID of the named SSH keypair.

    :param keyname: name of the keypair to look up
    :return: the keypair id, or None when `keyname` is falsy
    :raises SaltCloudNotFound: when no matching keypair with an id exists
    """
    if not keyname:
        return None
    keypairs = list_keypairs(call='function')
    # Use .get() so an unknown key name raises SaltCloudNotFound below
    # instead of leaking an unhandled KeyError to the caller.
    keyid = keypairs.get(keyname, {}).get('id')
    if keyid:
        return keyid
    raise SaltCloudNotFound('The specified ssh key could not be found.')
Return the ID of the keyname
def get_app_state(app_id):
    """Return the state of the app with id `app_id`, or None when not found.

    :raises RuntimeError: wrapping any database error.
    """
    try:
        conn = get_conn()
        c = conn.cursor()
        # SECURITY(review): app_id is interpolated directly into the SQL
        # string, which allows SQL injection if app_id is untrusted --
        # switch to a parameterized query for this DB driver.
        c.execute("SELECT state FROM app WHERE id='{0}' ".format(app_id))
        result = c.fetchone()
        conn.close()
        if result:
            return result[0]
        return None
    # Python 3 syntax; the original used the Python 2 `except Exception,e`.
    except Exception as e:
        raise RuntimeError('get app state failed! %s' % e)
get app state
def path(self, args, kw):
    """Build the URL path fragment for this route.

    :raises InvalidArgumentError: when parameters remain after the
        template's own parameters have been consumed.
    """
    params = self._pop_params(args, kw)
    leftovers = args or kw
    if leftovers:
        raise InvalidArgumentError(
            "Extra parameters (%s, %s) when building path for %s"
            % (args, kw, self.template))
    return self.build_url(**params)
Builds the URL path fragment for this route.
def remove(self, fieldspec):
    """Remove fields or subfields matching `fieldspec` (e.g. "245" or "245.a").

    If removing a subfield from a non-control field leaves no other
    subfields, the field is deleted entirely.
    """
    # NOTE(review): the field/subfield separator '.' is unescaped in the
    # pattern, so it matches any character -- confirm this is intended.
    pattern = r'(?P<field>[^.]+)(.(?P<subfield>[^.]+))?'
    match = re.match(pattern, fieldspec)
    if not match:
        return None
    grp = match.groupdict()
    for field in self.get_fields(grp['field']):
        if grp['subfield']:
            updated = []
            # Walk (code, value) pairs, keeping all except the target code.
            for code, value in pairwise(field.subfields):
                if not code == grp['subfield']:
                    updated += [code, value]
            if not updated:
                # No subfields left: drop the whole field.
                self.remove_field(field)
            else:
                field.subfields = updated
        else:
            # Whole-field removal.
            self.remove_field(field)
Removes fields or subfields according to `fieldspec`. If a non-control field subfield removal leaves no other subfields, delete the field entirely.
def create(self, **kwargs):
    """Create a metric.

    A `tenant_id` keyword is moved into the query string; the payload is
    taken from `jsonbody` when present, otherwise from the remaining kwargs.
    """
    url_str = self.base_url
    if 'tenant_id' in kwargs:
        url_str = '%s?tenant_id=%s' % (url_str, kwargs.pop('tenant_id'))
    data = kwargs.get('jsonbody', kwargs)
    return self.client.create(url=url_str, json=data)
Create a metric.
def get_clean_interp_index(arr, dim, use_coordinate=True, **kwargs):
    """Return the x values (float64) to use for interpolation along `dim`.

    If `use_coordinate` is True, the coordinate sharing the dimension's
    name is used; if it is a string, that named coordinate is used; if
    False, an equally spaced sequence is returned.
    """
    if use_coordinate:
        if use_coordinate is True:
            index = arr.get_index(dim)
        else:
            index = arr.coords[use_coordinate]
            if index.ndim != 1:
                raise ValueError(
                    'Coordinates used for interpolation must be 1D, '
                    '%s is %dD.' % (use_coordinate, index.ndim))
        try:
            index = index.values.astype(np.float64)
        except (TypeError, ValueError):
            # Fixed message: the two fragments previously joined without a
            # space, producing "supportinterpolation".
            raise TypeError('Index must be castable to float64 to support '
                            'interpolation, got: %s' % type(index))
        # Fixed typo: "monotonicly" -> "monotonically".
        if not (np.diff(index) > 0).all():
            raise ValueError("Index must be monotonically increasing")
    else:
        axis = arr.get_axis_num(dim)
        index = np.arange(arr.shape[axis], dtype=np.float64)
    return index
get index to use for x values in interpolation. If use_coordinate is True, the coordinate that shares the name of the dimension along which interpolation is being performed will be used as the x values. If use_coordinate is False, the x values are set as an equally spaced sequence.
def gets(self):
    """Read one line from stdin, without the trailing newline.

    :raises EOFError: at end of input.
    :return: string
    """
    line = self.stdin.readline()
    if line == '':
        raise EOFError
    return line.rstrip('\n')
Read line from stdin. The trailing newline will be omitted. :return: string:
def follow(the_file):
    """Yield new lines appended to `the_file`, like ``tail -f``.

    Seeks to the end of the file first, so only content written after the
    generator starts is reported; polls with a short sleep when idle.
    """
    with open(the_file) as handle:
        handle.seek(0, 2)  # jump to EOF
        while True:
            new_line = handle.readline()
            if not new_line:
                time.sleep(0.1)
                continue
            yield new_line
Follow a given file and yield new lines when they are available, like `tail -f`.
def Reset(self):
    """Reset the lexer so a fresh data feed can be processed."""
    self.state = "INITIAL"
    self.state_stack = []
    self.buffer = ""
    self.processed_buffer = ""
    self.processed = 0
    self.error = 0
    self.verbose = 0
Reset the lexer to process a new data feed.
def read_tsv(cls, file_path: str, gene_table: ExpGeneTable = None,
             encoding: str = 'UTF-8', sep: str = '\t'):
    """Read an expression matrix from a tab-delimited text file.

    Parameters
    ----------
    file_path : str
        The path of the text file.
    gene_table : ExpGeneTable, optional
        If given, the matrix is filtered to these genes. (None)
    encoding : str, optional
        The file encoding. ("UTF-8")
    sep : str, optional
        The separator. ("\t")

    Returns
    -------
    ExpMatrix
        The expression matrix.
    """
    # Parse the full matrix: first column is the index, first row the header.
    matrix = cls(pd.read_csv(file_path, sep=sep, index_col=0, header=0,
                             encoding=encoding))
    # Re-read only the first column with na_filter=False so gene names like
    # "NA"/"NaN" stay strings instead of becoming missing values.
    ind = pd.read_csv(file_path, sep=sep, usecols=[0, ], header=None,
                      skiprows=1, encoding=encoding, na_filter=False)
    matrix.index = ind.iloc[:, 0]
    matrix.index.name = 'Genes'
    if gene_table is not None:
        matrix = matrix.filter_genes(gene_table.gene_names)
    return matrix
Read expression matrix from a tab-delimited text file. Parameters ---------- file_path: str The path of the text file. gene_table: `ExpGeneTable` object, optional The set of valid genes. If given, the genes in the text file will be filtered against this set of genes. (None) encoding: str, optional The file encoding. ("UTF-8") sep: str, optional The separator. ("\t") Returns ------- `ExpMatrix` The expression matrix.
def profile_delete(self):
    """Delete an existing profile from its backing JSON file.

    Rewrites the file without any entry matching
    ``self.args.profile_name`` and removes the file entirely when no
    profiles remain.
    """
    self.validate_profile_exists()
    profile_data = self.profiles.get(self.args.profile_name)
    fqfn = profile_data.get('fqfn')  # path of the profile store file
    with open(fqfn, 'r+') as fh:
        data = json.load(fh)
        # Build a new list instead of removing during iteration: the
        # original `data.remove(profile)` inside `for profile in data`
        # skipped the element following each removal.
        data = [p for p in data
                if p.get('profile_name') != self.args.profile_name]
        fh.seek(0)
        fh.write(json.dumps(data, indent=2, sort_keys=True))
        fh.truncate()
    if not data:
        os.remove(fqfn)
Delete an existing profile.
def hash_function(self):
    """Return the hash function proper as a closure.

    The closure captures only f1, f2 and g -- never `self`.
    """
    assert hasattr(self, 'f1') and hasattr(self, 'f2')
    f1, f2, g = self.f1, self.f2, self.g

    def czech_hash(word):
        return g[f1(word)] + g[f2(word)]

    return czech_hash
Returns the hash function proper. Ensures that `self` is not bound to the returned closure.
def unescape(self):
    """Undo the HTML escaping previously applied to ``self.obj``."""
    for raw_char in self._html_escape_table:
        escaped = self._html_escape_table[raw_char]
        self.obj = self.obj.replace(escaped, raw_char)
    return self._wrap(self.obj)
Within an interpolation, evaluation, or escaping, remove HTML escaping that had been previously added.
def call(self, method_path, **kwargs):
    """Invoke the API method named by ``Interface.Method``.

    :param method_path: e.g. ``ISteamWebAPIUtil.GetServerInfo``
    :param kwargs: keyword arguments for the method
    :return: the method's response
    """
    interface_name, method_name = method_path.split('.', 1)
    interface = getattr(self, interface_name)
    method = getattr(interface, method_name)
    return method(**kwargs)
Make an API call for specific method :param method_path: format ``Interface.Method`` (e.g. ``ISteamWebAPIUtil.GetServerInfo``) :type method_path: :class:`str` :param kwargs: keyword arguments for the specific method :return: response :rtype: :class:`dict`, :class:`lxml.etree.Element` or :class:`str`
def parse_from_dict(json_dict):
    """Parse a Unified Uploader history message into a MarketHistoryList.

    :param dict json_dict: a Unified Uploader message as a dict.
    :rtype: MarketHistoryList
    :returns: the history entries contained in the message.
    """
    history_columns = json_dict['columns']
    history_list = MarketHistoryList(
        upload_keys=json_dict['uploadKeys'],
        history_generator=json_dict['generator'],
    )
    for rowset in json_dict['rowsets']:
        # Each rowset covers one (region, type) pair.
        generated_at = parse_datetime(rowset['generatedAt'])
        region_id = rowset['regionID']
        type_id = rowset['typeID']
        # Record the region/type even when it has no rows.
        history_list.set_empty_region(region_id, type_id, generated_at)
        for row in rowset['rows']:
            # Map raw row columns to MarketHistoryEntry kwargs.
            history_kwargs = _columns_to_kwargs(
                SPEC_TO_KWARG_CONVERSION, history_columns, row)
            historical_date = parse_datetime(history_kwargs['historical_date'])
            history_kwargs.update({
                'type_id': type_id,
                'region_id': region_id,
                'historical_date': historical_date,
                'generated_at': generated_at,
            })
            history_list.add_entry(MarketHistoryEntry(**history_kwargs))
    return history_list
Given a Unified Uploader message, parse the contents and return a MarketHistoryList instance. :param dict json_dict: A Unified Uploader message as a dict. :rtype: MarketHistoryList :returns: An instance of MarketHistoryList, containing the history entries within.
def _init_go2res(**kws): if 'goea_results' in kws: return {res.GO:res for res in kws['goea_results']} if 'go2nt' in kws: return kws['go2nt']
Initialize GOEA results.
def parse(file_path):
    """Parse a YAML or JSON file and return the loaded data.

    :raises ValueError: for unrecognized file extensions.
    """
    _, extension = path.splitext(file_path)
    if extension in ('.yaml', '.yml'):
        loader = yaml.load
    elif extension == '.json':
        loader = json.load
    else:
        raise ValueError("Unrecognized config file type %s" % extension)
    with open(file_path, 'r') as handle:
        return loader(handle)
Parse a YAML or JSON file.
def _normalize_file_paths(self, *args): paths = [] for arg in args: if arg is None: continue elif self._is_valid_file(arg): paths.append(arg) elif isinstance(arg, list) and all(self._is_valid_file(_) for _ in arg): paths = paths + arg elif not self.ignore_errors: raise TypeError('Config file paths must be string path or list of paths!') return paths
Returns all given configuration file paths as one list.
def get_shell_folder (name):
    """Return the Windows Shell Folder location `name` from the registry.

    Works on both Python 2 (`_winreg`) and Python 3 (`winreg`).
    """
    try:
        import _winreg as winreg
    except ImportError:
        import winreg
    lm = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
    try:
        key = winreg.OpenKey(lm,
            r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
        try:
            # QueryValueEx returns (value, type); only the value is wanted.
            return winreg.QueryValueEx(key, name)[0]
        finally:
            key.Close()
    finally:
        lm.Close()
Get Windows Shell Folder locations from the registry.
def run(self, cmd):
    """Execute the salt command described by the dict `cmd`.

    A 'fun' prefixed with 'wheel.' or 'runner.' selects that master
    client; otherwise the local minion client is used. 'mode' ('sync' or
    'async', default 'async') selects the executor method, e.g.
    ``runner_sync``.

    :raises EauthAuthenticationError: when neither a token nor the full
        (username, password, eauth) credential set is supplied.
    """
    cmd = dict(cmd)  # don't mutate the caller's dict
    mode = cmd.get('mode', 'async')
    funparts = cmd.get('fun', '').split('.')
    client = 'minion'
    if len(funparts) > 2 and funparts[0] in ['wheel', 'runner']:
        client = funparts[0]
        cmd['fun'] = '.'.join(funparts[1:])
    has_token = 'token' in cmd
    has_eauth = 'eauth' in cmd and 'password' in cmd and 'username' in cmd
    if not (has_token or has_eauth):
        raise EauthAuthenticationError('No authentication credentials given')
    executor = getattr(self, '{0}_{1}'.format(client, mode))
    return executor(**cmd)
Execute the salt command given by cmd dict. cmd is a dictionary of the following form: { 'mode': 'modestring', 'fun' : 'modulefunctionstring', 'kwarg': functionkeywordargdictionary, 'tgt' : 'targetpatternstring', 'tgt_type' : 'targetpatterntype', 'ret' : 'returner namestring', 'timeout': 'functiontimeout', 'arg' : 'functionpositionalarg sequence', 'token': 'salttokenstring', 'username': 'usernamestring', 'password': 'passwordstring', 'eauth': 'eauthtypestring', } Implied by the fun is which client is used to run the command, that is, either the master local minion client, the master runner client, or the master wheel client. The cmd dict items are as follows: mode: either 'sync' or 'async'. Defaults to 'async' if missing fun: required. If the function is to be run on the master using either a wheel or runner client then the fun: includes either 'wheel.' or 'runner.' as a prefix and has three parts separated by '.'. Otherwise the fun: specifies a module to be run on a minion via the local minion client. Example: fun of 'wheel.config.values' run with master wheel client fun of 'runner.manage.status' run with master runner client fun of 'test.ping' run with local minion client fun of 'wheel.foobar' run with local minion client not wheel kwarg: A dictionary of keyword function parameters to be passed to the eventual salt function specified by fun: tgt: Pattern string specifying the targeted minions when the implied client is local tgt_type: Optional target pattern type string when client is local minion. Defaults to 'glob' if missing ret: Optional name string of returner when local minion client. arg: Optional positional argument string when local minion client token: the salt token. Either token: is required or the set of username:, password: , and eauth: username: the salt username. Required if token is missing. password: the user's password. Required if token is missing. eauth: the authentication type such as 'pam' or 'ldap'. 
Required if token is missing
def get_resources(connection):
    """Issue an RTSP DESCRIBE and return the control resources it lists."""
    lines = connection.describe(verbose=False).split('\r\n')
    resources = []
    for line in lines:
        # Keep 'a=control:' entries, skipping the aggregate '*' resource.
        if line.find('control:') != -1 and line[-1] != '*':
            resources.append(line.replace('a=control:', ''))
    return resources
Do an RTSP-DESCRIBE request, then parse out available resources from the response
def bash_rule(bash, hostnames):
    """Cluster rule processing bash and hostname facts.

    `bash` and `hostnames` are per-host fact DataFrames; receiving a plain
    dict means a single-host archive was supplied instead of a cluster one.
    """
    if isinstance(bash, dict):
        return make_fail('bash_rule',
                         error_message="Run this rule with a cluster archive")
    return make_pass('bash_rule', bash=bash, hostname=hostnames)
Cluster rule to process bash and hostname info ``bash`` and ``hostnames`` are Pandas DataFrames for the facts collected for each host in the cluster. See https://pandas.pydata.org/pandas-docs/stable/api.html#dataframe for information on available attributes and methods. Arguments: bash (pandas.DataFrame): Includes facts from ``bash_version`` fact with columns "name" and "version" and one row per host in the cluster. hostnames (pandas.DataFrame): Includes facts from ``get_hostname`` fact with column "hostname" and one row per host in the cluster.
def run_example(example_name, environ):
    """Run an example module from zipline.examples and return its results.

    :param example_name: key into EXAMPLE_MODULES naming the example.
    :param environ: environment mapping forwarded to run_algorithm.
    """
    mod = EXAMPLE_MODULES[example_name]
    # The examples reference a YAHOO calendar; alias it to NYSE for the run.
    register_calendar("YAHOO", get_calendar("NYSE"), force=True)
    return run_algorithm(
        # Each hook is optional on the example module.
        initialize=getattr(mod, 'initialize', None),
        handle_data=getattr(mod, 'handle_data', None),
        before_trading_start=getattr(mod, 'before_trading_start', None),
        analyze=getattr(mod, 'analyze', None),
        bundle='test',
        environ=environ,
        # Example-specific overrides win over the default capital base.
        **merge({'capital_base': 1e7}, mod._test_args())
    )
Run an example module from zipline.examples.
def dafopw(fname):
    """Open a DAF for subsequent write requests.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafopw_c.html

    :param fname: Name of DAF to be opened.
    :type fname: str
    :return: Handle assigned to DAF.
    :rtype: int
    """
    fname_p = stypes.stringToCharP(fname)
    handle = ctypes.c_int()
    # The CSPICE call fills `handle` through the out-parameter.
    libspice.dafopw_c(fname_p, ctypes.byref(handle))
    return handle.value
Open a DAF for subsequent write requests. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafopw_c.html :param fname: Name of DAF to be opened. :type fname: str :return: Handle assigned to DAF. :rtype: int
def set_tags(self, md5, tags):
    """Store the deduplicated tag list for the sample identified by `md5`.

    A single string is accepted and treated as a one-element list.
    """
    if isinstance(tags, str):
        tags = [tags]
    unique_tags = list(set(tags))
    self.data_store.store_work_results({'tags': unique_tags}, 'tags', md5)
Set the tags for this sample
def get_mentions(self, docs=None, sort=False):
    """Return a list of lists of the Mentions for this extractor.

    One inner list per mention class, each ordered by id (or by stable_id
    when `sort` is True).

    :param docs: if provided (a document or list of documents), return only
        Mentions from these documents; else return all Mentions.
    :param sort: if True, sort each inner list by stable_id.
    :type sort: bool
    :return: Mentions for each mention_class.
    :rtype: List of lists.
    """
    result = []
    if docs:
        # Accept a single document or a sequence of documents.
        docs = docs if isinstance(docs, (list, tuple)) else [docs]
        for mention_class in self.mention_classes:
            mentions = (
                self.session.query(mention_class)
                .filter(mention_class.document_id.in_([doc.id for doc in docs]))
                .order_by(mention_class.id)
                .all()
            )
            if sort:
                # NOTE(review): keys on x[0].get_stable_id(), i.e. the first
                # element of each mention -- confirm mentions are indexable.
                mentions = sorted(mentions, key=lambda x: x[0].get_stable_id())
            result.append(mentions)
    else:
        for mention_class in self.mention_classes:
            mentions = (
                self.session.query(mention_class).order_by(mention_class.id).all()
            )
            if sort:
                mentions = sorted(mentions, key=lambda x: x[0].get_stable_id())
            result.append(mentions)
    return result
Return a list of lists of the mentions associated with this extractor. Each list of the return will contain the Mentions for one of the mention classes associated with the MentionExtractor. :param docs: If provided, return Mentions from these documents. Else, return all Mentions. :param sort: If sort is True, then return all Mentions sorted by stable_id. :type sort: bool :return: Mentions for each mention_class. :rtype: List of lists.
def items(self, *args, **kwargs):
    """Return the queryset produced by applying the stream to the object."""
    stream = self.get_stream()
    target = self.get_object(*args, **kwargs)
    return stream(target)
Returns a queryset of Actions to use based on the stream method and object.
def load_template(path_or_buffer):
    """Build tabula-py options from a Tabula app template file.

    Args:
        path_or_buffer: file path or file-like object of a Tabula app
            template (JSON).

    Returns:
        list of dict: one tabula-py option dict per
        (page, extraction_method) group; groups with several areas are
        merged into a single option with ``multiple_tables=True``.
    """
    from itertools import groupby
    from operator import itemgetter
    path_or_buffer = _stringify_path(path_or_buffer)
    if is_file_like(path_or_buffer):
        templates = json.load(path_or_buffer)
    else:
        with open(path_or_buffer, 'r') as f:
            templates = json.load(f)
    options = []
    # Group template entries targeting the same page with the same
    # extraction method (groupby needs the pre-sort on the same key).
    grouper = itemgetter('page', 'extraction_method')
    for key, grp in groupby(sorted(templates, key=grouper), grouper):
        tmp_options = [_convert_template_option(e) for e in grp]
        if len(tmp_options) == 1:
            options.append(tmp_options[0])
            continue
        # Multiple entries: merge their areas into a single option.
        option = tmp_options[0]
        areas = [e.get('area') for e in tmp_options]
        option['area'] = areas
        option['multiple_tables'] = True
        options.append(option)
    return options
Build tabula-py options from a template file Args: path_or_buffer: File path or file-like object of a Tabula app template Returns: `obj`:dict: tabula-py options
def create_datapoint(value, timestamp=None, **tags):
    """Create a single datapoint dict from a value, timestamp and tags.

    :param value: value of the datapoint (type depends on the metric type)
    :param timestamp: optional timestamp in milliseconds; the client's
        current time is used when omitted; ``datetime`` instances are
        converted automatically
    :param tags: optional datapoint tags
    """
    if timestamp is None:
        timestamp = time_millis()
    if type(timestamp) is datetime:
        timestamp = datetime_to_time_millis(timestamp)
    item = {'timestamp': timestamp, 'value': value}
    if tags is not None:
        item['tags'] = tags
    return item
Creates a single datapoint dict with a value, timestamp and tags. :param value: Value of the datapoint. Type depends on the id's MetricType :param timestamp: Optional timestamp of the datapoint. Uses client current time if not set. Millisecond accuracy. Can be datetime instance also. :param tags: Optional datapoint tags. Not to be confused with metric definition tags
def on_button_release(self, event):
    """Select the items inside the rubber-band rectangle on mouse release.

    The new selection replaces the previous one; returns True to mark the
    event as handled.
    """
    self.queue_draw(self.view)
    x0, y0, x1, y1 = self.x0, self.y0, self.x1, self.y1
    # Normalize to (x, y, width, height) regardless of drag direction.
    rectangle = (min(x0, x1), min(y0, y1), abs(x1 - x0), abs(y1 - y0))
    # intersect=False: only items fully inside the rectangle are selected.
    selected_items = self.view.get_items_in_rectangle(rectangle, intersect=False)
    self.view.handle_new_selection(selected_items)
    return True
Select or deselect rubber-banded groups of items. The new selection replaces the previous one; items are never selected and deselected at the same time.
def minus(repo_list_a, repo_list_b):
    """Return repositories in `repo_list_a` that are not in `repo_list_b`.

    Membership is decided by ``full_name`` because set() on these
    repository objects is unreliable; duplicates within `repo_list_a`
    are also dropped.

    :param repo_list_a: list of repositories.
    :param repo_list_b: list of repositories.
    """
    seen = {repo.full_name for repo in repo_list_b}
    difference = []
    for repo in repo_list_a:
        if repo.full_name not in seen:
            seen.add(repo.full_name)
            difference.append(repo)
    return difference
Method to create a list of repositories such that the repository belongs to repo list a but not repo list b. In an ideal scenario we should be able to do this by set(a) - set(b) but as GithubRepositories have shown that set() on them is not reliable resort to this until it is all sorted out. :param repo_list_a: List of repositories. :param repo_list_b: List of repositories.
def combine_and_save(add_path_list, out_path):
    """Combine whatever datasets can be combined and save the result.

    Datasets that fail to merge are reported and skipped.

    :param add_path_list: iterable of dataset paths; the first seeds the result
    :param out_path: where the combined dataset is saved
    """
    add_path_list = list(add_path_list)
    first_ds_path = add_path_list[0]
    print('Starting with {}'.format(first_ds_path))
    combined = MLDataset(first_ds_path)
    for ds_path in add_path_list[1:]:
        try:
            combined = combined + MLDataset(ds_path)
        # `except Exception` instead of a bare `except:`, which would also
        # swallow KeyboardInterrupt and SystemExit.
        except Exception:
            print(' Failed to add {}'.format(ds_path))
            traceback.print_exc()
        else:
            print('Successfully added {}'.format(ds_path))
    combined.save(out_path)
    return
Combines whatever datasets that can be combined, and save the bigger dataset to a given location.
def _add_logical_operator(self, operator): if not self.c_oper: raise QueryExpressionError("Logical operators must be preceded by an expression") self.current_field = None self.c_oper = None self.l_oper = inspect.currentframe().f_back.f_code.co_name self._query.append(operator) return self
Adds a logical operator in query :param operator: logical operator (str) :raise: - QueryExpressionError: if an expression hasn't been set
def power_status_update(self, POWER_STATUS):
    """Update servo/Vcc voltage warning state from a POWER_STATUS message.

    Announces (via self.say) at most once every 30 seconds when either the
    servo rail or Vcc drops below its configured warning threshold.
    """
    now = time.time()
    # POWER_STATUS reports millivolts; convert to volts.
    Vservo = POWER_STATUS.Vservo * 0.001
    Vcc = POWER_STATUS.Vcc * 0.001
    self.high_servo_voltage = max(self.high_servo_voltage, Vservo)
    # Only warn if the servo rail was ever powered (> 1V) and is now low.
    if self.high_servo_voltage > 1 and Vservo < self.settings.servowarn:
        if now - self.last_servo_warn_time > 30:
            self.last_servo_warn_time = now
            self.say("Servo volt %.1f" % Vservo)
    if Vservo < 1:
        # Rail is effectively unpowered; reset the high-water mark.
        self.high_servo_voltage = Vservo
    if Vcc > 0 and Vcc < self.settings.vccwarn:
        if now - self.last_vcc_warn_time > 30:
            self.last_vcc_warn_time = now
            self.say("Vcc %.1f" % Vcc)
update POWER_STATUS warnings level
def combinePlinkBinaryFiles(prefixes, outPrefix):
    """Combine Plink binary files (Python 2 code).

    Writes ``outPrefix + ".files_to_merge"`` listing the .bed/.bim/.fam of
    every prefix after the first, then runs Plink with ``--merge-list`` to
    merge them into ``prefixes[0]`` and writes the result with `outPrefix`.

    :param prefixes: list of the prefixes of the files to combine.
    :param outPrefix: prefix of the combined output files.
    :type prefixes: list
    :type outPrefix: str
    :raises ProgramError: if the merge-list file cannot be written.
    """
    outputFile = None
    try:
        outputFile = open(outPrefix + ".files_to_merge", "w")
    except IOError:
        # NOTE(review): message says ".filesToMerge" but the file created
        # is ".files_to_merge" -- confirm which name is intended.
        msg = "%(outPrefix)s.filesToMerge: can't write file" % locals()
        raise ProgramError(msg)
    for prefix in prefixes[1:]:
        print >>outputFile, " ".join([prefix + i for i in [".bed", ".bim", ".fam"]])
    outputFile.close()
    plinkCommand = ["plink", "--noweb", "--bfile", prefixes[0],
                    "--merge-list", outPrefix + ".files_to_merge",
                    "--make-bed", "--out", outPrefix]
    runCommand(plinkCommand)
Combine Plink binary files. :param prefixes: a list of the prefix of the files that need to be combined. :param outPrefix: the prefix of the output file (the combined file). :type prefixes: list :type outPrefix: str It uses Plink to merge a list of binary files (which is a list of prefixes (strings)), and create the final data set which as ``outPrefix`` as the prefix.
def add_header(self, name, value):
    """Attach an email header to send with the message.

    :param name: the name of the header value.
    :param value: the header value.
    """
    if self.headers is None:
        self.headers = []
    self.headers.append({'Name': name, 'Value': value})
Attach an email header to send with the message. :param name: The name of the header value. :param value: The header value.
def raise_db_exception(self):
    """Raise an exception built from the last server error message.

    Trailing messages with msgno 3621 ("The statement has been
    terminated") are dropped first so the underlying error is reported.
    """
    if not self.messages:
        raise tds_base.Error("Request failed, server didn't send error message")
    msg = None
    # Peel off trailing 3621 notices until a real error message remains.
    # NOTE(review): assumes at least one non-3621 message exists -- an
    # all-3621 list would raise IndexError here.
    while True:
        msg = self.messages[-1]
        if msg['msgno'] == 3621:
            self.messages = self.messages[:-1]
        else:
            break
    error_msg = ' '.join(m['message'] for m in self.messages)
    ex = _create_exception_by_message(msg, error_msg)
    raise ex
Raises exception from last server message This function will skip messages: The statement has been terminated
def check_key(self, key: str) -> bool:
    """Return True when `key` (a SHA512 hash) exists in the datastore."""
    return key in self.get_keys()
Checks if key exists in datastore. True if yes, False if no. :param: SHA512 hash key :return: whether or key not exists in datastore
def tokenize(self, s):
    """Tokenize the string.

    :param s: the string to tokenize
    :type s: str
    :return: the iterator over the resulting tokens
    :rtype: TokenIterator
    """
    # Push the string into the wrapped Java tokenizer, then expose its
    # tokens through a Python iterator wrapper.
    javabridge.call(self.jobject, "tokenize", "(Ljava/lang/String;)V", s)
    return TokenIterator(self)
Tokenizes the string. :param s: the string to tokenize :type s: str :return: the iterator :rtype: TokenIterator
def expired(self):
    """Handle an expired session: mark it killed, persist it, and raise.

    :raises SessionExpired: always, with the configured expiry message.
    """
    self._data["_killed"] = True
    self.save()
    raise SessionExpired(self._config.expired_message)
Called when an expired session is accessed: marks the session as killed, saves it, and raises SessionExpired.
def stream_messages(self):
    """Access the stream_messages resource (constructed lazily).

    :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList
    :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList
    """
    # Build the list resource on first access and cache it afterwards.
    if self._stream_messages is None:
        self._stream_messages = StreamMessageList(
            self._version,
            service_sid=self._solution['service_sid'],
            stream_sid=self._solution['sid'],
        )
    return self._stream_messages
Access the stream_messages :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList
async def download_file_by_id(self, file_id: base.String, destination=None,
                              timeout: base.Integer = 30,
                              chunk_size: base.Integer = 65536,
                              seek: base.Boolean = True):
    """Download the file identified by `file_id` to `destination`.

    Leave `destination` as None to have one created automatically
    (e.g. an :class:`io.BytesIO`) and use the returned value.

    :param file_id: str
    :param destination: filename or an instance of :class:`io.IOBase`
    :param timeout: request timeout in seconds
    :param chunk_size: download chunk size in bytes
    :param seek: go to the start of the file when downloading is finished
    :return: the destination
    """
    # Resolve the server-side file path first, then delegate the transfer.
    file = await self.get_file(file_id)
    return await self.download_file(file_path=file.file_path,
                                    destination=destination, timeout=timeout,
                                    chunk_size=chunk_size, seek=seek)
Download file by file_id to destination if You want to automatically create destination (:class:`io.BytesIO`) use default value of destination and handle result of this method. :param file_id: str :param destination: filename or instance of :class:`io.IOBase`. For e. g. :class:`io.BytesIO` :param timeout: int :param chunk_size: int :param seek: bool - go to start of file when downloading is finished :return: destination
def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
    """Apply standardized matplotlib axis formatting for figures.

    Ticks point outward from the bottom and `yticks_position` spines;
    tick and axis labels use the module-level `fontsize`.
    """
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position(yticks_position)
    ax.yaxis.set_tick_params(which='both', direction='out',
                             labelsize=fontsize, pad=tick_padding,
                             length=2, width=0.5)
    ax.xaxis.set_tick_params(which='both', direction='out',
                             labelsize=fontsize, pad=tick_padding,
                             length=2, width=0.5)
    ax.xaxis.labelpad = label_padding
    ax.yaxis.labelpad = label_padding
    ax.xaxis.label.set_size(fontsize)
    ax.yaxis.label.set_size(fontsize)
Set standardized axis formatting for figure.
def generate_semantic_data_key(used_semantic_keys):
    """Create a new, unique semantic data key.

    :param list used_semantic_keys: keys already in use
    :rtype: str
    :return: the first "semantic data key <n>" not yet in use
    """
    counter = 0
    while "semantic data key " + str(counter) in used_semantic_keys:
        counter += 1
    return "semantic data key " + str(counter)
Create a new and unique semantic data key :param list used_semantic_keys: Handed list of keys already in use :rtype: str :return: semantic_data_id
def send_message(self, message):
    """Serialize `message.request` as JSON and send it, when connected."""
    if not self.connected:
        return
    payload = json.dumps(message.request)
    self.send(payload)
Send a message down the socket. The message is expected to have a `request` attribute that holds the message to be serialized and sent.
def _parse_title(line_iter, cur_line, conf):
    """Parse one "title" stanza of a GRUB v1 config into conf['title'].

    Consumes lines from `line_iter` until the next "title " line, which is
    returned so the caller can continue from it.
    NOTE(review): reaching end-of-input raises StopIteration out of this
    function -- confirm the caller handles that.
    """
    title = []
    conf['title'].append(title)
    # Everything after the "title" keyword is the entry's display name.
    title.append(('title_name', cur_line.split('title', 1)[1].strip()))
    while (True):
        line = next(line_iter)
        if line.startswith("title "):
            return line
        cmd, opt = _parse_cmd(line)
        title.append((cmd, opt))
Parse "title" in grub v1 config
def explicit_counts_map(self, pixels=None):
    """Return a counts map with an explicit (sparse) index scheme.

    Parameters
    ----------
    pixels : `np.ndarray` or None
        If set, grab only those pixels. If None, grab only non-zero pixels.

    Returns
    -------
    tuple of (pixel indices, counts at those pixels)
    """
    if self.hpx._ipix is None:
        if self.data.ndim == 2:
            summed = self.counts.sum(0)
            if pixels is None:
                nz = summed.nonzero()[0]
            else:
                nz = pixels
            # np.vstack requires a sequence; the original passed a bare
            # generator, which is deprecated and raises on modern numpy.
            data_out = np.vstack([self.data[i].flat[nz]
                                  for i in range(self.data.shape[0])])
        else:
            if pixels is None:
                nz = self.data.nonzero()[0]
            else:
                nz = pixels
            data_out = self.data[nz]
        return (nz, data_out)
    else:
        if pixels is None:
            return (self.hpx._ipix, self.data)
        raise RuntimeError(
            'HPX.explicit_counts_map called with pixels for a map that already has pixels')
return a counts map with explicit index scheme Parameters ---------- pixels : `np.ndarray` or None If set, grab only those pixels. If none, grab only non-zero pixels
def set_codes(self, codes):
    """Set the country code map for the data from a list of codes.

    e.g. DE - Germany, AT - Austria, US - United States

    :raises UnknownCountryCodeException: for any code not in the known set.
    """
    codemap = ''
    for code in codes:
        code = code.upper()
        if code not in self.__ccodes:
            raise UnknownCountryCodeException(code)
        codemap += code
    self.codes = codemap
Set the country code map for the data. Codes given in a list. i.e. DE - Germany AT - Austria US - United States
def prepend_path_variable_command(variable, paths):
    """Return a command that prepends `paths` to the named path variable
    on the current platform.

    The variable's existing value is preserved by appending a reference to
    it (``expand_variable``) after the new paths.
    """
    assert isinstance(variable, basestring)  # Python 2 codebase
    assert is_iterable_typed(paths, basestring)
    return path_variable_setting_command(
        variable, paths + [expand_variable(variable)])
Returns a command that prepends the given paths to the named path variable on the current platform.
def write_relationships(self, file_name, flat=True):
    """Write the eDNA tags used inside each calculation to `file_name`.

    With flat=True, each calculation's tags go on one line, like::

        ADE1CA01, ADE1PI01, ADE1PI02

    With flat=False, each pair goes on its own line, like::

        ADE1CA01, ADE1PI01
        ADE1CA01, ADE1PI02

    :param file_name: output path, including the '.csv' extension
    :param flat: select the flat (True) or pairwise (False) layout
    """
    writer_fn = (self._write_relationships_flat if flat
                 else self._write_relationships_non_flat)
    with open(file_name, 'w') as writer:
        writer_fn(writer)
This module will output the eDNA tags which are used inside each calculation. If flat=True, data will be written flat, like: ADE1CA01, ADE1PI01, ADE1PI02 If flat=False, data will be written in the non-flat way, like: ADE1CA01, ADE1PI01 ADE1CA01, ADE1PI02 :param file_name: the output filename to write the relationships, which should include the '.csv' extension :param flat: True or False
def namer(cls, imageUrl, pageUrl):
    """Strip the random trailing junk from image file names."""
    filename = imageUrl.split('/')[-1]
    stem = filename.rsplit('-', 1)[0]
    extension = filename.rsplit('.', 1)[1]
    return '%s.%s' % (stem, extension)
Remove random junk from image names.
def isValid(folder, epoch=0):
    """Return True when `folder` holds a valid preprocessed dataset.

    Validity is detected by the presence of the marker file
    ``<folder>/<epoch>/train/silence.pkl``.
    """
    marker = os.path.join(folder, str(epoch), "train", "silence.pkl")
    return os.path.exists(marker)
Check if the given folder is a valid preprocessed dataset
def _simplify_non_context_field_binary_composition(expression):
    """Simplify a BinaryComposition when one operand is a TrueLiteral.

    Args:
        expression: BinaryComposition without any ContextField operand(s)

    Returns:
        the simplified expression when the input is a conjunction or
        disjunction with a TrueLiteral operand; the original otherwise
    """
    has_context_field = (isinstance(expression.left, ContextField) or
                         isinstance(expression.right, ContextField))
    if has_context_field:
        raise AssertionError(u'Received a BinaryComposition {} with a ContextField '
                             u'operand. This should never happen.'.format(expression))
    if expression.operator == u'||':
        # X || true == true || X == true
        if expression.left == TrueLiteral or expression.right == TrueLiteral:
            return TrueLiteral
        return expression
    if expression.operator == u'&&':
        # true && X == X (and symmetrically)
        if expression.left == TrueLiteral:
            return expression.right
        if expression.right == TrueLiteral:
            return expression.left
        return expression
    return expression
Return a simplified BinaryComposition if either operand is a TrueLiteral. Args: expression: BinaryComposition without any ContextField operand(s) Returns: simplified expression if the given expression is a disjunction/conjunction and one of its operands is a TrueLiteral, and the original expression otherwise
def who_has(self, subid):
    """Return the list of names whose id range set contains `subid`."""
    owners = []
    for name in self.__map:
        if subid in self.__map[name] and name not in owners:
            owners.append(name)
    return owners
Return a list of names who own subid in their id range set.
def formation_energy(self, chemical_potentials=None, fermi_level=0):
    """Compute the defect formation energy.

    :param chemical_potentials: dict mapping element -> chemical potential;
        elements not listed contribute nothing.
    :param fermi_level: Fermi level; referenced to the VBM when the "vbm"
        parameter is available, absolute otherwise.
    """
    chemical_potentials = chemical_potentials if chemical_potentials else {}
    # Energy change from exchanging atoms with the chemical reservoirs.
    chempot_correction = sum([
        chem_pot * (self.bulk_structure.composition[el]
                    - self.defect.defect_composition[el])
        for el, chem_pot in chemical_potentials.items()
    ])
    formation_energy = self.energy + chempot_correction
    # Charged defects exchange electrons at the Fermi level; reference it
    # to the valence band maximum when that is known.
    if "vbm" in self.parameters:
        formation_energy += self.charge * (self.parameters["vbm"] + fermi_level)
    else:
        formation_energy += self.charge * fermi_level
    return formation_energy
Computes the formation energy for a defect taking into account a given chemical potential and fermi_level
def run(options, http_req_handler = HttpReqHandler):
    """Start the HTTP server and serve until killed (Python 2 code).

    Applies optional server_version/sys_version overrides to the handler,
    runs every registered command's at_start hook, then loops in
    serve_until_killed, tolerating EINTR and the EBADF raised when the
    listening socket is closed during shutdown.
    """
    global _HTTP_SERVER
    for x in ('server_version', 'sys_version'):
        if _OPTIONS.get(x) is not None:
            setattr(http_req_handler, x, _OPTIONS[x])
    _HTTP_SERVER = threading_tcp_server.KillableThreadingHTTPServer(
        _OPTIONS, (_OPTIONS['listen_addr'], _OPTIONS['listen_port']),
        http_req_handler, name = "httpdis")
    for name, cmd in _COMMANDS.iteritems():
        if cmd.at_start:
            LOG.info("at_start: %r", name)
            cmd.at_start(options)
    LOG.info("will now serve")
    while not _KILLED:
        try:
            _HTTP_SERVER.serve_until_killed()
        except (socket.error, select.error), why:
            if errno.EINTR == why[0]:
                # Interrupted system call: retry the serve loop.
                LOG.debug("interrupted system call")
            elif errno.EBADF == why[0] and _KILLED:
                # The socket was closed as part of shutdown.
                LOG.debug("server close")
            else:
                raise
    LOG.info("exiting")
Start and execute the server
def include(self, pattern):
    """Append files (not directories) matching `pattern`.

    :return: True when at least one file matched.
    """
    matches = [name for name in glob(pattern) if not os.path.isdir(name)]
    self.extend(matches)
    return bool(matches)
Include files that match 'pattern'.
def run(self, **kwargs):
    """Run this IDF with EnergyPlus (wrapper around the CLI runner).

    The model is first written to 'in.idf' in the current directory, and
    that temporary file is removed again afterwards. `idd` and `weather`
    kwargs default to this IDF's iddname and epw.

    Parameters
    ----------
    **kwargs
        See eppy.runner.functions.run()
    """
    self.saveas('in.idf')
    idd = kwargs.pop('idd', self.iddname)
    epw = kwargs.pop('weather', self.epw)
    try:
        run(self, weather=epw, idd=idd, **kwargs)
    finally:
        # Always clean up the temporary input file, even on failure.
        os.remove('in.idf')
Run an IDF file with a given EnergyPlus weather file. This is a wrapper for the EnergyPlus command line interface. Parameters ---------- **kwargs See eppy.runner.functions.run()
def list_domains_by_service(self, service_id):
    """Return the domains within a service as FastlyDomain objects."""
    content = self._fetch("/service/%s/domain" % service_id, method="GET")
    # List comprehension instead of map(): on Python 3, map() returns a
    # one-shot iterator, while callers expect a list.
    return [FastlyDomain(self, item) for item in content]
List the domains within a service.
def summary(self):
    """Summary of the model definition for labeling.

    Intended to be somewhat readable but unique to a given model
    definition.

    :return: tuple of (estimator repr, feature count, feature hash, target)
    """
    if self.features is not None:
        feature_count = len(self.features)
        feature_hash = 'feathash:' + str(hash(tuple(self.features)))
    else:
        feature_count = 0
        # The original unconditionally called tuple(self.features), which
        # raised TypeError when features was None.
        feature_hash = 'feathash:' + str(hash(()))
    return (str(self.estimator), feature_count, feature_hash, self.target)
Summary of model definition for labeling. Intended to be somewhat readable but unique to a given model definition.
def draw(self, **kwargs):
    """Render the training and cross-validation learning curves on self.ax."""
    labels = ("Training Score", "Cross Validation Score")
    # Pair each curve's mean with its standard deviation.
    curves = (
        (self.train_scores_mean_, self.train_scores_std_),
        (self.test_scores_mean_, self.test_scores_std_),
    )
    colors = resolve_colors(n_colors=2)
    # Draw the shaded mean +/- std bands first so the lines sit on top.
    for idx, (mean, std) in enumerate(curves):
        self.ax.fill_between(
            self.train_sizes_, mean - std, mean + std,
            alpha=0.25, color=colors[idx],
        )
    for idx, (mean, _) in enumerate(curves):
        self.ax.plot(
            self.train_sizes_, mean, 'o-',
            color=colors[idx], label=labels[idx],
        )
    return self.ax
Renders the training and test learning curves.
def get_delay(self, planned, estimated):
    """Return the delay in whole minutes between planned and estimated times.

    Positive when `estimated` is later than `planned`; negative when the
    departure is expected early.
    """
    # timedelta.seconds ignores the .days component, so the original
    # under-reported any delay of a day or more; total_seconds() does not.
    return round((estimated - planned).total_seconds() / 60)
Delay in minutes relative to the planned departure (negative when the estimated time is earlier than planned).
def first(script, value=None, default=None, vars=None, url=None,
          opener=default_opener, library_paths=None):
    """Transform the object with the jq `script`, returning the first result.

    Returns `default` when the result is empty.

    :param script: jq program text
    :param value: input value (ignored when `url` is given)
    :param default: returned when the script produces no output
    :param vars: optional mapping of jq variables
    :param url: optional URL to fetch the input from via `opener`
    :param library_paths: optional jq library search paths
    """
    # Mutable default arguments ({} and []) are shared between calls;
    # use None sentinels instead.
    if vars is None:
        vars = {}
    if library_paths is None:
        library_paths = []
    program = compile(script, vars, library_paths)
    return program.first(_get_value(value, url, opener), default)
Transform object by jq script, returning the first result. Return default if result is empty.
def staticmap(ctx, mapid, output, features, lat, lon, zoom, size):
    """Generate static map images from existing Mapbox map ids,
    optionally overlaid with geojson features.

      $ mapbox staticmap --features features.geojson mapbox.satellite out.png

      $ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png

    An access token is required; see `mapbox --help`.
    """
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    if features:
        features = list(
            cligj.normalize_feature_inputs(None, 'features', [features]))
    service = mapbox.Static(access_token=access_token)
    try:
        res = service.image(
            mapid, lon=lon, lat=lat, z=zoom,
            width=size[0], height=size[1],
            features=features, sort_keys=True)
    except mapbox.errors.ValidationError as exc:
        # Surface validation problems as CLI parameter errors.
        raise click.BadParameter(str(exc))
    if res.status_code == 200:
        output.write(res.content)
    else:
        raise MapboxCLIException(res.text.strip())
Generate static map images from existing Mapbox map ids. Optionally overlay with geojson features. $ mapbox staticmap --features features.geojson mapbox.satellite out.png $ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png An access token is required, see `mapbox --help`.
def remove_rows_matching(df, column, match):
    """Return a ``DataFrame`` with rows whose `column` value equals `match` removed.

    The input frame is not modified; a filtered copy is returned.

    :param df: Pandas ``DataFrame``
    :param column: Column indexer
    :param match: value to filter out
    :return: Pandas ``DataFrame`` with matching rows dropped
    """
    keep_mask = df[column].values != match
    return df.copy().iloc[keep_mask, :]
Return a ``DataFrame`` with rows where `column` values match `match` are removed. The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared to `match`, and those rows that match are removed from the DataFrame. :param df: Pandas ``DataFrame`` :param column: Column indexer :param match: ``str`` match target :return: Pandas ``DataFrame`` filtered
def Flush(self):
    """Syncs this object with the data store, maintaining object validity.

    Writing with an expired lease could clobber a concurrent writer, so a
    lock error is raised instead. After this object's attributes are
    written and synced, the parent object (if any) is flushed recursively.
    """
    # CheckLease() == 0 means our lease expired while we held the lock.
    if self.locked and self.CheckLease() == 0:
        self._RaiseLockError("Flush")
    self._WriteAttributes()
    self._SyncAttributes()
    # NOTE(review): recursion depth is bounded by the object hierarchy;
    # assumes parents are never cyclic — confirm against data model.
    if self.parent:
        self.parent.Flush()
Syncs this object with the data store, maintaining object validity.
def on_add_rows(self, event):
    """Append the number of rows selected in the spin control to the grid.

    After adding the rows, refit the main sizer so the window resizes to
    accommodate the larger grid.
    """
    row_count = self.rows_spin_ctrl.GetValue()
    for _ in range(row_count):
        self.grid.add_row()
    self.main_sizer.Fit(self)
Add the requested number of rows to the grid.
def get_entity(self, entity, default=None):
    """Look up an entity object in the ACL.

    :type entity: :class:`_ACLEntity` or string
    :param entity: The entity to look up; coerced to ``str`` for the lookup.

    :type default: anything
    :param default: Returned when the entity is not present.

    :rtype: :class:`_ACLEntity`
    :returns: The matching entity, or *default* when absent.
    """
    self._ensure_loaded()
    key = str(entity)
    return self.entities.get(key, default)
Gets an entity object from the ACL. :type entity: :class:`_ACLEntity` or string :param entity: The entity to get lookup in the ACL. :type default: anything :param default: This value will be returned if the entity doesn't exist. :rtype: :class:`_ACLEntity` :returns: The corresponding entity or the value provided to ``default``.
def on_enter_specimen(self, event):
    """Upon enter in the specimen box, make that specimen the current one.

    If the typed name is not a known specimen with measurement data, warn
    the user and revert the box to the currently selected specimen.
    """
    new_specimen = self.specimens_box.GetValue()
    if new_specimen not in self.specimens:
        self.user_warning(
            "%s is not a valid specimen with measurement data, aborting" % (new_specimen))
        # Revert the text box to the current specimen (self.s).
        self.specimens_box.SetValue(self.s)
        return
    self.select_specimen(new_specimen)
    # Keep the interpretation editor, if open, in sync with the new fit.
    if self.ie_open:
        self.ie.change_selected(self.current_fit)
    self.update_selection()
upon enter on the specimen box it makes that specimen the current specimen
def default_resolve_fn(source, info, **args):
    """Default field resolver.

    Takes the attribute (or dict key) of *source* named after the field and
    returns it; if that value is callable (e.g. a method), the result of
    calling it is returned instead.

    :param source: object or mapping being resolved
    :param info: resolve info carrying ``field_name``
    :return: the resolved value, or None when absent
    """
    name = info.field_name
    # Dict sources are looked up by key; other objects by attribute.
    # Renamed from `property`, which shadowed the builtin of that name.
    if isinstance(source, dict):
        value = source.get(name)
    else:
        value = getattr(source, name, None)
    return value() if callable(value) else value
If a resolve function is not given, then a default resolve behavior is used which takes the property of the source object of the same name as the field and returns it as the result, or if it's a function, returns the result of calling that function.
def handle_feedback(self, pkt):
    """Handle an IRAF feedback packet, used to erase a framebuffer frame.

    Decodes the (zero-based) frame number from the packet and points the
    controller at that frame, initializing it first.
    """
    self.logger.debug("handle feedback")
    # Only the low 12 bits of pkt.z encode the (1-based) frame number.
    frame_no = self.decode_frameno(pkt.z & 0o7777) - 1
    self.frame = frame_no
    controller = self.server.controller
    controller.init_frame(frame_no)
    controller.set_frame(frame_no)
This part of the protocol is used by IRAF to erase a frame in the framebuffers.
def create_initialized_contract_account(self, contract_code, storage) -> None:
    """Create a new contract account from runtime bytecode and storage.

    The contract code only includes the runtime contract bytecode.

    :param contract_code: Runtime bytecode for the contract
    :param storage: Initial storage for the contract
    :return: None
    """
    account = Account(
        self._generate_new_address(),
        code=contract_code,
        balance=0,
    )
    account.storage = storage
    self._put_account(account)
Creates a new contract account, based on the contract code and storage provided The contract code only includes the runtime contract bytecode. :param contract_code: Runtime bytecode for the contract :param storage: Initial storage for the contract :return: The new account
def update_in_hdx(self, **kwargs):
    """Check if resource exists in HDX and if so, update it.

    Args:
        **kwargs: See below
        operation (string): Operation to perform eg. patch. Defaults to update.

    Returns:
        None
    """
    self._check_load_existing_object('resource', 'id')
    # A file queued for upload supersedes any previously stored URL.
    if self.file_to_upload and 'url' in self.data:
        del self.data['url']
    self._merge_hdx_update('resource', 'id', self.file_to_upload, **kwargs)
Check if resource exists in HDX and if so, update it Args: **kwargs: See below operation (string): Operation to perform eg. patch. Defaults to update. Returns: None
def _strip_marker_elem(elem_name, elements):
    """Remove the named element (e.g. "extra") from a parsed marker in place.

    Not a comprehensive implementation; it relies on a characteristic of
    metadata generation: an "extra" operand is always joined with an "and"
    operator, so removing the operand plus its adjoining operator keeps the
    marker well formed.

    Returns True when *elements* ends up empty, signalling the caller (or
    the recursive parent) that the whole sub-marker should be dropped.
    """
    extra_indexes = []
    # "extra" operands are only ever joined by "and"; for other element
    # names also treat "or" as removable.
    preceding_operators = ["and"] if elem_name == "extra" else ["and", "or"]
    for i, element in enumerate(elements):
        if isinstance(element, list):
            # Nested sub-marker: recurse, and mark it for deletion if the
            # recursion emptied it entirely.
            cancelled = _strip_marker_elem(elem_name, element)
            if cancelled:
                extra_indexes.append(i)
        elif isinstance(element, tuple) and element[0].value == elem_name:
            extra_indexes.append(i)
    # Delete from the end so earlier indexes stay valid; each deleted
    # operand takes its adjoining operator with it.
    for i in reversed(extra_indexes):
        del elements[i]
        if i > 0 and elements[i - 1] in preceding_operators:
            del elements[i - 1]
        elif elements:
            # The deleted operand was first: drop the now-leading operator.
            del elements[0]
    return not elements
Remove the supplied element from the marker. This is not a comprehensive implementation, but relies on an important characteristic of metadata generation: The element's operand is always associated with an "and" operator. This means that we can simply remove the operand and the "and" operator associated with it.
def _high_dim_sim(self, v, w, normalize=False, X=None, idx=0): sim = np.exp((-np.linalg.norm(v - w) ** 2) / (2*self._sigma[idx] ** 2)) if normalize: return sim / sum(map(lambda x: x[1], self._knn(idx, X, high_dim=True))) else: return sim
Similarity measurement based on Gaussian Distribution
def getMessage(self):
    """Return the message for this LogRecord.

    Merges any user-supplied arguments with the message, converting numpy
    arrays (in the message itself, or in dict/sequence arguments) to
    strings via ``self.array2string`` first.
    """
    def render(value):
        # Only ndarray values need the array2string conversion.
        if isinstance(value, numpy.ndarray):
            return self.array2string(value)
        return value

    if isinstance(self.msg, numpy.ndarray):
        msg = self.array2string(self.msg)
    else:
        msg = str(self.msg)

    if self.args:
        if isinstance(self.args, Dict):
            args = {key: render(val) for key, val in self.args.items()}
        elif isinstance(self.args, Sequence):
            args = tuple(render(item) for item in self.args)
        else:
            raise TypeError("Unexpected input '%s' with type '%s'"
                            % (self.args, type(self.args)))
        msg = msg % args
    return msg
Return the message for this LogRecord. Return the message for this LogRecord after merging any user-supplied arguments with the message.
def get_resource_search_session(self, proxy):
    """Gets a resource search session.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.resource.ResourceSearchSession) - a
            ``ResourceSearchSession``
    raise:  Unimplemented - ``supports_resource_search()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_resource_search()`` is ``true``.*
    """
    # Per the OSID contract, an optional session raises Unimplemented
    # when the corresponding supports_* check is false.
    if not self.supports_resource_search():
        raise errors.Unimplemented()
    return sessions.ResourceSearchSession(proxy=proxy, runtime=self._runtime)
Gets a resource search session. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.ResourceSearchSession) - ``a ResourceSearchSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_search()`` is ``true``.*
def hist_axis_func(axis_type: enum.Enum) -> Callable[[Hist], Axis]:
    """Wrapper to retrieve the axis of a given histogram.

    This can be convenient outside of just projections, so it's made
    available in the API.

    Args:
        axis_type: The type of axis to retrieve (enum member or raw index).
    Returns:
        Callable to retrieve the specified axis when given a hist.
    """
    def axis_func(hist: Hist) -> Axis:
        """Retrieve the requested axis from *hist*."""
        # Accept either an enum member (use its .value) or a raw index.
        try:
            hist_axis_type = axis_type.value
        except AttributeError:
            hist_axis_type = axis_type
        # THn-like histograms expose both ProjectionND and Projection and
        # take an axis index; TH1/2/3 use the GetXaxis/GetYaxis/GetZaxis
        # accessors instead.
        if hasattr(hist, "ProjectionND") and hasattr(hist, "Projection"):
            return hist.GetAxis(hist_axis_type)
        else:
            axis_function_map = {
                TH1AxisType.x_axis.value: hist.GetXaxis,
                TH1AxisType.y_axis.value: hist.GetYaxis,
                TH1AxisType.z_axis.value: hist.GetZaxis
            }
            return_func = axis_function_map[hist_axis_type]
            return return_func()
    return axis_func
Wrapper to retrieve the axis of a given histogram. This can be convenient outside of just projections, so it's made available in the API. Args: axis_type: The type of axis to retrieve. Returns: Callable to retrieve the specified axis when given a hist.
def to_yaml(obj, stream=None, dumper_cls=yaml.Dumper, default_flow_style=False, **kwargs):
    """Serialize a Python object into a YAML stream.

    OrderedDict mappings are emitted as plain mappings (preserving key
    order) and ``default_flow_style`` defaults to False. If *stream* is
    None, the produced string is returned instead.

    OrderedDict reference: http://stackoverflow.com/a/21912744
    default_flow_style reference: http://stackoverflow.com/a/18210750

    :param obj: python object to be serialized
    :param stream: stream to serialize to (None returns a string)
    :param dumper_cls: base Dumper class to extend
    :param kwargs: arguments to pass to to_dict
    :return: stream if provided, string if stream is None
    """
    class OrderedDumper(dumper_cls):
        pass

    def represent_ordered_dict(dumper, data):
        # Emit OrderedDict as an ordinary YAML mapping, keeping key order.
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())

    OrderedDumper.add_representer(OrderedDict, represent_ordered_dict)
    return yaml.dump(to_dict(obj, **kwargs), stream, OrderedDumper,
                     default_flow_style=default_flow_style)
Serialize a Python object into a YAML stream with OrderedDict and default_flow_style defaulted to False. If stream is None, return the produced string instead. OrderedDict reference: http://stackoverflow.com/a/21912744 default_flow_style reference: http://stackoverflow.com/a/18210750 :param obj: python object to be serialized :param stream: stream to serialize to; if None, the produced string is returned :param dumper_cls: base Dumper class to extend. :param kwargs: arguments to pass to to_dict :return: stream if provided, string if stream is None
def pop_arguments(instr, stack):
    """Pop instructions off *stack* until we pop all instructions that will
    produce values consumed by *instr*.

    :param instr: instruction with a negative ``stack_effect`` (a consumer)
    :param stack: list of producing instructions; mutated in place
    :return: the popped suffix of *stack*, in original order
    :raises DecompilationError: if *instr* does not consume values, or the
        stack is exhausted before its inputs are found
    """
    needed = instr.stack_effect
    if needed >= 0:
        # Fixed garbled message ("is does not have").
        raise DecompilationError(
            "%s does not have a negative stack effect" % instr
        )

    # Walk the stack backwards, accumulating stack effects until the
    # consumer's deficit is exactly paid off.
    for popcount, to_pop in enumerate(reversed(stack), start=1):
        needed += to_pop.stack_effect
        if not needed:
            break
    else:
        raise DecompilationError(
            "Reached end of stack without finding inputs to %s" % instr,
        )

    popped = stack[-popcount:]
    stack[:] = stack[:-popcount]
    return popped
Pop instructions off `stack` until we pop all instructions that will produce values popped by `instr`.
def removeIndividual(self, individual):
    """Removes the specified individual from this repository.

    Issues a DELETE against the Individual table keyed by the
    individual's id.
    """
    q = models.Individual.delete().where(
        models.Individual.id == individual.getId())
    q.execute()
Removes the specified individual from this repository.
def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir):
    """Record per-task state as a new task begins executing.

    Invoked by ActionModuleMixin; grabs the relevant bits from the
    task-specific data and resets any connection state left over from a
    previous task.

    :param dict task_vars: Task variable dictionary.
    :param str delegate_to_hostname: None, or the template-expanded
        inventory hostname this task is being delegated to.
    :param str loader_basedir: Loader base directory.
    """
    self._task_vars = task_vars
    self.inventory_hostname = task_vars['inventory_hostname']
    self.host_vars = task_vars['hostvars']
    self.delegate_to_hostname = delegate_to_hostname
    self.loader_basedir = loader_basedir
    # Drop any connection carried over from the previous task.
    self._mitogen_reset(mode='put')
Invoked by ActionModuleMixin to indicate a new task is about to start executing. We use the opportunity to grab relevant bits from the task-specific data. :param dict task_vars: Task variable dictionary. :param str delegate_to_hostname: :data:`None`, or the template-expanded inventory hostname this task is being delegated to. A similar variable exists on PlayContext when ``delegate_to:`` is active, however it is unexpanded. :param str loader_basedir: Loader base directory; see :attr:`loader_basedir`.
def get_future(self):
    """Get current and future forecasts.

    Requests the forecast window from now until 96 hours (four days)
    ahead from the MSW API.
    """
    start = dt.now()
    end = start + timedelta(hours=96)
    url = build_url(self.api_key, self.spot_id, self.fields, self.unit,
                    start.timestamp(), end.timestamp())
    return get_msw(url)
Get current and future forecasts.
def group_select(selects, length=None, depth=None):
    """Given a list of key tuples to select, groups them into sensible
    chunks to avoid duplicating indexing operations.

    :param selects: list of equal-length key tuples
    :param length: remaining tuple positions to group on (internal)
    :param depth: total tuple length (internal)
    :return: nested dict of groups, with lists of tuples at the leaves
    """
    # On the initial call, derive both counters from the tuple length.
    # Use `is None` rather than `== None` for the sentinel test.
    if length is None and depth is None:
        length = depth = len(selects[0])

    # Key position for this level: leftmost first, moving right on recursion.
    getter = operator.itemgetter(depth - length)
    if length <= 1:
        return list(selects)

    grouped_selects = defaultdict(dict)
    for key, group in itertools.groupby(sorted(selects, key=getter), getter):
        grouped_selects[key] = group_select(list(group), length - 1, depth)
    return grouped_selects
Given a list of key tuples to select, groups them into sensible chunks to avoid duplicating indexing operations.
def show_distribution_section(config, title, section_name):
    """Obtain distribution data and display the latest distribution
    section, i.e. "demos" or "apps" or "themes".

    Fetches the apps payload, picks the most recent release, and prints a
    two-column (release, name) table for the requested section.
    """
    payload = requests.get(config.apps_url).json()
    # Release keys sort descending, so the first one is the latest.
    releases = sorted(payload.keys(), reverse=True)
    latest = payload[releases[0]]

    click.echo("{} {}".format("Release".rjust(7), title))
    click.echo("------- ---------------")
    section = latest[section_name]
    for name in sorted(section.keys()):
        click.echo("{} {}".format(section[name].rjust(7), name))
Obtain distribution data and display latest distribution section, i.e. "demos" or "apps" or "themes".
def from_remote_hive(cls, url, *args, **kwargs):
    """Download a JSON hive file from a URL and initialize from it.

    Honors the ``version`` and ``require_https`` keyword arguments when
    fetching the hive; all other arguments are forwarded to the
    constructor.
    """
    version = kwargs.pop('version', None)
    require_https = kwargs.pop('require_https', False)
    hive = Hive.from_url(url, version, require_https)
    return cls(hive, *args, **kwargs)
Download a JSON hive file from a URL, and initialize from it, paying attention to the version keyword argument.
def download_page(url, data=None):
    """Return the response body for *url*.

    The optional *data* argument is passed directly to urlopen (making
    the request a POST when provided).

    :param url: URL to fetch
    :param data: optional request body
    :return: response bytes
    """
    conn = urllib2.urlopen(url, data)
    try:
        return conn.read()
    finally:
        # Close the connection even when read() raises, so the socket is
        # not leaked on error (the original leaked it).
        conn.close()
Returns the response for the given url. The optional data argument is passed directly to urlopen.
def shell(environment, opts):
    """Run a command or interactive shell within this environment

    Usage:
      datacats [-d] [-s NAME] shell [ENVIRONMENT [COMMAND...]]

    Options:
      -d --detach      Run the resulting container in the background
      -s --site=NAME   Specify a site to run the shell on [default: primary]

    ENVIRONMENT may be an environment name or a path to an environment
    directory. Default: '.'
    """
    environment.require_data()
    # Supporting containers (databases etc.) must be up before the
    # interactive container is started.
    environment.start_supporting_containers()
    return environment.interactive_shell(
        opts['COMMAND'],
        detach=opts['--detach']
    )
Run a command or interactive shell within this environment Usage: datacats [-d] [-s NAME] shell [ENVIRONMENT [COMMAND...]] Options: -d --detach Run the resulting container in the background -s --site=NAME Specify a site to run the shell on [default: primary] ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.'
def independentlinear60(display=False):
    """A simulated dataset with tight correlations among distinct groups
    of features.

    Generates 1000 samples of 60 centered features with a sparse linear
    target (every third of the first 30 coefficients is 1) plus small
    Gaussian noise. The generation is deterministic (seeded with 0) and
    the caller's global NumPy RNG state is left untouched.

    :param display: unused; kept for interface compatibility
    :return: (DataFrame X, ndarray y)
    """
    # np.random.seed() returns None, so the original's "old_seed =
    # np.random.seed(); ...; np.random.seed(old_seed)" never actually
    # restored the caller's RNG state. Save and restore the full state.
    state = np.random.get_state()
    try:
        np.random.seed(0)
        N = 1000
        M = 60
        beta = np.zeros(M)
        beta[0:30:3] = 1
        X_start = np.random.randn(N, M)
        X = X_start - X_start.mean(0)
        y = np.matmul(X, beta) + np.random.randn(N) * 1e-2
    finally:
        np.random.set_state(state)
    return pd.DataFrame(X), y
A simulated dataset with tight correlations among distinct groups of features.
def _set_default_serializer(self, name): try: (self._default_content_type, self._default_content_encoding, self._default_encode) = self._encoders[name] except KeyError: raise SerializerNotInstalled( "No encoder installed for %s" % name)
Set the default serialization method used by this library. :param name: The name of the registered serialization method. For example, ``json`` (default), ``pickle``, ``yaml``, or any custom methods registered using :meth:`register`. :raises SerializerNotInstalled: If the serialization method requested is not available.
def page_view(url):
    """Page view decorator.

    Put that around a state handler function in order to log a page view
    each time the handler gets called.

    :param url: simili-URL that you want to give to the state
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(self: BaseState, *args, **kwargs):
            user_id = self.request.user.id
            # Locale support is optional; platforms that don't implement
            # it report an empty language string.
            try:
                user_lang = await self.request.user.get_locale()
            except NotImplementedError:
                user_lang = ''
            # The state class name doubles as the page title.
            title = self.__class__.__name__
            # Report the view to every registered analytics provider
            # before running the handler itself.
            async for p in providers():
                await p.page_view(url, title, user_id, user_lang)
            return await func(self, *args, **kwargs)
        return wrapper
    return decorator
Page view decorator. Put that around a state handler function in order to log a page view each time the handler gets called. :param url: simili-URL that you want to give to the state
def __symlink_dir(self, dir_name, name, path):
    """Symlink *path* as *name* inside the *dir_name* folder under root_dir.

    An existing symlink at the target is replaced; a non-symlink at the
    target is left alone (with a warning) rather than clobbered.

    :param dir_name: folder under ``self.root_dir`` to place the link in
    :param name: name of the symlink to create
    :param path: filesystem path the symlink points at
    """
    target_dir = os.path.join(self.root_dir, dir_name)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    target_path = os.path.join(self.root_dir, dir_name, name)
    logger.debug("Attempting to symlink %s to %s..." % (path, target_path))
    if os.path.exists(target_path):
        if os.path.islink(target_path):
            os.remove(target_path)
        else:
            # Never delete a real file/dir the user may care about.
            # logger.warn is a deprecated alias; use warning().
            logger.warning("%s is not a symlink! please remove it manually." % target_path)
            return
    os.symlink(path, target_path)
Symlink an object at path to name in the dir_name folder. remove it if it already exists.
def do_help(self, arg):
    """Show help, delegating to argparse for argparse-backed commands.

    Falls back to cmd.Cmd's help for unknown args or no arg; for names
    the argparser knows, prints the argparse-generated help instead.
    """
    if not arg or arg not in self.argparse_names():
        cmd.Cmd.do_help(self, arg)
    else:
        # argparse prints help and then calls sys.exit(), which raises
        # SystemExit -- a BaseException that `except Exception` did NOT
        # catch, so asking for help killed the interactive shell.
        # Swallow it (and any parser error) so the shell keeps running.
        try:
            self.argparser.parse_args([arg, '--help'])
        except (Exception, SystemExit):
            pass
Patched to show help for argparse commands
def is_integer(obj):
    """Return True if *obj* is an integer.

    On Python 2 both ``int`` and ``long`` count; on Python 3 there is
    only ``int``. Note ``bool`` is a subclass of ``int`` and so also
    returns True.

    :param object obj: value to test
    :return: bool
    """
    if PYTHON3:
        return isinstance(obj, int)
    # Python 2 branch only: `long` does not exist on Python 3, so this
    # name must stay inside the branch (lazily evaluated).
    return isinstance(obj, (int, long))
Is this an integer. :param object obj: :return:
def __get_connection_SNS():
    """Return a boto SNS connection for the configured region.

    Uses explicit credentials from the configuration file when both the
    access key id and secret key are set; otherwise falls back to boto's
    own credential resolution. Connection failures are logged (with a
    pointer to the issue tracker) and re-raised.
    """
    region = get_global_option('region')
    try:
        # Only use explicit credentials when both halves are configured.
        if (get_global_option('aws_access_key_id') and get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to SNS using '
                'credentials in configuration file')
            connection = sns.connect_to_region(
                region,
                aws_access_key_id=get_global_option(
                    'aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = sns.connect_to_region(region)
    except Exception as err:
        logger.error('Failed connecting to SNS: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to SNS in {0}'.format(region))
    return connection
Ensure connection to SNS
def ami_lookup(region='us-east-1', name='tomcat8'):
    """Look up AMI ID.

    Use _name_ to find the AMI ID. If neither AMI_JSON_URL nor
    GITLAB_TOKEN is configured, _name_ itself is returned as the AMI id.

    Args:
        region (str): AWS Region to find AMI ID.
        name (str): Simple AMI base name to lookup.

    Returns:
        str: AMI ID for _name_ in _region_.
    """
    if AMI_JSON_URL:
        # Preferred source: JSON document keyed by region then name.
        ami_id = _get_ami_dict(AMI_JSON_URL)[region][name]
    elif GITLAB_TOKEN:
        warn_user('Use AMI_JSON_URL feature instead.')
        ami_contents = _get_ami_file(region=region)
        ami_id = json.loads(ami_contents)[name]
    else:
        # No lookup source configured: treat the name as a literal AMI id.
        ami_id = name
    LOG.info('Using AMI: %s', ami_id)
    return ami_id
Look up AMI ID. Use _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided, _name_ is returned as the ami id. Args: region (str): AWS Region to find AMI ID. name (str): Simple AMI base name to lookup. Returns: str: AMI ID for _name_ in _region_.
def width(self):
    """The number of columns it would take to display this string.

    Computed lazily as the sum of the chunk widths and cached in
    ``self._width`` for subsequent accesses.
    """
    if self._width is None:
        # Cache the sum so repeated accesses don't re-walk the chunks.
        self._width = sum(chunk.width for chunk in self.chunks)
    return self._width
The number of columns it would take to display this string