content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import socket
import os
import time


def predict_estimator(name, estimator, X, prediction_type='classification'):
    """
    Supplementary function. Builds predictions for one estimator on a
    separate node (or in a separate thread).

    :param str name: classifier name
    :param estimator: estimator
    :type estimator: Classifier or Regressor
    :param X: pandas.DataFrame of shape [n_samples, n_features]
    :param str prediction_type: 'classification' or 'regression' or
        'classification-proba'
    :return: ('success', (name (str), prediction (numpy.array), time (float)))
        or ('fail', (name (str), pid (int), hostname (str), error (Exception)))
    """
    try:
        start = time.time()
        # 'classification' and 'regression' both call predict(); only the
        # proba variant differs.
        if prediction_type == 'classification':
            prediction = estimator.predict(X)
        elif prediction_type == 'classification-proba':
            prediction = estimator.predict_proba(X)
        elif prediction_type == 'regression':
            prediction = estimator.predict(X)
        else:
            raise NotImplementedError("Unknown problem type: {}".format(prediction_type))
        return 'success', (name, prediction, time.time() - start)
    except Exception as e:
        # On any failure report where it happened so the caller can debug
        # the remote worker.
        pid = os.getpid()
        hostname = socket.gethostname()
        return 'fail', (name, pid, hostname, e)
d4aa8744f4a9865a36a50993d17084c503963407
692,064
def snippet(func):
    """Flag ``func`` as a snippet example function via a ``_snippet`` marker."""
    setattr(func, '_snippet', True)
    return func
2eccf19d866af8b44568ed82dc72cca45287d081
692,065
def createDicWith2Col(file1,key1,key2,val_list,header,multiple=True,addDublicate=False,sep="\t", lineVal=False,checkCol=False,CheckcolNumValue=[0,0],checkList=False):
    """
    Build a dict keyed by "<col key1>_<col key2>" from a delimited text file.

    :param file1: path to the delimited input file
    :param key1: column index of the first key part
    :param key2: column index of the second key part
    :param val_list: list of value column indices (when ``multiple`` is True)
        or a single column index (when ``multiple`` is False)
    :param header: if True, skip the first line of the file
    :param multiple: if True, store a list of values taken from ``val_list``
    :param addDublicate: if True, accumulate unique values for repeated keys
        (mutually exclusive with ``multiple``)
    :param sep: field separator
    :param lineVal: if True, store the whole split line as the value
    :param checkCol: if True, only keep rows passing the column filter below
    :param CheckcolNumValue: [columnNumber, ValueOfColumn]
        NOTE(review): mutable default argument — benign here because it is
        only read, never mutated, but worth cleaning up
    :param checkList: if True, ``CheckcolNumValue[1]`` is a container and the
        filter becomes a membership test instead of an equality test
    :return: dict mapping "<key1>_<key2>" -> value(s); empty dict when the
        flag combination is invalid
    """
    if multiple:
        if addDublicate:
            # The two accumulation modes are mutually exclusive.
            print("select only one of multiple or adddublicate, both cannot be used")
            return {}
    else:
        if isinstance(val_list,list):
            # Single-value mode expects one column index, not a list.
            print ("with sigle entry please provide only one value not list")
            return {}
    dic={}
    with open(file1) as filename:
        if header:
            next(filename)
        for line in filename:
            splits=line.strip().split(sep)
            # Composite key built from the two key columns.
            name=splits[key1].strip()+"_"+splits[key2].strip()
            columnChecked=False
            if checkCol:
                if checkList:
                    # Membership test against a container of accepted values.
                    if splits[CheckcolNumValue[0]].strip() in CheckcolNumValue[1]:
                        columnChecked=True
                else:
                    # Exact-match test against a single accepted value.
                    if splits[CheckcolNumValue[0]].strip() == CheckcolNumValue[1]:
                        columnChecked=True
            else:
                # No filtering requested: every row passes.
                columnChecked=True
            if columnChecked:
                if name not in dic:
                    if lineVal:
                        # Store the entire split line as the value.
                        dic[name]=line.strip().split(sep)
                    else:
                        if multiple:
                            # Collect one value per requested column.
                            dic[name]=[]
                            for index in val_list:
                                dic[name].append(splits[index].strip())
                        else:
                            if addDublicate:
                                # Start a list so later duplicates can append.
                                dic[name]=[splits[val_list].strip()]
                            else:
                                dic[name]=splits[val_list].strip()
                else:
                    if addDublicate and not lineVal:
                        # Append only previously unseen values for this key.
                        if splits[val_list].strip() not in dic[name]:
                            dic[name].append(splits[val_list].strip())
                    # else:
                    #     print (name,"is a duplicate entry")
    return dic
4be387f93efc8c9c7571bf24e77173ca9ed84f95
692,066
def new_centroid(lis):
    """
    Recompute centroid coordinates from accumulated cluster sums.

    Each row of ``lis`` holds (sum_x, sum_y, point_count); the new centroid
    is the mean coordinate of the points assigned to that cluster.

    PARAMETERS
    ==========

    lis: ndarray(dtype=int,ndim=2)
        Matrix of clusters: coordinate sums and point counts per cluster.

    RETURNS
    =======

    new: list
        Updated list of [x, y] centroids.
    """
    return [[row[0] / row[2], row[1] / row[2]] for row in lis]
aef4ad4fc44ce2e1bf0f613fb8684f790a4b8e1c
692,067
def remove_disambiguation(doc_id):
    """
    Normalize a document ID (underscores to spaces, '-COLON-' to ':') and
    strip any trailing '-LRB-...' disambiguation suffix.
    """
    normalized = doc_id.replace('_', ' ').replace('-COLON-', ':')
    marker = normalized.find('-LRB-')
    if marker != -1:
        # Drop the marker plus the separator character before it.
        normalized = normalized[:marker - 1]
    return normalized
d31ff2e35f8daff0edbcb93fc10a99a53fb1de4a
692,069
def map_to_slurm(corsika_dict, mapping):
    """Map a dictionary's CORSIKA run keys to SLURM task keys using mapping.

    :param corsika_dict: dict keyed by CORSIKA run number
    :param mapping: dict translating run numbers to SLURM task keys
    :return: new dict with the same values keyed by SLURM task key
    :raises KeyError: if a run number is missing from ``mapping``
    """
    # `iteritems()` is Python 2 only and raises AttributeError on Python 3;
    # `items()` is the correct spelling on both.
    res = {}
    for run_no, content in corsika_dict.items():
        res[mapping[run_no]] = content
    return res
68803b8b4ae4e1fbb9de59f95be0eecd4092043b
692,070
def cors(app):
    """WSGI middleware that appends permissive CORS headers to every response."""
    _CORS_HEADERS = [
        ("Access-Control-Allow-Credentials", "false"),
        ("Access-Control-Allow-Headers", "*"),
        ("Access-Control-Allow-Methods", "GET,POST,HEAD"),
        ("Access-Control-Allow-Origin", "*"),
        ("Access-Control-Max-Age", "86400"),
    ]

    def middleware(environ, start_response):
        # Wrap start_response so the CORS headers ride along with whatever
        # headers the wrapped app produced.
        def wrapped_start(status, headers, exc_info=None):
            headers += _CORS_HEADERS
            return start_response(status, headers, exc_info)

        return app(environ, wrapped_start)

    return middleware
9bfa16ef20114119fcc22b2bbe66570a2803cab9
692,071
def calculate_proportional_dimensions(display, image):
    """Calculate proportional dimensions for given image based on display
    dimensions
    """
    width_for_full_height = int(display.resolution_h * image.width / image.height)
    height_for_full_width = int(display.resolution_w * image.height / image.width)
    if height_for_full_width < display.resolution_h:
        # Image is too wide to fill the display at full width: anchor on
        # the display height instead.
        return (width_for_full_height, display.resolution_h)
    # Common case: anchor on the display width.
    return (display.resolution_w, height_for_full_width)
55d4a667f2fbbaae8bba7141ba7636bb8918e8fa
692,072
def make_id(namespace, name):
    """ pass a language name or host in with a name, get a Package.id str """
    aliases = {"python": "pypi", "r": "cran"}
    ns = namespace.lower()
    ns = aliases.get(ns, ns)
    if ns not in ("cran", "pypi"):
        raise ValueError("Invalid namespace for package id")
    return ns + ":" + name
0bd36db60eec07ee18cbe473085b9513e0b2882f
692,073
def financial_summary_processor(totals, formatter):
    """Pair each total with its formatter entry (label and hierarchy level),
    iterating in formatter order and skipping keys absent from totals.
    """
    return [(totals[key], formatter[key]) for key in formatter if key in totals]
894add4083b32c7d8505b5150ee939f3ba64a16b
692,074
import logging


def local_file_exists(filename):
    """
    Check whether *filename* can be opened on the local filesystem.

    :param filename: String
    :return: True if file exists, else False
    """
    try:
        # Opening (and immediately closing) proves the path is readable.
        with open(filename):
            pass
        return True
    except FileNotFoundError as e:
        logging.error(e)
        return False
7a41d40cabbe9a6591a92af88ba7c88da07e24be
692,075
def add_more_place(context: list, new: list):
    """Append places to context without introducing duplicates.

    Args:
        context: accumulated nearby place dicts (each has a 'place_id' key)
        new: additional place dicts fetched via next-page tokens

    Returns:
        context: the same list, extended with places whose 'place_id' was
        not already present. Duplicates *within* ``new`` are also skipped
        (the original only deduplicated against ``context``, so a repeated
        id inside ``new`` slipped through).
    """
    # A set gives O(1) membership tests; keep it updated as we append so
    # repeats inside `new` are caught too.
    known_ids = {place['place_id'] for place in context}
    for place in new:
        if place['place_id'] in known_ids:
            continue
        context.append(place)
        known_ids.add(place['place_id'])
    return context
994bb812ca52dd12efacb25c51939f11c216136c
692,076
import re


def clean_whitespace(text):
    """Collapse line breaks and tabs into single spaces and trim the ends."""
    # Turn every kind of line break / tab into a plain space first.
    for ch in ('\n', '\r', '\t'):
        text = text.replace(ch, ' ')
    # Trim leading/trailing whitespace, then squeeze repeated spaces.
    return re.sub(' +', ' ', text.strip())
769602b6945155a02f6a59937ede7e9dd655005a
692,077
import ipaddress
import re


def uncompress(addr):
    """Return the uncompressed form of an address, adding leading zeroes
    """
    ip = ipaddress.ip_interface(addr)
    # Class name is e.g. 'IPv4Interface' -> derive 'IPv4'.
    version = re.sub("Interface", "", ip.__class__.__name__)
    if version == "IPv4":
        octets = str(ip.ip).split(".")
        return ".".join("%03d" % int(octet) for octet in octets)
    if version == "IPv6":
        return ip.ip.exploded
dc8b87b2e6a66442965eb7ba2d5208db25b8fc52
692,078
def getAbortState(*args):
    """Access protected class data of the system Abort State.

    This approach is a secure method to access saved system state values.
    If an abort is tripped, the test stand control system will automatically
    enter 'Safe' mode and valve configuration.

    Arguments
    ---------
    args[0] : ucl --> UpperClass Instance

    Returns
    -------
    abort_state : Abort State Integer
        state[0] = system is nominal
        state[1] = system abort
    """
    ucl = args[0]
    return ucl.Controls.AbortState.abort_state
490116de3b9f9de5b4a188e29e68110de992dccf
692,079
def clean(l):
    """ bad coding for python 3 support """
    if isinstance(l, list):
        if not l:
            return []
        # Strip whitespace and drop tabs, then discard empties and lone commas.
        stripped = [item.strip().replace('\t', u"") for item in l]
        return [item for item in stripped if item != u"" and item != ',']
    if isinstance(l, (float, int)):
        # Numbers pass through untouched.
        return l
    return l.strip()
843c13bc0762066bac9f1eab3b36ddc3f78deaa3
692,080
import os


def check_dir_empty(path):
    """Return True if the directory at *path* contains no entries."""
    with os.scandir(path) as entries:
        for _ in entries:
            # First entry found: the directory is not empty.
            return False
    return True
f94eeca0f0dc1f4d4d09db05081d9ad8bb62fc07
692,081
import os


def list_dir(dir_name, prefix=False):
    """ List all directories at a given root

    Args:
        dir_name (str): Path to directory whose folders need to be listed.
        prefix (bool, optional): If true, prepends the path to each result,
            otherwise only returns the name of the directories found.

    Returns:
        directories(List): a list of str.

    Examples:
        ..code-blocks:: python

            import paddlefsl.utils as utils
            dir_name = '~/test'
            dirs = utils.list_dir(dir_name)  # dirs: ['test_module']

    """
    root = os.path.expanduser(dir_name)
    directories = []
    for entry in os.listdir(root):
        if os.path.isdir(os.path.join(root, entry)):
            directories.append(entry)
    if prefix is True:
        directories = [os.path.join(root, d) for d in directories]
    return directories
5a6ba572bebb5ac5c7b4fc876c5c1e8fc9078200
692,082
def get_current_connection_name(context):
    """
    Return the name of the current connection or None if there is no
    current connection
    """
    server = context.pywbem_server
    if not server:
        return None
    return server.name
4ecfaed7eca01a9631f1931f5c795f416a5ff6c0
692,083
def json_parts(json_line):
    """ Checks for "label" and "description" components of Json command.

    PArameters:
        json_line (str): The Json command being passed into the function.

    Returns:
        Arr [Bool, Bool]: Is there a label command? Is there a description
        command?
    """
    return ["\"label\"" in json_line, "\"description\"" in json_line]
168f6cd064b7f443539b3cd8a187a7234085ef7b
692,084
from typing import Union
from datetime import datetime


def create_url(date: str) -> Union[str, None]:
    """Build the satellite-image URL for *date*.

    Args:
        date (str): date in YYYYMMDD form

    Returns:
        Union[str, None]: the URL, or None when *date* does not parse
    """
    try:
        parsed = datetime.strptime(date, "%Y%m%d")
    except ValueError:
        return None
    base_url = "http://weather.is.kochi-u.ac.jp/sat/ALL/"
    return base_url + parsed.strftime("%Y/%m/%d/")
2dd0e79b4db16b713e2697e34a2c502fdac09eb9
692,085
import re


def remove_link(text):
    """
    input: string
        Remove links from input (words matching 'htt...').
    output: string
    """
    pattern = re.compile('htt.*', re.IGNORECASE)
    kept = [word for word in text.split(' ') if not pattern.search(word)]
    return ' '.join(kept)
21f4eff2c28dd409bc66f3a254530e052db17cd9
692,086
def get_sr_otkl(x_list, y_list, x_func=lambda x: x, y_func=lambda y: y):
    """
    Compute the (population) standard deviations S(x) and S(y).

    Fixes over the previous version:
    * the deviation terms subtracted the *sum* of the values instead of the
      mean, which is not a standard deviation at all;
    * the sums only included every 5th element (a stray ``[::5]`` slice);
    * ``x_func``/``y_func`` were applied when summing but not in the
      deviation terms, mixing transformed and raw values.

    :param x_list: sequence of x samples
    :param y_list: sequence of y samples
    :param x_func: optional transform applied to each x before computing
    :param y_func: optional transform applied to each y before computing
    :return: tuple (S(x), S(y))
    """
    n = len(x_list)
    xs = [x_func(x) for x in x_list]
    ys = [y_func(y) for y in y_list]
    mean_x = sum(xs) / n
    mean_y = sum(ys) / n
    otkl_x = (sum((x - mean_x) ** 2 for x in xs) / n) ** 0.5
    otkl_y = (sum((y - mean_y) ** 2 for y in ys) / n) ** 0.5
    return otkl_x, otkl_y
553f0bccdb2aba8893e76b914f575011203e1263
692,087
def id_func(param):
    """ Give a nice parameter name to the generated function parameters """
    # Dicts may carry an explicit display name.
    if isinstance(param, dict) and ":name:" in param:
        return param[":name:"]
    label = str(param)
    if len(label) > 25:
        # Keep the head and the last two characters, elide the middle.
        label = label[:20] + "..." + label[-2:]
    return label
fdee82949118f7d610ceb913e222761ac86f5b44
692,088
def get_name(layer_name, counters):
    """ utlity for keeping track of layer names """
    # setdefault initialises unseen layer names at 0.
    count = counters.setdefault(layer_name, 0)
    counters[layer_name] = count + 1
    return '%s_%d' % (layer_name, count)
61fb60b5bafae0e5bc5d57e6092ba48f437c098b
692,089
import subprocess


def get_repo_root():
    """Gets the root of the git repository.

    This allows the script to work regardless of the current working
    directory (as long as the current working directory is *somewhere* in
    the repository in question).

    Returns
    -------
    str
        The path to the current repository

    Raises
    ------
    subprocess.CalledProcessError
        If git fails (e.g. not inside a repository). The previous version
        used ``assert`` on the return code, which is silently stripped when
        Python runs with ``-O``.
    """
    output = subprocess.run(['git', 'rev-parse', '--show-toplevel'],
                            stdout=subprocess.PIPE, check=True)
    return output.stdout.decode().strip()
104c85ef2f3402bfef33b5ca767dabf58ea7b99c
692,090
def vacation_finance_help(date_start):
    """
    Finance help for vacation one time in year for fill in right place in
    vacation blank.

    :param date_start: date string whose last 4 characters are the year
    :return: the filled-in finance-help paragraph, or a 204-char blank line
    """
    year = date_start[-4:]  # because finance help paid only once in year on start date
    finance_help = str(input('Материальная помощь? Да/Нет '))
    # Compare against lowercase variants only: the old list mixed in
    # capitalized entries ('Да', 'Yes', 'Y') that could never match the
    # lower-cased input, so e.g. typing 'Yes' was silently rejected.
    if finance_help.lower() in ['да', 'д', 'yes', 'y', '1']:
        finance_help = f'''Прошу выплатить единовременную материальную помощь к отпуску на \
оздоровление за {year} год. В {year} году материальную помощь не получал.'''
    else:
        finance_help = '_' * 204
    return finance_help
5503506b5dab63f05bfd4bfec02c40480602bc42
692,091
import json


def to_json(config):
    """Converts a JSON-serializable configuration object to a JSON string."""
    # Prefer the object's own serializer when it provides one.
    serializer = getattr(config, "to_json", None)
    if callable(serializer):
        return serializer(indent=2)
    return json.dumps(config, indent=2)
1fe7d27cc944abeb9830b11c31c424a9266e63eb
692,092
def METARwind(sknt, drct, gust):
    """convert to METAR"""
    direction = drct
    # Directions ending in 5 are rounded down to the nearest 10 degrees.
    if str(direction)[-1] == "5":
        direction -= 5
    parts = ["%03.0f%02.0f" % (direction, sknt)]
    if gust is not None:
        parts.append("G%02.0f" % gust)
    parts.append("KT")
    return "".join(parts)
c68350f60dc055db4a7c20e704a653b4acc2c89e
692,093
import warnings def _label_compcor(confounds_raw, compcor_suffix, n_compcor): """Builds list for the number of compcor components.""" compcor_cols = [] for nn in range(n_compcor + 1): nn_str = str(nn).zfill(2) compcor_col = compcor_suffix + "_comp_cor_" + nn_str if compcor_col not in confounds_raw.columns: warnings.warn(f"could not find any confound with the key {compcor_col}") else: compcor_cols.append(compcor_col) return compcor_cols
437cfb03a51e26bd94cd11786eb05df202771c6e
692,094
def makeFileName(name):
    """Makes a string serve better as a file name."""
    # Non-ASCII characters become '?' via the 'replace' codec, then both
    # '?' and path separators collapse to underscores.
    ascii_only = name.encode('ascii', 'replace').decode('ascii')
    return ascii_only.replace('?', '_').replace('/', '_')
a2c3be6c335fe0ef61d0ec74ef8d1e7b9e556622
692,095
def extractDidParts(did, method="dad"):
    """
    Parses and returns keystr from did of the form "did:<method>:<keystr>".

    raises ValueError if parsing fails
    """
    parts = did.split(":")
    if len(parts) != 3:  # correct did format pre:method:keystr
        raise ValueError("Invalid DID value")
    pre, meth, keystr = parts
    if pre != "did":
        raise ValueError("Invalid DID identifier")
    if meth != method:
        raise ValueError("Invalid DID method")
    return keystr
66975b935576f77c223ac8d59a3fffc648afd139
692,096
def generate_random_node_features(random, num_batches, num_nodes, num_node_features):
    """Generates a random node feature matrix

    Args:
        random: The NumPy random number generator to use.
        num_batches: The number of batches to generate (use 'None' for no
            batch dimension).
        num_nodes: The number of nodes to generate.
        num_node_features: The number of features per node.

    Returns:
        A NumPy array of shape (num_batches, num_nodes, num_node_features)
        with random values between -1 and 1.
    """
    shape = (num_nodes, num_node_features)
    if num_batches:
        shape = (num_batches,) + shape
    # rand() yields [0, 1); rescale to [-1, 1).
    return random.rand(*shape) * 2.0 - 1.0
61a7688d4659078a187960590111e14dc2f04815
692,097
def author_id_string(aob):
    """
    Produce a string representation of an author id

    :param aob: author object
    :return: string representation of author id
    """
    author_type = aob.get("type")
    author_id = aob.get("id")
    return u"{x}: {y}".format(x=author_type, y=author_id)
be8aac97538fc2146a79f4ac53aa35eb6096045d
692,099
def path_it(cache_dir, name):
    """Generate a .vcf path from name, reusing names that already point
    inside the cache directory.
    """
    already_resolved = name.endswith(".vcf") and str(cache_dir) in name
    if already_resolved:
        return name
    return cache_dir.joinpath(name + ".vcf")
30684f31de674b6324c37ce677950b2adebabd80
692,100
def extract_digest_key_date(digest_s3_key):
    """Extract the timestamp portion of a manifest file.

    Manifest file names take the following form:

    AWSLogs/{account}/CloudTrail-Digest/{region}/{ymd}/{account}_CloudTrail \
    -Digest_{region}_{name}_region_{date}.json.gz

    The timestamp is the 16 characters immediately preceding the
    ".json.gz" suffix.
    """
    end = len(digest_s3_key) - 8
    return digest_s3_key[end - 16:end]
e0264fc30f491685deb177ea3c8d6e483b46537f
692,101
import json


def load_token_dict(dict_fn):
    """
    Load a token dictionary from a JSON file.

    :param dict_fn: path to the JSON dictionary file
    :return: input_token / target_token mapping
    """
    with open(dict_fn, 'r', encoding='utf-8') as fh:
        return json.load(fh)
eb498fc219c68432e1b008c65e11fabe61284c11
692,102
import math


def belows_num(mult):
    """ number of multisets strictly below mult """
    # Mixed-radix accumulation: each position contributes its count in a
    # base of (count + 1).
    total = 0
    for args in mult.sp.iter_all():
        count = mult(*args)
        total = total * (count + 1) + count
    return math.floor(total)
acb8c6904c3e303b6689b04c4c2d8591c686a5bc
692,103
def get_intersection_area(box1, box2):
    """ compute intersection area of box1 and box2 (both are 4 dim box
    coordinates in [x1, y1, x2, y2] format) """
    left = max(box1[0], box2[0])
    top = max(box1[1], box2[1])
    right = min(box1[2], box2[2])
    bottom = min(box1[3], box2[3])
    # Negative extents mean no overlap on that axis.
    return max(0, right - left) * max(0, bottom - top)
d3c420597533236a210640f9d4b37a754c6f5a33
692,104
import warnings
import copy


def check_budget_manager(
    budget,
    budget_manager,
    default_budget_manager_class,
    default_budget_manager_dict=None,
):
    """Validate if budget manager is a budgetmanager class and create a copy
    'budget_manager_'.
    """
    if default_budget_manager_dict is None:
        default_budget_manager_dict = {}
    if budget_manager is None:
        # No manager supplied: build the default one with the given budget.
        return default_budget_manager_class(
            budget=budget, **default_budget_manager_dict
        )
    if budget is not None and budget != budget_manager.budget:
        warnings.warn(
            "budgetmanager is already given such that the budget "
            "is not used. The given budget differs from the "
            "budget_managers budget."
        )
    # Deep copy so the caller's manager is never mutated downstream.
    return copy.deepcopy(budget_manager)
b2206704257af4b6b0f981fba4b4b20b8389cd24
692,106
def hmsm_to_days(hour=0, min=0, sec=0, micro=0):
    """
    Convert hours, minutes, seconds, and microseconds to fractional days.

    Parameters
    ----------
    hour : int, optional
        Hour number. Defaults to 0.
    min : int, optional
        Minute number. Defaults to 0.
    sec : int, optional
        Second number. Defaults to 0.
    micro : int, optional
        Microsecond number. Defaults to 0.

    Returns
    -------
    days : float
        Fractional days.

    Examples
    --------
    >>> hmsm_to_days(hour=6)
    0.25

    """
    # Fold units upward: microseconds into seconds, seconds into minutes,
    # minutes into hours, then hours into days.
    fraction = sec + micro / 1.e6
    fraction = min + fraction / 60.
    fraction = hour + fraction / 60.
    return fraction / 24.
b32b02770fa5bee335e24b8490ae3cb1151130fc
692,108
import argparse


def parse_arguments(argv):
    """Parse command line arguments.

    :param argv: list of argument strings (e.g. sys.argv[1:])
    :return: argparse.Namespace with input_file, model_path, scaler_path,
        threshold
    """
    parser = argparse.ArgumentParser(prog='MalNet')
    # (flag, add_argument kwargs) pairs, in display order.
    options = (
        ('--input_file',
         dict(dest='input_file', type=str, help='Path to PE file.')),
        ('--model_path',
         dict(dest='model_path', type=str, help='Path to model directory.')),
        ('--scaler_path',
         dict(dest='scaler_path', type=str,
              help='Path to the scaler object file.')),
        ('--threshold',
         dict(dest='threshold', type=float, default=0.273,
              help='Threshold to distinguish benign and malicous.')),
    )
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args(argv)
2957036907b005cb1d893a5119200f0995d71d2d
692,110
import curses


def load_keys() -> dict:
    """
    Load all keyboard keys available to user in program

    Usage: KEYS['DOWN']

    Parameters:
        None

    Returns:
        KEYS (dict): Dictionary of references to curses keys
    """
    # Each action maps to every key code that triggers it (vim-style
    # letters alongside the arrow keys).
    return {
        "ENTER": (curses.KEY_ENTER, ord('\n'), ord('\r')),
        "SPACE": (32, ord(' ')),
        "UP": (curses.KEY_UP, ord('k')),
        "DOWN": (curses.KEY_DOWN, ord('j')),
        "RIGHT": (curses.KEY_RIGHT, ord('l')),
        "LEFT": (curses.KEY_LEFT, ord('h')),
        "PAUSE": (ord('p'), ord('P')),
        "RESUME": (ord('r'), ord('R')),
        "QUIT": (27, ord('q'), ord('Q')),
    }
3d81af1777db46923c889659522be77d89b178d9
692,111
def pprTXRecord(rec):
    """Pretty print a TX record"""
    count = rec.nsamples
    return "nsamples={nsamples}".format(nsamples=count)
3c9efe3bdf4f1bd1e1d7740ecba836bcef6e7c84
692,112
def index_of_(string, sub, start, length):
    """:yaql:indexOf

    Returns an index of first occurrence sub in string beginning from start
    ending with start+length. -1 is a return value if there is no any
    occurrence.

    :signature: string.indexOf(sub, start, length)
    :receiverArg string: input string
    :argType string: string
    :arg sub: substring to find in string
    :argType sub: string
    :arg start: index to start search with, 0 by default
    :argType start: integer
    :arg length: length of string to find substring in
    :argType length: integer
    :returnType: integer

    .. code::

        yaql> "cabcdab".indexOf("bc", 2, 2)
        2
    """
    # Negative start counts from the end, as in normal Python indexing.
    start = start if start >= 0 else start + len(string)
    # Negative length means "search until the end of the string".
    span = length if length >= 0 else len(string) - start
    return string.find(sub, start, start + span)
3b8992077d8f7fd3ed8851258fb7bd6c7bb10928
692,113
def get_fonts():
    """Return the font configuration mapping used by the UI."""
    return {'listbox_font': ('Courier', 14)}
4647ed8f05ff8de355316f250bdc991d2e08e912
692,114
def matches(top, symbol):
    """
    Return True when the closing *symbol* pairs with the opening bracket
    *top* taken from the stack.
    """
    # The two literals are index-aligned: '(' pairs with ')', etc.
    return "({[".index(top) == ")}]".index(symbol)
c3653c3bf28ff0f5ca19fa79ee67cca0d0f24df4
692,115
def ben_type(exp):
    """
    Given a bencoded expression, returns what type it is.
    """
    leading = exp[0]
    if leading == "i":
        return int
    if leading.isdigit():
        # Strings are length-prefixed, so they start with a digit.
        return str
    if leading == "l":
        return list
    if leading == "d":
        return dict
bdb79435145baad9de9a1740aae8e6a880ae2d5c
692,116
def apply_to(d, f):
    """
    Apply a function to dictionary-like object values, recursively.
    """
    for key in d:
        value = d[key]
        if isinstance(value, dict):
            # Descend into nested dicts rather than transforming them.
            d[key] = apply_to(value, f)
        else:
            d[key] = f(value)
    return d
59e69643730971e5c904166d21c68fba4b2128f0
692,117
def get_producer_map(ssa):
    """
    Return dict from versioned blob to (i, j), where i is index of producer
    op, j is the index of output of that op.
    """
    producer_map = {}
    for op_index, op in enumerate(ssa):
        # op[1] holds the op's output blobs.
        for out_index, versioned_blob in enumerate(op[1]):
            producer_map[versioned_blob] = (op_index, out_index)
    return producer_map
746b218cd406bbcc3a25924cf86b5ba4d93d1e85
692,118
import sys
import functools


def with_content(content, **kwargs):
    """
    Decorator factory: set ``self.document_content`` (and optional config
    attributes) on the test instance before running the wrapped test.

    Always use a bare 'python' in the *content* string. It will be
    replaced with ``sys.executable``.

    :param content: document body to install on the test instance
    :param kwargs: optional 'ignore_warnings' plus any confoverrides; the
        remaining kwargs are stored as ``self.confoverrides``
    """
    if 'python' in content:
        # XXX: This probably breaks if there are spaces in sys.executable.
        content = content.replace('python', sys.executable)

    def factory(f):
        @functools.wraps(f)
        def w(self):
            self.document_content = content
            if kwargs:
                if 'ignore_warnings' in kwargs:
                    # NOTE(review): this getattr appears to assert that the
                    # attribute already exists on the instance (it raises
                    # AttributeError otherwise) — confirm that is intended.
                    getattr(self, 'ignore_warnings')
                    self.ignore_warnings = kwargs.pop("ignore_warnings")
                # Same existence-check pattern for 'confoverrides'.
                getattr(self, 'confoverrides')
                self.confoverrides = kwargs
            f(self)
        return w
    return factory
85d68c4cdf0c811fdca998f3d353187bef42387e
692,119
import uuid


def uniqueFile():
    """Generate a unique file name based upon a random UUID

    Returns:
        str: Random UUID
    """
    return "{}".format(uuid.uuid4())
cc7096cebedadf27dd7aec63c54d572a52b51299
692,121
from typing import Dict
from typing import Tuple
import collections


def getBinsFromGenomeSize(
        genome_dict: Dict[str, int],
        bin_size: int
) -> Dict[Tuple[str, int, int], int]:
    """Create a dictionary containing all equal-sized bins across the genome.

    Attributes:
        genome_dict: a dictionary of chromosome sizes
        bin_size: bin size (i.e. 5000)

    Return:
        A dictionary mapping (chrom, start, end) bins to a 1-based index.
    """
    bin_dict = collections.OrderedDict()
    index = 1
    for chrom, size in genome_dict.items():
        # 1-based, inclusive bins; the final bin is clipped to the
        # chromosome end.
        for start in range(1, size, bin_size):
            end = min(start + bin_size - 1, size)
            bin_dict[(chrom, start, end)] = index
            index += 1
    return bin_dict
b05ba53f77359ebbb27d210e398f050b3cbd4fb0
692,122
def check_multi_location(alignment, tags, log=None):
    """
    See if the read was mapped at multiple locations (tag 'XA' present).
    If so, mark it as QC-fail, count it in the optional log and return True.

    :param alignment: the read
    :param tags: alignment tags as dict
    :param log: optional logger with a ``multi(alignment)`` method
    :return: True when multi-mapped, False otherwise
    """
    if 'XA' not in tags:
        return False
    alignment.is_qcfail = True
    if log:
        log.multi(alignment)
    return True
e8989ee5b5f60b79dbb2780b8c5505c530080929
692,123
from math import factorial


def permutations(n, k):
    """
    nPk — the number of ordered arrangements of k items chosen from n.

    >>> permutations(52, 2)
    2652
    """
    remaining = factorial(n - k)
    return factorial(n) // remaining
fa9eca7c0cc561309a6814cfa264472b25a98096
692,124
def sign2binary(y, zero_as_plus=False):
    """
    Convert signs {-x,x} -> binary values {0,1}

    Parameters
    ----------
    y: np.array (n,c)
        float/int (-inf,inf)
    zero_as_plus: bool
        if True, convert 0 -> 1, else 0 -> 0

    Returns
    -------
    np.array (n,c) int {0,1}
    """
    positive = (y >= 0) if zero_as_plus else (y > 0)
    return positive.astype(int)
8433e885af244306b43b23318d4c1345732f58ea
692,125
def regions():
    """
    Returns the list of supported region codes.

    # Returns:
        list, str

    # Example:

        from datahub_core import data

        regions = data.regions
        print(regions)
        >> ['NAM', 'EMEA', 'LATAM', 'APAC' ]
    """
    return ['NAM', 'EMEA', 'LATAM', 'APAC']
26ea18dd0346b2b0c5fea4842ad37c51aea73f7b
692,126
import os


def makedir(save):
    """
    Decorator: ensure the 'datasets' directory exists under the current
    working directory before invoking the wrapped save function.

    Args:
        save: function that persists data into the 'datasets' directory
    """
    def wrapper(*args):
        directory = os.path.join(os.getcwd(), 'datasets')
        # Create the target directory only when it is missing.
        if not os.path.exists(directory):
            os.makedirs(directory)
        save(*args)
    return wrapper
d967e7387cac6cc0ad2c7253e0be634b6f0172c0
692,127
import textwrap


def word_wrap_tree(parented_tree, width=0):
    """line-wrap an NLTK ParentedTree for pretty-printing

    :param parented_tree: an nltk.tree.ParentedTree; its leaves are
        rewritten in place
    :param width: maximum line width; 0 (the default) leaves the tree
        untouched
    :return: the same tree object, mutated when width != 0
    """
    if width != 0:
        for i, leaf_text in enumerate(parented_tree.leaves()):
            # Normalize indentation before wrapping each leaf.
            dedented_text = textwrap.dedent(leaf_text).strip()
            # leaf_treeposition(i) addresses the i-th leaf for in-place
            # replacement.
            parented_tree[parented_tree.leaf_treeposition(i)] = textwrap.fill(dedented_text, width=width)
    return parented_tree
299f715278056ea3c725eb5c7e95e6690609f466
692,128
import sys


def HST_info(info_file):
    """Read the given info file and returns a dictionary containing the data
    size and type.

    .. note::

      The first line of the file must begin by ! PyHST or directly by
      NUM_X. Also note that if the data type is not specified, it will not
      be present in the dictionary.

    :param str info_file: path to the ascii file to read.
    :return: a dictionary with the values for x_dim, y_dim, z_dim and
        data_type if needed.
    """
    info_values = {}
    # 'with' guarantees the handle is closed even on the error path; the
    # previous version opened the file and never closed it.
    with open(info_file, 'r') as f:
        # the first line must contain PyHST or NUM_X
        line = f.readline()
        if line.startswith('! PyHST'):
            # read an extra line
            line = f.readline()
        elif line.startswith('NUM_X'):
            pass
        else:
            sys.exit('The file does not seem to be a PyHST info file')
        info_values['x_dim'] = int(line.split()[2])
        info_values['y_dim'] = int(f.readline().split()[2])
        info_values['z_dim'] = int(f.readline().split()[2])
        try:
            info_values['data_type'] = f.readline().split()[2]
        except IndexError:
            # data type line absent: leave the key out entirely
            pass
    return info_values
596b5ed67871ace5ed3628f4a8c9a36588447347
692,129
def read_pytorch_network_parameters_4_loihi(network, use_learn_encoder=True):
    """
    Read parameters from pytorch network

    :param network: pytorch popsan
    :param use_learn_encoder: if true read from network with learn encoder
    :return: encoder mean and var, hidden layer weights and biases,
        decoder weights and biases
    """
    if use_learn_encoder:
        encoder_mean = network.encoder.mean.data.numpy()
        encoder_var = network.encoder.std.data.numpy()
        # The learned encoder stores a standard deviation; square it to
        # report a variance like the non-learned branch.
        encoder_var = encoder_var**2
    else:
        # NOTE(review): this branch reads `.var` directly (no `.numpy()`) —
        # presumably already a plain array; confirm against the encoder class.
        encoder_mean = network.encoder.mean.numpy()
        encoder_var = network.encoder.var
    layer_weights, layer_bias = [], []
    # Collect per-layer weights/biases in forward order.
    for i, fc in enumerate(network.snn.hidden_layers, 0):
        tmp_weights = fc.weight.data.numpy()
        tmp_bias = fc.bias.data.numpy()
        layer_weights.append(tmp_weights)
        layer_bias.append(tmp_bias)
    # The output population layer is appended after the hidden layers.
    layer_weights.append(network.snn.out_pop_layer.weight.data.numpy())
    layer_bias.append(network.snn.out_pop_layer.bias.data.numpy())
    decoder_weights = network.decoder.decoder.weight.data.numpy()
    decoder_bias = network.decoder.decoder.bias.data.numpy()
    return encoder_mean, encoder_var, layer_weights, layer_bias, decoder_weights, decoder_bias
f1ead4e5648cdad46774085e0357bc8785c015b0
692,130
def build_collection_representation(model, description):
    """Enclose collection description into a type-describing block."""
    # pylint: disable=protected-access
    return {
        model.__name__: description,
        "selfLink": None,  # not implemented yet
    }
d0852c0b07f1cb410a94c090dafc9d3048e28d95
692,131
def weight(T):
    """Specify the weight for tanimoto sampling.

    Parameters
    ----------
    T : float
        Tanimoto similarity score between 0 and 1

    Returns
    -------
    Float
        New value for sampling (the similarity raised to the 4th power)
    """
    return pow(T, 4)
d115657620055c039767a4f283fd958dac47f1b8
692,132
def int2str(index):
    """Convert a well index into a human readable address (e.g. 0 -> 'A1').

    :param index: 0-based well index on an 8x12 (96-well) plate
    :return: address string: row letter A-H plus 1-based column number

    Fix: ``index / 12`` is float division on Python 3 and raises
    ``TypeError: string indices must be integers`` — floor division
    ``//`` is required.
    """
    row = u'ABCDEFGH'[index // 12]
    col = (index % 12) + 1
    return u'%s%s' % (row, col)
9af97ae624ee2aa67f01e22da35018b823fe3bed
692,133
import os def _get_filename(path): """Construct a usable filename for outputs""" dname = "." fname = "bootchart" if path != None: if os.path.isdir(path): dname = path else: fname = path return os.path.join(dname, fname)
7cf258e9f53289234b74b70919ffb6d41e58fe4b
692,134
def compression_type_of_files(files):
    """Return GZIP or None for the compression type of the files."""
    # Any non-.gz entry means the set is not uniformly gzip-compressed.
    for name in files:
        if not name.endswith('.gz'):
            return None
    return 'GZIP'
d8c471829d277094eb804f28b02692f8b707ca7b
692,135
def add_missing_filter_band( datamodel ):
    """
    Ensure the given data model contains FILTER, CHANNEL and BAND keywords.

    :Parameters:

    datamodel: MiriDataModel
        The calibration data model whose metadata is to be updated.

    :Returns:

    nreplaced: int
        The number of metadata keywords replaced (0, 1, 2 or 3).

    :Raises:

    TypeError
        If the data model has no instrument metadata. (Fixed a typo here:
        ``__name_`` raised AttributeError instead of reporting the class
        name via ``__name__``.)
    """
    nreplaced = 0
    if hasattr(datamodel, 'meta') and hasattr(datamodel.meta, 'instrument'):
        # Replace any missing filter, channel or band attribute with 'N/A'
        if datamodel.meta.instrument.filter is None:
            datamodel.meta.instrument.filter = 'N/A'
            nreplaced += 1
        if datamodel.meta.instrument.channel is None:
            datamodel.meta.instrument.channel = 'N/A'
            nreplaced += 1
        if datamodel.meta.instrument.band is None:
            datamodel.meta.instrument.band = 'N/A'
            nreplaced += 1
    else:
        strg = "MIRI instrument metadata attributes missing from data model %s" % \
            datamodel.__class__.__name__
        raise TypeError(strg)
    return nreplaced
3cd6d0c94f6a87580f118900c60fd6248a853040
692,136
import requests
import json


def _query_ID_converter(ext_id):
    """
    Converts ePMC ext_id into PMID. API description here -
    https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/

    Parameters
    ----------
    ext_id : String
        ePMC identifier used to retrieve the relevant entry. Format is
        prefix of 'PMC' followed by an integer.

    Returns
    -------
    response_json : dict
        json returned by the API containing the relevant information. Can
        be passed to :func:`~pyre.convert_PMCID_to_PMID`

    See Also
    --------
    * :func:`~pyre.convert_PMCID_to_PMID`
    """
    service_root_url = "https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?ids="
    # Assemble the query string piecewise; the final URL is identical to
    # the one the previous implementation produced.
    request_url = (
        service_root_url
        + ext_id
        + r"&format=" + "json"
        + r"&tool=" + "pyresid"
        + r"&email=" + "robert.firth@stfc.ac.uk"
    )
    r = requests.get(request_url)
    return json.loads(r.text)
c7e19dbe162cdcb22d51e12013f3645f8baf67d1
692,138
import yaml


def parse_str(string: str):
    """Parse using yaml for true, false, etc.

    Falls back to returning the input unchanged when parsing fails.
    """
    try:
        return yaml.safe_load(string)
    except Exception:
        return string
37ee0d69fc0b28abd2ba71a81522447efe3c4dbc
692,139
import pkg_resources


def get_data_file_path(rel_path):
    """
    Get the path to a data file. Normally these can be found in
    `openff/cli/data/` but are likely hidden somewhere else in
    site-packages after installation.

    Parameters
    ----------
    rel_path : str
        Name of the file to find the full path of
    """
    resource = "data/" + rel_path
    return pkg_resources.resource_filename("openff.cli", resource)
d76eac8d45b23973f1ae2b97937e3bd740fe17d9
692,140
def dict_to_title(argmap):
    """Converts a map of the relevant args to a title.

    Keys are joined in dict (insertion) order, which in Python 3 is
    deterministic; the 'cmd' key is excluded. Values must be strings.

    Fix: removed a leftover debug ``print(argmap)`` that polluted stdout
    on every call.
    """
    return "_".join(k + "=" + v for k, v in argmap.items() if k != "cmd")
67d1a6086bd7e773aefb874c8418082f0ae4d8a2
692,141
def loadData(filename):
    """Read a data set from a space-separated text file.

    Each line has the format ``<country> <v1> <v2> ...``: the first column is
    a country name, the remaining columns are integers.

    :param filename: path of the input file
    :return: (dataSet, id2country) where dataSet is a list of int lists and
        id2country maps each row index to its country name
    """
    dataSet = []
    id2country = []  # maps row index -> country name
    with open(filename) as handle:
        for line in handle:
            fields = line.strip().split(' ')
            id2country.append(fields[0])
            # Drop the leading country column; the rest are integer values.
            dataSet.append([int(value) for value in fields[1:]])
    return dataSet, id2country
a2216280fed29f3c74a7c65d51ab7e9e5f8911df
692,142
def do_secrets_conflict(a: dict, b: dict) -> bool:
    """Check whether secrets in two dicts returned by get_secrets() conflict.

    Two dicts conflict when they share a key whose "name" entries differ.

    :return: True if secrets conflict, False otherwise.
    :rtype: bool
    """
    shared_keys = set(a) & set(b)
    return any(a[key]["name"] != b[key]["name"] for key in shared_keys)
609d078f64eb612400e3082d15c0854a312a9863
692,143
def get_table(content):
    """Split tab-separated text into a header (list) and body (2d list).

    Carriage returns are stripped first. Body rows shorter than the header
    are dropped; longer rows are truncated to the header's width.
    """
    lines = content.replace('\r', '').split('\n')
    rows = [line.strip().split('\t') for line in lines]
    header, *rest = rows
    width = len(header)
    body = [row[:width] for row in rest if len(row) >= width]
    return header, body
5990e218ad9b9bf28f23af082a8be4f48c6bc9ea
692,144
def cagr(dff):
    """Compute the Compound Annual Growth Rate of a balance series.

    The first and last entries are treated as the starting and ending
    balances, and the number of periods is ``len(dff) - 1``.

    :param dff: pandas Series of balances (positional access via .iat)
    :return: formatted percentage string such as ``"10.0%"``
    """
    starting_balance = dff.iat[0]
    ending_balance = dff.iat[-1]
    periods = len(dff) - 1
    growth_rate = (ending_balance / starting_balance) ** (1 / periods) - 1
    return "{:.1%}".format(growth_rate)
8e90a0a4feb34281c55657a39217707a5a8b8f43
692,145
def in_polygon_np(vertices, point, border_value=True):
    """
    Return True/False depending on whether a pixel lies inside a polygon
    (even-odd ray casting).

    @param vertices: Nx2 sequence / numpy ndarray of polygon vertices
    @param point: 2-tuple (or list) of integers
    @param border_value: boolean returned when the point coincides with a
        polygon vertex
    """
    px, py = point[0], point[1]
    # A point that coincides with any vertex counts as "on the border".
    for vx, vy in vertices:
        if vx == px and vy == py:
            return border_value
    crossings = 0
    x1, y1 = vertices[-1]  # start the edge walk from the closing edge
    for x2, y2 in vertices:
        # Cast a horizontal ray to the right; count edge crossings.
        if min(y1, y2) < py <= max(y1, y2) and px <= max(x1, x2) and y1 != y2:
            x_cross = (py - y1) * (x2 - x1) / (y2 - y1) + x1
            if x1 == x2 or px <= x_cross:
                crossings += 1
        x1, y1 = x2, y2
    # Odd number of crossings => inside.
    return crossings % 2 == 1
eb493afa5efec938299c7c1e83c0190f63ef1eee
692,146
from datetime import datetime, timezone


def timestamp_now():
    """Return the current UTC time as a string in ``yymmdd-HHMMSS`` format.

    BUG FIX: the original docstring claimed the format was
    ``yyyy/mm/dd-hh/mm/ss``, but the strftime pattern actually produces a
    two-digit year with no separators, e.g. ``240131-235959``.
    """
    # datetime.utcnow() is deprecated; an aware "now" in UTC formats the same.
    return datetime.now(timezone.utc).strftime("%y%m%d-%H%M%S")
08266425bf223382170dbea6edfd3330abc21e51
692,148
def measure_distance(cell1, cell2):
    """
    Euclidean distance between two cells on a 10x10 torus.

    The axis distances of the cartesian plane are first wrapped onto the
    torus (so neither exceeds half the grid size, 5), then combined with
    Pythagoras.
    """
    def _wrap(delta):
        # On a size-10 torus the shortest distance along one axis is <= 5.
        return 10 - delta if delta > 5 else delta

    x1, y1 = cell1.location
    x2, y2 = cell2.location
    dx = _wrap(abs(x1 - x2))
    dy = _wrap(abs(y1 - y2))
    return (dx * dx + dy * dy) ** 0.5
1c57dca72fcca90ad40c4099ee815feb227da3b3
692,149
def home(request):
    """The default home view of Websauna.

    You should really never see this, as this view should only be active
    during Websauna test runs and the dev server. Returns an empty template
    context.
    """
    context = {}
    return context
52e76d380148737fa62787276edc65cfc366aa9b
692,150
import math


def pol2cart(rho, phi):
    """
    Convert from polar (rho, phi) to cartesian (x, y) coordinates.

    phi is given in degrees.
    """
    phi_radians = math.radians(phi)
    return rho * math.cos(phi_radians), rho * math.sin(phi_radians)
f8d3c941ed936e20f1c33c9e81888a55b2b4a4ea
692,151
from typing import Any
import json


def compact_json(obj: Any) -> str:
    """Encode *obj* into JSON with no whitespace after separators."""
    compact_separators = (",", ":")
    return json.dumps(obj, separators=compact_separators)
0d0ad626eabea97e547f5181a083ba0646c4d83b
692,152
def compute_number(attributes):
    """
    Compute the number of a mention.

    Args:
        attributes (dict(str, object)): Attributes of the mention, must
            contain values for "type", "head_index" and "pos".

    Returns:
        str: the number of the mention -- one of UNKNOWN, SINGULAR and
        PLURAL.
    """
    pos = attributes["pos"][attributes["head_index"]]
    mention_type = attributes["type"]
    if mention_type == "PRO":
        singular = attributes["citation_form"] in ["i", "you", "he", "she", "it"]
        number = "SINGULAR" if singular else "PLURAL"
    elif mention_type == "DEM":
        demonstrative = attributes["head"][0].lower()
        number = "SINGULAR" if demonstrative in ["this", "that"] else "PLURAL"
    elif mention_type in ["NOM", "NAM"]:
        number = "PLURAL" if pos in ("NNS", "NNPS") else "SINGULAR"
    else:
        number = "UNKNOWN"
    # A coordinating-conjunction head always yields a coordinated mention.
    return "PLURAL" if pos == "CC" else number
3259735c01d6900f04526a08ddd6467e7a380627
692,153
def check_buffering_complete(torrent_client, params=None):
    """
    Check if buffering is complete.

    :param torrent_client: client object exposing is_buffering_complete
    :param params: unused; kept for call-signature compatibility
    :return: bool - buffering status
    """
    status = torrent_client.is_buffering_complete
    return status
d18e8c247c2ff98f5440dd0c361b67d36ff16ce8
692,154
import torch


def decode(loc, dbox_list):
    """
    Convert default boxes (DBoxes) into bounding boxes (BBoxes) using the
    predicted offsets.

    Parameters
    ----------
    loc: [8732, 4]
        Offset predictions from the SSD model, stored as
        [dcx, dcy, dwidth, dheight].
    dbox_list: [8732, 4]
        Default boxes, stored as [cx, cy, width, height].

    Returns
    -------
    boxes : [8732, 4] tensor of [xmin, ymin, xmax, ymax]
    """
    # Apply the offsets (with the standard SSD variances 0.1 and 0.2)
    # to recover center/size form.
    centers = dbox_list[:, :2] + 0.1 * loc[:, :2] * dbox_list[:, 2:]
    sizes = dbox_list[:, 2:] * torch.exp(0.2 * loc[:, 2:])
    boxes = torch.cat((centers, sizes), dim=1)
    # Convert [cx, cy, w, h] -> [xmin, ymin, xmax, ymax] in place.
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
1083cc832aa3d3403d34314155e1d1086f0d05ff
692,155
def safe_is_subclass(subclass, superclass) -> bool:
    """
    A clone of :func:`issubclass` that returns ``False`` instead of throwing
    a :exc:`TypeError` (e.g. when *subclass* is not a class at all).

    .. versionadded:: 1.2
    """
    try:
        result = issubclass(subclass, superclass)
    except TypeError:
        result = False
    return result
b574d5a6d4ce871c420e64562260ed3011f74cb3
692,156
def _seg_to_vcf(vals): """Convert GATK CNV calls seg output to a VCF line. """ call_to_cn = {"+": 3, "-": 1} call_to_type = {"+": "DUP", "-": "DEL"} if vals["CALL"] not in ["0"]: info = ["FOLD_CHANGE_LOG=%s" % vals["MEAN_LOG2_COPY_RATIO"], "PROBES=%s" % vals["NUM_POINTS_COPY_RATIO"], "SVTYPE=%s" % call_to_type[vals["CALL"]], "SVLEN=%s" % (int(vals["END"]) - int(vals["START"])), "END=%s" % vals["END"], "CN=%s" % call_to_cn[vals["CALL"]]] return [vals["CONTIG"], vals["START"], ".", "N", "<%s>" % call_to_type[vals["CALL"]], ".", ".", ";".join(info), "GT", "0/1"]
ae096b33a90d9e1875bb5795b538937698d7622d
692,157
def countLines(filePath: str):
    """Count the number of lines in a text file.

    :param filePath: path of the file to read
    :return: number of lines in the file
    """
    # BUG FIX: the original passed open(filePath) directly to sum(), leaking
    # the file handle (closed only when the GC got around to it). A context
    # manager closes it deterministically.
    with open(filePath) as handle:
        return sum(1 for _ in handle)
1f860194812989518fa91bfcd8a61e9cfb09420c
692,158
def search():
    """Ask the user (in German) which criterion to search by.

    Prints a numbered menu, reads the choice from stdin and returns the
    matching criterion name as a string. On invalid input an error message
    is printed and None is returned implicitly.
    """
    criterions = ["name", "description", "type", "difficulty", "terrain",
                  "size", "downloaddate", "available", "attribute", "distance"]
    labels = ["Name", "Beschreibung", "Cache-Typ", "D-Wertung", "T-Wertung",
              "Groesse", "Download-Datum", "Verfuegbarkeit", "Attribut",
              "Abstand von einer bestimmten Position (Koordinaten erforderlich)"]
    print("\nWonach willst du suchen?")
    for option_number, label in enumerate(labels, start=1):
        print("{}: {}".format(option_number, label))
    choice = input(">> ")
    # "0" would silently map to criterions[-1]; reject it explicitly.
    if choice == "0":
        print("Ungueltige Eingabe")
        return None
    try:
        return criterions[int(choice) - 1]
    except (IndexError, ValueError):
        print("Ungueltige Eingabe")
5e71754856350582d922fbd243e7e4e4336b91b4
692,159
def Q_deph(P_mass, r_dist, R):
    """
    Calculates the heat load of the dephlegmator.

    Parameters
    ----------
    P_mass : float
        The mass flow rate of distillate, [kg/s]
    r_dist : float
        The heat of vaporization of distillate, [J/kg]
    R : float
        The reflux number [dimensionless]

    Returns
    -------
    float
        The heat load of the dephlegmator, [W] i.e. [J/s]

    References
    ----------
    Dytnerskiy, eq. 2.2, p. 45
    """
    reflux_factor = R + 1
    return P_mass * reflux_factor * r_dist
ac8dc09d6b0a7513e32c47b4334ba4876de52daf
692,161
import re


def transliterate(string):
    """Transliterate a Russian (Cyrillic) string into ASCII (Latin).

    :param string: Input text in Russian.
    :type string: str
    :return: Transliterated ASCII text.
    :rtype: str
    """
    capital_letters = {u'А': u'A',
                       u'Б': u'B',
                       u'В': u'V',
                       u'Г': u'G',
                       u'Д': u'D',
                       u'Е': u'E',
                       u'Ё': u'E',
                       u'З': u'Z',
                       u'И': u'I',
                       u'Й': u'Y',
                       u'К': u'K',
                       u'Л': u'L',
                       u'М': u'M',
                       u'Н': u'N',
                       u'О': u'O',
                       u'П': u'P',
                       u'Р': u'R',
                       u'С': u'S',
                       u'Т': u'T',
                       u'У': u'U',
                       u'Ф': u'F',
                       u'Х': u'H',
                       u'Ъ': u'',
                       u'Ы': u'Y',
                       u'Ь': u'',
                       u'Э': u'E',
                       }
    capital_letters_transliterated_to_multiple_letters = {u'Ж': u'Zh',
                                                          u'Ц': u'Ts',
                                                          u'Ч': u'Ch',
                                                          u'Ш': u'Sh',
                                                          u'Щ': u'Sch',
                                                          u'Ю': u'Yu',
                                                          u'Я': u'Ya',
                                                          }
    lower_case_letters = {
        u'а': u'a',
        u'б': u'b',
        u'в': u'v',
        u'г': u'g',
        u'д': u'd',
        u'е': u'e',
        u'ё': u'e',
        u'ж': u'zh',
        u'з': u'z',
        u'и': u'i',
        u'й': u'y',
        u'к': u'k',
        u'л': u'l',
        u'м': u'm',
        u'н': u'n',
        u'о': u'o',
        u'п': u'p',
        u'р': u'r',
        u'с': u's',
        u'т': u't',
        u'у': u'u',
        u'ф': u'f',
        u'х': u'h',
        u'ц': u'ts',
        u'ч': u'ch',
        u'ш': u'sh',
        u'щ': u'sch',
        u'ъ': u'',
        u'ы': u'y',
        u'ь': u'',
        u'э': u'e',
        u'ю': u'yu',
        u'я': u'ya',
    }
    # 1) Multi-letter capitals followed by a lowercase Cyrillic letter become
    #    Title-case (e.g. "Жу" -> "Zhu").
    #    BUG FIX: the original used the non-raw replacement '%s\1', which
    #    inserted a literal \x01 control character instead of the captured
    #    group and dropped the following letter. r"\1" is the backreference.
    for cyrillic_string, latin_string in capital_letters_transliterated_to_multiple_letters.items():
        string = re.sub("%s([а-я])" % cyrillic_string, latin_string + r"\1", string)
    # 2) Single-letter capitals and all lowercase letters.
    for dictionary in (capital_letters, lower_case_letters):
        for cyrillic_string, latin_string in dictionary.items():
            string = re.sub(cyrillic_string, latin_string, string)
    # 3) Remaining multi-letter capitals (e.g. inside all-caps words) become
    #    all-uppercase Latin (e.g. "Ж" -> "ZH").
    for cyrillic_string, latin_string in capital_letters_transliterated_to_multiple_letters.items():
        string = re.sub(cyrillic_string, latin_string.upper(), string)
    return string
a505eb6c9ba73fc1c5bcb97814e4fe5dbe3495ea
692,162
def get_image(track_data_list, index_mapping, index):
    """
    Fetch one image addressed by a global index across several track
    directories.

    Inputs:
        track_data_list -- List of TrackData objects.
        index_mapping -- total-num-images by 2 array; each row holds a
            directory index and the image index within that directory.
        index -- Single global index, less than the total number of images
            in all directories.
    """
    directory_index, local_image_index = index_mapping[index]
    track = track_data_list[directory_index]
    return track.detection_image(local_image_index)
1fd143e1ae83a6a5dc09cd2791f3cd77717b0e52
692,163
import hashlib
import json


def subscribe_sqs_queue(sns_client, topic_arn, queueobj):
    """
    Subscribe an SQS queue to an SNS topic.

    Convenience wrapper that handles the two steps needed to use an SQS
    queue as an SNS endpoint:

    * The queue's ARN is subscribed to the topic.
    * A policy statement granting the topic permission to send messages to
      the queue is added to the queue's access policy (a new policy is
      created when none exists; an existing statement with the same Sid is
      left untouched).

    :type topic_arn: string
    :param topic_arn: The ARN of the topic.
    :param queueobj: A boto3 SQS Queue object to subscribe to the topic.
    :return: The response of the SNS subscribe call.
    """
    queue_arn = queueobj.attributes['QueueArn']
    # Deterministic statement id derived from the (topic, queue) pair, so
    # repeated calls can detect the grant they added earlier.
    statement_id = hashlib.md5((topic_arn + queue_arn).encode('utf-8')).hexdigest()
    response = sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs',
                                    Endpoint=queue_arn)
    # The queue policy is stored as a JSON string when present.
    policy = queueobj.attributes.get('Policy', {})
    if policy:
        policy = json.loads(policy)
    if 'Version' not in policy:
        policy['Version'] = '2008-10-17'
    if 'Statement' not in policy:
        policy['Statement'] = []
    already_granted = any(s['Sid'] == statement_id for s in policy['Statement'])
    if not already_granted:
        policy['Statement'].append({
            'Action': 'SQS:SendMessage',
            'Effect': 'Allow',
            'Principal': {'AWS': '*'},
            'Resource': queue_arn,
            'Sid': statement_id,
            'Condition': {'StringLike': {'aws:SourceArn': topic_arn}},
        })
        queueobj.set_attributes(Attributes={'Policy': json.dumps(policy)})
    return response
aa3210dc908e2ec334154de40c1f944785dc0496
692,165
import os
import time
from datetime import datetime


def time_to_montreal(fname=None, timezone='US/Eastern'):
    """Get the current time (or a file's creation time) in the Montreal zone.

    Side effect: sets the process-wide ``TZ`` environment variable and calls
    ``time.tzset()`` (Unix only).

    :param fname: optional path; when given, the file's creation time is
        formatted instead of "now"
    :param timezone: tz database name used for formatting
    :return: formatted time string such as ``'09:30 AM (Jan 02)'``
    """
    # Switch the process timezone so strftime localizes correctly.
    os.environ['TZ'] = timezone
    time.tzset()
    timestamp = os.path.getctime(fname) if fname else time.time()
    return datetime.fromtimestamp(timestamp).strftime('%I:%M %p (%b %d)')
f4ab53237aa7be56cf38741903a4a8250b983a49
692,166
import math


def get_number_format(number_of_pages) -> str:
    """
    Get the correct number formatting for pdftoppm's output numbering.

    E.g. a file with 10-99 pages will have output images named
    '01.png, 02.png, ...'; a file with 100-999 pages will output
    '001.png, 002.png, ...'. We need the same zero-padding when reading the
    images back in.

    :param number_of_pages: The total number of pages
    :return: A format string (e.g. '{:03}') for these page numbers
    """
    digits = int(math.log10(number_of_pages)) + 1
    return "{:0%d}" % digits
db8f3c205e9763566e20c6ae57fbd34cfc04d42f
692,167
def merge_dict(dict1, dict2):
    """Recursively merge ``dict2`` into ``dict1`` in place and return ``dict1``.

    Values from ``dict2`` win on conflicts; when both sides hold a dict for
    the same key, the nested dicts are merged recursively instead of being
    replaced wholesale.

    :param dict1: destination mapping (mutated)
    :param dict2: source mapping (unchanged)
    :return: ``dict1``
    """
    for key, val in dict1.items():
        # BUG FIX: the original tested `type(dict2[key] == dict)`, i.e.
        # type(bool) — always truthy — so it recursed into non-dict values
        # and crashed. Recurse only when BOTH sides are dicts.
        if isinstance(val, dict) and isinstance(dict2.get(key), dict):
            merge_dict(val, dict2[key])
        elif key in dict2:
            dict1[key] = dict2[key]
    # Bring over keys that exist only in dict2.
    for key, val in dict2.items():
        if key not in dict1:
            dict1[key] = val
    return dict1
24a6a552170b37bd5cbbd03919d4eff2e8461b47
692,168
def getKeyValueFromDict(dictToSearch, searchKey):
    """
    Search a dict (with nested lists and dicts) for ``searchKey`` and return
    the value found.

    :return: a ``(value, matched)`` pair where ``matched`` is the string
        "true" when the key was found and "false" otherwise (``value`` is
        "" in that case).
    """
    result = ""
    matched = "false"
    for key, value in dictToSearch.items():
        if key == searchKey:
            result, matched = value, "true"
            break
        if isinstance(value, dict):
            nested_result, nested_matched = getKeyValueFromDict(value, searchKey)
            if nested_matched == "true":
                result, matched = nested_result, nested_matched
                break
        elif isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    nested_result, nested_matched = getKeyValueFromDict(item, searchKey)
                    if nested_matched == "true":
                        result, matched = nested_result, nested_matched
                        break
            # NOTE: as in the original scan order, a hit inside a list does
            # not stop the outer loop; later keys may still override it.
    return result, matched
3247d16e23a978f1125f649567b54e785f859ac5
692,169
import requests
import os


def lookup_github_full_name(gh_username):
    """
    Retrieve a GitHub user's full name by username.

    Authenticates with the GITHUB_API_USER / GITHUB_API_TOKEN environment
    variables when set.

    :param gh_username: GitHub login of the user
    :return: the user's full name, or '' when unset or unavailable
    """
    url = 'https://api.github.com/users/{}'.format(gh_username)
    request = requests.get(url, auth=(os.environ.get('GITHUB_API_USER', ''),
                                      os.environ.get('GITHUB_API_TOKEN', '')))
    user = request.json()
    # BUG FIX: the API returns "name": null for users without a full name;
    # user.get('name', '') passed that None through because the key exists.
    return user.get('name') or ''
60ea709ec514c9f94c53743eb2e65f4c9a69c017
692,170
def switch_inbox(conn_, inbox_, **kwargs):
    """
    Switch an existing IMAP connection to a different inbox (inbox folder).

    :param conn_: open IMAP connection object
    :param inbox_: name of the mailbox folder to select
    :param kwargs: accepted but ignored, kept for call-signature flexibility
    :return: the same connection, for chaining
    """
    mailbox_connection = conn_
    mailbox_connection.select(inbox_)
    return mailbox_connection
5fb457a3e15c072590609fcb15bf46182aee375d
692,171
import argparse
import textwrap


def parseInput():
    """Configure argparse and return the parsed command-line arguments."""
    cli_parser = argparse.ArgumentParser(
        prog="DemultiplexFlncFasta.py",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""Custom demultiplex script for the 'isoseq_flnc.fasta'
        Fastas are output to a new folder called 'demultiplexed' in the
        'isoseq_flnc.fasta' directory.""")
    # Optional flags.
    cli_parser.add_argument('--ignorePolyA', help="", action='store_true')
    cli_parser.add_argument("-v", "--version", action="version",
        version=textwrap.dedent("""\
        %(prog)s
        -----------------------
        Version:    0.3
        Updated:    08/29/2016
        By:         Prech Uapinyoying
        Website:    https://github.com/puapinyoying"""))
    # Arguments that must always be supplied.
    required_group = cli_parser.add_argument_group('required named arguments')
    required_group.add_argument('--resultsRoot', help="", required=True)
    required_group.add_argument('--size', help="", required=True)
    return cli_parser.parse_args()
648ac31e9a27b42a74acde1f99d8725e18586fb8
692,173
import logging


def getLogger(name):
    """
    Drop-in replacement for ``logging.getLogger``.

    :param name: logger name
    :return: the ``logging.Logger`` instance registered under *name*
    """
    return logging.getLogger(name)
609968d6e16b65d41d4431cae5dc05818280d3c1
692,174