content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def isalnum(text):
    """
    Check that ``text`` is a non-empty string made only of alphanumeric
    characters.

    A character c is alphanumeric if one of :func:`isalpha`,
    :func:`isdecimal`, :func:`isdigit`, or :func:`isnumeric` returns True.

    :param text: The string to check
    :type text: ``str``
    :return: True if all characters in ``text`` are alphanumeric and there
        is at least one character, False otherwise.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    result = text.isalnum()
    return result
bf63dc89522398e8c8a4d91b39dbdb37d61edc28
692,831
def match_application_commands_to_commands(application_commands, commands, match_schema):
    """
    Matches the given application commands to slash commands.

    Parameters
    ----------
    application_commands : `list` of ``ApplicationCommand``
        Received application commands.
    commands : `None` or `list` of ``SlashCommand``
        A list of slash commands if any.
    match_schema : `bool`
        Whether schema or just name should be matched.

    Returns
    -------
    commands : `None` or `list` of ``SlashCommand``
        The remaining matched commands.
    matched : `None` or `list` of `tuple` (``ApplicationCommand``, ``SlashCommand`)
        The matched commands in pairs.
    """
    matched = None
    if (commands is not None):
        # Iterate both lists in reverse so deleting by index does not
        # invalidate the remaining indices.
        for application_command_index in reversed(range(len(application_commands))):
            application_command = application_commands[application_command_index]
            application_command_name = application_command.name
            for command_index in reversed(range(len(commands))):
                command = commands[command_index]
                if command.name != application_command_name:
                    continue
                if match_schema:
                    # Name matched, but a full schema comparison was
                    # requested as well.
                    if (command.get_schema() != application_command):
                        continue
                # Pair found: consume both sides so they are not matched
                # again.
                # NOTE(review): the inner loop keeps running after this
                # deletion; if two commands share a name the already
                # deleted application_command_index could be reused —
                # confirm whether a `break` was lost here.
                del application_commands[application_command_index]
                del commands[command_index]
                if matched is None:
                    matched = []
                matched.append((application_command, command))
        if not commands:
            commands = None
    return commands, matched
fe5e97bf3e3560e5fbb80161ce171d3041e9cd88
692,832
import os

def sibpath(path, sibling):
    """
    Return the path to a sibling of a file in the filesystem.

    This is useful in conjunction with the special C{__file__} attribute
    that Python provides for modules, so modules can load associated
    resource files.
    """
    directory = os.path.dirname(os.path.abspath(path))
    return os.path.join(directory, sibling)
d18aa8f558df697485e63c9ac87318f5bbbc0e19
692,833
def sdp_content(f, K):
    """Returns GCD of coefficients in `K[X]`."""
    if K.has_Field:
        # Over a field every nonzero element divides every other one,
        # so the content is simply 1.
        return K.one
    cont = K.zero
    for _monom, coeff in f:
        cont = K.gcd(cont, coeff)
        if K.is_one(cont):
            # The GCD cannot get any smaller; stop early.
            break
    return cont
abf8bbf9e23a884185f0a64750d284aa611515cb
692,834
def check_old_policy(policy):
    """
    Check the validity of a single policy using the rules from part 1 of
    day 2: the count of ``letter`` occurrences in ``passwd`` must fall in
    the inclusive range [low, high].

    Returns a shallow copy of ``policy`` with a boolean ``"valid"`` key
    added.
    """
    occurrences = policy["passwd"].count(policy["letter"])
    checked = dict(policy)
    checked["valid"] = policy["low"] <= occurrences <= policy["high"]
    return checked
07d755be3ad71b342381d6d611a33325eecd3fd6
692,836
def action_parse(action):
    """
    Internal method to parse and validate actions, which are required for
    some methods like toggle_activist_code() and toggle_volunteer_action().

    Capitalises ``action``; raises ValueError unless the result is
    'Apply' or 'Remove'.
    """
    normalised = action.capitalize()
    if normalised in ('Apply', 'Remove'):
        return normalised
    raise ValueError("Action must be either 'Apply' or 'Remove'")
cbf630d42c34e9750773618b3b6692dc015ee369
692,838
def figure_image_adjustment(fig, img_size):
    """Adjust a figure so it renders as a bare image without axes.

    :param fig: matplotlib figure to adjust
    :param (int, int) img_size: image size as (height, width)
    :return: the adjusted figure
    """
    axis = fig.gca()
    # x spans the width, y the height; y is inverted to image convention.
    axis.set_xlim([0, img_size[1]])
    axis.set_ylim([img_size[0], 0])
    axis.axis('off')
    axis.axes.get_xaxis().set_ticklabels([])
    axis.axes.get_yaxis().set_ticklabels([])
    # Remove all padding so the axes fill the whole canvas.
    fig.tight_layout(pad=0)
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    return fig
e026242ea1ef7cd760645a6cbec8e7b414e31d8b
692,839
def find_best(strings, criteria):
    """
    Parse a list of `strings` and return the "best" element based on `criteria`.

    :param strings: List of string where best will be found based on `criteria`.
    :type strings: List[str]
    :param criteria: '>'-separated substrings sought in descending order.
        '+' is a logical 'and', '=' substitutes: `A=B` returns B if A is found.
    :type criteria: str
    :return: Best string based on given `criteria` or `` if no best found.
    :rtype: str
    """
    criterion, _, further_criteria = criteria.partition('>')
    wanted = criterion.partition('=')[0].split('+')
    # Bug fix: the original used `w is ''` — identity comparison with a
    # literal is implementation-dependent (and a SyntaxWarning on 3.8+);
    # compare by value instead.
    if all(w in strings or w == '' for w in wanted):
        return criterion.rpartition('=')[2]
    return find_best(strings, further_criteria)
283c7e64f82faeced23eaafd3d5a2853302874e0
692,840
import random

def verb():
    """Calculate a random verb declension as a pair of indices in
    [0, 3) and [0, 6)."""
    declension = [random.randrange(limit) for limit in (3, 6)]
    return declension
30a580953fc41b925aedd5ea766451472623bfca
692,841
import logging

def replaceHTMLBlock(html, commentIdentifier, newContent):
    """
    Replaces html content in block of
    <!-- commentIdentifier -->Old content<!-- end of commentIdentifier -->
    by new value.

    :param html: source html containing section(s) to be replaced
    :param commentIdentifier: identifier of section to be replaced
    :param newContent: new content of identified section
    :return: resulting html

    >>> html = "<html><body><h1>Title</h1><p><!-- content -->Here should be page content<!-- end of content --></p></body></html>"
    >>> html = replaceHTMLBlock(html, "content", "My content of page.")
    >>> print html
    <html><body><h1>Title</h1><p>My content of page.</p></body></html>
    """
    commentIdentifier = commentIdentifier.strip()
    # Markers are matched case-insensitively: both markers and a shadow
    # upper-cased copy of the html are compared.
    startId = ("<!-- %s -->" % commentIdentifier).upper()
    endId = ("<!-- END OF %s -->" % commentIdentifier).upper()
    # Replace every occurrence of the section.
    # NOTE(review): if newContent itself contains the start marker this
    # loop never terminates — confirm callers cannot pass such content.
    while html.upper().find(startId) >= 0:
        upperCase = html.upper()
        startPos = upperCase.find(startId)
        endPos = upperCase.find(endId)
        if endPos < 0:
            # Unterminated section: give up and return html unchanged.
            logging.error("replaceHTMLBlock endPos(%d) < 0" % (endPos))
            return html
        # Cut up to and including the '-->' that closes the end marker.
        endCutPos = upperCase.find("-->", endPos) + 3
        if endCutPos < 3:
            return html
        if startPos >= 0 and endCutPos >= 0:
            html = html[:startPos] + newContent + html[endCutPos:]
    return html
133e31bad4ef6d743ff5a078394a4ed6b87df862
692,842
def returnsides(short_cathet):
    """Given the short cathetus of a 30-60-90 right triangle, return the
    hypotenuse and the long cathetus (rounded to 2 decimal places).

    >>> returnsides(1)
    (2, 1.73)
    >>> returnsides(2)
    (4, 3.46)
    >>> returnsides(3)
    (6, 5.2)
    """
    hypotenuse = 2 * short_cathet
    # long cathetus = sqrt(hyp^2 - short^2) = sqrt(3 * short^2)
    long_cathet = round((short_cathet ** 2 + 2 * short_cathet ** 2) ** (1 / 2), 2)
    return hypotenuse, long_cathet
11a737f5820bfcf38556dc241e3eda7267ff522f
692,843
def conv(filter_value):
    """Convert client-supplied '*' wildcards to '%' for database queries.

    A missing filter (None) matches everything.
    """
    return '%' if filter_value is None else filter_value.replace('*', '%')
8decf962efc3234501b2503f341a1d29720d361f
692,844
def script_tags(soup):
    """Grab all <script type="application/json"> tags (json data) from a
    soupified page."""
    json_type = {'type': 'application/json'}
    return soup.find_all('script', attrs=json_type)
8e96138024648305a4054acb03d859d2e81daeb9
692,845
import functools
import unicodedata

def uni_nornal(func):
    """
    Decorator: NFKD-normalise the wrapped function's return value,
    e.g. replacing non-breaking spaces (``\\xa0``) with plain spaces.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        raw = func(*args, **kwargs)
        return unicodedata.normalize('NFKD', raw)
    return wrapper
0440cbc968de50bea597cff21392904bdfaa9415
692,846
def load_urls_from_text(text):
    """Load urls from text, one per line; ignores comment lines starting
    with '#', blank lines and duplicates.

    :param text: newline-separated url list
    :return: set of unique urls
    """
    urls = set()
    for line in text.split('\n'):
        # Ignore all surrounding white characters
        url = line.strip()
        # Bug fix: the comment test previously ran on the raw line, so an
        # indented comment ("   # foo") slipped through.  Also dropped the
        # dead `url is not None` check — str.strip() never returns None.
        if url and not url.startswith("#"):
            urls.add(url)
    return urls
eecebeb8c298f6a5d07c1e4233d29032b247f756
692,847
def save(df, corpus):
    """Saves dataset with predicted senses to CSV file.

    Args:
        df: Dataframe with multi-sense words and their contexts.
        corpus: Name of the original file.

    Returns:
        Path to saved CSV file with predicted senses.
    """
    output_fpath = "{}_predictions.csv".format(corpus)
    df.to_csv(output_fpath, sep="\t", encoding="utf-8", index=False)
    print("Generated dataset: {}".format(output_fpath))
    return output_fpath
c195d43654b3c789022f063e33ae12ee5092c984
692,848
def get_subject_from_components(components):
    """Return the OpenSSL-style certificate subject built from a list of
    (key, value) component pairs, e.g. [('C', 'FR'), ('CN', 'Alain Dupont')]
    -> u'/C=FR/CN=Alain Dupont'.
    """
    parts = [u'%s=%s' % (key, value) for key, value in components]
    return u'/' + u'/'.join(parts)
4dea7e19759986fd8d3d7756e32df06e5476a173
692,850
def odd_numbers():
    """Provides a fixed sample sequence of odd numbers."""
    sample = (1, 7, 23, 10481, -1, -173, 597104865)
    return sample
c8e1d43b110ffc973d335bd27e5089ff45275295
692,851
def getmapname(number, prefix, day):
    """
    Generate a pcraster-type map name based on a prefix and a date.

    Name format: <prefix>_NM_<year>_<day-of-year>_<hour>.tif

    :var number: number of the map (kept for interface compatibility; unused)
    :var prefix: prefix for the map
    :var day: datetime providing year, day-of-year and hour
    :return: Name
    """
    stamp = '_'.join((day.strftime('%Y'), day.strftime('%j'), day.strftime('%H')))
    return prefix + '_NM_' + stamp + '.tif'
f2f6f9b379950486f1eec274035100311efc76f0
692,852
def xcrun_env(ctx):
    """Returns the environment dictionary necessary to use xcrunwrapper."""
    # The single platform this target is built for (e.g. device vs
    # simulator); the target env differs per platform.
    platform = ctx.fragments.apple.single_arch_platform
    # Combine the platform-specific target env with the host Xcode env.
    # NOTE(review): dict '+' concatenation is legacy Starlark behaviour,
    # removed in modern Bazel — confirm the Bazel version this runs on.
    action_env = ctx.fragments.apple.target_apple_env(platform) \
        + ctx.fragments.apple.apple_host_system_env()
    return action_env
9854b08e547c567a7fa0ccacde2a4fb7fbb09ef9
692,853
import networkx as nx
from collections import defaultdict

def poly_graph(surf):
    """NetworkX undirected graph representing polygons of a Surface.

    Graph nodes are polygon indices; two polygons are connected when they
    share a mesh edge.  Each graph edge carries a ``verts`` attribute
    holding the frozenset of the two shared vertex indices.
    """
    # Map each undirected mesh edge (pair of vertex ids) to the list of
    # polygon indices that contain it.
    edges = defaultdict(list)
    for ii, (a, b, c) in enumerate(surf.polys):
        edges[frozenset([a, b])].append(ii)
        edges[frozenset([a, c])].append(ii)
        edges[frozenset([b, c])].append(ii)
    #nedges = len(edges)
    #ii,jj = np.vstack(edges.values()).T
    #polymat = sparse.coo_matrix((np.ones((nedges,)), (ii, jj)), shape=[len(self.polys)]*2)
    polygraph = nx.Graph()
    # p[0]/p[1] are the two polygons sharing edge k.
    # NOTE(review): assumes a closed surface where every edge belongs to
    # exactly two polygons — a boundary edge (single polygon) would raise
    # IndexError here; confirm inputs are watertight meshes.
    polygraph.add_edges_from(((p[0], p[1], dict(verts=k)) for k, p in edges.items()))
    return polygraph
c05f1578200655ed6adca0d39ed74afe2f843e0c
692,854
def bytes_view(request):
    """A simple test view that returns ASCII bytes (a fixed TwiML body)."""
    body = b'<Response><Message>Hi!</Message></Response>'
    return body
19bac61604ba81a0f87640670f2993a56aee4d3f
692,855
import re

def strip_ansi_sequences(text: str) -> str:
    """Strip ANSI sequences from the input text.

    :param text: text to sanitize
    :return: sanitized text
    """
    # Matches: two-char ESC sequences, lone C1 control bytes, and full
    # CSI sequences (ESC[ or 0x9B, parameters, intermediates, final byte).
    ansi_pattern = (
        r"(?:\x1B[@-Z\\-_]|"
        r"[\x80-\x9A\x9C-\x9F]|"
        r"(?:\x1B\[|\x9B)[0-?]*[ -/]*[@-~])"
    )
    return re.sub(ansi_pattern, "", text)
67c0c7c950f2ed52704e3302a0067856f2a3116e
692,856
def unconvert(class_id, width, height, x, y, w, h):
    """Convert normalized (center x/y, box w/h) coordinates into integer
    pixel bounds (class_id, xmin, xmax, ymin, ymax)."""
    center_x = x * width
    center_y = y * height
    half_w = (w * width) / 2.0
    half_h = (h * height) / 2.0
    return (int(class_id),
            int(center_x - half_w), int(center_x + half_w),
            int(center_y - half_h), int(center_y + half_h))
1e811ad895c290e533997782eb9f40ab16d95bb2
692,857
def parse_relation(fields):
    """
    Parse one Brat relation line (all relations assumed binary; argument
    role names are discarded).

    :param fields: fields of one Brat line, already split on tab
    :return: (relation id, relation name, arg1 entity id, arg2 entity id)
    """
    rel_name, arg1, arg2 = fields[1].split(" ")
    arg1_id = arg1.split(":")[1]
    arg2_id = arg2.split(":")[1]
    return fields[0], rel_name, arg1_id, arg2_id
9e86f45d571e7b3de2e64645209a5854f145330e
692,858
import re

def pop_id_num(par):
    """
    Split apart parameter prefix and ID number.

    Returns (prefix, id) for pop/pq/source parameters carrying a single
    digit in {braces} or _underscores_; otherwise (par, None).
    """
    # Spare us from using re.search if we can.
    if not par.startswith(('pop', 'pq', 'source')):
        return par, None
    # Single digit inside curly braces, else inside underscores.
    match = re.search(r"\{([0-9])\}", par)
    if match is None:
        match = re.search(r"\_([0-9])\_", par)
        if match is None:
            return par, None
    prefix = par.replace(match.group(0), '')
    return prefix, int(match.group(1))
6ebeae178a6d60d07da67f9b5485b2bc554d77af
692,859
def get_bind_args(run):
    """
    Returns args available to template expansion for `run`: run_id and
    job_id, with the run instance's own args merged last (so they win on
    key collisions).
    """
    bound = {"run_id": run.run_id, "job_id": run.inst.job_id}
    bound.update(run.inst.args)
    return bound
9b454d408f732ea4c3b36ef486732a3e61721d52
692,860
def check_data(data):
    """
    Check the *data* argument and make sure it's a tuple.  If the data is
    a single array, return it as a tuple with a single element.  This is
    the default format accepted and used by all gridders and processing
    functions.

    Examples
    --------

    >>> check_data([1, 2, 3])
    ([1, 2, 3],)
    >>> check_data(([1, 2], [3, 4]))
    ([1, 2], [3, 4])
    """
    return data if isinstance(data, tuple) else (data,)
23298eb4070eb0643b9dd75c51ac4f93ee525f0b
692,861
def ichno(coalg):
    """
    Dual to prothesi, extending an anamorphism by exposing the current trace.

    ``coalg(a, trace)`` unfolds one layer from seed ``a`` while seeing the
    trace of functor layers produced so far (most recent first).
    """
    def run(a, trace=()):
        # Unfold one layer from the seed, handing the coalgebra the trace.
        fa = coalg(a, trace)
        # Recurse into each inner seed, pushing this layer onto the trace.
        return fa.map(lambda x: run(x, (fa,) + trace))
    return run
989279b26bf1e1e3175da1eb9511dfb2904d890d
692,862
def spatpix_frame_to_ref(spatpix, frame='dms', subarray='SUBSTRIP256', oversample=1):
    """Convert spatpix from an arbitrary frame to nat coordinates in SUBSTRIP256.

    :param spatpix: spatpix coordinates in an arbitrary coordinate frame.
    :param frame: the input coordinate frame ('nat', 'dms' or 'sim').
    :param subarray: the input coordinate subarray ('SUBSTRIP256' or 'SUBSTRIP96').
    :param oversample: the oversampling factor of the input coordinates.

    :type spatpix: array[float]
    :type frame: str
    :type subarray: str
    :type oversample: int

    :returns: spatpix_ref - the input coordinates transformed to nat
        coordinate frame and SUBSTRIP256 subarray.
    :rtype: array[float]
    """
    # One transform per (frame, subarray) combination; lambdas keep the
    # arithmetic lazy so only the requested one is evaluated.
    transforms = {
        ('nat', 'SUBSTRIP256'): lambda pix: pix,
        ('dms', 'SUBSTRIP256'): lambda pix: 255 * oversample - pix,
        ('sim', 'SUBSTRIP256'): lambda pix: pix,
        ('nat', 'SUBSTRIP96'): lambda pix: pix + 150 * oversample,
        ('dms', 'SUBSTRIP96'): lambda pix: 245 * oversample - pix,
        ('sim', 'SUBSTRIP96'): lambda pix: pix + 150 * oversample,
    }
    key = (frame, subarray)
    if key not in transforms:
        raise ValueError('Unknown coordinate frame or subarray: {} {}'.format(frame, subarray))
    return transforms[key](spatpix)
495b0c0c50dfe52bbbdfeb6d3385c74211cddef1
692,863
def ref(takeoff, emergency=False):
    """
    Basic behaviour of the drone: take-off/landing, emergency stop/reset.

    Parameters:
        takeoff -- True: Takeoff / False: Land
        emergency -- True: Turn of the engines

    Returns a 1-tuple holding the AT*REF command word with the takeoff
    flag at bit 9 and the emergency flag at bit 8.
    """
    assert type(takeoff) == bool
    assert type(emergency) == bool
    base_word = 0b10001010101000000000000000000
    command = base_word | (emergency << 8) | (takeoff << 9)
    return (command,)
d66ebc7905a54d32dd21df651305e7a0288a273e
692,864
def decode(code):
    """
    Convert a seat code into binary and then into an int seat ID.

    F/L map to binary 0, B/R map to binary 1.

    :param code: str containing position info
    :return: int ID corresponding to position code
    """
    binary = code.translate(str.maketrans('FBLR', '0101'))
    return int(binary, 2)
4ac5258983908382abaa075c5e9c7e0ce1d98903
692,865
def name(obj):
    """Try to find some reasonable filename for the obj.

    Falls back through: obj.filename, obj.__name__, the class name,
    and finally str(obj).
    """
    for candidate in (getattr(obj, 'filename', 0),
                      getattr(obj, '__name__', 0),
                      getattr(getattr(obj, '__class__', 0), '__name__', 0)):
        if candidate:
            return candidate
    return str(obj)
cfcbb671c64949e636cfeb0d3c80bc241522333f
692,866
import random

def randomMultinomial(dist):
    """
    @param dist: List of positive numbers summing to 1 representing a
        multinomial distribution over integers from 0 to C{len(dist)-1}.
    @returns: random draw from that distribution (the string "weird" on
        floating-point fall-through when no bucket absorbs the draw)
    """
    remaining = random.random()
    for index, weight in enumerate(dist):
        remaining -= weight
        if remaining < 0.0:
            return index
    return "weird"
57c7b2aaa74cecc298f5bfb54f9d95d3589c1741
692,868
import re

def sanitize_element(element):
    """
    Eliminate some unneeded characters out of the XML snippet if they appear.

    :param element: element str
    :return: sanitized element str
    """
    # First collapse newline + following indentation, then any bare newlines.
    without_indent = re.sub(r"\n\s+", "", element)
    return re.sub(r"\n", "", without_indent)
63c87e8972127bb4f85600c911f98464254cca68
692,869
import base64

def img2base64(filename):
    """Return the base64 string encoding of an image file's bytes."""
    with open(filename, "rb") as image_file:
        raw = image_file.read()
    return base64.b64encode(raw).decode()
c229209aa5d1bff461dd2f9ca7db57270555ee19
692,870
def verify_integer(parser, arg, htzero):
    """
    Verify that the supplied column (htzero=True) or increment
    (htzero=False) argument is a valid integer in range; reports via
    parser.error otherwise.
    """
    try:
        arg = int(arg)
    except ValueError:
        message = ("--column must be an integer, 1 or higher" if htzero
                   else "--increment must be an integer")
        parser.error(message)
    if htzero:
        # Columns are 1-based.
        if not arg >= 1:
            parser.error("--column must be an integer, 1 or higher")
    else:
        if not arg >= 0 or arg > 2844131327:
            parser.error("--increment must be an integer between 1 and "
                         "2844131327")
    return arg
6b5fc40a2724a87bc5ef76f22afb422149c9f3e1
692,871
def negative_signature(b):
    """(...-)  Signature handler that is always negative; `b` is ignored."""
    return -1
8e01b793327af62cbc360b554b4274f0e4bdf73d
692,872
import numpy

def freq_from_autocorr(sig, fs):
    """
    Estimate the fundamental frequency of `sig` (sample rate `fs`) from
    the first peak of its autocorrelation.

    Returns 0 when no rising edge is found in the autocorrelation.
    """
    # Autocorrelation, keeping only non-negative lags.
    acf = numpy.correlate(sig, sig, mode='full')
    acf = acf[len(acf) // 2:]
    # Locate the first lag where the autocorrelation starts rising again.
    rising = numpy.nonzero(numpy.diff(acf) > 0)
    if len(rising) == 0 or len(rising[0]) == 0:
        return 0
    first_rise = rising[0][0]
    # The highest peak past the first low point approximates the period.
    # Not reliable for long signals: the true peak may fall between
    # samples, and later peaks can appear higher.  A weighting function
    # de-emphasizing long lags would help.
    period = numpy.argmax(acf[first_rise:]) + first_rise
    return fs / period
6a520b83523ef1d40212b74ccc8772b941e75041
692,873
def default_partition(key, nr_partitions, params): """Returns ``hash(str(key)) % nr_partitions``.""" return hash(str(key)) % nr_partitions
d49eea66c36779c6a17417e96551938a1fa6ee89
692,874
def get_port(socket):
    """Return the port to which a socket is bound."""
    # getsockname() yields (address, port) for AF_INET sockets.
    return socket.getsockname()[1]
7618a44a28aa209922b257751e0b862917b1ea9c
692,875
def average_gate_error_to_rb_decay(gate_error: float, dimension: int):
    """
    Inversion of eq. 5 of the [RB]_ arxiv paper: map an average gate error
    back to the corresponding RB decay.

    :param gate_error: The average gate error.
    :param dimension: Dimension of the Hilbert space, 2^num_qubits
    :return: The RB decay corresponding to the gate_error
    """
    inv_dim = 1 / dimension
    return (gate_error - 1 + inv_dim) / (inv_dim - 1)
91f58a420e08a9bc43a871db2a51c06ee4ad7756
692,876
def is_apex_available():
    """
    Check if NVIDIA apex is available with a simple python import.

    Bug fix: the original try block never imported apex, so the function
    unconditionally returned True.
    """
    try:
        import apex  # noqa: F401
        apex_available = True
    except ImportError:
        apex_available = False
    return apex_available
d4cdd323b9fa96f707445d3f15168b9d0fcac678
692,877
import logging

def image_sanity_fail(image, shape, description):
    """
    Sanity check on images (training and testing): the image must exist
    and its shape must match.  `description` is used in the log message.

    Returns True on failure, False when the image passes.
    """
    if image is None:
        logging.error("{} : image is None".format(description))
        return True
    if image.shape != shape:
        logging.error("{} : shape is {}, (expecting {})".format(
            description, repr(image.shape), repr(shape)))
        return True
    return False
a7c795495e1f4766630d07599964fc9e08620816
692,878
def get_active_profile(content, key):
    """
    Gets the active profile for the given key in the content's config
    object, or the string 'None' when unset or on any config error.
    """
    try:
        if content.config.has_option(key, 'profile'):
            return content.config.get(key, 'profile')
        return 'None'
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; keep the best-effort fallback but
        # only for ordinary errors.
        return 'None'
b26491381fcd22003316ce9d1c2eb0577d89d715
692,879
def sam_format():
    """
    The canonical definition of the SAM format.
    http://samtools.github.io/hts-specs/SAMv1.pdf

    Each returned entry has 'name' (the field name) and 'type' (a
    callable applied to convert the field to its expected type when
    parsing).  See the spec above for the FLAG bit definitions and the
    CIGAR operation table.
    """
    field_specs = [
        ('QNAME', str),   # Query template NAME
        ('FLAG', int),    # bitwise FLAG
        ('RNAME', str),   # Reference sequence NAME
        ('POS', int),     # 1-based leftmost mapping POSition
        ('MAPQ', int),    # MAPping Quality
        ('CIGAR', str),   # CIGAR string
        ('RNEXT', str),   # Ref. name of the mate/next read
        ('PNEXT', int),   # Position of the mate/next read
        ('TLEN', int),    # observed Template LENgth
        ('SEQ', str),     # segment SEQuence
        ('QUAL', str),    # ASCII of Phred-scaled base QUALity+33
    ]
    return [{'name': field_name, 'type': converter}
            for field_name, converter in field_specs]
1714e8921a2e518ca9142db8e16ad658c1ecee55
692,881
def _generate_context_filepath(outdir, kind, seq, window_size, step): """Generates the fullpath of the output file. The name contains information on how the context was created. Args: outdir (string): The output directory kind (string): Identifier for how the context was extracted from the traces. start (datetime): Start of the context data end (datetime): End of the context data window_size (int): Size of the sliding window step (int): Step of the sliding window Returns: String: The path of the output file """ seq_or_con = 'seq' if seq else 'con' return outdir + seq_or_con + '_context_' + kind + '_w' + str(window_size) + '_s' + str(step) + '.csv'
52297898923f7ad26158e4d8c140a1d1a05f1e8e
692,882
import subprocess
import logging

def get_material_you_colors(wallpaper_data, ncolor, flag):
    """
    Get Material You colors from a wallpaper or hex color using
    material-color-utility.

    Args:
        wallpaper_data (str): wallpaper data (image path or color value)
        ncolor (int): alternative color number passed as -n
        flag (str): 'image' or 'color' flag passed to material-color-utility

    Returns:
        str: stripped stdout from material-color-utility, or None on failure
    """
    try:
        # Security fix: build an argument list instead of interpolating
        # user-controlled wallpaper_data into a shell string — the
        # original shell=True call allowed command injection via crafted
        # file names.
        output = subprocess.check_output(
            ["material-color-utility", flag, wallpaper_data, "-n", str(ncolor)],
            universal_newlines=True)
        return output.strip()
    except Exception:
        logging.error(f'Error trying to get colors from {wallpaper_data}')
        return None
a9df56cca10f76754d86baca8137fd0d18aa7338
692,883
def _find_provider(hass, prov_type): """Return provider for type.""" for provider in hass.auth.auth_providers: if provider.type == prov_type: return provider return None
e8cfa630f961f330785064c9b8e0d51d2231ef35
692,885
def filter_dict(data, *keys):
    """
    Returns a smaller dict with just `keys`.

    Keys that are missing from `data` — or whose values are falsy — are
    dropped.
    """
    return {key: data.get(key) for key in keys if data.get(key)}
79d8ad9b79981b0d36230c2a787266f20ac0ef00
692,886
def normalize_dimensions(img, dimensions, return_ratio=False):
    """
    Given a PIL image and a tuple of (width, height), where `width` OR
    `height` may be numeric, 0 or None, return (dst_width, dst_height)
    as integers, the missing value scaled from the image's aspect ratio.
    When `return_ratio` is True, also return dst_width/dst_height.

    :raises ValueError: if both dimensions are missing.
    """
    max_width, max_height = dimensions
    # Bug fix: comparing None with `>` raises TypeError on Python 3 even
    # though the docstring promised None support; map None to 0 ("unset").
    dst_width = 0 if max_width is None else max_width
    dst_height = 0 if max_height is None else max_height
    src_width, src_height = img.size
    if dst_width > 0 and dst_height > 0:
        pass
    elif dst_width <= 0 and dst_height <= 0:
        raise ValueError("Width and height must be greater than zero")
    elif dst_width <= 0:
        # Scale width from the height bound, preserving aspect ratio.
        dst_width = float(src_width * max_height) / float(src_height)
    else:
        # Scale height from the width bound, preserving aspect ratio.
        dst_height = float(src_height * max_width) / float(src_width)
    if return_ratio:
        dst_ratio = float(dst_width) / float(dst_height)
        return (int(dst_width), int(dst_height), dst_ratio)
    return (int(dst_width), int(dst_height))
e8b99fffe98d71e252ff3c389b3b97b7c3327e74
692,887
import os

def bitbake_path(request, bitbake_path_string):
    """Fixture that enables the PATH we need for our testing tools.

    Swaps PATH for `bitbake_path_string` and registers a finalizer that
    restores the previous value.
    """
    original_path = os.environ['PATH']
    os.environ['PATH'] = bitbake_path_string

    def restore_path():
        os.environ['PATH'] = original_path

    request.addfinalizer(restore_path)
    return os.environ['PATH']
427b95f35a1866fdc1218a183a67a61ecc186543
692,888
def unpack_list(a_list):
    """
    Recursively flatten arbitrarily nested lists/tuples into one flat list.

    Arguments:
        a_list -> A list of an arbitrary number of sublists

    Returns:
        A list containing all leaf elements of all sublists, in order.
    """
    flat = []
    for item in a_list:
        # Exact type check (not isinstance) mirrors the original
        # contract: only plain lists/tuples are expanded.
        if type(item) in (list, tuple):
            flat.extend(unpack_list(item))
        else:
            flat.append(item)
    return flat
fd75ff640fba451cdf27c4efc36f6a4a8441e544
692,889
def removeUnicodeIdentifiers(s):
    """
    Removes the u in front of a unicode string literal:
    u'string' -> 'string'.

    Bug fix: the original plain ``replace("u'", "'")`` also mangled words
    ending in u (the docstring itself noted 'stru' -> 'str' was not
    intended).  Only strip a u that is not preceded by a word character.

    @ In, s, string, string to remove characters from
    @ Out, s, string, cleaned string
    """
    import re
    return re.sub(r"(?<!\w)u'", "'", s)
82020bd5debd53009fee4b27110a38f2ca24f9f4
692,890
def update_ports(cfmclient, port_uuids, field, value):
    """
    Update one attribute across a set of composable fabric switch ports.

    :param cfmclient: Composable Fabric API client
    :param port_uuids: list of str Composable Fabric port UUIDs
    :param field: str field to modify (case-sensitive)
    :param value: str new value for the field
    :return: dict with count, result, and time of the update, or None
        when no UUIDs were supplied
    :rtype: dict
    """
    if not port_uuids:
        return None
    patch_op = {
        'path': '/{}'.format(field),
        'value': value,
        'op': 'replace',
    }
    payload = [{'uuids': port_uuids, 'patch': [patch_op]}]
    return cfmclient.patch('v1/ports', payload)
0088d5a569be878d53de662943c6c86db4cd8d76
692,891
def zipper_merge(*lists):
    """
    Combines lists by alternating elements from them ("zipper merge").

    Combining [1,2,3], ['a','b','c'] and [42,666,99] gives
    [1,'a',42,2,'b',666,3,'c',99].  Lists should have equal length, or
    they are truncated to the shortest one.
    """
    return [element for group in zip(*lists) for element in group]
9e892a201684f5d215fd38c46c72bed0e457ba40
692,892
def _compareName(resp, respName, compareName): """ Search a list of objects (services, policies, priortities, etc.) and return the id for the search name :param resp: :param respName: :param compareName: :return: id found or None """ for item in resp[respName]: if item['name'].lower() == compareName: return item['id'] return None
51dc0a59ad77906ac64c0490ff15f838d07413d2
692,893
import copy

def create_sample_tallying_counters(samples_list):
    """
    Creates a tallying dictionary of samples for reporting final results.

    :param samples_list: List of samples
    :return: Dictionary mapping each sample to its own zeroed tally dict
    """
    tally_template = {
        'Biallelic_Testable': 0, 'Sig_ASE': 0, 'Sig_ASE_Ref': 0,
        'Sig_ASE_Alt': 0, 'Biallelic_No_ASE': 0, 'Passing_Homozygous': 0,
        'Passing_Homozygous_Ref': 0, 'Passing_Homozygous_Alt': 0,
        'Non_Testable': 0}
    # Deep-copy per sample so each tally dict is independent (no aliasing).
    return {sample: copy.deepcopy(tally_template) for sample in samples_list}
8791a893f20230370a6495ad29c6fc2d10e33e1e
692,894
from typing import Callable
from typing import Any
from functools import reduce

def pipe(*operators: Callable[[Any], Any]) -> Callable[[Any], Any]:
    """Compose multiple operators left to right.

    A composition of zero operators gives back the source.

    Examples:
        >>> pipe()(source) == source
        >>> pipe(f)(source) == f(source)
        >>> pipe(f, g)(source) == g(f(source))
        >>> pipe(f, g, h)(source) == h(g(f(source)))

    Returns:
        The left-to-right composition.
    """
    def composed(source: Any) -> Any:
        result = source
        for operator in operators:
            result = operator(result)
        return result
    return composed
b663782ccce3002ce8f21e42a5c47b205649c157
692,895
from typing import List
from datetime import datetime

def get_closest_timestamp(timestamps: List[datetime], ref_timestamp: datetime) -> datetime:
    """Get the timestamp closest to the reference timestamp.

    Bug fix: the original compared ``timedelta.days``, which truncates —
    sub-day differences were ignored and negative deltas rounded
    asymmetrically.  Compare the full timedeltas instead.

    :raises ValueError: if `timestamps` is empty.
    """
    return min(timestamps, key=lambda ts: abs(ts - ref_timestamp))
514d1713321c2c2a0a22d45ff20c45eb83c24a6a
692,896
def _xml_escape_attr(attr, skip_single_quote=True): """Escape the given string for use in an HTML/XML tag attribute. By default this doesn't bother with escaping `'` to `&#39;`, presuming that the tag attribute is surrounded by double quotes. """ escaped = (attr .replace('&', '&amp;') .replace('"', '&quot;') .replace('<', '&lt;') .replace('>', '&gt;')) if not skip_single_quote: escaped = escaped.replace("'", "&#39;") return escaped
bc2e28a480ba41b13708665b55eb822e207bb236
692,897
def encode_id(instance_id):
    """
    Convert an instance id to a mask colour.

    This matches the encoding done in the dataset renderer, see
    https://github.com/jskinn/Dataset_Synthesizer/blob/local-devel/Source/Plugins/NVSceneCapturer/Source/NVSceneCapturer/Private/NVSceneCapturerUtils.cpp#L673

    :param instance_id: integer instance id
    :return: [r, g, b] channel list, each channel masked to even values < 255
    """
    red = (instance_id << 1) & 254
    green = (instance_id >> 6) & 254
    blue = (instance_id >> 13) & 254
    return [red, green, blue]
156862e36400c934a58ca1ad6dc3c24fdf9f7c65
692,898
def build_config_var(beta=False, external=False):
    """
    Create the configuration key which will be used to locate the base
    tiddlywiki file.  '_external' is appended before '_beta'.
    """
    suffix = ('_external' if external else '') + ('_beta' if beta else '')
    return 'base_tiddlywiki' + suffix
bb3adb422ef26740702acd24c517b095921a4e83
692,899
from pathlib import Path
import os

def find_youngest_file(path, pattern, n=1):
    """
    Find the file(s) matching a pattern with the highest modification
    timestamps.

    input:
        path, string or pathlib.Path, where to look for the file(s)
        pattern, string, pattern to look for (see pathlib.Path.glob)
        n, integer >= 1, how many to return (defaults to 1)

    returns a list of up to n youngest files (paths, youngest first),
    or None if no file matches
    """
    assert n >= 1, "n must be greater equal 1."
    candidates = [Path(entry) for entry in Path(path).glob(pattern)
                  if os.path.isfile(entry)]
    if not candidates:
        return None
    candidates.sort(key=os.path.getmtime, reverse=True)
    return candidates[:n]
51db431ddeed2574033b83ecb6df9c5c692f9b51
692,900
from typing import Callable

def always(func: Callable) -> Callable:
    """Marks a test method to run even when previous tests have failed."""
    setattr(func, '_always', True)  # type: ignore
    return func
6ad1588574f283d0c541f3871cd1e30a824af55e
692,901
import os import sys def _get_unzipped_local_path(filename): """The callback script and filename are sent in a .zip to the tester machine. When unzipped they should be in the same directory. """ d = os.path.dirname(sys.argv[0]) return os.path.normpath(os.path.join(d, filename))
5584bbfb200d3bb0d47926d6009c7a640d0ff8da
692,902
def list_print(shoplist: list) -> str:
    """Format shopping-list data for display in a chat with the user.

    :param shoplist: shopping list data; each element is a pair whose second
        item is a dict with ``name`` and a comma-separated ``shopping_list``
    :return: human-readable, numbered rendering of every list
    """
    out = ""
    for element in shoplist:
        entry = element[1]
        out += entry["name"] + ":\n"
        items = entry["shopping_list"].split(',')
        for position, item in enumerate(items, start=1):
            out += f"{position}. {item}\n"
        out += '\n'
    return out
949d982d64b14343edf1d61055505ac343e7f091
692,903
import yaml


def yamllint():
    """Parse the ``.yamllint`` file in the current working directory.

    :return: the parsed YAML content.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original opened the file without ever closing it.
    with open(".yamllint") as config_file:
        return yaml.safe_load(config_file.read())
c3be615e38c7e698f4bc27b7998392fb00884528
692,904
def get_pair_st_so(st_id, so_id):
    """Get string st<st_id>-so<so_id> (e.g. st0-so0).

    Parameters
    ----------
    st_id : int
        station id.
    so_id : int
        source id.

    Returns
    -------
    pair_st_so : str
        output.
    """
    return f"st{st_id}-so{so_id}"
0f222818163706f40dc835ee2b66c7420834ea7a
692,905
def sort_root_accounts(roots):
    """Sort root accounts in place as Asset, Liability, Equity, Income, Expense.

    Balance Sheet roots sort before other report types; within the remaining
    ties, Asset precedes everything, Liability precedes Equity, and Income
    precedes Expense.

    :param roots: list of objects with ``report_type`` and ``root_type``
        attributes; sorted in place, nothing is returned.
    """
    # Local import keeps this fix self-contained.
    from functools import cmp_to_key

    def compare_roots(a, b):
        if a.report_type != b.report_type and a.report_type == "Balance Sheet":
            return -1
        if a.root_type != b.root_type and a.root_type == "Asset":
            return -1
        if a.root_type == "Liability" and b.root_type == "Equity":
            return -1
        if a.root_type == "Income" and b.root_type == "Expense":
            return -1
        return 1

    # list.sort() no longer accepts a raw comparison function on Python 3;
    # the original `roots.sort(compare_roots)` raises TypeError there.
    roots.sort(key=cmp_to_key(compare_roots))
68c3db83ea1a7762ce1753c464542ec62c6f8c48
692,906
def binary_partition(n):
    """Get the powers of two that sum to an integer.

    Returns the exponents of the set bits of ``n`` in increasing order;
    ``n == 0`` yields ``[0]``.
    """
    if n == 0:
        return [0]
    exponents = []
    bit = 0
    while n:
        if n & 1:
            exponents.append(bit)
        bit += 1
        n >>= 1
    return exponents
c69f597ae49e70d279e582f2581ecea26d5db6dc
692,907
def create_q7c_list_item():
    """Build the q7c1..q7c19 column names for survey question Q7.

    Q7 asks which aspects the respondent values in canned beer (the 19
    answer options cover e.g. fine foam, drinkability, aroma, quality,
    premium feel, and "none of the above"); each option maps to one
    ``q7c<i>`` key.
    """
    # All 19 option keys.
    return ["q7c" + str(i) for i in range(1, 20)]
ea3bf4532ea22a8c2d9b2f84625f32f61d469758
692,908
def read_file(file_name):
    """Read a file and return its lines, stripped of surrounding whitespace.

    :param file_name: path to file
    :type file_name: str
    :return: list of stripped lines
    """
    with open(file_name, 'r') as handle:
        return [line.strip() for line in handle]
9b9c2fe87be5e2a63ded1cc9bc0be15499a1b90f
692,909
def params_1(kernels, time_1, time_system, time_format, sclk_id):
    """Input parameters from WGC API example 1."""
    params = dict(
        kernels=kernels,
        times=time_1,
        time_system=time_system,
        time_format=time_format,
        sclk_id=sclk_id,
    )
    return params
10a5c31b2fbd2575093dc51dd0cf49b5f67f7172
692,910
import yaml


def load_yaml(file_path):
    """Load the YAML file located at ``file_path`` and return its content.

    Raises the usual ``OSError``/YAML errors if the file cannot be read
    or parsed.
    """
    with open(file_path) as stream:
        parsed = yaml.load(stream, Loader=yaml.FullLoader)
    return parsed
33baca8cb28a935d6a8d0dd643cd7cf716c191ac
692,911
import numpy
def string_table(tbl, delimeter='print', has_header=True):
    """
    Provided the array of data, format it with equally spaced columns
    and add a header (first row) and contents delimeter.

    Args:
        tbl (`numpy.ndarray`_):
            Array of string representations of the data to print.
        delimeter (:obj:`str`, optional):
            If the first row in the table containts the column headers (see
            ``has_header``), this sets the delimeter between first table row and
            the column data. Use ``'print'`` for a simple line of hyphens,
            anything else results in an ``rst`` style table formatting.
        has_header (:obj:`bool`, optional):
            The first row in ``tbl`` contains the column headers.

    Returns:
        :obj:`str`: Single long string with the data table.
    """
    nrows, ncols = tbl.shape
    # Width of each output column = longest cell in that input column.
    col_width = [numpy.amax([len(dij) for dij in dj]) for dj in tbl.T]
    # _nrows: total output lines; start: output index of the first data row.
    _nrows = nrows
    start = 1
    if delimeter != 'print':
        # rst tables need a '=' rule above the header and below the body.
        _nrows += 2
        start += 1
    if has_header:
        # One extra line for the delimeter under the header.
        _nrows += 1
        start += 1
    row_string = ['']*_nrows
    # Fill the data rows; tbl row 0 is treated as the heading and written below.
    for i in range(start,nrows+start-1):
        row_string[i] = ' '.join([tbl[1+i-start,j].ljust(col_width[j]) for j in range(ncols)])
    if delimeter == 'print':
        # Heading row
        row_string[0] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)])
        # Delimiter
        if has_header:
            row_string[1] = '-'*len(row_string[0])
        return '\n'.join(row_string)+'\n'
    # For an rst table
    row_string[0] = ' '.join([ '='*col_width[j] for j in range(ncols)])
    row_string[1] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)])
    if has_header:
        # Rule separating the header from the body.
        row_string[2] = row_string[0]
    # Closing rule at the bottom of the rst table.
    row_string[-1] = row_string[0]
    return '\n'.join(row_string)+'\n'
eff89d8602b065315ec97eb50c4f5f56e97c0a62
692,912
def quorum_check(value_x, value_y, value_z, delta_max):
    """
    Quorum Checking function
    Requires 3 input values and a max allowed delta between sensors as args.
    Checks all 3 values against each other and max delta to determine if
    sensor has failed or is way out of agreement with the other two.
    Returns a "Return Code" and a value.
    Return Codes:
        0 - All sensors agree,
        1 - sensor x out of spec,
        2 - sensor y out of spec,
        3 - sensor z out of spec,
        4 - no sensors agree, you should error out/email/alarm/etc.
        5 - sensors agree in pairs but spread across all 3 exceeds delta
    """
    # Pairwise agreement: two sensors agree when they differ by at most delta_max.
    agree_xy = abs(value_x - value_y) <= delta_max
    agree_xz = abs(value_x - value_z) <= delta_max
    agree_yz = abs(value_y - value_z) <= delta_max

    agreements = agree_xy + agree_xz + agree_yz

    # No pair agrees at all -> total failure.
    if agreements == 0:
        return [4, 0]

    # Every pair agrees -> all good, report the average of all three.
    if agreements == 3:
        return [0, round((value_x + value_y + value_z) / 3, 1)]

    # Two pairs agree but the third does not: the spread across all three
    # exceeds delta; still report the average of all three.
    if agreements == 2:
        return [5, round((value_x + value_y + value_z) / 3, 1)]

    # Exactly one pair agrees: the sensor outside that pair is out of spec;
    # report the average of the two agreeing sensors.
    if agree_xy:
        return [3, round((value_x + value_y) / 2, 1)]
    if agree_yz:
        return [1, round((value_y + value_z) / 2, 1)]
    return [2, round((value_x + value_z) / 2, 1)]
4802a664fd3de588b14f736ca22a47297e7eb6dd
692,913
import hashlib


def computeFileChecksum(algo, filePath):
    """Compute digest of ``filePath`` using ``algo``.

    Supported hashing algorithms are SHA256, SHA512, and MD5.

    It internally reads the file by chunk of 8192 bytes.

    :raises ValueError: if algo is unknown.
    :raises IOError: if filePath does not exist.
    """
    if algo not in ('SHA256', 'SHA512', 'MD5'):
        raise ValueError("unsupported hashing algorithm %s" % algo)

    digest = hashlib.new(algo)
    with open(filePath, 'rb') as stream:
        # iter() with a sentinel yields 8192-byte chunks until EOF.
        for chunk in iter(lambda: stream.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
5e93b79ec6f008133e2ce436c91be9452d912c63
692,914
def detectFeatures(taskParams):
    """
    Function to detect missed features in buildconfig.
    It should return a list of detected features.
    """
    missed = []
    # A 'run' entry implies the 'runcmd' feature must be enabled.
    if 'run' in taskParams and 'runcmd' not in taskParams['features']:
        missed.append('runcmd')
    return missed
84798d029413a1bc72c8cf15ba71a6667b104adf
692,915
def cleanup(val):
    """
    Round tiny values down to exactly 0.

    :param val: learned policy value
    :return: 0 when ``abs(val) < 1e-3``, otherwise ``val`` unchanged
    """
    return 0 if abs(val) < 1e-3 else val
1fe37e06bda7783a90e4c2883da887aa26800ea1
692,916
def currency_to_num(string, data_type=int):
    """
    Converts a pound sterling currency value into a number.

    >>> currency_to_num("£250,000")
    250000

    :param string: value of currency as a string
    :param data_type: intended data type of output
    :return: numerical value of currency, or the cleaned string when it
        cannot be converted
    """
    cleaned = string.strip()
    for token in ('£', ',', 'pcm'):
        cleaned = cleaned.replace(token, '')
    try:
        return data_type(cleaned)
    except ValueError:
        return cleaned
884f964695100758220387f67ab296d5182cf503
692,917
def quotePath(path):
    """Appends quotes around string if quotes are not already present"""
    # Check first/last characters individually so each missing quote is
    # added independently.
    missing_open = path[0] != '"'
    if missing_open:
        path = '"' + path
    missing_close = path[-1] != '"'
    if missing_close:
        path = path + '"'
    return path
fdd0a2e31a9515dcf41983e52d54b065c09e76df
692,918
def r_upper(l: list) -> list:
    """Return a new list with str.upper applied to each string in l."""
    return [item.upper() for item in l]
efbafb59201be5c504756920d82b541d1f32702a
692,919
import os
import argparse


def checkFileRead(filename):
    """Check if file exists and we have access to read it"""
    readable = os.path.isfile(filename) and os.access(filename, os.R_OK)
    if not readable:
        raise argparse.ArgumentTypeError("Invalid {} file (File does not exist, insufficient permissions or it's not a file).".format(filename))
    return filename
23e56a42a23a19927e6686f1c7d564a3e9c6de5a
692,920
import csv
from io import StringIO


def csv_res2_dict_lst(res):
    """Convert CSV string with a header into list of dictionaries"""
    reader = csv.DictReader(StringIO(res), delimiter=",")
    return [row for row in reader]
5271cd4ef1e82fdc77b0d69c58faedf2f971c07c
692,921
def check_zones(domain, zones):
    """
    Check if the provided domain exists within the zone
    """
    for zone in zones:
        # Exact match, or domain is a subdomain of the zone.
        if domain == zone:
            return zone
        if domain.endswith("." + zone):
            return zone
    return None
b00a8db6175f13c2227ce5b51fe0954a04252fea
692,922
from typing import Optional
from pathlib import Path
import os


def conda_install_path() -> Optional[Path]:
    """
    If a conda install of niftyreg is available, return the directory
    containing the niftyreg binaries.
    """
    prefix = os.environ.get("CONDA_PREFIX")
    if prefix is None:
        return None
    bin_path = Path(prefix) / "bin"
    # The presence of reg_aladin marks a usable niftyreg install.
    return bin_path if (bin_path / "reg_aladin").exists() else None
3857ecc79dabd03b47722bd7b75e5c5b37888b53
692,923
def nucleotide_table_data():
    """Return a dictionary containing the expected values of the Nucleotide Table.

    Keys are the table's column names; each value is the list of that
    column's entries, in row order.
    """
    columns = [
        "id",
        "Nucleotide_id",
        "NucleotideAccession",
        "NucleotideBioProjectAccession",
        "NucleotideOrganism",
        "NucleotideLength",
        "NucleotideComment",
    ]

    metadata = [
        ["1", "1769169004", "NZ_CP045153", "PRJNA224116", "Yersinia pestis", "9610", ""],
        ["2", "1769169003", "NZ_CP045152", "PRJNA224116", "Yersinia pestis", "33990", ""],
        ["3", "1769169002", "NZ_CP045151", "PRJNA224116", "Yersinia pestis", "68343", ""],
        ["4", "1769169001", "NZ_CP045150", "PRJNA224116", "Yersinia pestis", "100984", ""],
        ["5", "1769169000", "NZ_CP045149", "PRJNA224116", "Yersinia pestis", "4546217", ""],
        ["6", "1769094081", "CP045153", "PRJNA269675", "Yersinia pestis", "9610", ""],
        ["7", "1769094034", "CP045152", "PRJNA269675", "Yersinia pestis", "33990", ""],
        ["8", "1769093958", "CP045151", "PRJNA269675", "Yersinia pestis", "68343", ""],
        ["9", "1769093841", "CP045150", "PRJNA269675", "Yersinia pestis", "100984", ""],
        ["10", "1769089848", "CP045149", "PRJNA269675", "Yersinia pestis", "4546217", ""],
    ]

    # Transpose the row-oriented metadata into column-oriented lists.
    # (The original also printed every cell while building the dict; that
    # leftover debug output has been removed.)
    return {
        column: [row[index] for row in metadata]
        for index, column in enumerate(columns)
    }
73540a590f39216fdbccff25dc35076429320e6a
692,924
import pickle


def readPickle(picklePath):
    """Load and return the object stored in the pickle file.

    :param picklePath: path for the file to be read
    :return: the unpickled object
    """
    with open(picklePath, 'rb') as pickle_handle:
        return pickle.load(pickle_handle)
2bb420ba0ccc62f77d53515ac06e63f0bfccc9bb
692,925
import torch


def interpolate(x: torch.Tensor, ratio: int):
    """Interpolate data in time domain.

    This is used to compensate the resolution reduction in downsampling
    of a CNN: every time step is repeated ``ratio`` times.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, ratio to interpolate

    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    batch_size, time_steps, classes_num = x.shape
    # Insert a new axis, tile it `ratio` times, then flatten it into time.
    expanded = x.unsqueeze(2).repeat(1, 1, ratio, 1)
    return expanded.reshape(batch_size, time_steps * ratio, classes_num)
507afdbbf1a3b35f00ea0721a4343d07df4c1ec5
692,926
def base_arguments(source="/tmp", target="/tmp", start=None, end=None, glob="*"):
    """Builds the base arguments dictionary

    Args:
     source (str): the source directory of the packets
     target (str): the target directory for the output
     start (str): the time for the earliest packets
     end (str): the time for the latest packets
     glob (str): file-glob for the source files

    Returns:
     dict: arguments for the GetPackets Constructor
    """
    return {
        'source': source,
        'target': target,
        'start': start,
        'end': end,
        'source_glob': glob,
    }
f58add887447f2e262a7bb9a65d617b11e9a7265
692,927
def collate_acceptance_ratios(acceptance_list):
    """
    Collate the running proportion of all runs that have been accepted
    from an MCMC chain.
    """
    ratios = []
    accepted = 0
    for total, was_accepted in enumerate(acceptance_list, start=1):
        if was_accepted:
            accepted += 1
        ratios.append(accepted / total)
    return ratios
24138b866db311e6757389311ab308336b4eabec
692,928
def turn_cycle(cycle, front_node):
    """
    turn the list of the cycle to ensure frontnode is the first
    :param cycle: the cycle to be turned
    :param front_node: the node that needs to be at the front(and the back)
    :return: the turned cycle

    takes at most cyclelength -1 runs, so is bounded by O(V)
    """
    # NOTE(review): this rotation only preserves all nodes when the cycle is
    # stored with its first node repeated at the end (e.g. [a, b, c, a]) —
    # confirm that representation with the callers.
    if front_node not in cycle:  # ensure it will not run forever because it lacks the required node
        raise Exception("incorrect use of turn_cycle function, front_node not in given cycle")
    while cycle[0] != front_node:  # as long as the node at the front is not the desired frontnode, make the second node the node at the front and check again
        # Drop the leading node and re-append the new front, keeping the
        # "ends with the front node" invariant while rotating.
        cycle = cycle[1:]
        cycle += (cycle[0],)
    return cycle
58ff487f5cea72bbfbb9076df97c432af42eb482
692,929
def GenerateDataKey(operation, zone_source, zone_target, database_set, instance, table, primary_key):
    """operation is 'insert'/'update'/'delete'"""
    # Build the prefix so that the "location" of this key is easily derived
    # from text, which can be passed between different systems and stored.
    prefix = f"{zone_source}.{zone_target}.{database_set}.{instance}"
    # Build up the final key
    return f"data.{operation}.{prefix}.{table}.{primary_key}"
04835580f26cdfa84654306397e25e2b6d519626
692,930
def get_individual(individual, ensembl_json):
    """Return a list with the genotypes of the individual.

    A genotype entry matches when ``individual`` occurs as a substring of
    its "sample" field.
    """
    return [
        genotype
        for genotype in ensembl_json["genotypes"]
        if individual in genotype["sample"]
    ]
a3024c8f7ec15b37ceb7d83874a662354a14ee57
692,931
def _tef_P(P): """Define the boundary between Region 3e-3f, T=f(P) >>> "%.7f" % _tef_P(40) '713.9593992' """ return 3.727888004*(P-22.064)+647.096
6e4f83dd0e54df60c7ff26e1c2aee1bed0b43fca
692,933
def _is_sigkill(sig: str): """Return 'True' if sig' is the 'SIGKILL' signal.""" return sig == "15" or sig.endswith("KILL")
aea4b2b75f5e5bdaa60c984524a69164c1435bc2
692,934
def FoP_initialisation(S, source):
    """
    'L' initialisation for FoP
    :param S: unused here, kept for interface compatibility
    :param source: pair whose second item keys both maps to its first item
    :return: tuple (L, R) of dicts, each mapping source[1] -> source[0]
    """
    L = {source[1]: source[0]}  # (a_u)
    R = {source[1]: source[0]}
    return L, R
8cf73da05196d41bcce8fa5bd12bec4bfc2b5725
692,935
def find_period(l):
    """
    Finds the period of list of numbers.

    Parameters
    ----------
    l: integer[]
        The sequence of numbers.

    Return
    ------
    steps: integer
        The period. Returns None, if no period is found.
    """
    for candidate in range(1, len(l)):
        # A candidate period must repeat the first value and match the
        # whole prefix against the following window of the same length.
        if l[candidate] == l[0] and l[:candidate] == l[candidate:2 * candidate]:
            return candidate
    return None
7de707c550aeeaa37ad7ad7b1d1819b2809059fe
692,936
def partition_names_by_comp(names, compmap=None):
    """Take an iterator of names and return a dict with component names
    keyed to lists of variable names. Simple names (having no '.' in
    them) will have a key of None.

    For example, the list ['abc.def', 'abc.pdq', 'foo', 'bar'] would
    return the dict { 'abc': ['def','pdq'], None: ['foo', 'bar'] }

    If a compmap dict is passed in, it will be populated with data from
    the iterator of names.
    """
    if compmap is None:
        compmap = {}
    for name in names:
        comp, sep, rest = name.partition('.')
        if sep:
            compmap.setdefault(comp, []).append(rest)
        else:
            compmap.setdefault(None, []).append(name)
    return compmap
7dc9c90feef9fdaf3ac78e5a04a7568265d70b30
692,937