content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def keep_step(metrics, step):
    """Flatten metric histories down to the value recorded at `step`.

    Args:
        metrics (dict): maps metric name -> list of (step, value) pairs
            e.g. { "loss": [(1, 0.1), (100, 0.01), ...],
                   "acc": [(1, 0.9), (100, 0.91), ...] }
        step (int): the step we want to keep

    Returns:
        dict: maps metric name -> value at `step`, or None when that metric
        has no entry for `step`, e.g. { "loss": 0.01, "acc": 0.91 }
    """
    flattened = {}
    for name, history in metrics.items():
        # Default to None; take the first matching step, if any.
        flattened[name] = next((v for s, v in history if s == step), None)
    return flattened
6d1ea0adfcb8e311c312690117106c410082be32
693,725
def quantile(l, p):
    """Return the p quantile of list l (e.g. p=0.25 for q1).

    Linear interpolation between order statistics, following:
    http://rweb.stat.umn.edu/R/library/base/html/quantile.html
    """
    ordered = sorted(l)
    n = len(ordered)
    rank = 1 + ((n - 1) * p)
    whole = int(rank)
    frac = rank - whole
    if whole >= n:
        # Rank lands on the last element: nothing to interpolate with.
        return ordered[whole - 1]
    # Interpolate between the two neighbouring order statistics.
    return (1 - frac) * ordered[whole - 1] + frac * ordered[whole]
2fae67ed8caf55a4701d463687b0e0e4220a8e6d
693,726
def swap_bits(num, pos1, pos2):
    """Return num with the bits at positions pos1 and pos2 exchanged.

    Used by quantum_state.measure_multiple method.
    """
    # XOR of the two bits is 1 only when they differ (i.e. a swap changes num).
    differ = ((num >> pos1) ^ (num >> pos2)) & 1
    # Flip both positions iff the bits differ.
    return num ^ ((differ << pos1) | (differ << pos2))
6d880bd60e1e6491879eade1a89bb2b76b4a1028
693,727
from typing import List


def get_metaworld_mt_benchmark_names() -> List[str]:
    """Return the list of Metaworld multi-task benchmark names."""
    names = ["MT1", "MT10", "MT50"]
    return names
594195927ce211c1a2da9f2be3853198f31c24cc
693,728
import requests


def is_positive(text):
    """Use a web service to determine whether the sentiment of text is positive.

    :param text: text to analyse.
    :return: True when the service's positive probability exceeds the
        negative one.
    """
    r = requests.post("http://text-processing.com/api/sentiment/",
                      data={'text': text})
    # Parse the JSON body once instead of once per probability lookup.
    probability = r.json()['probability']
    return probability['pos'] > probability['neg']
e5a5d480a619c29969286a2bed17df14b9bc3f11
693,729
from io import StringIO
import traceback


def get_call_stack():
    """Return a string showing the call stack at the point of the call.

    .. exref::
        :title: Display the call stack

        .. runpython::
            :showcode:

            from pyquickhelper.pycode import get_call_stack
            print(get_call_stack())
    """
    buffer = StringIO()
    traceback.print_stack(file=buffer)
    return buffer.getvalue()
bf5147158b3aa9b60439f8fc48b5d05aff47ff02
693,730
def base_transport_factor(i_cn=0, i_en=1):
    """Compute the base transport factor.

    The base transport factor measures the fraction of electrons that
    make it into the collector after injection from the emitter.

    Parameters
    ----------
    i_cn : float, optional
        Collector electron current. The default is 0.
    i_en : float, optional
        Emitter electron current. The default is 1.

    Returns
    -------
    float
        The ratio i_cn / i_en.
    """
    # Docstring fixed: the original claimed "Returns None" and left
    # parameter descriptions as placeholders.
    alpha_t = i_cn / i_en
    return alpha_t
f3a551a9abbda75e8ca7dceb43373699d34d8886
693,731
def attr_names(attr_map):
    """Return a sorted list of attribute names from the map.

    :param attr_map: mapping of attribute name -> value; may be None or empty.
    :return: sorted list of keys, or [] for a falsy map.
    """
    if attr_map:
        # sorted() already returns a list; the old list() wrapper was redundant.
        return sorted(attr_map.keys())
    return []
e6dff4a416cf6d26e29ad01484cd26e307c6efb0
693,732
def removeEmptySections(configtree):
    """Return a copy of configtree with empty dict sections removed.

    Recurses into nested dicts; sections that become empty after pruning
    are dropped entirely.  Non-dict leaf values are replaced by the
    string 'filled' in the result (only their presence is kept).
    """
    ret = dict()
    branchkeys = configtree.keys()
    for key in branchkeys:
        if type(configtree[key]) == dict:
            #print "+++ section[",key,"] is dict"
            if len(configtree[key]) > 0:
                #print "   +++ and > 0:",configtree[key]
                temp = removeEmptySections(configtree[key])
                # Only keep the section if something survived pruning.
                if len(temp) > 0:
                    ret.update({key: temp})
        else:
            # NOTE(review): leaf values are deliberately collapsed to the
            # marker string 'filled' -- confirm callers only test presence.
            ret.update({key: 'filled'})
    return ret
ac192c1bed8a99d972bc0f2b281ea48d096591e4
693,733
def resolve_location_type_enum(location_id):
    """Map a location item ID onto the name of its location type.

    IDs 30,000,000-39,999,999 are solar systems, 60,000,000-63,999,999
    are stations, anything >= 100,000,000 is an item; everything else
    resolves to "other".
    """
    ranges = (
        (lambda i: 30000000 <= i <= 39999999, "solar_system"),
        (lambda i: 60000000 <= i < 64000000, "station"),
        (lambda i: i >= 100000000, "item"),
    )
    for matches, type_name in ranges:
        if matches(location_id):
            return type_name
    return "other"
399699ba60f9f2ce82010731e1fa60f981b5ef96
693,734
def count_collisions(Collisions):
    """Count unique collision events and record where each one starts.

    Parameters
    ----------
    Collisions : array_like
        Array of booleans, containing true if during a collision event,
        false otherwise.

    Returns
    -------
    CollisionCount : int
        Number of unique collisions
    CollisionIndicies : list
        Indicies of collision occurance
    """
    indices = []
    # Start "inside" a collision so an event already in progress at the
    # first sample is not counted as a new one.
    previous = True
    for position, in_collision in enumerate(Collisions):
        if in_collision and previous == False:
            indices.append(position)
        previous = in_collision
    return len(indices), indices
15118945d9ccc7b8760e01d993e59b36943fc7a0
693,735
def quintic_ease_out(p):
    """Quintic easing-out curve, modeled after y = (x - 1)^5 + 1."""
    shifted = p - 1
    # Explicit multiply chain keeps float rounding identical to the original.
    fifth_power = shifted * shifted * shifted * shifted * shifted
    return fifth_power + 1
ffd09fcc45f08861688257c254cc86a53bf13041
693,736
def context_string(context=None, with_comma=True):
    """Return a string describing <context>, or '' when context is None.

    When with_comma is True the result is prefixed with ', at ' (context
    carries a non-negative line number) or ', in ' (file-only context).

    >>> context_string()
    ''
    >>> context_string(with_comma=True)
    ''
    >>> context_string(context=ParseContext(linenum=32, filename="source.F90"), with_comma=False)
    'source.F90:33'
    >>> context_string(context=ParseContext(linenum=32, filename="source.F90"))
    ', at source.F90:33'
    >>> context_string(context=ParseContext(filename="source.F90"), with_comma=False)
    'source.F90'
    >>> context_string(context=ParseContext(filename="source.F90"))
    ', in source.F90'
    """
    if context is None:
        return ""
    if not with_comma:
        return "{}".format(context)
    # A negative line number marks a file-only context.
    if context.line_num < 0:
        return ", in {}".format(context)
    return ", at {}".format(context)
6a8c4e3004c47ff5c29a31ddb9421e6369effd72
693,737
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        List of dictionaries. Structured data with the following schema:

        [
          {
            "name": string, "flags": integer, "state": [string],
            "mtu": integer, "ipv4_addr": string, "ipv4_mask": string,
            "ipv4_bcast": string, "ipv6_addr": string, "ipv6_mask": integer,
            "ipv6_scope": string, "mac_addr": string, "type": string,
            "rx_packets": integer, "rx_bytes": integer, "rx_errors": integer,
            "rx_dropped": integer, "rx_overruns": integer, "rx_frame": integer,
            "tx_packets": integer, "tx_bytes": integer, "tx_errors": integer,
            "tx_dropped": integer, "tx_overruns": integer,
            "tx_carrier": integer, "tx_collisions": integer, "metric": integer
          }
        ]
    """
    # Hoisted out of the loop: the key list is invariant.
    int_list = ['flags', 'mtu', 'ipv6_mask', 'rx_packets', 'rx_bytes',
                'rx_errors', 'rx_dropped', 'rx_overruns', 'rx_frame',
                'tx_packets', 'tx_bytes', 'tx_errors', 'tx_dropped',
                'tx_overruns', 'tx_carrier', 'tx_collisions', 'metric']

    for entry in proc_data:
        for key in int_list:
            if key in entry:
                try:
                    entry[key] = int(entry[key])
                except (ValueError, TypeError):
                    entry[key] = None

        # convert OSX-style subnet mask (e.g. '0xffffff00') to dotted quad
        if 'ipv4_mask' in entry:
            try:
                if entry['ipv4_mask'].find('0x') == 0:
                    # BUGFIX: lstrip('0x') strips the *character set*
                    # {'0','x'}, so '0x00ffff00' lost its leading zero
                    # octet; slice off exactly the '0x' prefix instead.
                    hex_mask = entry['ipv4_mask'][2:]
                    octet_pairs = [hex_mask[i:i + 2]
                                   for i in range(0, len(hex_mask), 2)]
                    entry['ipv4_mask'] = '.'.join(str(int(pair, 16))
                                                  for pair in octet_pairs)
            except (ValueError, TypeError, AttributeError):
                pass

        # convert the comma-separated state value to an array
        if 'state' in entry:
            try:
                entry['state'] = entry['state'].split(',')
            except (ValueError, TypeError, AttributeError):
                pass

    return proc_data
b4d990e75204d9137d7f9fc39b38f10716972939
693,738
def get_trial_metadata_by_trial_id(trial): """Get one trial metadata record by trial identifier.""" # this is not user-input due to @with_lookup, so safe to return return trial
b6b0eab93c8de9d7f3bd498dc47b6660739152fa
693,739
def classproperty(f):
    """Create a read-only property that lives on the class, not an instance.

    Works only for getting; assignment is not intercepted.
    """
    class _ClassPropertyDescriptor(object):
        # The instance argument is ignored; f always receives the class.
        def __get__(self, _instance, owner):
            return f(owner)
    return _ClassPropertyDescriptor()
1f24a6d3c7470d5042089c5f8ab71c2949fdfe77
693,740
import time


def timestamp_to_datetime(timestamp, formatter="%Y-%m-%d %H:%M:%S"):
    """Format a second-resolution timestamp as a local-time string.

    :param timestamp: Unix timestamp, precise to the second.
    :param formatter: output format, default "%Y-%m-%d %H:%M:%S".
    :return: formatted local-time string (CST in the original deployment).
    """
    return time.strftime(formatter, time.localtime(timestamp))
56b199e9054a278b22c25cc86d6905c8a2112c18
693,742
from random import random


def random_string(length, char_range=127, char_offset=128):
    """Return a string of `length` random characters in the interval
    (`char_offset`, `char_offset + char_range`).
    """
    chars = []
    for _ in range(length):
        code = int(random() * char_range + char_offset)
        chars.append(chr(code))
    return "".join(chars)
6e898d59848ef8d912c19828805e1273cbc4df46
693,743
from typing import List


def convert_bio_tags_to_conll_format(labels: List[str]):
    """
    Converts BIO formatted SRL tags to the format required for evaluation
    with the official CONLL 2005 perl script. Spans are represented by
    bracketed labels, with the labels of words inside spans being the same
    as those outside spans. Beginning spans always have an opening bracket
    and a closing asterisk (e.g. "(ARG-1*" ) and closing spans always have
    a closing bracket (e.g. "*)" ). This applies even for length 1 spans,
    (e.g "(ARG-0*)"). A full example of the conversion performed:

    [B-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, O]
    [ "(ARG-1*", "*", "*", "*", "*)", "*"]

    Parameters
    ----------
    labels : List[str], required.
        A list of BIO tags to convert to the CONLL span based format.

    Returns
    -------
    A list of labels in the CONLL span based format.
    """
    num_labels = len(labels)
    conll_labels = []
    for index, label in enumerate(labels):
        if label == "O":
            conll_labels.append("*")
            continue
        # A span opens here when this is an explicit B- tag, the first
        # token of the sentence, or the role differs from the previous one.
        opens = (label[0] == "B"
                 or index == 0
                 or label[1:] != labels[index - 1][1:])
        # A span closes here when the sentence ends, the next token starts
        # a new span, or the next token carries a different role.
        closes = (index == num_labels - 1
                  or labels[index + 1][0] == "B"
                  or label[1:] != labels[index + 1][1:])
        token = "*"
        if opens:
            token = "(" + label[2:] + token
        if closes:
            token = token + ")"
        conll_labels.append(token)
    return conll_labels
efab766299a40f32e4886d32ec3e249891c2b084
693,744
def overwrite_yaml(yaml, dst):
    """Copy the configuration attributes of `dst` into the yaml mapping.

    :param yaml: dict-like yaml configuration, updated in place.
    :param dst: class CFG instance providing the new values.
    :return: the updated yaml mapping.
    """
    for section in ('wandb', 'data', 'augmentation', 'configs',
                    'model', 'optimizer', 'scheduler'):
        yaml[section] = getattr(dst, section)
    return yaml
0446dad4296feecd1d23a033177cdd9c32f074e0
693,745
def removeSESSID(urlssid):
    """Strip a trailing PHPSESSID/sid fragment from a URL.

    Cuts the string just before the first occurrence of 'PHPSESSID' (or,
    failing that, 'sid'), also dropping the single separator character
    immediately preceding it.  A marker at index 0 is left untouched.
    """
    for marker in ('PHPSESSID', 'sid'):
        position = urlssid.find(marker)
        if position > 0:
            # position - 1 also removes the '?' / '&' before the marker.
            return urlssid[0:position - 1]
    return urlssid
63ef77828bdf1bcf5b249bb93de1bd6d80fc928d
693,746
import ssl


def load_ssl_context(cert_file, key_file=None):
    """Creates an SSL context from a certificate and private key file.

    :param cert_file: Path of the certificate to use.
    :param key_file: Path of the private key to use. If not given, the key
                     will be obtained from the certificate file.
    :return: an ``ssl.SSLContext`` with the chain loaded.
    :raises ssl.SSLError: if the certificate/key pair cannot be loaded.
    """
    # Purpose.CLIENT_AUTH builds a context for the server side, i.e. this
    # side presents cert_file to connecting peers.
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain(cert_file, key_file)
    return context
2696f0a9ddc4d841066d3e78e737856733942b09
693,748
def bt_addr_to_str(bt_addr):
    """Convert a Bluetooth address (6 bytes) into a human readable format.

    :param bt_addr: address as ``bytes`` (Python 3) or a byte string
        (Python 2).
    :return: colon-separated lowercase hex string, e.g. "12:34:56:78:9a:bc".
    """
    # str.encode("hex") only exists on Python 2; format each byte instead,
    # accepting both int elements (py3 bytes) and 1-char strings (py2 str).
    return ":".join("{:02x}".format(b if isinstance(b, int) else ord(b))
                    for b in bt_addr)
85b91da76d39a21c31b782e1365e3d9e63e1e836
693,749
import string


def is_valid_folder_name(name):
    """
    Return True if the string (that will be the folder name of each
    subtest) is a valid name for a test function: it should start with
    ``test_``, and contain only ASCII letters, digits or underscores.
    """
    if not name.startswith('test_'):
        return False
    # str.translate(None, chars) was Python-2 only; an explicit allowed-set
    # check is the portable equivalent and keeps the ASCII-only restriction.
    allowed = set(string.ascii_letters + string.digits + '_')
    return all(char in allowed for char in name)
412faa838ae70d6ca9b46830d3a7e580c1b89f67
693,750
import unicodedata


def unifyaccentedchars(to_translate):
    """Normalize text to NFC form, so e.g. 'e' + combining acute becomes
    the single precomposed character."""
    return unicodedata.normalize('NFC', to_translate)
4836ab28edfeeeb48d17f2e2a8b60311cfe6ad21
693,751
def get_line_kind(line):
    """Return the kind of a line: the third whitespace-separated token
    with its single wrapping characters (e.g. quotes/brackets) removed."""
    tokens = line.split()
    kind_token = tokens[2]
    return kind_token[1:-1]
e5be2657f373525726ca988f75215170f62b33fc
693,752
import json


def load_jpeg_registry(jpeg_registry_path):
    """Load the jpeg registry JSON into a dictionary.

    :param jpeg_registry_path: path to the registry JSON file.
    :return: the parsed JSON content.
    :raises OSError: if the file cannot be opened.
    :raises json.JSONDecodeError: if the file is not valid JSON.
    """
    with open(jpeg_registry_path) as f:
        return json.load(f)
a1437f4742d80302537e666df79a8e393b5c489f
693,753
def _reverse_wrapped_binary_op(op): """Swap inputs.""" def reverser(a, b): #print("Op: {}, Left: {}, Right {}".format(op, a, b)) def wrapped(x, y): #print("Reversed, now Op: {}, Left: {}, Right {}".format(op, x, y)) return op(x, y) return wrapped(b, a) return reverser
21eeeba2e36848b2251a9ca5c2b459c01833e457
693,754
def get(attribute_name, json_response, default=None):
    """
    Get an attribute from a dictionary given its key name.

    :param attribute_name: Attribute name.
    :param json_response: Dictionary where the attribute should be.
    :param default: Value that has to be returned if the attribute is not there.
    :return: Attribute value.
    """
    if json_response and attribute_name in json_response:
        return json_response[attribute_name]
    return default
e8d794564332fec7557ba13e16f392693fc488c2
693,755
import struct


def two_ints_to_long(intl, inth):
    """Interpret two 32-bit unsigned ints as one signed 64-bit value.

    :param intl: low 32 bits.
    :param inth: high 32 bits.
    :return: signed 64-bit integer (inth << 32 | intl, two's complement).
    """
    packed = struct.pack(">II", inth, intl)
    return struct.unpack(">q", packed)[0]
c503e00f1ed934ad22590b942be85b7fd877bb47
693,756
def conflateRoutes(srcRoute, dstRoute):
    """
    Conflates the given source route to destination route

    For two given routes, this method combines probes (conflates), if
    srcRoute route is a super set of the dstRoute

    :param srcRoute: Route to be conflated to destination route
    :param dstRoute: Target route to conflate to
    :return: ascending list of indices into srcRoute whose points match all
        of dstRoute's points (matched back-to-front as a subsequence), or
        None when dstRoute is not fully contained.
    """
    indices = []
    # Walk both routes backwards, matching dstRoute's points against srcRoute.
    probeIndex = len(dstRoute) - 1
    for i, point in enumerate(reversed(srcRoute.points)):
        if probeIndex < 0:
            break
        if dstRoute.points[probeIndex] == point:
            # Record the matching position in srcRoute's forward orientation.
            indices.append(len(srcRoute) - (i + 1))
            probeIndex -= 1
    # Only a complete match of dstRoute counts as a conflation.
    if len(dstRoute) == len(indices):
        return indices[::-1]
    return None
d9fd5dde07f26a0f6ef37bcb07bfd3982b23685f
693,757
def user_avatar_upload(instance, filename):
    """Build the storage path for a user's avatar upload.

    :param instance: model instance carrying ``user_id``.
    :param filename: original upload filename (unused; the stored name
        is the fixed literal below).
    :return: path of the form "<user_id>/(unknown)".
    """
    return f'{instance.user_id}/(unknown)'
be0a34b4b7bedc0e7252127a33797a8c9bede4ce
693,759
def build_path(tmp_path_factory):
    """Provides a temp directory with a single file in it.

    :param tmp_path_factory: pytest's session-scoped temp path factory.
    :return: path of the created 'build_magic' directory, which contains
        one file 'hello.txt' whose content is 'hello'.
    """
    magic = tmp_path_factory.mktemp('build_magic')
    hello = magic / 'hello.txt'
    hello.write_text('hello')
    return magic
52a1f1a3735d80589e31e46214e9781ec03943d5
693,760
import copy


def mangle_dictionary(a_dict, curr_dicts, key_to_delete=None):
    """Return copy of nested dictionary with a field popped or added.

    The popped or added field may be at any level of nesting within the
    original dictionary.  `curr_dicts` is an OrderedDict containing each
    nested dictionary in the original dictionary we are looping through
    (not necessarily the same as a_dict).  The first entry has the key
    "top" and holds the entire original dictionary.  The second entry has
    the key of whatever the first nested dictionary is, and the value of
    that whole nested dictionary.  If that has nested dictionaries within
    it, they will be represented in the subsequent key/values, etc.

    NOTE: `curr_dicts` is consumed (popitem) by this function.
    """
    curr_dict = a_dict.copy()
    # Either remove the requested key or inject a known-bad field.
    if key_to_delete is not None:
        curr_dict.pop(key_to_delete)
    else:
        curr_dict["disallowed_key"] = "bogus_value"
    # Re-wrap the mutated dict back up through each enclosing dict,
    # innermost first (popitem(True) pops the most recently added entry).
    curr_parent_key, _ = curr_dicts.popitem(True)
    q = len(curr_dicts.keys())
    while q > 0:
        next_parent_key, next_parent_dict = curr_dicts.popitem(True)
        next_parent_dict[curr_parent_key] = copy.deepcopy(curr_dict)
        curr_dict = copy.deepcopy(next_parent_dict)
        curr_parent_key = next_parent_key
        q = q - 1
    return curr_dict
c7c2f7fbd7828688a711a279a3014da4e41a1a3e
693,761
def iptup_to_str(formatted_tuple: tuple[str, int]) -> str:
    """
    Converts a tuple IP address into its string equivalent
    (the opposite of ``ipstr_to_tup``).

    :param formatted_tuple: A two-element tuple, containing the IP address
        and the port, in the format (ip: str, port: int).
    :type formatted_tuple: tuple[str, int]
    :return: A string with the format "ip:port".
    :rtype: str
    """
    host, port = formatted_tuple
    return f"{host}:{port}"
ff3cb457a1396935ee0c9d6834fa8f17c65c4970
693,762
def s3_read_write_policy_in_json(s3_bucket_name):
    """
    Define an IAM policy statement for reading and writing to S3 bucket.

    :param s3_bucket_name: name of the target bucket.
    :return: an IAM policy statement in json.
    """
    bucket_resource_arn = "arn:aws:s3:::{}/*".format(s3_bucket_name)
    statement = {
        "Effect": "Allow",
        "Action": ["s3:Put*", "s3:Get*"],
        "Resource": [bucket_resource_arn],
    }
    return {"Version": "2012-10-17", "Statement": [statement]}
b872fd3c833e0384ed891ab3709826a9e9a823bf
693,764
def is_specific_url(url):
    """
    This api has a tendency to give you the url for the general list of
    things if there are no entries.  This separates "people/" from
    "people/123456".

    :param url: resource URL; may be empty.
    :return: True when the URL points at a specific entry, i.e. it is
        non-empty and does not end with '/'.
    """
    # Guard against the empty string, which previously raised IndexError.
    return bool(url) and url[-1] != '/'
0d4842f4e4264d38350738d92060e0d2de0575c1
693,765
def to_tuple(obj, encoder):
    """
    Converts an arbitrary object C{obj} to a C{tuple}.

    @param obj: iterable to convert.
    @param encoder: present for encoder-callback signature compatibility;
        not used by this conversion.
    @return: C{tuple} of C{obj}'s elements.
    """
    return tuple(obj)
01899e2ccea550f171b1cd0de648f4ec0db48f34
693,766
import hashlib


def get_hash_utf8(str_hash):
    """Return the MD5 hex digest of a value's UTF-8 encoding.

    :param str_hash: value to hash; non-strings are converted via str().
    :return: 32-character hexadecimal MD5 digest.
    """
    text = str_hash if isinstance(str_hash, str) else str(str_hash)
    digest = hashlib.md5(text.encode('utf-8'))
    return digest.hexdigest()
dde1070107b94c1aa40bac9a2c59df92bb64c6e2
693,767
def conj(x):
    """Return the complex conjugate of x.

    Works for any numeric type exposing ``conjugate()``, including plain
    ints and floats (which are their own conjugates).
    """
    return x.conjugate()
d710ca7536775d26176d6a759cb072532ff9428d
693,768
def diff(existing_tenants, module):
    """Report whether a tenant matching the module's requested name exists.

    NOTE: despite the original comment block describing field-level
    diffing, only a name match is performed here.

    :param existing_tenants: list of tenant dicts, each carrying a 'name'.
    :param module: Ansible-style module object exposing params['name'].
    :return: True when some existing tenant has the requested name.
    """
    wanted_name = module.params['name']
    # any() short-circuits instead of building the full matching list.
    return any(tenant['name'] == wanted_name for tenant in existing_tenants)
db7f3446bc03b4edc9dd072118514cfb3f172b08
693,769
def rdot(a, b):  # pragma: no cover
    """Real dot product of two dense vectors.

    Here, ``b`` will *not* be conjugated before the inner product.
    """
    row = a.reshape((1, -1))
    col = b.reshape((-1, 1))
    return (row @ col)[0, 0]
3dd67a9c57c1e50f86b9fdca678823e4d529d5b4
693,770
def fv_annuity(n, c, r):
    """Estimate the future value of an annuity: c/r*((1+r)**n - 1).

    :param n: number of payments.
    :param c: payment amount.
    :param r: discount rate per period (must be non-zero).

    e.g. fv_annuity(2, 100, 0.1) is approximately 210.0
    """
    growth_factor = (1 + r) ** n - 1
    return c / r * growth_factor
312d4d5a8760d86dfb310c7527e02ae611500369
693,771
def create_label_vocab(input_data):
    """Build a label -> occurrence-count mapping from the input data.

    :param input_data: iterable of strings; each is a space-separated
        sequence of labels.
    :return: dict mapping each label to how many times it appears.
    """
    label_vocab = {}
    for sentence in input_data:
        for label in sentence.strip().split(' '):
            # get() avoids the separate first-seen / already-seen branches.
            label_vocab[label] = label_vocab.get(label, 0) + 1
    return label_vocab
d65dc8ff830be880e057158432c5cff69cffa04b
693,772
from typing import Iterable from typing import Any def _iterable_choices_metavar(iterable: Iterable[Any]) -> str: """Generates a string metavar from iterable choices. Args: iterable (Iterable[Any]): Iterable object to generate metavar for. Returns: str: Generated metavar """ # Generate and Return return f"{{{', '.join(str(i) for i in iterable)}}}"
e556b62205904cb4c6ffb565f7dfe3c02f02a87d
693,773
def places_photo(client, photo_reference, max_width=None, max_height=None):
    """
    Downloads a photo from the Places API.

    :param photo_reference: A string identifier that uniquely identifies a
        photo, as provided by either a Places search or Places detail request.
    :type photo_reference: string

    :param max_width: Specifies the maximum desired width, in pixels.
    :type max_width: int

    :param max_height: Specifies the maximum desired height, in pixels.
    :type max_height: int

    :raises ValueError: if neither max_width nor max_height is given.

    :rtype: iterator containing the raw image data, which typically can be
        used to save an image file locally. For example:

        ```
        f = open(local_filename, 'wb')
        for chunk in client.places_photo(photo_reference, max_width=100):
            if chunk:
                f.write(chunk)
        f.close()
        ```
    """
    # The API requires at least one size constraint.
    if not (max_width or max_height):
        raise ValueError("a max_width or max_height arg is required")

    params = {"photoreference": photo_reference}

    if max_width:
        params["maxwidth"] = max_width
    if max_height:
        params["maxheight"] = max_height

    # "extract_body" and "stream" args here are used to return an iterable
    # response containing the image file data, rather than converting from
    # json.
    response = client._request("/maps/api/place/photo", params,
                               extract_body=lambda response: response,
                               requests_kwargs={"stream": True})

    return response.iter_content()
487c700defc4e42d4bf17a7b84ddd16e4d930e17
693,774
def beta_distribution_params(
        a=None, b=None, mu=None, sigma=None, omega=None, kappa=None):
    """Convert between parameterizations of the beta distribution.

    Given mode/concentration (omega, kappa), returns the shape pair (a, b).
    Given shapes (a, b), returns (mu, omega, kappa): mean, mode and
    concentration.

    Keyword Arguments:
        a {float} -- first shape parameter (default: {None})
        b {float} -- second shape parameter (default: {None})
        mu {float} -- mean (default: {None})
        sigma {float} -- standard deviation; accepted but unused (default: {None})
        omega {float} -- mode (default: {None})
        kappa {float} -- concentration (default: {None})

    Raises:
        NotImplementedError: when neither (omega, kappa) nor (a, b) are
            both provided.
    """
    if kappa is not None and omega is not None:
        shape_a = omega * (kappa - 2) + 1
        shape_b = (1 - omega) * (kappa - 2) + 1
        return shape_a, shape_b
    if a is not None and b is not None:
        mean = a / (a + b)
        mode = (a - 1) / (a + b - 2)
        concentration = a + b
        return mean, mode, concentration
    raise NotImplementedError
f1aa8e8dcf5d8fbb8af55424d89ca42a0343a558
693,776
def count_prizes(palmares):
    """Count the trophies of each kind in a manager's palmares.

    This is a support function used to count the prizes of each kind.  It
    is given the palmares as stored in the managers database on mongodb
    and returns the overall number of trophies of each kind.

    :param palmares: iterable of prize dicts, each with a 'Type' field.
    :return: dict of per-type counts plus aggregates: 'tot' sums the team
        trophies, 'tot_ig' the individual ones.
    """
    counts = {'Scudetto': 0, 'Champions': 0, 'Coppa di Lega': 0,
              'Supercoppa': 0, 'Porta Violata': 0, 'Cartellino Facile': 0,
              'Panchina D\'Oro': 0, 'Caduti': 0}
    for prize in palmares:
        kind = prize['Type']
        if kind in counts:
            counts[kind] += 1
    individual = (counts['Porta Violata'] + counts['Cartellino Facile']
                  + counts['Panchina D\'Oro'] + counts['Caduti'])
    team = (counts['Coppa di Lega'] + counts['Scudetto']
            + counts['Champions'] + counts['Supercoppa'])
    return {'tot': team, 'sc': counts['Scudetto'], 'ch': counts['Champions'],
            'cop': counts['Coppa di Lega'], 'sup': counts['Supercoppa'],
            'tot_ig': individual, 'pv': counts['Porta Violata'],
            'cf': counts['Cartellino Facile'],
            'po': counts['Panchina D\'Oro'], 'ca': counts['Caduti']}
dbf66bc0937c6eca6025cbcf5e71eb1ee759a351
693,778
from datetime import datetime


def get_datetime(utc_time):
    """Parse an ISO-8601-with-Z UTC date string into a naive datetime.

    :param utc_time: string like '2020-01-02T03:04:05.123Z'.
    :return: datetime object (no tzinfo attached).
    """
    return datetime.strptime(utc_time, '%Y-%m-%dT%H:%M:%S.%fZ')
a3626e9eef59a4c8d26944c4776dd6e6c30b21d8
693,779
def unique(v):
    """Generates a list from a sequence where each value appears only once,
    and the order is preserved.

    :param v: iterable of hashable items; non-iterable input yields [].
    :return: list of first occurrences, original order preserved.
    """
    try:
        sequence = list(v)
    except TypeError:
        # Only swallow "not iterable" (the original bare except hid
        # every error, including KeyboardInterrupt).
        return []
    seen = set()
    seen_add = seen.add
    return [item for item in sequence if not (item in seen or seen_add(item))]
3c314afeade3ef813ba4869d5017f395184508cb
693,780
import hashlib
import json


def get_config_uuid(config: dict):
    """Generate a unique, repeatable id from a dict of config parameters.

    Useful for identifying whether an experiment has been run before.

    Parameters
    ----------
    config : dict
        Dictionary containing all relevant parameters to retrieve this
        experiment's data/results in the future.

    Returns
    -------
    str
        Hex SHA1 digest of the key-sorted JSON serialization of config.
    """
    canonical = json.dumps(config, sort_keys=True).encode()
    return hashlib.sha1(canonical).hexdigest()
2ae989054cceb28b98a047e9eb6d8601f4641ddc
693,781
def get_audio_route(log, ad):
    """Gets the audio route for the active call

    Args:
        log: logger object (unused here; kept for the shared test API).
        ad: android_device object

    Returns:
        Audio route string ["BLUETOOTH", "EARPIECE", "SPEAKER",
        "WIRED_HEADSET", "WIRED_OR_EARPIECE"]
    """
    audio_state = ad.droid.telecomCallGetAudioState()
    return audio_state["AudioRoute"]
2c6353a4f46523d947c949da36782316f8925ff1
693,782
import numpy def _differentiate(data): """ Derivative nearly linear between dc and 30 Hz ---------- Parameters ---------- data : list Data samples of the signal where the first derivative estimate is done. Returns ------- out : list List with the differences between consecutive samples (the length of this list is equal to len(data) - 1). """ return numpy.diff(data)
ba465ce7dfd0e300bd170eec956f06a992dd7bd8
693,783
def Device_Set_To_Permitted(device):
    """
    Set Device to Permitted

    Marks the device as permitted and persists the change.

    :param device: model instance exposing a ``permitted`` flag and ``save()``.
    :return: the new value of ``device.permitted`` (always True).
    """
    device.permitted = True
    device.save()
    return device.permitted
c7abd281e39416ed5ee430b85cba9bfb8cc2f3d6
693,785
import re


def FormatWikiLinks(html):
    """Given an html file, convert [[WikiLinks]] into *WikiLinks* just to
    ease readability.  For piped links, only the display text is kept."""
    wikilink_pattern = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')
    return wikilink_pattern.sub(r'*\1*', html)
e33203875579d1dccb9e09370205da63ff9b3b2c
693,787
def notas(*n, sit=False):
    """Compute grade statistics for several students.

    :param n: one or more grades.
    :param sit: optional; when True also include the situation label.
    :return: dict with the count, highest, lowest, average and
        (optionally) the situation.
    """
    dict_notas = {
        'quant_notas': len(n),
        'maior': max(n),
        'menor': min(n),
        'media': sum(n) / len(n),
    }
    if sit:
        media = dict_notas['media']
        if media < 5:
            dict_notas['sit'] = 'RUIM'
        elif media < 7:
            dict_notas['sit'] = 'RAZÓAVEL'
        else:
            dict_notas['sit'] = 'BOA'
    return dict_notas
d8a8928d71b0daf73d0d3a080f3b0a5cd47c082b
693,789
def _map_configurations(map_conf_args, grid_conf_args, invar, ref_mod):
    """ Create and/or modify map configuration dictionary for map plots

    :param map_conf_args: user-supplied map settings (possibly empty dict).
    :param grid_conf_args: grid settings; 'meta data'[invar][ref_mod]
        supplies fallback corner values and projection origins.
    :param invar: variable name used to index the grid meta data.
    :param ref_mod: reference model name used to index the grid meta data.
    :return: completed map configuration dict.
    """
    if not map_conf_args.keys():
        # No user settings at all: build a full default configuration.
        map_conf = {
            'proj': 'stere',
            'zoom': 'crnrs',
            'crnr_vals': grid_conf_args['meta data'][invar][ref_mod]['crnrs'],
            'lon_0': grid_conf_args['meta data'][invar][ref_mod]['lon_0']
        }
    else:
        map_conf = map_conf_args.copy()
        if 'zoom' in map_conf_args:
            if map_conf_args['zoom'] == 'crnrs' and\
                    'crnr_vals' not in map_conf_args:
                # Corner zoom without explicit corners: take them from
                # the grid meta data.
                map_conf.update(
                    {'crnr_vals':
                     grid_conf_args['meta data'][invar][ref_mod]['crnrs']})
            elif map_conf_args['zoom'] == 'geom':
                errmsg = ("\t\tFor map plot with 'zoom': 'geom', 'zoom_geom' "
                          "(zoom geometry -- width/height)must be set!")
                assert 'zoom_geom' in map_conf_args, errmsg
        else:
            # Zoom not specified: default to corner-based zoom.
            map_conf.update(
                {'zoom': 'crnrs',
                 'crnr_vals':
                 grid_conf_args['meta data'][invar][ref_mod]['crnrs']})
        if 'proj' in map_conf_args and map_conf_args['proj'] == 'lcc':
            # Lambert conformal additionally needs a latitude origin.
            if 'lat_0' not in map_conf_args:
                map_conf.update(
                    {'lat_0':
                     grid_conf_args['meta data'][invar][ref_mod]['lat_0']})
        else:
            map_conf.update({'proj': 'stere'})
        if 'lon_0' not in map_conf_args:
            map_conf.update({
                'lon_0': grid_conf_args['meta data'][invar][ref_mod]['lon_0']
            })
    return map_conf
329d3ca01cbfa7e890c104e70b2d330b53c9f8d5
693,791
def get_table_name(sh, line_num, table_name_columns):
    """Collect the table-name cells of one row into a list.

    :param sh: sheet supporting ``sh['A5'].value``-style cell access.
    :param line_num: row number to read.
    :param table_name_columns: column letters holding the table name parts.
    :return: list of non-None cell values, or None when every cell is empty.
    """
    values = [sh[f'{col}{line_num}'].value for col in table_name_columns]
    table_name = [value for value in values if value is not None]
    return table_name if table_name else None
4a417866b228485ce8234552003985f979ee8eab
693,792
def extract_fam_id(tag):
    """Similar to extract_indi_id: normalize an xref-style tag by stripping
    '@' markers and spaces and lowercasing."""
    without_at = tag.replace('@', '')
    return without_at.lower().replace(' ', '')
d0aa111aa11ffdb7fb2162cd63b62afed0b2dde8
693,793
import yaml
from typing import Any


def pkg_representer(dumper: yaml.Dumper, data: Any) -> yaml.MappingNode:
    """Represent Packages as a simple dict

    Intended for registration as a yaml representer; tags the mapping with
    the fully qualified class name "lsst.utils.packages.Packages".
    """
    return dumper.represent_mapping("lsst.utils.packages.Packages", data,
                                    flow_style=None)
1bec38ef54b87df796e9baad7dbec2c8abb18393
693,794
def is_valid_process(timing, realization_bounds, realization_id):
    """Check whether a graph node/edge timing belongs to the current time
    realization.

    :param timing: event time to test.
    :param realization_bounds: list of realization upper bounds.
    :param realization_id: index of the current realization.
    :return: True when timing falls in the realization's (lower, upper]
        interval (the first realization has no lower bound).
    """
    upper = realization_bounds[realization_id]
    if realization_id == 0:
        # First realization: only the upper bound applies.
        return timing <= upper
    lower = realization_bounds[realization_id - 1]
    return lower < timing <= upper
30a07c8dbbdbcbeb42c1af46151141fac0bd00e2
693,795
def increment_char(c):
    """
    Increment an uppercase character, returning 'A' if 'Z' is given
    """
    if c == 'Z':
        # Wrap around at the end of the alphabet.
        return 'A'
    return chr(ord(c) + 1)
89c30f136acc05b289e61e0125ffe4ae3fcc213e
693,796
def RGB(red, green, blue):
    """
    Given integer values of Red, Green, Blue, return a color string "#RRGGBB"
    :param red: (int) Red portion from 0 to 255
    :param green: (int) Green portion from 0 to 255
    :param blue: (int) Blue portion from 0 to 255
    :return: (str) A single RGB String in the format "#RRGGBB" where each
        pair is a hex number.
    """
    return f'#{red:02x}{green:02x}{blue:02x}'
cf2776f29e6b189de3ef8d18f025a40b47e505d0
693,797
import pandas as pd
from pathlib import Path


def my_piclke_load(file_name):
    """
    General extraction of variables from a pickle file.

    Reads ``Pickles/<file_name>.pkl`` relative to the current working
    directory.

    Example:
        file_name = "test"
        x = 4
        var = x
        my_piclke_dump(var, file_name)
        zz = my_piclke_load(file_name)

    :param file_name: name of the pickle file (without the .pkl suffix)
    :return: the variable inside the file
    """
    file_path = Path().joinpath('Pickles', file_name + ".pkl")
    # NOTE(review): unpickling can execute arbitrary code; only load
    # trusted files.
    var = pd.read_pickle(file_path)
    print("The file ", file_name, ".pkl was loaded.")
    return var
c6483ea4c94d81d6b9f8654711d4eab47a449df0
693,798
import json


def get_data(file_name: str) -> dict:
    """
    Simply getting data from specified json file name and returning them
    in the form of a python dictionary

    :param file_name: path of the JSON file to read.
    :return: parsed JSON content (a dict for object-rooted files).
    :raises OSError: if the file cannot be opened.
    :raises json.JSONDecodeError: if the content is not valid JSON.
    """
    with open(file_name, "r") as json_file:
        content = json.load(json_file)
    return content
d501d5162c58d2c24a1250e1623947d5f5c6d4bc
693,799
def splitter(patient_id, date, cycle, report_dict):
    """Explode a report dict into one record per section.

    Parameters
    ----------
    patient_id
    date
    cycle
    report_dict : dict mapping section name -> section text

    Returns
    -------
    list of dicts, one per section, each carrying the shared patient_id,
    date and cycle fields plus 'section' and 'text'.
    """
    records = []
    for section, text in report_dict.items():
        records.append({'patient_id': patient_id,
                        'date': date,
                        'cycle': cycle,
                        'section': section,
                        'text': text})
    return records
95e7aa604da39403933290936716c0bc8d74e0d7
693,800
import subprocess


def compress_and_hash_directory(directory, working_directory):
    """Utility to compress a directory and generate a SHA1 hash of it

    Parameters
    ----------
    directory: str
        Directory to hash
    working_directory: str
        The working directory (where the compressed directory is saved)

    Returns
    -------
    sha1: str
        sha1sum of compressed directory
    """
    # Time-agnostic archiving and sha generation: gzip -n omits the
    # name/timestamp header so identical content hashes identically.
    print('Compressing and generating hash ...')
    # NOTE(review): shell=True with an interpolated path is shell-injection
    # prone if `directory` is untrusted -- confirm callers control it.
    ps = subprocess.Popen(
        'gzip -cnr --fast {} | sha1sum'.format(directory),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
        cwd=working_directory
    )
    cmd_output, _ = ps.communicate()
    # sha1sum prints "<hash>  <name>"; keep only the hash field.
    sha1, _ = cmd_output.split()
    return sha1.decode()
46ad98f9e6d730daca0a43bb142a2632dd157dcd
693,802
from typing import Optional def _get_bool(val) -> Optional[bool]: """ Converts val to bool if can be done with certainty. If we cannot infer intention we return None. """ if isinstance(val, bool): return val elif isinstance(val, str): if val.strip().lower() == 'true': return True elif val.strip().lower() == 'false': return False return None
da2562baeedd83454912745f1b436e1ae18562a4
693,803
def serialize_items(items):
    """Return a list of dicts holding each item's attribute entries."""
    return [item.__dict__ for item in items]
955d7d28dabf82c97675dea69ea4bbd6fd28ddd6
693,804
from typing import Dict
from typing import Any
import typing
import os
from typing import ChainMap


def get_config(configs: Dict[str, Dict[Any, Any]]) -> typing.ChainMap[Any, Any]:
    """Convenience function for merging different configs.

    Lookup preference is os.environ first, then the config selected by
    the ENVIRONMENT variable (upper-cased), then configs["DEFAULT"].

    Args:
        configs: mapping of environment name -> config dict that will be
            merged based on the ENVIRONMENT variable; must contain a
            "DEFAULT" entry.

    Returns:
        ChainMap: ChainMap of os.environ, environment config, default config
    """
    env_name = os.environ.get("ENVIRONMENT")
    selected = env_name.upper() if env_name is not None else ""
    # Missing (or None) environment entry falls back to an empty layer.
    environment_config: Dict[Any, Any] = configs.get(selected) or {}
    return ChainMap(os.environ, environment_config, configs["DEFAULT"])
88f989bdef6563d871dc09e2d320f4a263fd9465
693,805
import sys
import yaml
import os


def write_config(cat, key, value):
    """Write a new value into the configuration file.

    Loads ``config.yaml`` from the project root (two levels above the
    first ``sys.path`` entry), sets ``config[cat][key] = value`` and
    writes the file back.

    :param cat: top-level category in the YAML config
    :param key: key inside that category
    :param value: value to store
    :return: bool, operation status (False on any I/O or YAML error)
    """
    try:
        root = "/".join(sys.path[0].split("/")[:-2])
        config_path = os.path.join(root, "config.yaml")
        # Use a context manager so the read handle is closed promptly
        # (the original leaked the file object returned by open()).
        with open(config_path, "r") as config_file:
            config = yaml.load(config_file, Loader=yaml.SafeLoader)
        config[cat][key] = value
        with open(config_path, "w") as yaml_file:
            yaml_file.write(yaml.dump(config, default_flow_style=False))
        return True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any real failure still reports False.
        return False
732a500a797b1546cbfffa5c41eda837dcf548f5
693,806
def qw_not_found(arg1, arg2, path=None, headers=None, payload=None):
    """Build a "resource not found" error response.

    When ``arg2`` equals the string ``'not_found'`` the error payload is
    returned directly; otherwise it is wrapped under a ``'data'`` key.

    ``arg1``, ``path``, ``headers`` and ``payload`` are accepted for
    interface compatibility but are not used: the function always builds
    a fresh not-found payload.  (The original rebound the ``payload``
    parameter, which hid the fact that the argument is ignored.)
    """
    error_payload = {'errors': [{
        'code': 'not_found',
        'message': 'Resource not found'
    }]
    }
    if arg2 == 'not_found':
        return error_payload
    return {'data': error_payload}
243648edcada2d35baafeeedee80e7319bd8c115
693,807
def determine_spec_version(obj):
    """Given a STIX 2.x object, determine its spec version."""
    # SCOs are the only 2.1-only objects and they carry neither timestamp,
    # so the absence of both "created" and "modified" identifies 2.1.
    has_timestamp = "created" in obj or "modified" in obj
    if not has_timestamp:
        return "2.1"
    return obj.get("spec_version", "2.0")
7c3df55c7e80a6823571aa1af8d77cfd9c67710f
693,808
import re


def exemplar_unicodes_convert(text):
    """Convert ``\\uXXXX`` escape strings inside the exemplar text to real
    unicode characters.

    Args:
        text (str): Raw exemplar string. Eg: "a b c \\u0041"

    Returns:
        str: Converted exemplar string: Eg: "a b c A"
    """
    # Substitute every escape in a single pass.  The original collected
    # findall() matches and called str.replace() per match, which could
    # corrupt the text when one hex sequence was a prefix of another
    # (e.g. replacing "\\u41" would also rewrite the start of "\\u4100").
    return re.sub(
        r"\\u([0-9A-F]+)",
        lambda match: chr(int(match.group(1), 16)),
        text,
    )
47d0f1435839a8b47c199fa6f5a687694427860e
693,809
def move_point_cat(point, ipoint, to_clust, from_clust, cl_attr_freq, membship):
    """Move point between clusters, categorical attributes.

    Flips the point's membership from ``from_clust`` to ``to_clust`` and
    shifts each of its attribute-value frequencies accordingly.  Both
    structures are mutated in place and returned.
    """
    # Flip cluster membership for this point.
    membship[to_clust, ipoint] = 1
    membship[from_clust, ipoint] = 0
    # Transfer each attribute value's frequency count between clusters.
    for attr_idx, attr_value in enumerate(point):
        cl_attr_freq[to_clust][attr_idx][attr_value] += 1
        cl_attr_freq[from_clust][attr_idx][attr_value] -= 1
    return cl_attr_freq, membship
6b8c9d95143b0234a6b18495c0715c3e1294fc4d
693,810
import math


def luv_to_hcl(ell: float, u: float, v: float) -> tuple[float, float, float]:
    """Convert the color from CIELUV coordinates to HCL coordinates.

    Hue is the angle of (u, v) normalized into [0, tau); chroma is the
    radial distance; lightness passes through unchanged.
    """
    hue = math.atan2(v, u)
    if hue < 0:
        hue += math.tau
    chroma = math.hypot(v, u)
    return hue, chroma, ell
569b51e1cf0155e06c34d6f1655e76cd2966243f
693,811
import re


def edit_decl(decl):
    """Edits declarations for easier parsing.

    Declarations e.g.:
    int fname OF((int x, char c));
    int BZ_API(fname)(int a);
    """
    # Strip __NTH(...) wrappers (presumably glibc's no-throw annotation —
    # TODO confirm): "ret __NTH(name(args));" -> "ret name(args);"
    decl = re.sub(r'(.*?)\b__NTH\((\w*\(.*?\))\);', r'\1\2;', decl)
    # Drop macro wrappers around a doubled parameter list, e.g.
    # "int fname OF((int x, char c));" -> "int fname(int x, char c);"
    decl = re.sub(r'(.+?\s\w+)\b\w+\((\(.*?\))\);', r'\1\2;', decl)
    # Unwrap name macros such as BZ_API:
    # "int BZ_API(fname)(int a);" -> "int fname(int a);"
    decl = re.sub(r'(.*?)\b\w+\((\w+)\)\s*\((.*)\);', r'\1 \2(\3);', decl)
    # NOTE(review): the three substitutions are order-dependent; each one
    # rewrites the output of the previous pattern.
    return decl
5dd0f6d2070972a63c0a12d4a56d806937ce38e4
693,812
def function_3(x, y):
    """
    Returns: x divided by y

    Precondition: x is a number
    Precondition: y is a number, y > 0
    """
    quotient = x / y
    return quotient
e814f26ff5367b71abceac27fa61bbc66afe10c0
693,813
import sys
import csv
import logging
from typing import Counter
import os


def readtextcsv(filename, DATADIR):
    """Read in the text csv from filename.

    filename: the file from which to read
    DATADIR: the dir to which to write out

    corpus: a list of documents
    articles: a dict mapping source domain to its text
    i2s: a dict mapping article index to source domain for the articles in the corpus
    sdom_counts: a dict mapping source domain to its number of articles in the corpus
    """
    # read in/format data files
    maxInt = sys.maxsize
    decrement = True
    articles = {}
    i2s = {}
    corpus = []
    sdoms = []
    with open(filename, 'r', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        for i, row in enumerate(reader):
            # NOTE(review): this loop raises csv's field size limit as high
            # as the platform allows; with decrement reset to False up front
            # it effectively runs to completion on the first row only.
            while decrement:
                # decrease the maxInt value by factor 10
                # as long as the OverflowError occurs.
                decrement = False
                try:
                    csv.field_size_limit(maxInt)
                except OverflowError:
                    maxInt = int(maxInt/10)
                    decrement = True
                    logging.info('decrementing csv field size limit.')
            # each row = article ID, sdom, article text
            if row[1] not in articles:
                articles[row[1]] = []
            articles[row[1]].append(row[2])
            corpus.append(row[2])
            sdoms.append(row[1])
            # Create the mapping from article ID to sdom name
            i2s[i] = row[1]
    # Tally how many articles each source domain contributed.
    sdom_counts = Counter(sdoms)
    # Persist the per-domain counts once; never overwrite an existing file.
    if not (os.path.exists(os.path.join(DATADIR, 'sdom_by_article.csv'))):
        with open(os.path.join(DATADIR, 'sdom_by_article.csv'), "w", encoding='utf-8') as f:
            writer = csv.writer(f)
            # Write header
            writer.writerow(["sdom", "number of articles"])
            for key in sdom_counts.keys():
                # Write rows
                writer.writerow([key, sdom_counts[key]])
    return corpus, articles, i2s, sdom_counts
791888b8b3a06d5c522a5b877ee1e92795553e59
693,814
def escape_text(text: str) -> str:
    """Escape backslashes, carriage returns and tabs.

    (The Inception doc is wrong about what is really escaped; only these
    three characters are.)
    """
    # Backslash must be handled first so escapes we insert are not re-escaped.
    replacements = (('\\', '\\\\'), ('\r', '\\r'), ('\t', '\\t'))
    for raw, escaped in replacements:
        text = text.replace(raw, escaped)
    return text
25ded0043dc39decad973b08a7ee60665b828037
693,816
import re


def split_s3_path(path):
    """Split an S3 URI into bucket and key.

    Strips a leading ``s3://`` or ``s3a://`` scheme and splits on the
    first ``/``.

    Returns
    ---
    list[str]
        ``[bucket, key]``.  Note: despite earlier docs claiming a tuple,
        ``str.split`` returns a list — and a single-element one when the
        path contains no key portion.
    """
    return re.sub(r'^s3a?://', '', path).split(sep='/', maxsplit=1)
feefed76642090e01d157475d5a5dba185879d24
693,817
def get_client_current_path(request):
    """Return the request's path without query-string parameters.

    :param request: incoming request object exposing a ``path`` attribute
    :return: the bare path
    """
    return request.path
7a36531c54f77632163345a370b1fa8da6925363
693,818
def WebError(message=None, data=None):
    """Unsuccessful web request wrapper.

    Returns a dict with ``status`` 0, the given message and data.
    """
    response = {"status": 0}
    response["message"] = message
    response["data"] = data
    return response
022d150fba82bbc1bd7ed425eb34c657d4c7c673
693,819
def cake(number):
    """Return True if ``number`` is a cake number.

    The n-th cake number (maximum pieces produced by n planar cuts of a
    cube) is (n**3 + 5*n + 6) / 6.  Successive n are tried until the
    value reaches or exceeds ``number``.
    """
    n = 1
    while True:
        # n**3 + 5*n + 6 is always divisible by 6, so floor division is
        # exact.  The original used float division, which loses precision
        # and can misclassify large inputs.
        pieces = (n**3 + 5*n + 6) // 6
        if pieces == number:
            return True
        if pieces > number:
            return False
        n = n + 1
404851ea8340fbfa0346cc580c35f5a7157a8582
693,820
def get_uncertain_endings():
    """Return file endings that do not fully determine their content format.

    E.g. ``.fasta`` files may hold either nucleotide or amino-acid
    sequences.
    """
    uncertain = ["fasta"]
    return uncertain
2e9073f724b12fc1af27806b655936efd50b2811
693,821
def get_block_by_path(block, path):
    """Return the block addressed by a hierarchical path of PathNode objects.

    An empty path resolves to ``block`` itself.  Each node selects a child
    named ``"<name>[<index>]"`` from ``block.blocks``; when the node has a
    ``mode``, the child's operating mode must match.  Returns None on any
    missing child or mode mismatch.
    """
    if not path:
        return block
    head, tail = path[0], path[1:]
    child_key = "{}[{}]".format(head.name, head.index)
    if child_key not in block.blocks:
        return None
    child = block.blocks[child_key]
    # Check operating mode, if the path node constrains it.
    if head.mode is not None and child.mode != head.mode:
        return None
    return get_block_by_path(child, tail)
4709e6009facf8d93f0edb2a1c8f6b4fb6ddf04a
693,822
def generate_rearranged_graph(graph, fbonds, bbonds):
    """Generate a rearranged graph by forming and breaking bonds (edges).

    Arguments:
        graph (nx.Graph): reactant graph (left unmodified)
        fbonds (list(tuple)): list of bonds to be made
        bbonds (list(tuple)): list of bonds to be broken

    Returns:
        nx.Graph: rearranged copy of the input graph
    """
    rearranged = graph.copy()
    for bond in fbonds:
        rearranged.add_edge(*bond)
    for bond in bbonds:
        rearranged.remove_edge(*bond)
    return rearranged
33f842cee6759f7a012c9806c67fdcee6797bd6c
693,823
def create_html_url_href(url: str) -> str:
    """
    HTML version of a URL

    :param url: the URL
    :return: ``<a>`` element linking to *url* for use in an HTML document,
        or ``""`` when *url* is falsy
    """
    if not url:
        return ""
    import html  # stdlib; local import keeps the module surface unchanged
    # Escape the URL so characters such as & or " cannot break out of the
    # href attribute or produce invalid HTML (the original interpolated
    # the raw URL directly).
    safe = html.escape(url, quote=True)
    return f'<a href="{safe}">{safe}</a>'
5c41f5035a2b549e9c969eccbcc960a100600e7e
693,824
def tet_vector(i, num_tet):
    """
    Gives the tet equation for the i^th tetrahedron.

    Returns a flat list of length ``3 * num_tet`` holding [1, 1, 1] in
    the i-th slot and zeros everywhere else.
    """
    row = []
    for j in range(num_tet):
        row.extend([1, 1, 1] if i == j else [0, 0, 0])
    return row
5470ee3873e76514c6c747a2066744669df837c0
693,825
from typing import Counter from typing import Tuple def _counter_key_vals(counts: Counter, null_sort_key="ø") -> Tuple[Tuple, Tuple]: """ Split counter into a keys sequence and a values sequence. (Both sorted by key) >>> tuple(_counter_key_vals(Counter(['a', 'a', 'b']))) (('a', 'b'), (2, 1)) >>> tuple(_counter_key_vals(Counter(['a']))) (('a',), (1,)) >>> tuple(_counter_key_vals(Counter(['a', None]))) (('a', None), (1, 1)) >>> # Important! zip(*) doesn't do this. >>> tuple(_counter_key_vals(Counter())) ((), ()) """ items = sorted( counts.items(), # Swap nulls if needed. key=lambda t: (null_sort_key, t[1]) if t[0] is None else t, ) return tuple(k for k, v in items), tuple(v for k, v in items)
0965c7c7e5717c4daa48d3b9a188ad70283b41c7
693,827
def disable_event(library, session, event_type, mechanism):
    """Disables notification of the specified event type(s) via the specified mechanism(s).

    Corresponds to viDisableEvent function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param event_type: Logical event identifier.
    :param mechanism: Specifies event handling mechanisms to be disabled.
                      (Constants.QUEUE, .Handler, .SUSPEND_HNDLR, .ALL_MECH)
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    status = library.viDisableEvent(session, event_type, mechanism)
    return status
9bbaf57350e46dd3a6035b9f73c2c286821cb346
693,828
def autocast_value(value):
    """Cast string to string, float, bool or None.

    ``None`` and (case-insensitively) ``"null"`` map to None; ``"true"``/
    ``"false"`` map to booleans; numeric strings become floats; anything
    else is returned unchanged.
    """
    if value is None:
        return None
    lowered = value.lower()
    if lowered == "null":
        return None
    if lowered == "false":
        return False
    if lowered == "true":
        return True
    try:
        return float(value)
    except ValueError:
        return value
0ed4ebcd64f2f9b2fd200d296d4e216a8eca73ac
693,829
def memory(k, n, registers=[0, 1, 2, 3, 4, 5, 6, 7]):
    """
    This is a secret ingredient with 8 bytes of memory.

    Stores ``k`` into register ``n % 8``.  When ``n`` is a multiple of 5
    the value previously held in that register is returned; otherwise
    ``k`` itself is returned.

    NOTE: the mutable default argument is deliberate — it is the 8-slot
    memory that persists across calls.  Do not "fix" it.
    """
    index = n % 8
    # Capture the old register value before overwriting it.
    r = registers[index]
    registers[index] = k
    if (n % 5 == 0):
        return(r)
    else:
        return(k)
e5b05b94005a54561b92ac10373e328e72627cf5
693,830
def gauss(mu, sigma):
    """Gaussian distribution.  *mu* is the mean, and *sigma* is the standard
    deviation.  This is slightly faster than the :func:`normalvariate` function
    defined below."""
    # The previous body was a stub that always returned 0.0, which never
    # sampled the promised distribution.  Delegate to the stdlib sampler.
    import random
    return random.gauss(mu, sigma)
db0bbaa44f634b1f50be6e983661949b42f1d14b
693,831
def fixture_sample_tag_name() -> str:
    """Return the tag name 'sample'"""
    return "sample"
1a5efe0fd71234333ceb778e05bec31315947b52
693,833
def workflow(name):
    """Class decorator overriding the workflow name (class name) to use.

    Useful when writing a newer version of an existing workflow in a
    differently-named class: decorating the new class with
    ``@workflow(name='ExampleWorkflow')`` forces every workflow type it
    defines to register under the original name, so both versions are
    handled by one class hierarchy.

    :param str name: Specifies the name of the workflow type. If not set,
        the default is to use the workflow definition class name.
    """
    def apply_name(cls):
        # Force-rename every workflow type declared on the class.
        for workflow_type in cls._workflow_types:
            workflow_type._reset_name(name, force=True)
        return cls
    return apply_name
44b582964bbf0bb025039cc81b01f8f3d167ad35
693,834
def version_is_available(request):
    """Return a boolean, whether we have the version they asked for.

    A request whose URI path carries no 'version' entry always matches.
    """
    path = request.line.uri.path
    if 'version' not in path:
        return True
    return path['version'] == request.website.version
4c6e67153733b7d547423babc878ea4c71ab50f8
693,835
def is_tag_list(tag_name, conf):
    """Return true if a XMP tag accepts list or not"""
    list_tags = conf["list"]
    return tag_name in list_tags
0dfcf08abcea9979129515b1a0dec22fae1ec46d
693,836
def x_intersection(line_slope, intercept, y_value):
    """
    Calculates the x value at which the line reaches the given y value.

    This basically solves y = mx + b for x.

    :param line_slope: slope of the line (m)
    :param intercept: the intercept of the line (b)
    :param y_value: the y value to solve for
    :return: ``(x, y_value)``; ``(0, y_value)`` for a horizontal line
    """
    # The original's conditional expression bound only to ``y_value``
    # (precedence bug), so the (0, y_value) guard was unreachable and a
    # zero slope raised ZeroDivisionError.  Guard explicitly instead.
    if line_slope == 0:
        return (0, y_value)
    return ((y_value - intercept) / float(line_slope), y_value)
b24c0635e3b667cd694a99956df70350dcf57710
693,837
import hashlib


def filehash(filepath):
    """
    Compute sha256 from a given file.

    Parameters
    ----------
    filepath : str
        File path.

    Returns
    -------
    sha256 : str
        Sha256 hex digest of the file's contents.
    """
    chunk_size = 65536
    digest = hashlib.sha256()
    with open(filepath, "rb") as handle:
        # Read fixed-size chunks so large files never load fully into memory.
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
7e0af85ef132b19a18c4ba7956f58e46256d6445
693,838