content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def is_integer(val):
    """Check whether *val* is an integer or a whole-number value.

    Parameters
    ----------
    val : int, float
        Value to be checked.

    Returns
    -------
    bool
        ``True`` if ``val`` is an `int` or a whole number (if `float`).
    """
    try:
        # float exposes is_integer(); use it when available.
        return val.is_integer()
    except AttributeError:
        pass
    if isinstance(val, int):
        return True
    # Last resort: compare the int and float conversions.
    try:
        return int(val) == float(val)
    except (ValueError, TypeError):
        return False
a7f388d9767b8ca2fef64a4ec7851d0ef5ffb06e
690,704
def raw_pixels(img):
    """Use the flattened raw pixel values of *img* as its feature vector."""
    features = img.flatten()
    return features
fe16e6371b92ab3bd553677fa4f17ca914355d14
690,705
def remove_subtitle(title):
    """Strip a book's subtitle (if it exists).

    For example, 'A Book: Why Not?' becomes 'A Book'.
    """
    head, sep, _ = title.partition(':')
    # No colon means no subtitle: return the title untouched.
    return head.strip() if sep else title
cb223aa57a1eae2ab326a86bd7145bc345330800
690,706
def strip_quotes(t):
    """Remove surrounding double quotes, then single quotes, from *t*.

    Run replace_diacritics first -- this routine only attempts to remove
    normal quotes ~ ', "
    """
    without_double = t.strip('"')
    return without_double.strip("'")
f2056172dba59a8e996c62d2bf1a46020430d855
690,707
import torch


def get_samples_from_datasets(datasets, wav):
    """Gets samples (noise or speech) from the datasets.

    Arguments
    ---------
    datasets : list
        List containing datasets. More precisely, we expect here the
        pointers to the object used in speechbrain for data augmentation
        (e.g, speechbrain.lobes.augment.EnvCorrupt).
    wav : torch.tensor
        The original waveform. The drawn samples will have the same
        dimensionality of the original waveform.

    Returns
    -------
    samples: torch.tensor
        A batch of new samples drawn from the input list of datasets.
    """
    # We want a sample of the same size of the original signal
    samples = torch.zeros(
        wav.shape[0], wav.shape[1], len(datasets), device=wav.device
    )

    # Let's sample a sequence from each dataset
    for i, dataset in enumerate(datasets):
        # Initialize the signal with uniform noise in [-1, 1).
        # NOTE(review): presumably the augmenter replaces/corrupts this
        # noise with its own material -- confirm against EnvCorrupt.
        wav_sample = (torch.rand_like(wav) * 2) - 1
        len_sample = torch.ones(wav.shape[0], device=wav.device)

        # Sample a sequence
        wav_sample = dataset(wav_sample, len_sample)

        # Append it
        samples[:, :, i] = wav_sample

    # Random permutations of the signal along the dataset axis.
    idx = torch.randperm(samples.shape[-1], device=wav.device)
    samples[:, :] = samples[:, :, idx]
    return samples
490d780bafd514bb2d1b7b03c650e977fd3586eb
690,708
def mirror_distance_image(focal_point, distance_object):
    """Usage: Find distance of image with focal point and distance of object"""
    # Thin-mirror equation solved for the image distance:
    #   d_i = (f * d_o) / (d_o - f)
    return (focal_point * distance_object) / (distance_object - focal_point)
c76fb058082173332012ef05e87e1e0b5fe98ce7
690,709
import math import torch def _get_log_freq(sample_rate, max_sweep_rate, offset): """Get freqs evenly spaced out in log-scale, between [0, max_sweep_rate // 2] offset is used to avoid negative infinity `log(offset + x)`. """ half = sample_rate // 2 start, stop = math.log(offset), math.log(offset + max_sweep_rate // 2) return torch.exp(torch.linspace(start, stop, sample_rate, dtype=torch.double)) - offset
739593efa9ef15809bacddf495502d92625c002f
690,711
def solution(string):
    """Return *string* reversed."""
    return "".join(reversed(string))
491eef9fd8f909199951d8ce661f20cbd199b89d
690,712
def submit_url():
    """Returns the submission url."""
    # On-demand submissions endpoint.  The old per-class form was:
    # "https://class.coursera.org/" + URL + "/assignment/submit"
    endpoint = "https://www.coursera.org/api/onDemandProgrammingScriptSubmissions.v1"
    return endpoint
9783cae4d80551354915fa87cc0cf4126e4c93e9
690,713
import torch def _to_bcdhw(tensor: torch.Tensor) -> torch.Tensor: """Converts a PyTorch tensor image to BCDHW format. Args: tensor (torch.Tensor): image of the form :math:`(D, H, W)`, :math:`(C, D, H, W)`, :math:`(D, H, W, C)` or :math:`(B, C, D, H, W)`. Returns: torch.Tensor: input tensor of the form :math:`(B, C, D, H, W)`. """ if not isinstance(tensor, torch.Tensor): raise TypeError(f"Input type is not a torch.Tensor. Got {type(tensor)}") if len(tensor.shape) > 5 or len(tensor.shape) < 3: raise ValueError(f"Input size must be a three, four or five dimensional tensor. Got {tensor.shape}") if len(tensor.shape) == 3: tensor = tensor.unsqueeze(0) if len(tensor.shape) == 4: tensor = tensor.unsqueeze(0) return tensor
872302f2b6b7fc03348a42e730f6a09c2abd8784
690,714
def identifier_path(items):
    """Convert identifier in form of list/tuple to string representation of
    filesystem path.

    We assume that no symbols forbidden by filesystem are used in
    identifiers.
    """
    path = '/'.join(items)
    return path
9835ffe136008b53c37f074311d045c4aaec64ce
690,715
from pathlib import Path
from typing import Iterable


def find_pictures(folder: Path) -> Iterable[Path]:
    """Yield the ``*.jpg`` pictures directly inside *folder*."""
    pattern = "*.jpg"
    return folder.glob(pattern)
754591c5b72aeb7c74f2c0024273efc39c1a56a9
690,716
def is_unknown(value: str) -> bool:
    """Returns True if *value* represents an unknown value."""
    if not isinstance(value, str):
        raise TypeError
    # Empty strings and the common "unknown" abbreviations count.
    if not value or value.upper() in ("UNKN", "UNK", "UKN"):
        return True
    # Strings made up entirely of placeholder characters are also unknown.
    return all(char in ("/", "X", ".") for char in value)
59fdb74fb78ef4da6b86b935eda4f66aa114da63
690,719
import json


def deserialize_records(records):
    """
    This properly deserializes records depending on where they came from:
    - SQS
    - SNS
    """
    native_records = []

    for record in records:
        parsed = json.loads(record['body'])

        # DynamoDB stream events arrive double-encoded as a JSON string.
        if isinstance(parsed, str):
            native_records.append(json.loads(parsed))
            continue

        # SNS subscription confirmations carry no payload -- skip them.
        if parsed.get('Type') == 'SubscriptionConfirmation':
            continue

        # SNS messages proxied through SQS wrap the payload in 'Message'.
        message = parsed.get('Message')
        if message:
            native_records.append(json.loads(message))
        else:
            native_records.append(parsed)

    return native_records
3bb7c3aee0b4c9efb1d01c8b5f75cd02d8997a6d
690,720
import pandas


def single_spheroid_process(spheroid_frame: pandas.DataFrame, descriptors: list = None):
    """Build a spheroid dictionary from a per-cell dataframe.

    ``spheroid_frame`` has the label of each cell as index and must contain
    at least "x", "y" and "label" columns.

    Args:
        spheroid_frame: per-cell measurements, one row per cell.
        descriptors: extra column names to copy into each cell record.
            (Fix: was a mutable default ``[]``; ``None`` avoids the shared
            mutable-default pitfall while keeping the same behavior.)

    Returns:
        dict: ``{"cells": {index: {"x", "y", "z", "label", *descriptors}}}``.

    Note:
        If no "z" column exists, one is added in place with value 0, so 2-D
        data is promoted to 3-D (this mutates the caller's frame, as before).
    """
    if descriptors is None:
        descriptors = []

    # If the original dataframe is only 2D, transform the 2D data to 3D.
    if "z" not in spheroid_frame.columns:
        spheroid_frame["z"] = 0

    assert set(["z", "x", "y"]).issubset(spheroid_frame.columns)
    assert set(descriptors).issubset(spheroid_frame.columns)

    cells = {}
    for ind in spheroid_frame.index:
        unique_cell = {
            "x": spheroid_frame.loc[ind, "x"],
            "y": spheroid_frame.loc[ind, "y"],
            "z": spheroid_frame.loc[ind, "z"],
            "label": spheroid_frame.loc[ind, "label"],
        }
        for descriptor in descriptors:
            unique_cell[descriptor] = spheroid_frame.loc[ind, descriptor]
        cells[ind] = unique_cell

    return {"cells": cells}
07c588819e60fe8914273b2ba8aafd1b0cf64c30
690,721
import collections
import csv


def read_mapping_attr2ids(mapping_file):
    """get attr to one hot id by label mapping file"""
    attr2ids = collections.defaultdict(list)
    with open(mapping_file, 'r', encoding="ISO-8859-1") as handle:
        rows = csv.reader(
            handle, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL
        )
        for row in rows:
            # Columns: [?, attribute name, value name, attribute id, ...]
            attr2ids[row[1]].append(row[3])
    return attr2ids
55c8fc5e666b51e6d18a4ce99d7a1dadef02f8fb
690,722
def get_result_from_payload(json_resp):
    """Try to get result node from the payload."""
    assert json_resp is not None
    assert 'result' in json_resp
    # Read the actual result.
    result = json_resp.get('result')
    return result
867ed5db5ec4759a78f47d04cb75c39185b04c02
690,724
import torch


def reorder_lstm_states(lstm_states, order):
    """Reorder the batch dimension of a pair of LSTM states.

    lstm_states: (H, C) of tensor [layer, batch, hidden]
    order: list of sequence length
    """
    assert isinstance(lstm_states, tuple)
    assert len(lstm_states) == 2
    assert lstm_states[0].size() == lstm_states[1].size()
    assert len(order) == lstm_states[0].size()[1]

    hidden, cell = lstm_states
    index = torch.LongTensor(order).to(hidden.device)
    # Select along dim 1, the batch dimension.
    return (
        hidden.index_select(index=index, dim=1),
        cell.index_select(index=index, dim=1),
    )
bac758ad376e06fe084aad1fc469b4c5b3dc7e29
690,725
import socket


def is_port_used(ip, port):
    """
    check whether the port is used by other program
    :param ip:
    :param port:
    :return: True(in use) False(idle)
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.connect((ip, port))
    except OSError:
        # Nothing accepted the connection: the port is idle.
        return False
    else:
        return True
    finally:
        probe.close()
56fbcf03ab0bbfb23fdea56df8eeda19adaac539
690,726
def ShortHash(rc, stdout, stderr):
    """Stores the shortened hash from stdout into the property pkg_buildnum."""
    # With git, the date-based patchlevel is no longer unique, so the first
    # seven characters of the commit hash become the build number.
    short = "0x" + stdout[:7]
    return {'pkg_buildnum': short}
809bc486fcd58b0f6169a394bd6b67a71ee10305
690,727
import re


def check_indent_keys(document):
    """Check whether the word before the cursor was an indent key (which
    will trigger a reevaluation of the line's indent)"""
    # Offsets are relative to the cursor column (negative = to the left).
    lw_start = document.find_previous_word_beginning() or 0
    # NOTE(review): the word-ending lookup is disabled, so the token always
    # ends exactly at the cursor -- confirm this is intentional.
    lw_end = 0  # document.find_previous_word_ending() or 0
    col = document.cursor_position_col
    # Slice the token [col+lw_start, col+lw_end) out of the current line.
    last_tok = document.current_line[col + lw_start:col + lw_end]
    # Match brackets, comma, and the (Haskell-style) keywords/operators
    # that affect indentation.
    return re.match(
        r'\{|\}|\(|\)|\[|\]|,|where|let|deriving|in|::|->|=>|\||=',
        last_tok)
05b5191582739693c53a532fc0f1f979bd70c795
690,728
import collections


def create_graph_from_distances(pairwise_distances, threshold):
    """
    Builds an undirected graph where nodes are assemblies and edges connect
    assemblies which have a pairwise Mash distance below the threshold.
    """
    print('Loading distances...', end='', flush=True)
    assemblies = set()
    graph = collections.defaultdict(set)
    all_connections = collections.defaultdict(set)
    edge_count = 0

    for line in pairwise_distances:
        parts = line.split('\t')
        name_1, name_2 = parts[0], parts[1]
        distance = float(parts[2])
        assemblies.update((name_1, name_2))
        if name_1 == name_2:
            continue
        all_connections[name_1].add(name_2)
        all_connections[name_2].add(name_1)
        if distance < threshold:
            graph[name_1].add(name_2)
            graph[name_2].add(name_1)
            edge_count += 1

    print(' done ({})'.format(edge_count))

    assemblies = sorted(assemblies)
    # Sanity check: every assembly must have a distance to every other one.
    expected_connections = len(assemblies) - 1
    for assembly in assemblies:
        assert len(all_connections[assembly]) == expected_connections

    return assemblies, graph
1c6c3f3246eee223fcca4cf12220641c411616d9
690,729
def reformat_position(position):
    """ reformat position to be float values """
    raw = vars(position)['_raw']
    result = {}
    for key, value in raw.items():
        try:
            result[key] = float(value)
        except ValueError:
            # Non-numeric fields are silently dropped.
            pass
    return result
d176552a6312b5ab46c529313fd5dd8167955590
690,730
import ipaddress


def is_ip_valid():
    """is_ip_valid: check if a given ip address is valid

    a valid ip address is a unicast Class A, B, C but excluding
    Loopback addresses: 127.0.0.0/8
    Software scope addresses: 0.0.0.0/8
    APIPA addresses (lack of ip address): 169.254.0.0/16

    :param ip: ip address
    """
    exec_addr = "127.0.0.0/24, 0.0.0.0/8, 169.254.0.0/26: "
    global ip
    ip = input("Enter a unicast IPv4 address excluding " + exec_addr)
    try:
        ip = ipaddress.ip_address(ip)
    except ValueError:
        # Neither an IPv4 nor an IPv6 address.
        return False
    # Reject link-local, loopback, multicast, reserved and 0.x.x.x
    # addresses, per the docstring's requirements.
    disallowed = (
        ip.is_link_local,
        ip.is_loopback,
        ip.is_multicast,
        ip.is_reserved,
        ip.compressed[0] == '0',
    )
    return not any(disallowed)
2f9f08b389331d42c16bf11b0646b9222be5bf4c
690,731
def get_syscall_name(event):
    """Get the name of a syscall from an event.

    Args:
        event (Event): an instance of a babeltrace Event for a syscall
            entry.

    Returns:
        The name of the syscall, stripped of any superfluous prefix.

    Raises:
        ValueError: if the event is not a syscall event.
    """
    name = event.name
    # 'sys_' is checked first, matching the original precedence.
    for prefix in ('sys_', 'syscall_entry_'):
        if name.startswith(prefix):
            return name[len(prefix):]
    raise ValueError('Not a syscall event')
689d25c9731c3725a501b135d657731a46808da8
690,732
def in_all_repository_dependencies(repository_key, repository_dependency, all_repository_dependencies):
    """Return True if { repository_key : repository_dependency } is in all_repository_dependencies."""
    # Dict keys are unique, so a direct lookup replaces the original scan.
    dependencies = all_repository_dependencies.get(repository_key, [])
    return repository_dependency in dependencies
290ab8606d2b95297f3a6ecd16fb9604a79381ef
690,733
import math


def to_point(nvector):
    """Convert an n-vector to the latitude of the point it represents.

    Parameters
    ----------
    nvector : np.ndarray
        the n-vector of a point given by latitude and longitude

    Returns
    -------
    float
        The latitude of the point, in radians.
    """
    vertical = nvector[0]
    horizontal = math.sqrt(nvector[1] ** 2 + nvector[2] ** 2)
    return math.atan2(vertical, horizontal)
366c94aa55ce6968e22e1d4ccf658633e429c11d
690,735
from typing import Dict


def close(fulfillment_state: str, message: Dict[str, str]) -> dict:
    """Close dialog generator"""
    dialog_action = {
        'type': 'Close',
        'fulfillmentState': fulfillment_state,
        'message': message,
    }
    return {'dialogAction': dialog_action}
84a49d7dcb732a9e067b7f282230b45de486a899
690,736
def get_known_errors():
    """
    This function will read file with association from study to biosample
    :return: dictionary with study as a key and biosample as a values
    """
    known_errors = dict()
    try:
        with open('ena_not_in_biosample.txt', 'r') as handle:
            for line in handle:
                study, biosample = line.rstrip().split("\t")
                known_errors.setdefault(study, {})[biosample] = 1
    except FileNotFoundError:
        # A missing file simply means there are no known errors.
        pass
    return known_errors
aaff463f9bee8d032f553b05741968219419b5c2
690,737
def customer_table(data):
    """Return dataframe with all customers."""
    source = data['olist_customers_dataset'].copy()
    # Keep only the unique-id column and renumber the rows from zero.
    return source[['customer_unique_id']].reset_index(drop=True)
d353cfd455848ba0244e2b84c96d3857c566c9c2
690,738
import gettext


def C_(ctx, s):
    """Provide qualified translatable strings via context.

    (copied from Orca)
    """
    translated = gettext.gettext('%s\x04%s' % (ctx, s))
    if '\x04' not in translated:
        return translated
    # The context separator survived, so no translation was found:
    # return the input string.
    return s
1c2d51e4e516948ac852cf65d0d9b9892469db22
690,739
def mock_get_response(url):
    """Mock _get_response() function."""
    # Only https URLs return a payload; everything else yields False.
    if url.lower().startswith('https'):
        return {'titles': ['title is found']}
    return False
02d650013c65cc7f2c3ca4c69a018fea3e015695
690,740
def get_amazon_product_id(url: str) -> str:
    """
    Extract the amazon product id from product or review web page
    :param url: url of product
    :return: product id
    """
    # Try each known URL pattern in turn (product, review, product page).
    for pattern in ("/dp/", "/product-reviews/", "/product/"):
        start = url.find(pattern)
        if start != -1:
            begin = start + len(pattern)
            break
    else:
        raise Exception(
            "Failed to find the product id in the given url: " + url
        )
    # The id runs until the next '/', else the next '?', else end of url.
    end = url.find("/", begin)
    if end == -1:
        end = url.find("?", begin)
    if end == -1:
        end = len(url)
    return url[begin:end]
4306d1ec8db9181136f027a05e13ee7f2c65de57
690,741
def _t_hs(h, s): """Define the boundary between Region 2 and 3, T=f(h,s) >>> "%.7f" % _t_hs(2600, 5.1) '713.5259364' >>> "%.7f" % _t_hs(2800, 5.2) '817.6202120' """ nu=h/3000 sigma=s/5.3 I=[-12, -10, -8, -4, -3, -2, -2, -2, -2, 0, 1, 1, 1, 3, 3, 5, 6, 6, 8, 8, 8, 12, 12, 14, 14] J=[10, 8, 3, 4, 3, -6, 2, 3, 4, 0, -3, -2, 10, -2, -1, -5, -6, -3, -8, -2, -1, -12, -1, -12, 1] n=[0.629096260829810e-3, -0.823453502583165e-3, 0.515446951519474e-7, -0.117565945784945e1, 0.348519684726192e1, -0.507837382408313e-11, -0.284637670005479e1, -0.236092263939673e1, 0.601492324973779e1, 0.148039650824546e1, 0.360075182221907e-3, -0.126700045009952e-1, -0.122184332521413e7, 0.149276502463272, 0.698733471798484, -0.252207040114321e-1, 0.147151930985213e-1, -0.108618917681849e1, -0.936875039816322e-3, 0.819877897570217e2, -0.182041861521835e3, 0.261907376402688e-5, -0.291626417025961e5, 0.140660774926165e-4, 0.783237062349385e7] suma=0 for i in range(25): suma+=n[i]*(nu-0.727)**I[i]*(sigma-0.864)**J[i] return 900*suma
d80d667cd671f957208837222c54c9e722a63cec
690,742
def flatten(l):
    """
    Flatten irregular nested list
    :param l:
    :return:
    """
    # A non-sequence leaf becomes a single-element list.
    if not isinstance(l, (list, tuple)):
        return [l]
    flat = []
    for item in l:
        flat.extend(flatten(item))
    return flat
80356ee495f95916fc398c9e1b677bf37e43232b
690,743
def _dummy_boxstream(stream, **kwargs): """Identity boxstream, no tansformation.""" return stream
7ff65f1860c2e18149c496e135c28f43ccc7a980
690,744
def replica_uuid():
    """Replica UUID's to be used.

    Returns:
        tuple: (UUID string, replica size in bytes).
    """
    # Fix: a canonical UUID is 8-4-4-4-12 hex digits; the previous value
    # ("0000000-...") was one digit short in the first group and therefore
    # not a valid UUID.
    UUID = "00000000-0000-0000-0000-000000000001"
    size_mb = 64 * 1024 * 1024  # NOTE: despite the name, this is in bytes (64 MiB).
    return (UUID, size_mb)
b844e4a63504a2f4bc816cf1a22ace765391b5b1
690,745
import random


def random_hex(digits=12):
    """Generate a string of random hexadecimal digit and return as a string.

    Arguments:
        digits: the number of hexadecimal digits to create
    """
    alphabet = "0123456789ABCDEF"
    # One random.choice call per digit, exactly as before.
    return ''.join(random.choice(alphabet) for _ in range(digits))
b571013b8e17a08dc35b14bece6bde24f9829813
690,746
def IR_Mensal(salarioBase: float, numeroDependentes=0) -> float:
    """
    Calcula Imposto de Renda para 2021 (computes 2021 Brazilian income tax).

    :param salarioBase: base salary (INSS discount already applied)
    :param numeroDependentes: number of dependents
    :return: valorIR, the tax due rounded to 2 decimals
    """
    # (bracket lower bound, tax rate, fixed deduction), scanned in
    # descending order of the lower bound.
    brackets = [
        (4664.68, 0.275, 869.36),   # 27.5%
        (3751.05, 0.225, 636.13),   # 22.5%
        (2826.65, 0.15, 354.80),    # 15%
        (1903.98, 0.075, 142.80),   # 7.5%
        (0, 0, 0),                  # exempt
    ]
    DEDUCAO_POR_DEPENDENTE = 189.59  # deduction per dependent

    base = salarioBase - DEDUCAO_POR_DEPENDENTE * numeroDependentes
    base = max(base, 0)  # a negative taxable base counts as zero

    valor = 0
    for limite, aliquota, deducao in brackets:
        if base > limite:
            valor = base * aliquota - deducao
            break
    return round(valor, 2)
c1db2ef98d8ff5997e82eee4a87f10c1a1b4e10d
690,747
def _compute_min_event_ndims(bijector_list, compute_forward=True):
    """Computes the min_event_ndims associated with the give list of bijectors.

    Given a list `bijector_list` of bijectors, compute the min_event_ndims
    that is associated with the composition of bijectors in that list.

    min_event_ndims is the # of right most dimensions for which the bijector
    has done necessary computation on (i.e. the non-broadcastable part of the
    computation).

    We can derive the min_event_ndims for a chain of bijectors as follows:

    In the case where there are no rank changing bijectors, this will simply
    be `max(b.forward_min_event_ndims for b in bijector_list)`. This is
    because the bijector with the most forward_min_event_ndims requires the
    most dimensions, and hence the chain also requires operating on those
    dimensions.

    However in the case of rank changing, more care is needed in determining
    the exact amount of dimensions. Padding dimensions causes subsequent
    bijectors to operate on the padded dimensions, and Removing dimensions
    causes bijectors to operate more left.

    Args:
      bijector_list: List of bijectors to be composed by chain.
      compute_forward: Boolean. If True, computes the min_event_ndims
        associated with a forward call to Chain, and otherwise computes the
        min_event_ndims associated with an inverse call to Chain. The latter
        is the same as the min_event_ndims associated with a forward call to
        Invert(Chain(....)).

    Returns:
      min_event_ndims
    """
    min_event_ndims = 0
    # This is a mouthful, but what this encapsulates is that if not for rank
    # changing bijectors, we'd only need to compute the largest of the min
    # required ndims. Hence "max_min". Due to rank changing bijectors, we
    # need to account for synthetic rank growth / synthetic rank decrease
    # from a rank changing bijector.
    rank_changed_adjusted_max_min_event_ndims = 0

    if compute_forward:
        # A forward chain applies the bijectors right-to-left.
        bijector_list = reversed(bijector_list)

    for b in bijector_list:
        if compute_forward:
            current_min_event_ndims = b.forward_min_event_ndims
            current_inverse_min_event_ndims = b.inverse_min_event_ndims
        else:
            current_min_event_ndims = b.inverse_min_event_ndims
            current_inverse_min_event_ndims = b.forward_min_event_ndims

        # New dimensions were touched.
        if rank_changed_adjusted_max_min_event_ndims < current_min_event_ndims:
            min_event_ndims += (
                current_min_event_ndims -
                rank_changed_adjusted_max_min_event_ndims)
        rank_changed_adjusted_max_min_event_ndims = max(
            current_min_event_ndims, rank_changed_adjusted_max_min_event_ndims)

        # If the number of dimensions has increased via forward, then
        # inverse_min_event_ndims > forward_min_event_ndims, and hence the
        # dimensions we computed on, have moved left (so we have operated
        # on additional dimensions). Conversely, if the number of dimensions
        # has decreased via forward, then we have
        # inverse_min_event_ndims < forward_min_event_ndims, and so we will
        # have operated on fewer right most dimensions.
        number_of_changed_dimensions = (
            current_min_event_ndims - current_inverse_min_event_ndims)
        rank_changed_adjusted_max_min_event_ndims -= number_of_changed_dimensions
    return min_event_ndims
a168b64107ae4fb5e9c68ea679f8c786666d6db9
690,748
def is_anonymous(user_id):
    """
    Returns whether or not the given user is an anonymous user.

    :param user_id: The id of the user.
    :return: True, if the user is anonymous; False, otherwise.
    """
    # Temporary ("hmrtmp"-prefixed) ids denote anonymous users.
    prefix = "hmrtmp"
    return user_id.startswith(prefix)
c6d6620cb0626967518a0f7706ff4a4895e00167
690,749
def tensor_descriptions(args):
    """
    Tensor descriptions for Ops.

    Arguments:
        args: A list of Ops.

    Returns:
        A generator yielding each Op's tensor description.
    """
    return (op.tensor_description() for op in args)
6825fc919e053b88a80e18fbcbe7eaa3e72890cc
690,751
def calc_flesch_readability(wordcount, sentcount, syllcount):
    """ Calculates the Flesch Readability Score. """
    words_per_sentence = wordcount / sentcount
    syllables_per_word = syllcount / wordcount
    # Flesch reading-ease formula: 206.835 - 1.015*(W/S) - 84.6*(Syl/W).
    score = 206.835 - 1.015 * words_per_sentence - 84.6 * syllables_per_word
    return round(score, 1)
faaaa6c315d905a7ea6f485c4720f8695897be79
690,752
def centers(signal, axes):
    """
    Returns the centers of the axes.

    This works regardless if the axes contain bin boundaries or centers.
    """
    def midpoints(axis, dimlen):
        # One more point than the dimension length means bin boundaries:
        # average adjacent boundaries to obtain the centers.
        if axis.shape[0] == dimlen + 1:
            return (axis.nxdata[:-1] + axis.nxdata[1:]) / 2
        assert axis.shape[0] == dimlen
        return axis.nxdata

    return [midpoints(axis, signal.shape[dim]) for dim, axis in enumerate(axes)]
cf39dd22a322d5c759cfd219c5097b54f05a0d86
690,753
def signing_bytes(uid, nonce):
    """
    Return the UTF-8 encoded concatenation of *uid* and *nonce*.

    Parameters:
        uid: string
        nonce: int
    """
    payload = '{}{}'.format(uid, nonce)
    return payload.encode('utf-8')
3ae18cabd5cd1deb83385262cc58b1a18e936b79
690,754
def train_test_split(split):
    """
    Placeholder for the train/val/test split step (intended as an 80/20
    train/test split); currently returns *split* unchanged.
    """
    return split
ee5c6fba68b17c19fdeccd94760aaa6861ebf36b
690,756
import os
import time


def file_changed(file):
    """
    Block until the given file's modification time changes, then return True.

    Polls the file's mtime in a loop.  Fix: the original implementation
    busy-waited at 100% CPU; a short sleep between polls removes that
    without changing the observable behaviour (the call still blocks until
    the file is updated).

    :param file: path of the file to watch
    :return: True once the file's modification time has changed
    """
    # Time of previous content modification.
    last_change_time = os.stat(file).st_mtime
    while True:
        new_change_time = os.stat(file).st_mtime  # most recent modification
        if new_change_time != last_change_time:
            # The file has changed.
            return True
        time.sleep(0.05)  # avoid a busy-wait
3ebd6589b06e9c096eab562a02ee809a45235d1f
690,758
def stdDevOfLengths(L):
    """
    L: a list of strings

    returns: float, the standard deviation of the lengths of the strings,
        or NaN if L is empty.
    """
    # Fix: the original used a bare `except:` that swallowed every error
    # (including genuine bugs).  Only the documented empty-input case should
    # yield NaN; anything else propagates.
    if not L:
        return float('NaN')
    lengths = [len(s) for s in L]
    mean = sum(lengths) / len(lengths)
    # Population variance (divide by N, as before).
    variance = sum((x - mean) ** 2 for x in lengths) / len(lengths)
    return variance ** 0.5
04e68523d89bfe9b9e3ba9298f817bc0f374ba83
690,759
def _get_date(msg): """Returns the date included into the message. Args: msg: A json message. Returns: The date string """ return msg['date']
b9eb3f2cf62e80c1bf3274a50aef8604355e81f7
690,760
def convert_dict_of_sets_to_dict_of_lists(dictionary):
    """
    Returns the same dictionary, but the values being sets are now lists.

    @param dictionary: {key: set(), ...}
    """
    # Fix: the original iterated `for key, setvalue in dictionary`, which
    # walks the KEYS only and raises (or silently misbehaves) for any normal
    # dict.  `.items()` is required to unpack (key, value) pairs.
    return {key: list(setvalue) for key, setvalue in dictionary.items()}
fe45c19684356be8d1bd46cda366dc8ba0a33922
690,761
def remove_every_other(my_list: list) -> list:
    """
    This function removes every second element from the array.
    """
    # Keep the elements at even indices (0, 2, 4, ...).
    return my_list[::2]
1c23a204dec2fd6f888cbbe9b968e4ae72d4cfe9
690,762
import pickle


def load_train_data():
    """We train on the whole data but the last mini-batch.

    Returns:
        tuple: (sound_fpaths, converteds) as stored in data/train.pkl.
    """
    # Fix: use a context manager so the file handle is closed; the original
    # open() call leaked it.
    with open('data/train.pkl', 'rb') as handle:
        sound_fpaths, converteds = pickle.load(handle)
    return sound_fpaths, converteds
10811d1570ae2315c7e60b7571ee232bedad9576
690,764
def tick_percent(decimals=1):
    """A tick formatter to display the y-axis as a float percentage with a
    given number of decimals.

    Args:
        decimals = 1: The number of decimals to display.

    Returns:
        A tick formatter function (f(y, position)) displaying y as a
        percentage.
    """
    def formatter(y, position):
        # `position` is required by the matplotlib formatter signature
        # but unused.
        return '{:.{decimals}f}%'.format(100.0 * y, decimals=decimals)

    return formatter
b9c064b767e39b4a98abd389ef1d0656dfea582a
690,765
def cereal_protein_fractions(cereals):
    """
    For each cereal, records its protein content as a fraction of its
    total mass.
    """
    # Serving weights are given in ounces; 28.35 g per ounce.
    return {
        cereal["name"]:
            float(cereal["protein"]) / (float(cereal["weight"]) * 28.35)
        for cereal in cereals
    }
af2ce290e31aac2dc44856bd5de9a7017b225287
690,766
def atom_dict_to_atom_dict(d, aniso_dict):
    """Turns an .mmcif atom dictionary into an atomium atom data dictionary.

    :param dict d: the .mmcif atom dictionary.
    :param dict aniso_dict: the mapping of atom IDs to anisotropy.
    :rtype: ``dict``"""
    charge = "pdbx_formal_charge"
    alt_loc = d.get("label_alt_id")
    atom = {
        "x": d["Cartn_x"], "y": d["Cartn_y"], "z": d["Cartn_z"],
        "element": d["type_symbol"],
        "name": d.get("label_atom_id"),
        "occupancy": d.get("occupancy", 1),
        "bvalue": d.get("B_iso_or_equiv"),
        # "?" is mmCIF's marker for an unrecorded formal charge.
        "charge": d.get(charge, 0) if d.get(charge) != "?" else 0,
        # "." is mmCIF's null marker for the alt-loc field.
        "alt_loc": alt_loc if alt_loc != "." else None,
        "anisotropy": aniso_dict.get(int(d["id"]), [0, 0, 0, 0, 0, 0]),
        "is_hetatm": False,
    }
    # Numeric fields arrive as strings -- convert the ones that are present.
    for key in ("x", "y", "z", "charge", "bvalue", "occupancy"):
        if atom[key] is not None:
            atom[key] = float(atom[key])
    return atom
bd03c592f00fd244d7fafd012795bd7e9c55d86d
690,767
from typing import List
from typing import Dict


def flatten(l: List[List[Dict]]) -> List[Dict]:
    """
    Flattens list of lists.

    :param l: list containing lists of table dictionaries
    :return: list containing table dictionaries
    """
    flat: List[Dict] = []
    for sublist in l:
        flat.extend(sublist)
    return flat
95a11b937a547303790c54e0c4bfdafe286f7bf6
690,768
def base_to_int(string, base_str):
    """Converts an array of bytes encoded in a given base into a binary
    number.

    :param string: the encoded digits, most significant first.
    :param base_str: the digit alphabet; its length is the base.
    :return: the decoded integer value.
    :raises ValueError: if a character of *string* is not in *base_str*.
    """
    base = len(base_str)
    value = 0
    for digit in string:
        # Fix: str.index raises ValueError on unknown digits; the original
        # str.find returned -1 and silently corrupted the result.
        value = value * base + base_str.index(digit)
    return value
73e097fcbaaf806e29e14db408d2d62e611ca5de
690,770
def _add_educ_rapid_test_fade_in_params(params):
    """Add the shares how many people with educ contacts get a rapid test.

    Sources:

    - 17-24 of March 2021 (Mon, 2021-03-22):
        - NRW had 80% tests for students before Easter
          (https://bit.ly/3u7z8Rx)
        - BY: test offers to educ_workers (https://bit.ly/3tbVX5u)
        - BW: only tests for educ workers (https://bit.ly/2S7251M)
        - federal level: "In Kitas und Schulen sollen die Testmoeglichkeiten
          "mit der steigenden Verfuegbarkeit von Schnell- und Selbsttests"
          ausgebaut werden" (https://bit.ly/3nuCSKi)
        - Some KiTa workers are being tested (https://bit.ly/3nyGyus)
        - Self tests for students in Berlin (https://bit.ly/2ScGu8m)
        - Schleswig-Holstein: test offer (https://bit.ly/3eVfkuv)
        - mandatory tests in Saxony (https://bit.ly/3eEQGhn)
        - no tests yet for students in Hessia, but already ordered
          (https://bit.ly/3gMGJB4)
        - Niedersachsen had one test week before Easter
          (https://bit.ly/3gOOC96)

        => assume 90% of teachers and 40% of students do rapid tests

    - After Easter (2021-04-07):
        - NRW: tests are mandatory starting April 12th
          (https://bit.ly/3xqVbUn)
        - Bavaria: tests are mandatory for all (https://bit.ly/3nz5fXS,
          https://bit.ly/2QHilX3)
        - BW: voluntary tests for students (https://bit.ly/3vuetaD)
        - Brandenburg starts with tests (https://bit.ly/3xAihZB)
        - Schleswig-Holstein: mandatory tests (https://bit.ly/3eVfkuv)

        => assume 95% of teachers and 75% of students get tested

    - BW: tests mandatory starting 2021-04-19 (https://bit.ly/3vuetaD)

        => assume 95% of teachers and 95% of students get tested
    """
    # Work on a deep copy so the caller's params DataFrame is not mutated.
    params = params.copy(deep=True)

    # Fade-in of rapid-test shares among education workers.
    loc = ("rapid_test_demand", "educ_worker_shares")
    params.loc[(*loc, "2020-01-01"), "value"] = 0.0
    params.loc[(*loc, "2021-01-01"), "value"] = 0.0
    # this is arbitrary to have a more convex shape
    params.loc[(*loc, "2021-03-01"), "value"] = 0.3
    params.loc[(*loc, "2021-03-22"), "value"] = 0.9
    params.loc[(*loc, "2021-04-07"), "value"] = 0.95
    params.loc[(*loc, "2021-04-19"), "value"] = 0.95
    params.loc[(*loc, "2021-06-01"), "value"] = 0.95
    params.loc[(*loc, "2025-12-31"), "value"] = 0.95

    # Fade-in of rapid-test shares among students.
    loc = ("rapid_test_demand", "student_shares")
    params.loc[(*loc, "2020-01-01"), "value"] = 0.0
    params.loc[(*loc, "2021-02-01"), "value"] = 0.0
    params.loc[(*loc, "2021-03-01"), "value"] = 0.1
    params.loc[(*loc, "2021-03-22"), "value"] = 0.4
    params.loc[(*loc, "2021-04-07"), "value"] = 0.75
    params.loc[(*loc, "2021-04-19"), "value"] = 0.95
    params.loc[(*loc, "2021-06-01"), "value"] = 1.0
    params.loc[(*loc, "2025-12-31"), "value"] = 1.0

    # Assume weekly tests before Easter and twice weekly tests after Easter.
    # We should get a fade-in through different ends of Easter vaccation.
    params.loc[("rapid_test_demand", "educ_frequency", "before_easter"), "value"] = 7
    params.loc[("rapid_test_demand", "educ_frequency", "after_easter"), "value"] = 3
    return params
7c126809587d33b623bf297c4714904a11c798eb
690,771
def get_diff_set(a_list, b_list):
    """Get results of a - b"""
    # Deduplicate by each tuple's first element (later entries win),
    # then keep only the tuples whose key is absent from b.
    a_by_key = {tpl[0]: tpl for tpl in a_list}
    b_keys = {tpl[0] for tpl in b_list}
    return {tpl for key, tpl in a_by_key.items() if key not in b_keys}
3e352b0761f2618ab81d16de7396581f080e6ae3
690,772
def button_string(channel, red, blue):
    """Returns the string representation of a Combo PWM Mode button."""
    template = 'CH{:s}_{:s}_{:s}'
    return template.format(channel, red, blue)
200856405bdee5cdaaa0933cd2c4277092e23d23
690,773
def print_guess(guess, position):
    """ Obtain the representation of an underlined word """
    # Pad with underscores to a fixed total width of 6 characters.
    prefix = "_" * position
    suffix = "_" * (6 - position - len(guess))
    return prefix + guess + suffix
efde67f19aaeddc8ca8973d586cde57a32d78d14
690,774
def predict_labels(df, confidence_threshold, mode):
    """Returning predicted labels from a dataframe given a threshold.

    mode=0 returns all labels, mode=1 returns only categories, not passive
    nor active"""
    excluded = ("passive", "active", "pasiva", "activa")
    predicted_labels = []
    if mode == 0:
        for label, score in zip(df.index, df["score"]):
            if score >= confidence_threshold:
                predicted_labels.append(label)
    elif mode == 1:
        for label, score in zip(df.index, df["score"]):
            if score >= confidence_threshold and label not in excluded:
                predicted_labels.append(label)
    return predicted_labels
cc89d9708d5975a342d0c661103328af9fccbb60
690,775
def GenerateConfig(context):
    """Generate a Deployment Manager configuration for a Forseti client VM.

    Builds a single ``compute.v1.instance`` resource whose startup script
    installs Forseti Security from source and persists its environment
    variables.  All tunables are read from ``context.properties``.
    """
    # Home directory of the Forseti checkout on the VM ($USER_HOME is
    # expanded by the startup script below, not here).
    FORSETI_HOME = '$USER_HOME/forseti-security'

    # Shell fragments injected into the startup script.
    DOWNLOAD_FORSETI = (
        "git clone {src_path}.git".format(
            src_path=context.properties['src-path']))
    FORSETI_VERSION = (
        "git checkout {forseti_version}".format(
            forseti_version=context.properties['forseti-version']))

    # GCS path of the client configuration the VM should use.
    FORSETI_CLIENT_CONF = ('gs://{bucket_name}/configs/'
                           'forseti_conf_client.yaml').format(
        bucket_name=context.properties['gcs-bucket'])

    SERVICE_ACCOUNT_SCOPES = context.properties['service-account-scopes']

    # Exports written both to the current shell and to /etc/profile.d
    # so every login session sees them.
    PERSIST_FORSETI_VARS = (
        'export FORSETI_HOME={forseti_home}\n'
        'export FORSETI_CLIENT_CONFIG={forseti_client_conf}\n'
    ).format(forseti_home=FORSETI_HOME,
             forseti_client_conf=FORSETI_CLIENT_CONF)

    resources = []

    # Instance name: deployment name with 'vm' spliced in before the last
    # hyphen-separated token.
    deployment_name_splitted = context.env['deployment'].split('-')
    deployment_name_splitted.insert(len(deployment_name_splitted)-1, 'vm')
    instance_name = '-'.join(deployment_name_splitted)

    resources.append({
        'name': instance_name,
        'type': 'compute.v1.instance',
        'properties': {
            'zone': context.properties['zone'],
            'machineType': (
                'https://www.googleapis.com/compute/v1/projects/{}'
                '/zones/{}/machineTypes/{}'.format(
                    context.env['project'],
                    context.properties['zone'],
                    context.properties['instance-type'])),
            'disks': [{
                'deviceName': 'boot',
                'type': 'PERSISTENT',
                'boot': True,
                'autoDelete': True,
                'initializeParams': {
                    'sourceImage': (
                        'https://www.googleapis.com/compute/v1'
                        '/projects/{}/global/images/family/{}'.format(
                            context.properties['image-project'],
                            context.properties['image-family']
                        )
                    )
                }
            }],
            'networkInterfaces': [{
                'network': (
                    'https://www.googleapis.com/compute/v1/'
                    'projects/{}/global/networks/default'.format(
                        context.env['project'])),
                'accessConfigs': [{
                    'name': 'External NAT',
                    'type': 'ONE_TO_ONE_NAT'
                }]
            }],
            'serviceAccounts': [{
                'email': context.properties['service-account'],
                'scopes': SERVICE_ACCOUNT_SCOPES,
            }],
            'metadata': {
                'items': [{
                    'key': 'startup-script',
                    # Bash run on first boot: logs to /tmp/deployment.log,
                    # installs dependencies, clones and installs Forseti at
                    # the requested version, and persists env variables.
                    'value': """#!/bin/bash
exec > /tmp/deployment.log
exec 2>&1

# Ubuntu update.
sudo apt-get update -y
sudo apt-get upgrade -y

# Forseti setup.
sudo apt-get install -y git unzip

# Forseti dependencies
sudo apt-get install -y libffi-dev libssl-dev libmysqlclient-dev python-pip python-dev build-essential

USER=ubuntu
USER_HOME=/home/ubuntu

# Install fluentd if necessary.
FLUENTD=$(ls /usr/sbin/google-fluentd)
if [ -z "$FLUENTD" ]; then
      cd $USER_HOME
      curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh
      bash install-logging-agent.sh
fi

# Install Forseti Security.
cd $USER_HOME
rm -rf *forseti*

# Download Forseti source code
{download_forseti}
cd forseti-security
git fetch --all
{checkout_forseti_version}

# Forseti dependencies
pip install --upgrade pip==9.0.3
pip install -q --upgrade setuptools wheel
pip install -q --upgrade -r requirements.txt

# Install Forseti
python setup.py install

# Set ownership of the forseti project to $USER
chown -R $USER {forseti_home}

# Export variables
{persist_forseti_vars}

# Store the variables in /etc/profile.d/forseti_environment.sh
# so all the users will have access to them
echo "echo '{persist_forseti_vars}' >> /etc/profile.d/forseti_environment.sh" | sudo sh

echo "Execution of startup script finished"
""".format(
                        # Install Forseti.
                        download_forseti=DOWNLOAD_FORSETI,

                        # Checkout Forseti version.
                        checkout_forseti_version=FORSETI_VERSION,

                        # Set ownership for Forseti conf and rules dirs
                        forseti_home=FORSETI_HOME,

                        # Env variables for Forseti
                        persist_forseti_vars=PERSIST_FORSETI_VARS,
                    )
                }]
            }
        }
    })
    return {'resources': resources}
3a08b5c71e8658b1f74d8121e3265c9f541038d2
690,776
def get_msg_count(bag, topic):
    """Get number of instances for the topic.

    # Parameters
    bag : rosbag.Bag
        a target rosbag
    topic : string
        a valid topic name

    # Returns
    num_msgs : int
        number of messages in the topic
    """
    num_msgs = bag.get_message_count(topic)
    return num_msgs
3da6964fa77dfbe982e090e4a13fcb778fd032a1
690,777
def bilinear_interpolation_01(x, y, values):
    """Interpolate values given at the corners of the [0,1]x[0,1] square.

    Parameters:
        x : float
        y : float
        values : ((v00, v01), (v10, v11))
            input grid of 4 values; inner dimension is x,
            thus v01 = value at (x=1, y=0).

    Returns:
        float
            interpolated value
    """
    (v00, v01), (v10, v11) = values
    # Same term order as the textbook expansion, so float results match
    # a sum of four weighted corner contributions exactly.
    return (v00 * (1 - x) * (1 - y)
            + v01 * x * (1 - y)
            + v10 * (1 - x) * y
            + v11 * x * y)
a5e0d8b974803073df159da4d16a01a47ec0f087
690,778
from colorsys import rgb_to_hsv


def calculate_hue_offsets(color_wheel):
    """Parse a color wheel and give hue offsets compared to traditional HSV.

    >>> cw = {300: (120, 0, 106)}
    >>> print(calculate_hue_offsets(cw))
    {307: 300}
    """
    offsets = {}
    for degree in range(360):
        if degree not in color_wheel:
            continue
        # Normalize 0-255 RGB channels to the 0-1 range colorsys expects.
        normalized = [channel / 255 for channel in color_wheel[degree]]
        hue = rgb_to_hsv(*normalized)[0]
        offsets[round(hue * 360)] = degree
    return offsets
ee41a18449721ee8f75399659fa758220134d116
690,779
def is_string_like(obj):
    """
    Check whether obj behaves like a string. Copied from numpy.

    Parameters
    ----------
    obj : object
        Object being tested for string like behavior

    Returns
    -------
    bool
        True if object behaves like a string. False otherwise.
    """
    # Duck typing: only string-like objects support concatenation with ''.
    try:
        obj + ''
        return True
    except (TypeError, ValueError):
        return False
1015d440ea71c8f54dc0d720145677ca37351645
690,780
def ints():
    """Returns a generator of integers from the next input line."""
    tokens = input().split()
    return (int(token) for token in tokens)
1ccda6985d8dcc275d647a4d1a133fb2dd596220
690,781
def CommandInGoEnv(input_api, output_api, name, cmd, kwargs):
    """Returns input_api.Command that wraps |cmd| with invocation to go/env.py.

    env.py makes golang tools available in PATH. It also bootstraps Golang dev
    environment if necessary.
    """
    # Failures block a commit, but only warn during upload.
    if input_api.is_committing:
        message_type = output_api.PresubmitError
    else:
        message_type = output_api.PresubmitPromptWarning
    env_py = input_api.os_path.join(
        input_api.change.RepositoryRoot(), 'go', 'env.py')
    return input_api.Command(
        name=name,
        cmd=['vpython', env_py] + list(cmd),
        kwargs=kwargs,
        message=message_type)
7d71c09f0621cfa25d6a7316a2ba1fcfbbc1fb91
690,782
import csv


def get_datatypes_csv():
    """Read ``datatypes.csv`` from the working directory.

    The first line is treated as the header; returns the remaining rows
    as a list of dicts keyed by the header fields.
    """
    with open("datatypes.csv") as f:
        # list() consumes the reader directly; the original list
        # comprehension was a redundant element-by-element copy.
        return list(csv.DictReader(f))
04cf99c5731ca099b5145ccc7f4fc8855ebd2c90
690,783
from typing import Counter
import random


def get_markov_message(lines) -> str:
    """Get a random message based on a markov chain.

    ``lines`` is an iterable of whitespace-separated text lines.  The first
    word is drawn uniformly from the line-initial words; each subsequent
    word is the word following a randomly chosen occurrence of the previous
    word.  Generation stops once a word ends in sentence punctuation and at
    least 4 words have been emitted.

    NOTE(review): if the chosen occurrence of ``last_added_word`` is the
    very last word in ``words``, ``words[current_index]`` indexes one past
    the end and raises IndexError — confirm inputs guarantee a terminator.
    """
    # Flatten the corpus while remembering line boundaries for first words.
    words_by_line = [line.split() for line in lines]
    words = [word for line in words_by_line for word in line]
    word_counts = Counter(words)
    first_words = [line[0] for line in words_by_line]

    # Get a random first word
    random_index = random.randint(0, len(first_words) - 1)
    random_first_word = first_words[random_index]

    # Initialize state
    out = [random_first_word]
    last_added_word = random_first_word
    done = False
    while not done:
        current_index = 0
        times_previous_seen = 0
        # Get random count threshold with upper bound the count of the
        # previously added word
        count_threshold = random.randint(1, word_counts[last_added_word])
        while times_previous_seen < count_threshold:
            # Iterate over all words until you have encountered the
            # previously added word count_threshold times
            if words[current_index] == last_added_word:
                times_previous_seen += 1
            current_index += 1
        # ``current_index`` now points just past the chosen occurrence,
        # i.e. at its successor word.
        word_to_add = words[current_index]
        out.append(word_to_add)
        last_added_word = word_to_add
        # Done when punctuation is encountered and there are at least 3 words
        done = last_added_word[-1] in [".", "?", "!"] and len(out) > 3
    return " ".join(out)
9183823e12129911a49b31305f954d93cd0dd6d4
690,784
def __handle_result(process):
    """
    Handles the results from the executed program.

    :param process: The process (must expose ``communicate()``,
        ``returncode`` and ``args``)
    :returns: the data from stdout of the program, decoded with
        undecodable bytes ignored
    :raises Exception: if the process exited with a non-zero return code;
        the message includes the process's stderr output
    """
    out = process.communicate()
    # ``is not 0`` compared object identity, not value — it only worked by
    # accident of CPython's small-int caching and is a SyntaxWarning on
    # Python 3.8+.  ``!= 0`` is the correct comparison.
    if process.returncode != 0:
        raise Exception("When executing "
                        "'{}' exited with return code: '{}' "
                        " and message:\n{}".format(
                            process.args,
                            process.returncode,
                            out[1].decode()))
    return out[0].decode(errors='ignore')
d8e89e5fffc8a24e6dfbfab329b47c24f1000ad3
690,785
import os


def isExe(fname):
    """Check whether ``fname`` exists and is executable by the caller."""
    if not os.path.exists(fname):
        return False
    return os.access(fname, os.X_OK)
ee31bda6a855c426c32fcc7bddfedfb786edac3f
690,786
import math


def sigmoid(x, derivative=False):
    """Sigmoid activation function.

    With ``derivative=True``, ``x`` is assumed to already be a sigmoid
    output, and the derivative ``x * (1 - x)`` is returned instead.
    """
    if not derivative:
        return 1 / (1 + math.e ** (-x))
    return x * (1 - x)
f650ea7fd214f721c246e51559dfee9341d21be7
690,787
def get_edge_score(ref_que_conn_scores):
    """Return a ``{edge: best score}`` dict.

    Parameters
    ----------
    ref_que_conn_scores : dict
        Maps each edge to an iterable of candidate scores.

    Returns
    -------
    dict
        Each edge mapped to the maximum of its scores.
    """
    # A dict comprehension replaces the manual build-loop (same semantics).
    return {edge: max(scores) for edge, scores in ref_que_conn_scores.items()}
b0062dc3eee02ef58c5277189ab125f954666b4a
690,788
def _osfify_urls(data):
    """
    Formats `data` object with OSF API URL

    Parameters
    ----------
    data : object
        If dict with a `url` key, will format OSF_API with relevant values

    Returns
    -------
    data : object
        Input data with all `url` dict keys formatted (mutated in place
        for containers; strings are returned unchanged)
    """
    OSF_API = "https://files.osf.io/v1/resources/{}/providers/osfstorage/{}"

    if isinstance(data, str):
        return data
    if isinstance(data, dict):
        # Only dicts can carry a 'url' entry to rewrite.  The original
        # ``'url' in data`` membership test also matched a *list* that
        # happened to contain the string "url" and then crashed on
        # ``data['url']``; the explicit dict branch fixes that.
        if 'url' in data:
            data['url'] = OSF_API.format(*data['url'])
        for key, value in data.items():
            data[key] = _osfify_urls(value)
    else:
        # Any other container (e.g. list): recurse element-wise in place.
        for n, value in enumerate(data):
            data[n] = _osfify_urls(value)
    return data
9bacd7e60d7412a4cdcd6acf38b59b9d2de0b25a
690,789
def gcd(x: int, m: int) -> int:
    """
    最大公約数を求める
    (Compute the greatest common divisor via the Euclidean algorithm,
    here in iterative form — same remainder sequence as the recursion.)
    """
    while m != 0:
        x, m = m, x % m
    return x
8d476fe4f59d230a2f0e1481fb06ccba9a43b8d6
690,790
import operator


def schedule(tasks, alg=None):
    """Takes list of tasks (weight, length). Algorithm 'diff' or 'ratio'
    can be passed to determine the way of task comparison.

    Returns the tasks sorted in decreasing order of the chosen score,
    with ties broken so larger (weight, length) tuples come first.
    """
    # Defines operator dynamically. Subtraction when algorithm
    # is based on weight and length difference, otherwise division is used
    # what reflects scheduling by ratio (optimal).
    operation = operator.sub if alg == 'diff' else operator.truediv
    # schedules jobs in decreasing order of the difference (weight - length)
    # or ratio (weight / length)
    tasks = sorted(tasks, key=lambda x: operation(x[0], x[1]), reverse=True)

    # handle ties so that bigger weights go first.
    # ``temp`` accumulates a run of equal-score tasks; when the run ends it
    # is re-sorted descending (tuple order) and written back in place.
    difference = operation(tasks[0][0], tasks[0][1])
    temp = []
    for idx, i in enumerate(tasks):
        diff = operation(i[0], i[1])
        if diff == difference:
            temp.append(i)
        else:
            difference = diff
            if len(temp) > 1:
                temp.sort(reverse=True)
                tasks[idx - len(temp):idx] = temp
            temp = [i]
    # Flush the final run of ties (the loop only flushes on a score change).
    if len(temp) > 1:
        temp.sort(reverse=True)
        tasks[len(tasks) - len(temp):len(tasks)] = temp
    return tasks
d9eee8b5025b6b3877cf87537f804ce9eb239408
690,791
import argparse


def _parse_args() -> argparse.Namespace:
    """Parses arguments for creating a classification dataset.

    Three argument groups: common (output dir, mode, test-set matching),
    CSV creation, and train/val/test split creation.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Creates classification dataset.')

    # arguments relevant to both creating the dataset CSV and splits.json
    parser.add_argument(
        'output_dir',
        help='path to directory where the 3 output files should be '
             'saved: 1) dataset CSV, 2) label index JSON, 3) splits JSON')
    parser.add_argument(
        '--mode', nargs='+', choices=['csv', 'splits'],
        default=['csv', 'splits'],
        help='whether to generate only a CSV, only a splits.json file (based '
             'on an existing classification_ds.csv), or both')
    parser.add_argument(
        '--match-test', nargs=2, metavar=('CLASSIFICATION_CSV', 'SPLITS_JSON'),
        help='path to an existing classification CSV and path to an existing '
             'splits JSON file from which to match test set')

    # arguments only relevant for creating the dataset CSV
    csv_group = parser.add_argument_group(
        'arguments for creating classification CSV')
    csv_group.add_argument(
        '-q', '--queried-images-json',
        help='path to JSON file containing image paths and classification info')
    csv_group.add_argument(
        '-c', '--cropped-images-dir',
        help='path to local directory for saving crops of bounding boxes')
    csv_group.add_argument(
        '-d', '--detector-output-cache-dir',
        help='(required) path to directory where detector outputs are cached')
    csv_group.add_argument(
        '-v', '--detector-version',
        help='(required) detector version string, e.g., "4.1"')
    csv_group.add_argument(
        '-t', '--threshold', type=float, default=0.8,
        help='confidence threshold above which to crop bounding boxes')
    csv_group.add_argument(
        '--min-locs', type=int,
        help='minimum number of locations that each label must have in order '
             'to be included (does not apply to match-test-splits)')

    # arguments only relevant for creating the splits JSON
    splits_group = parser.add_argument_group(
        'arguments for creating train/val/test splits')
    splits_group.add_argument(
        '--val-frac', type=float,
        help='(required) fraction of data to use for validation split')
    splits_group.add_argument(
        '--test-frac', type=float,
        help='fraction of data to use for test split, must be provided if '
             '--match-test is not given')
    splits_group.add_argument(
        '--method', choices=['random', 'smallest_first'], default='random',
        help='"random": randomly tries up to 10,000 different train/val/test '
             'splits and chooses the one that best meets the scoring criteria, '
             'does not support --label-spec. '
             '"smallest_first": greedily divides locations into splits '
             'starting with the smallest class first. Supports --label-spec.')
    splits_group.add_argument(
        '--label-spec',
        help='optional path to label specification JSON file, if specifying '
             'dataset priority. Requires --method=smallest_first.')

    return parser.parse_args()
89bafe67f8332aca17ea98956d0acd656e958a74
690,792
import re


def name_simplify(name, lower=True, rm_spaces=True):
    """Simplify an author name.

    Strips every character that is not alphanumeric or a space, then
    optionally lower-cases the result and removes the remaining spaces.
    """
    simplified = re.sub('[^A-Za-z0-9 ]', '', name)
    if lower:
        simplified = simplified.lower()
    return simplified.replace(' ', '') if rm_spaces else simplified
61a8cfb23fde45f7460b3ea3cd25fce2666142d9
690,793
import time


def get_current_time_as_str():
    """Return the current UTC time formatted as ``YYYY-MM-DD HH:MM:SS``.

    :return: formatted timestamp string
    """
    now_utc = time.gmtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now_utc)
8fdcb5793631bc7834df2e714c937d6779e5895d
690,794
def linear_map(values, new_min=0.0, new_max=1.0):
    """Linearly rescale ``values`` into the range [new_min, new_max].

    Equivalent to Matlab's mat2gray.  ``values`` must expose ``min()`` and
    ``max()`` methods (e.g. a NumPy array).
    """
    old_min = values.min()
    span_old = values.max() - old_min
    span_new = new_max - new_min
    # Same operation order as the classic formula so float results are
    # bit-for-bit identical.
    return ((values - old_min) * span_new) / span_old + new_min
2756997048bfb64a4b4f6e5f52b5bc26bf80aa77
690,795
import numpy


def extract_records(recarray, names):
    """Convert a record-type array and list of names into a float array.

    Each requested field becomes one column of the returned 2-D array.
    """
    columns = [recarray[name] for name in names]
    return numpy.vstack(columns).T
7542e101b0cb425d21f8b9b57539b676318b5e02
690,796
def inttodate(i, lim=1965, unknown='U', sep='-', order="asc", startsatyear=0):
    """ transforms an int representing days into a date

    Args:
        i: the int (a day count)
        lim: the limited year below which we have a mistake
        unknown: what to return when unknown (date is below the limited year)
        sep: the sep between your date (e.g. /, -, ...)
        order: if 'asc', do d,m,y else do y,m,d
        startsatyear: when is the year to start counting for this int

    Returns:
        str: the date or unknown
    """
    # Whole years assuming 365 days each (leap years are ignored).
    a = int(i // 365)
    if a > lim:
        a = str(a + startsatyear)
        r = i % 365
        # NOTE(review): months are derived by dividing the day-of-year by
        # 32, not by real month lengths — presumably a deliberate
        # approximation so day-of-month stays below 32; confirm intended.
        # Month/day of 0 are clamped up to "1".
        m = str(int(r // 32)) if int(r // 32) > 0 else str(1)
        r = r % 32
        d = str(int(r)) if int(r) > 0 else str(1)
    else:
        return unknown
    return d + sep + m + sep + a if order == "asc" else a + sep + m + sep + d
f440e136b1e4707403b3b1d565724af8f212a140
690,797
def parse_ref_words(argstr):  # address: (expect, mask)
    """
    All three of thse are equivilent:
    ./solver.py --bytes 0x31,0xfe,0xff dmg-cpu/rom.txt
    ./solver.py --bytes 0x00:0x31,0x01:0xfe,0x02:0xff dmg-cpu/rom.txt
    ./solver.py --bytes 0x00:0x31:0xFF,0x01:0xfe:0xFF,0x02:0xff:0xFF dmg-cpu/rom.txt
    Which maps to:
    ref_words = {
        0x00: (0x31, 0xFF),
        0x01: (0xfe, 0xFF),
        0x02: (0xff, 0xFF),
        }
    """
    result = {}
    for position, constraint in enumerate(argstr.split(",")):
        fields = constraint.split(":")
        assert len(fields) <= 3
        if len(fields) == 1:
            # Bare value: the address is implicit (sequential position).
            address, expect = position, int(fields[0], 0)
        else:
            address, expect = int(fields[0], 0), int(fields[1], 0)
        # Third field optionally masks the comparison; default is all bits.
        mask = int(fields[2], 0) if len(fields) == 3 else 0xFF
        result[address] = (expect, mask)
    return result
dc558139a3426f926e76096c977e3242173af793
690,798
import re


def string_matching(s, keywords):
    """Return True if any pattern in ``keywords`` matches somewhere in ``s``."""
    return any(re.search(pattern, s) for pattern in keywords)
61650787e4df30fe803af9807f674f703b33ad05
690,800
import subprocess


def get_git_root():
    """Get the root of the current ``git`` repository."""
    raw = subprocess.check_output(("git", "rev-parse", "--show-toplevel"))
    return raw.strip().decode("utf-8")
9e8f300d5cfebf3c09f8c3dd076ddcc5047b11c5
690,801
def degrees2gradians(value: float) -> float:
    """Convert an angle from degrees to gradians (180 deg == 200 gon)."""
    # Multiply first, then divide — matches the original evaluation order.
    return (value * 200) / 180
b509b65aba5ced6cd47db914f9fed6ec52ac51f0
690,803
import argparse
from sys import version
import os


def _parse_args(argv):
    """
    Parse command-line arguments.

    :type argv: list
    :param argv: Command-line arguments to parse

    :rtype: argparse.Namespace
    :return: Parsed arguments
    """
    p = argparse.ArgumentParser(prog='goverge')
    # NOTE(review): ``version`` is ``sys.version`` (the Python interpreter
    # version string), not the goverge package version — confirm intended.
    p.add_argument(
        '--version', action='version', version='goverge ' + version,
        help='Display the installed version'
    )
    p.add_argument(
        '--go_flags', action='append', default=None,
        help='Go build flags to use when running tests example: '
             '--go_flags=-x --go_flags=-timeout=10m'
    )
    p.add_argument(
        '--covermode', action='store', default='count',
        help='Mode to use for coverage: set, count, or atomic'
    )
    p.add_argument(
        '--godep', action='store_true', default=False,
        help='Run coverage using the projects godep files.'
    )
    p.add_argument(
        '--html', action='store_true', default=False,
        help="View a html report of the coverage file that is generated."
    )
    p.add_argument(
        '--race', action='store_true', default=False,
        help="Run tests using the -race flag"
    )
    p.add_argument(
        '--project_import', action='store',
        help=(
            "The import path of the project. leaving this blank will get the "
            "project name Using go list but in some cases that doesn't work "
            "and needs to be manually entered. "
            "example: github.com/Workiva/goverge"
        )
    )
    p.add_argument(
        '--short', action='store_true', default=False,
        help='Run coverage using the -short flag'
    )
    p.add_argument(
        '--tag', action='store',
        help="Use an optional build tag when running tests."
    )
    p.add_argument(
        '--test_path', default=None, action='append',
        help=(
            'Path(s) to a specific package to get the coverage on\n'
            'Example: --test_path path/one --test_path path/two'
        )
    )
    p.add_argument(
        '--threads', action='store', default=4,
        help='The Maximum number of threads to use when running tests.'
    )
    p.add_argument(
        '--xml', action='store_true', default=False,
        help=(
            "Generate xml reports of test runs, assumes that go2xunit is "
            "installed"
        )
    )
    # Default report location is resolved at import time from the CWD.
    p.add_argument(
        '--xml_dir', action='store', default=os.getcwd() + "/xml_reports/",
        help="The location to put the xml reports that are generated."
    )
    p.add_argument(
        '--ignore', nargs='+', action='store', default=None,
        help="List of directories to ignore"
    )

    return p.parse_args(argv)
7b3f57bdb97cf83f217654b07110d7d141cf90a9
690,804
from typing import Dict
from typing import List
from typing import Tuple


def get_intents_keywords(entities: Dict) -> Tuple[List[str], List[str]]:
    """Obtains the list of intents and the list of keywords from an Wit.ai entity."""
    intents: List[str] = []
    keywords: List[str] = []
    for name, annotations in entities.items():
        if name != "intent":
            # Every non-intent entity name counts as a keyword.
            keywords.append(name)
        else:
            intents.extend(dct.get("value") for dct in annotations)
    return intents, keywords
fa161ed1f5468d6ed86ab1eccee5943f60c4b173
690,805
def application_case_func(decorator_application_scenario, current_cases):
    """Returns the case function used behind `decorator_application_scenario`."""
    scenario_case = current_cases["decorator_application_scenario"]["p"]
    return scenario_case.func
5135e01e14e2dd54b8afa3db77438f6241be9497
690,806
def _splits_for_summary_description_openapi_doc(lines):
    """Split docstring lines into summary, description, and openapi spec.

    The openapi block opens with a line equal to '```openapi' and closes
    with a line equal to '```'.

    Parameters
    ----------
    lines : list[str]

    Returns
    -------
    tuple
        summary : str (the first non-openapi line)
        description : list[str] (the remaining non-openapi lines)
        openapi : list[str]
    """
    info = []
    openapi = []
    in_openapi = False
    for line in lines:
        if line == '```openapi':
            in_openapi = True
        elif in_openapi and line == '```':
            in_openapi = False
        elif in_openapi:
            openapi.append(line)
        else:
            info.append(line)
    return info[0], info[1:], openapi
773f060d2af7cda41fbc4a941e9fa57372b2cede
690,807
import argparse
import sys


def parse_args():
    """
    Parse input arguments for running decision-based attacks
    (boundary attack, boundary attack++/HSJA, and white-box CW).
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("--gpu", type=int, required=True)
    parser.add_argument('--exp-dir', default='logs', type=str,
                        help='directory to save results and logs')
    parser.add_argument('--dataset', required=True, type=str,
                        choices=['CIFAR-10', 'CIFAR-100', 'ImageNet', 'TinyImageNet'],
                        help='which dataset to use')
    parser.add_argument('--phase', default='test', type=str,
                        choices=['train', 'val', 'valv2', 'test'],
                        help='train, val, test')
    parser.add_argument('--arch', default=None, type=str,
                        help='victim network architecture')
    parser.add_argument('--all_archs', action="store_true")
    parser.add_argument('--targeted', action="store_true")
    parser.add_argument('--target_type', type=str, default='increment',
                        choices=['random', 'least_likely', "increment"])
    parser.add_argument('--norm', default='l2', type=str, choices=['l2'],
                        help='l2 attack or linf attack')
    parser.add_argument('--attack-method', default='ba',
                        choices=['ba', 'cw', 'bapp'], help='attack method')
    parser.add_argument('--save-all-steps', action='store_true',
                        help='save all intermediate adversarial images')
    parser.add_argument('--seed', default=0, type=int, help='random seed')
    parser.add_argument('--ssh', action='store_true',
                        help='whether or not we are executing command via ssh. '
                             'If set to True, we will not print anything to screen and only redirect them to log file')
    parser.add_argument('--json-config', type=str,
                        default='./configures/boundary_attack.json',
                        help='a configures file to be passed in instead of arguments')

    # bapp (a.k.a., hsja) parameters
    parser.add_argument('--bapp-iteration', default=132, type=int,
                        help='boundary attack++: number of iterations')
    parser.add_argument('--bapp-initial-num-eval', default=100, type=int,
                        help='boundary attack++: initial number of evaluations for gradient estimation')
    parser.add_argument('--bapp-max-num-eval', default=10000, type=int,
                        help='boundary attack++: max number of evaluations for gradient estimation')
    parser.add_argument('--bapp-stepsize-search', default='geometric_progression', type=str,
                        choices=['geometric_progression', 'grid_search'],
                        help='boundary attack++: step size search method')
    parser.add_argument('--bapp-gamma', default=0.01, type=float,
                        help='boundary attack++: to decide binary search threshold')
    parser.add_argument('--bapp-batch-size', default=256, type=int,
                        help='boundary attack++: batch size for model prediction')
    parser.add_argument('--bapp-internal-dtype', default='float32', type=str,
                        help='boundary attack++: internal dtype. '
                             'foolbox default value is float64')

    # boundary attack parameters
    parser.add_argument('--ba-iteration', default=1200, type=int,
                        help='boundary attack: number of iterations')
    parser.add_argument('--ba-max-directions', default=25, type=int,
                        help='boundary attack: batch size')
    parser.add_argument('--ba-spherical-step', default=1e-2, type=float,
                        help='boundary attack: spherical step size')
    parser.add_argument('--ba-source-step', default=1e-2, type=float,
                        help='boundary attack: source step size')
    parser.add_argument('--ba-step-adaptation', default=1.5, type=float,
                        help='boundary attack: step size adaptation multiplier')
    parser.add_argument('--ba-batch-size', default=1, type=int,
                        help='boundary attack: batch size')
    parser.add_argument('--ba-no-tune-batch-size', action='store_true',
                        help='boundary attack: disable automatic batch size tuning')
    parser.add_argument('--ba-no-threaded', action='store_true',
                        help='boundary attack: do not use multi thread to generate candidate and random numbers')
    parser.add_argument('--ba-internal-dtype', default='float32', type=str,
                        help='boundary attack: internal dtype. '
                             'foolbox default value is float64')

    # cw attack (white-box) parameters
    parser.add_argument('--cw-binary-search-step', default=5, type=int,
                        help='cw attack: number of binary search steps of constant')
    parser.add_argument('--cw-max-iteration', default=1000, type=int,
                        help='cw attack: maximum number of iterations')
    parser.add_argument('--cw-confidence', default=0.0, type=float,
                        help='cw attack: confidence threshold')
    parser.add_argument('--cw-learning-rate', default=0.005, type=float,
                        help='cw learning: initial learning rate')
    parser.add_argument('--cw-initial-const', default=0.01, type=float,
                        help='cw attack: initial constant')

    # attacking a defended model
    parser.add_argument('--attack_defense', action="store_true")
    parser.add_argument('--defense_model', type=str, default=None)
    parser.add_argument('--max_queries', type=int, default=10000)
    parser.add_argument('--defense_norm', type=str, choices=["l2", "linf"], default='linf')
    parser.add_argument('--defense_eps', type=str, default="")
    parser.add_argument('--epsilon', type=float, help='the lp perturbation bound')

    # With no arguments at all, show usage instead of failing on required args.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args
0a5ad9b8a91571f9d7f7a050e8bdf7da4b288400
690,808
import os


def is_valid_dir(parser, arg):
    """Checks if the 'dirname' argument is a valid directory name.

    Positional arguments:
    parser -- an ArgumentParser instance from argparse
    arg -- the 'dirname' argument
    """
    if os.path.isdir(arg):
        return arg
    # parser.error() normally exits with a usage message.
    parser.error("The directory {0} does not exist!".format(arg,))
48dfe154f9a5f5b20379297fe6c38e2c60063fb4
690,809
from six import string_types


def IsListlike(arg):
    """
    This function just tests to check if the object acts like a list.

    Strings are explicitly excluded even though they are iterable; any
    other object that can be iterated counts as list-like.
    """
    if isinstance(arg, string_types):
        return False
    # EAFP: iterate once; non-iterables raise TypeError.
    try:
        _ = [x for x in arg]
        return True
    except TypeError:  # catch when for loop fails
        return False
79ce71a8a1e53815207c2e442fb7733aae530a68
690,810
def invisible_colorbar(x, y):
    """
    hacky way to remove second colorbar - set x position so not visible
    """
    # Transparent 1px ticks make the bar effectively invisible.
    hidden_ticks = dict(size=1, color="rgba(0,0,0,0)")
    return {
        "x": x,
        "y": y,
        "len": 0.1,
        "thickness": 1,
        "tickfont": hidden_ticks,
    }
673e60411edd77e7b0ed050dc56ee48523fe3188
690,811
import os


def basepath(path):
    """Get full basepath from path (trailing separators are ignored)."""
    normalized = os.path.normpath(path)
    return os.path.basename(normalized)
94aa1cd25c58d67fbbcc5fa1f8ff0b1786e7e10b
690,813
def is_a_valid_response_code(response):
    """Returns True if the response's status code is 200 (HTTP OK)."""
    status = response.status_code
    return status == 200
456b538252cfce2b425f9ce3b25a15f551ffd323
690,814
import csv
import os
import json


def convert_csv_to_json(csvfile, xmlNodes):
    """Convert CSV to JSON format.

    The first line of ``csvfile`` is treated as a header and skipped; the
    remaining rows are parsed using the field names in ``xmlNodes``.  The
    rows are round-tripped through a temp JSON file next to ``csvfile``
    (preserving the original behavior) which is removed before returning.

    :param csvfile: path to the CSV file
    :param xmlNodes: list of field names for the rows
    :return: list of row dicts
    """
    tempjson = os.path.join(os.path.split(csvfile)[0], "temp.json")
    # Context managers close every handle; the original leaked both the
    # CSV handle (shadowed by the temp-file ``with``) and the temp-file
    # read handle (``open(tempjson).read()``).
    with open(csvfile, "rt", encoding="utf8") as fp:
        next(fp)  # Ignore header
        csv_reader = csv.DictReader(fp, xmlNodes)
        with open(tempjson, 'w') as out:
            out.write(json.dumps([r for r in csv_reader]))
    with open(tempjson) as fin:
        rjson = json.loads(fin.read())
    os.remove(tempjson)
    return rjson
9a9b3f2eb44561ab8b1e283f45faebbc627bd66b
690,815
import functools
import six


def filter_data(f):
    """Decorator that post-processes data returned by Docker.

    This will avoid any surprises with different versions of Docker:
    string keys of every dict in the returned (possibly nested)
    list/dict structure are duplicated under their lower-cased form,
    with their values recursively filtered.
    """
    # assigned=[] keeps wraps() from failing when ``f`` lacks the usual
    # __name__/__doc__ attributes (e.g. partial objects).
    @functools.wraps(f, assigned=[])
    def wrapper(*args, **kwds):
        out = f(*args, **kwds)

        def _filter(obj):
            # Rebuild lists element-wise so nested containers get filtered.
            if isinstance(obj, list):
                new_list = []
                for o in obj:
                    new_list.append(_filter(o))
                obj = new_list
            if isinstance(obj, dict):
                # NOTE(review): inserting a lowered key while iterating
                # ``obj.items()`` changes the dict's size when the key is
                # not already lower-case, which can raise RuntimeError on
                # Python 3 — confirm keys are expected to be lower already.
                for k, v in obj.items():
                    if isinstance(k, six.string_types):
                        obj[k.lower()] = _filter(v)
            return obj
        return _filter(out)
    return wrapper
26354c3fcced4a21cf095a8d4b2f75ee38cb68a1
690,817