seed
stringlengths
1
14k
source
stringclasses
2 values
def _get_train_steps(num_examples, train_epochs, train_batch_size): """Determine the number of training steps.""" return num_examples * train_epochs // train_batch_size + 1
bigcode/self-oss-instruct-sc2-concepts
def make_synteny(genes, isoforms):
    """Return synteny (count of distinct isoforms) for a list of genes.

    Genes missing from ``isoforms`` contribute a single shared ``None``.
    """
    distinct = {isoforms.get(gene) for gene in genes}
    return len(distinct)
bigcode/self-oss-instruct-sc2-concepts
import torch


def view_as_real(data):
    """Named-tensor version of ``torch.view_as_real()``.

    ``torch.view_as_real`` does not support named tensors, so the names
    are stripped first, the complex tensor is viewed as real (adding a
    trailing size-2 dimension), and the original names are re-applied
    with an extra trailing dimension named "complex".
    """
    names = data.names
    return torch.view_as_real(data.rename(None)).refine_names(*names + ("complex",))
bigcode/self-oss-instruct-sc2-concepts
def bitstring_readable(data, batch_size, model_output=None, whole_batch=False):
    """Produce a human readable representation of the sequences in data.

    Args:
      data: data to be visualised
      batch_size: size of batch
      model_output: optional model output tensor to visualize alongside data.
      whole_batch: whether to visualise the whole batch. Only the first sample
          will be visualized if False

    Returns:
      A string used to visualise the data batch
    """

    def _readable(datum):
        # '-' marks a zero entry; any non-zero value prints as an integer.
        return '+' + ' '.join(['-' if x == 0 else '%d' % x for x in datum]) + '+'

    obs_batch = data.observations
    targ_batch = data.target

    # Visualise every sample in the batch, or just the first one.
    iterate_over = range(batch_size) if whole_batch else range(1)

    batch_strings = []
    for batch_index in iterate_over:
        # NOTE(review): assumes batch-major tensors of shape
        # (batch, time, features) — confirm against the caller.
        obs = obs_batch[batch_index, :, :]
        targ = targ_batch[batch_index, :, :]

        readable_obs = 'Observations:\n' + '\n'.join([_readable(obs_vector) for obs_vector in obs])
        readable_targ = 'Targets:\n' + '\n'.join([_readable(targ_vector) for targ_vector in targ])
        strings = [readable_obs, readable_targ]

        if model_output is not None:
            output = model_output[batch_index, :, :]
            strings.append('Model Output:\n' + '\n'.join([_readable(output_vec) for output_vec in output]))

        batch_strings.append('\n\n'.join(strings))

    # Samples are separated by four blank lines.
    return '\n' + '\n\n\n\n'.join(batch_strings)
bigcode/self-oss-instruct-sc2-concepts
def parse_crs_string(string: str) -> str:
    """Classify a CRS/spatial-projection string by its format.

    Args:
        string: a string with CRS/projection data.

    Returns:
        One of "epsg", "proj4", "wkt", or "string".
    """
    # Order matters: an EPSG tag wins over any other marker.
    if "epsg:" in string.lower():
        return "epsg"
    if "+proj" in string:
        return "proj4"
    if "SPHEROID" in string:
        return "wkt"
    return "string"
bigcode/self-oss-instruct-sc2-concepts
def bytes_to_int(s):
    """Return the big-endian integer value of a byte string.

    Args:
        s: bytes to interpret as a big-endian unsigned integer. A str is
            accepted for backward compatibility and is encoded as Latin-1
            so each character maps to its byte value.

    Returns:
        int: numeric interpretation of binary string `s`
    """
    # BUG FIX: the original used the Python 2-only ``s.encode('hex')``
    # idiom, which raises LookupError/AttributeError on Python 3.
    if isinstance(s, str):
        s = s.encode('latin-1')
    return int.from_bytes(s, byteorder='big')
bigcode/self-oss-instruct-sc2-concepts
def make_space(space_padding=0):
    """Return a string of ``space_padding`` spaces. Defaults to 0.

    Non-positive values yield the empty string, matching the original
    ``range``-loop behavior.
    """
    # String repetition is a single C-level allocation instead of the
    # original quadratic ``+=`` concatenation loop.
    return ' ' * space_padding
bigcode/self-oss-instruct-sc2-concepts
def rename_bindnames(tqry, li_adjust):
    """Rewrite ``%(name)s`` bind placeholders in a query template.

    For each ``(bindname, attrname)`` pair in ``li_adjust`` the
    placeholder ``%(bindname)s`` is replaced by ``%(attrname)s`` so a
    predefined query can be driven by bind dicts keyed differently, e.g.
    ``rename_bindnames(qry, [("custid", "customer")])`` lets
    ``{"customer": "cust001"}`` satisfy a query written against
    ``%(custid)s``.
    """
    for bindname, attrname in li_adjust:
        tqry = tqry.replace("%%(%s)s" % bindname, "%%(%s)s" % attrname)
    return tqry
bigcode/self-oss-instruct-sc2-concepts
def _format(string): """ Formats a class name correctly for checking function and class names. Strips all non-alphanumeric chars and makes lowercase. """ return ''.join(list(filter(str.isalnum, string))).lower()
bigcode/self-oss-instruct-sc2-concepts
def open_and_read_file(file_path):
    """Read the entire contents of the file in as a string."""
    # Use a context manager so the handle is closed deterministically;
    # the original leaked it until garbage collection.
    with open(file_path) as infile:
        return infile.read()
bigcode/self-oss-instruct-sc2-concepts
def createFromDocument(doc):
    """
    Create an empty JS range from a document

    @param doc DOM document
    @return an empty JS range
    """
    empty_range = doc.createRange()
    return empty_range
bigcode/self-oss-instruct-sc2-concepts
def _lsb_2fold(aa, bit): """ This function embeds a pair of bits in 2/3 fold degenerative codon. :param aa: amino acid information. :param bit: bit (character 2 e.g. 0) which should be embedded in codon. :return: watermarked codon (string) e.g. AGA. """ if bit == '0': return aa["codons"][0] else: return aa["codons"][1]
bigcode/self-oss-instruct-sc2-concepts
def encoder_type(encode):
    """Map a user encoding-menu selection to the encoder name.

    Unknown selections map to "ERROR".
    """
    menu_to_encoder = {
        '0': "",
        '1': "shikata_ga_nai",
        '2': "",
        '3': "MULTIENCODE",
        '4': "BACKDOOR",
    }
    return menu_to_encoder.get(encode, "ERROR")
bigcode/self-oss-instruct-sc2-concepts
def bounding_box_circle(svg, node, font_size):
    """Bounding box for a circle node as (x, y, width, height)."""
    cx, cy = svg.point(node.get('cx'), node.get('cy'), font_size)
    radius = svg.length(node.get('r'), font_size)
    diameter = 2 * radius
    return cx - radius, cy - radius, diameter, diameter
bigcode/self-oss-instruct-sc2-concepts
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
    """Merge segments A and B, add [CLS] and [SEP], and build token types.

    Segment A (plus [CLS] and its [SEP]) gets type 0; segment B and its
    trailing [SEP] get type 1. The trailing [SEP] is only added when
    segment B is non-empty.
    """
    # [CLS] + segment A + [SEP], all with token type 0.
    tokens = [cls_id, *tokens_a, sep_id]
    tokentypes = [0] * len(tokens)
    # Segment B with token type 1.
    tokens.extend(tokens_b)
    tokentypes.extend(1 for _ in tokens_b)
    if tokens_b:
        tokens.append(sep_id)
        tokentypes.append(1)
    return tokens, tokentypes
bigcode/self-oss-instruct-sc2-concepts
def _get_parameters_proto(host_calls_dictionary): """Get the FormalParameterProtos for the first host call in the dictionary.""" return host_calls_dictionary['host_calls'][0].parameters
bigcode/self-oss-instruct-sc2-concepts
def sql_flush(style, connection, only_django=False):
    """
    Returns a list of the SQL statements used to flush the database.

    If only_django is True, then only table names that have associated
    Django models and are in INSTALLED_APPS will be included.
    """
    introspection = connection.introspection
    if only_django:
        tables = introspection.django_table_names(only_existing=True)
    else:
        tables = introspection.table_names()
    return connection.ops.sql_flush(style, tables, introspection.sequence_list())
bigcode/self-oss-instruct-sc2-concepts
def lit_eq(lit1, lit2):
    """Return True when the two literals are syntactically equal."""
    are_equal = (lit1 == lit2)
    return are_equal
bigcode/self-oss-instruct-sc2-concepts
def greedy_action(q, state):
    """
    Computes the greedy action.

    :param q: action-value table.
    :type q: bidimensional numpy array.
    :param state: current state.
    :type state: int.
    :return: greedy action (the first action attaining the maximum value,
        matching the original strict-inequality scan).
    :rtype: int.
    """
    # argmax scans the row in C and, like the original Python loop,
    # returns the first index attaining the maximum.
    return int(q[state].argmax())
bigcode/self-oss-instruct-sc2-concepts
import yaml


def load_config_file(path):
    """
    Load and parse a yaml file.

    Parameters:
        path (str): full yaml path location

    Returns:
        dict: yaml file parsed into a dict
    """
    with open(path) as file:
        # SECURITY: safe_load refuses to instantiate arbitrary Python
        # objects, unlike FullLoader, while handling all standard YAML.
        return yaml.safe_load(file)
bigcode/self-oss-instruct-sc2-concepts
def clean_nginx_git_tag(tag):
    """
    Return a cleaned ``version`` string from an nginx git tag.

    Nginx tags git releases as in `release-1.2.3`; this removes the
    `release-` prefix. For example:
    >>> clean_nginx_git_tag("release-1.2.3") == "1.2.3"
    True
    >>> clean_nginx_git_tag("1.2.3") == "1.2.3"
    True
    """
    prefix = "release-"
    if tag.startswith(prefix):
        tag = tag.partition(prefix)[2]
    return tag
bigcode/self-oss-instruct-sc2-concepts
def check_for_running_sfn(session, arn):
    """Check if a downsample step function is already running.

    Args:
        session (boto3.session):
        arn (str): Step function arn

    Returns:
        (bool)
    """
    sfn = session.client('stepfunctions')
    resp = sfn.list_executions(stateMachineArn=arn,
                               statusFilter='RUNNING',
                               maxResults=1)
    return bool(resp.get('executions'))
bigcode/self-oss-instruct-sc2-concepts
def has_usable_review_ui(user, review_request, file_attachment):
    """Returns whether a review UI is set and can be used.

    Preserves the short-circuit semantics of ``a and b``: a falsy
    ``review_ui`` (e.g. None) is returned as-is.
    """
    review_ui = file_attachment.review_ui
    if not review_ui:
        return review_ui
    return review_ui.is_enabled_for(user=user,
                                    review_request=review_request,
                                    file_attachment=file_attachment)
bigcode/self-oss-instruct-sc2-concepts
def to_binary(number: int) -> str:
    """Convert a decimal number to its binary digit string.

    :param number: The number to convert to binary
    :return: The binary representation of the number (no '0b' prefix)
    """
    with_prefix = bin(number)
    return with_prefix[2:]
bigcode/self-oss-instruct-sc2-concepts
import base64


def base64_encode(string):
    """URL-safe base64 encode without '=' padding.

    ``urlsafe_b64encode`` pads with '=', which is not URL safe when used
    in URL parameters; the padding is stripped from the result.
    """
    return base64.urlsafe_b64encode(string).rstrip(b"=")
bigcode/self-oss-instruct-sc2-concepts
def handler(value, **kwargs):
    """Split the supplied string on an embedded delimiter, giving a list.

    Format of value: <delimiter>::<value>

    For example ``${split ,::subnet-1,subnet-2,subnet-3}`` yields
    ``["subnet-1", "subnet-2", "subnet-3"]``. Particularly useful when an
    output from another stack contains a delimited list, e.g.
    ``${split ,::${output vpc::PrivateSubnets}}``.
    """
    delimiter, sep, text = value.partition("::")
    if not sep:
        raise ValueError("Invalid value for split: %s. Must be in "
                         "<delimiter>::<text> format." % value)
    return text.split(delimiter)
bigcode/self-oss-instruct-sc2-concepts
def replace_string_in_list(str_list: list, original_str: str, target_str: str):
    """
    Replace a substring in every string of a list.

    Args:
        str_list (list): List containing the strings to process.
        original_str (str): The substring to be replaced.
        target_str (str): The replacement substring.

    Returns, list, a new list with the replacement applied to each item.
    """
    replaced = []
    for item in str_list:
        replaced.append(item.replace(original_str, target_str))
    return replaced
bigcode/self-oss-instruct-sc2-concepts
def _wait_before_serving(seconds): """Tell the server not to write to this socket for the specified time.""" def _helper(ps, soc): ps.delay_writing_for(seconds * 1000, soc) return _helper
bigcode/self-oss-instruct-sc2-concepts
def test_stability(v1, v2, precision=10e-3): """tests if two lists of lists of floats are equal but a certain precision Args: v1 (list[list[float]]): first list containing ints v2 (list[list[float]]): second list containing ints precision (float, optional): the precision after which v1 and v2 are not equal Returns: bool: True if the two lists are close enought, False otherwise """ v1 = [x for y in v1 for x in y] v2 = [x for y in v2 for x in y] for x1, x2 in zip(v1, v2): if abs(x2 - x1) > precision: return False return True
bigcode/self-oss-instruct-sc2-concepts
def cast_elements_to_string(cast_list):
    """
    Cast the top-level elements of a list to strings.

    Nested lists are not flattened first, so contained lists are cast to
    their string representation. Apply flatten_list() beforehand if you
    want per-element casting of nested content.
    """
    if not isinstance(cast_list, list):
        raise TypeError('cast_elements_to_string() must be passed a list!')
    return list(map(str, cast_list))
bigcode/self-oss-instruct-sc2-concepts
import torch


def test_observe_get_and_verify_response_input_unit(tmp_observe_class, method, tmp_val, monkeypatch):
    """
    Test that _get_and_verify_response_input works for
    self.sampling["method"] = "iterative" or "functions". Leverage
    monkeypatching and create a false class to mock that
    greattunes._observe will be called inside the TuneSession class in
    greattunes.__init__. Rely on manual input for the "iterative" option.
    """

    # define class under test and force the sampling method being exercised
    cls = tmp_observe_class
    cls.sampling["method"] = method

    # monkeypatch the "support" functions _get_response_function_input,
    # _read_response_manual_input
    def mock_get_response_function_input():
        # value returned when method == "functions"
        return torch.tensor([[tmp_val]], dtype=torch.double,
                            device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
    monkeypatch.setattr(
        cls, "_get_response_function_input", mock_get_response_function_input
    )

    # manual-input path returns a distinct value so the two branches are
    # distinguishable in the assertions below
    manual_tmp_val = tmp_val + 1.0

    def mock_read_response_manual_input(additional_text):
        return torch.tensor([[manual_tmp_val]], dtype=torch.double,
                            device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
    monkeypatch.setattr(
        cls, "_read_response_manual_input", mock_read_response_manual_input
    )

    # set kwarg response to None (so manually provided input is used)
    kwarg_response = None

    # run test
    output = cls._get_and_verify_response_input(response=kwarg_response)

    # each branch must surface the value produced by its mocked helper
    if method == "functions":
        assert output[0].item() == tmp_val
    elif method == "iterative":
        assert output[0].item() == manual_tmp_val
bigcode/self-oss-instruct-sc2-concepts
def no_walk_revctrl(dirname=''): """Return empty list. """ # Returning a non-empty list prevents egg_info from reading the # existing SOURCES.txt return ['']
bigcode/self-oss-instruct-sc2-concepts
def get_rm(g):
    """Return membrane resistivity in Ohm*m^2.

    g -- membrane conductivity in S/m^2
    """
    resistivity = 1 / g
    return resistivity
bigcode/self-oss-instruct-sc2-concepts
def load_md(path: str) -> list:
    """
    Load an existing file into a list of lines.

    :param path: path where the file is stored
    :return: list with the lines from the file (trailing newlines kept)
    """
    with open(path, "r", encoding="UTF-8") as mdfile:
        lines = mdfile.readlines()
    return lines
bigcode/self-oss-instruct-sc2-concepts
def clean_name(name):
    """
    Clean a proposed character name.

    Keeps only alphabetic characters and title-cases the result.
    """
    letters_only = ''.join(filter(str.isalpha, name))
    return letters_only.title()
bigcode/self-oss-instruct-sc2-concepts
def merge_dicts(*dicts, **kwargs):
    """Merge all dicts in `*dicts` into a single dict, and return the result.

    If any of the entries in `*dicts` is None, and `default` is specified
    as keyword argument, then return `default`."""
    merged = {}
    for mapping in dicts:
        if mapping is None and "default" in kwargs:
            return kwargs["default"]
        if mapping:
            merged.update(mapping)
    return merged
bigcode/self-oss-instruct-sc2-concepts
def calc_median(values_list):
    """Calculate the median of the list in O(n log n).

    Also returns the sorted list (for optional reuse) and its length.

    Returns:
        tuple: (median, sorted_list, n); median is 0.0 for an empty list.
    """
    median = 0.0
    sorted_list = sorted(values_list)
    n = len(sorted_list)
    if n == 0:
        return median, sorted_list, n
    half = n >> 1
    if n % 2 == 1:
        median = sorted_list[half]
    else:
        # BUG FIX: the middle pair of a 0-indexed even-length list sits at
        # indices half-1 and half. The original averaged half and half+1,
        # which is off by one and raises IndexError for n == 2.
        median = 0.5 * (sorted_list[half - 1] + sorted_list[half])
    return median, sorted_list, n
bigcode/self-oss-instruct-sc2-concepts
import json


def json_pp(json_object):
    """
    Convert an object into a pretty-printed JSON string.

    :param json_object: The object to be converted
    :return: A pretty formatted string (sorted keys, 4-space indent)
    """
    return json.dumps(json_object,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))
bigcode/self-oss-instruct-sc2-concepts
import string


def flow_key(flow):
    """Model a flow key string for ``ovs-ofctl``.

    Syntax taken from ``ovs-ofctl`` manpages:
    http://openvswitch.org/cgi-bin/ovsman.cgi?page=utilities%2Fovs-ofctl.8

    Example flow dictionary:
    flow = {
        'in_port': '1',
        'idle_timeout': '0',
        'actions': ['output:3']
    }

    :param flow: Flow description as a dictionary
    :return: String
    :rtype: str
    """
    # Every key except 'actions' becomes a "field=value" pair.
    pairs = ['%s=%s' % (key, val) for key, val in flow.items() if key != 'actions']
    fields = ','.join(pairs)
    # No actions == delete key: emit only the field list.
    if 'actions' in flow:
        return '{0},action={1}'.format(fields, ','.join(flow['actions']))
    return fields
bigcode/self-oss-instruct-sc2-concepts
def cleanup_decorator(func):
    """Decorator which runs self.cleanup() before and after the wrapped method."""
    def clean_before_after(self, *args, **kwargs):  # pylint: disable=missing-docstring
        # pylint only complains about a missing docstring on py2.7?
        self.cleanup()
        outcome = func(self, *args, **kwargs)
        self.cleanup()
        return outcome
    return clean_before_after
bigcode/self-oss-instruct-sc2-concepts
def formatUintHex64(value):
    """
    Format a 64-bit unsigned integer as a zero-padded hex string.
    """
    return "0x" + "%016x" % value
bigcode/self-oss-instruct-sc2-concepts
def get_account_id(sts_client):
    """Retrieve the AWS account ID for the authenticated user or role."""
    identity = sts_client.get_caller_identity()
    return identity['Account']
bigcode/self-oss-instruct-sc2-concepts
def bull_engulf(Open, high, low, close, t=4):
    """
    Identify whether prices form a Bullish Engulfing Pattern.

    Param:
        Open: array of open prices (5-day)
        high: array of high prices (5-day)
        low: array of low prices (5-day)
        close: array of close prices (5-day)
        t: int num. day -1 (5-1=4)
    Return:
        status: boolean true if it is the pattern
    """
    if len(Open) < 5:
        raise AttributeError('Prices are not length 5')
    # Day t is bullish (open < close) and its body engulfs day t-1's
    # bearish body (opens below the prior close, closes above the prior
    # open); the three days before that are all bearish with
    # successively lower closes, i.e. a preceding downtrend.
    if (Open[t] < close[t-1] and close[t] > Open[t-1] and
            Open[t] < close[t] and Open[t-1] > close[t-1] and
            (Open[t-2] > close[t-2] and Open[t-3] > close[t-3] and Open[t-4] > close[t-4]) and
            (close[t-2] < close[t-3] < close[t-4])):
        return True
    return False
bigcode/self-oss-instruct-sc2-concepts
def _fit_one_ovo(bin_clf_idx, multi_ovo, dataset, verbose): """Fit the OVO classifier given an index. This method fits a one-vs-one classifier wrt the positive and negative labels taken from the list clf_pair_idx at the index bin_clf_idx. Parameters ---------- bin_clf_idx : int Index of the binary classifier multi_ovo : CClassifierMulticlassOVO Instance of the multiclass OVO classifier. dataset : CDataset Training set. Must be a :class:`.CDataset` instance with patterns data and corresponding labels. verbose : int Verbosity level of the logger. """ # Resetting verbosity level. This is needed as objects # change id when passed to subprocesses and our logging # level is stored per-object looking to id multi_ovo.verbose = verbose # Take the classes indices tr_class_idx = multi_ovo._clf_pair_idx[bin_clf_idx][0] vs_class_idx = multi_ovo._clf_pair_idx[bin_clf_idx][1] multi_ovo.logger.info( "Training class {:} against class: {:}".format( tr_class_idx, vs_class_idx)) # Create the training dataset train_ds = multi_ovo.binarize_subset(tr_class_idx, vs_class_idx, dataset) # Extracting the internal classifier classifier_instance = multi_ovo._binary_classifiers[bin_clf_idx] # Setting verbosity level classifier_instance.verbose = multi_ovo.verbose # Training the one-vs-ne classifier classifier_instance.fit(train_ds.X, train_ds.Y) return classifier_instance
bigcode/self-oss-instruct-sc2-concepts
def get2dgridsize(sz, tpb=(8, 8)):
    """Return CUDA grid size for 2d arrays.

    :param sz: input array size (first two dimensions are used)
    :param tpb: (optional) threads per block
    :return: ((blocks_x, blocks_y), tpb) with ceiling-divided block counts
    """
    grid = tuple((dim + block - 1) // block for dim, block in zip(sz, tpb))
    return grid, tpb
bigcode/self-oss-instruct-sc2-concepts
import torch


def pitchVocabularyFmt(X, vocab_col):
    """
    Produce the tensors for training with a pitch-vocabulary encoding.

    Returns the pitch column as a long tensor, and the remaining columns
    (those before and after ``vocab_col``) concatenated as floats.
    """
    pitch = torch.tensor(X[:, vocab_col], dtype=torch.long)
    left = torch.tensor(X[:, :vocab_col], dtype=torch.float)
    right = torch.tensor(X[:, vocab_col + 1:], dtype=torch.float)
    score_feats = torch.cat([left, right], dim=1)
    return pitch, score_feats
bigcode/self-oss-instruct-sc2-concepts
def IsSimulator(target_cpu):
    """Returns whether the |target_cpu| corresponds to a simulator build."""
    is_device_cpu = target_cpu.startswith('arm')
    return not is_device_cpu
bigcode/self-oss-instruct-sc2-concepts
import socket
import struct


def ipv6_to_long(ip):
    """Return the IPv6 address string as an integer.

    >>> ipv6_to_long("2001:db8::1")
    42540766411282592856903984951653826561
    >>> ipv6_to_long("::1")
    1
    """
    # inet_pton yields the 16-byte packed address; unpack it as two
    # big-endian unsigned 64-bit halves and recombine.
    ip_bytes_n = socket.inet_pton(socket.AF_INET6, ip)
    ip_parts = struct.unpack('!QQ', ip_bytes_n)
    return 2**64 * ip_parts[0] + ip_parts[1]
bigcode/self-oss-instruct-sc2-concepts
import functools


def join(*expressions):
    """
    Convenient function for joining many expressions in series
    using ``ObserverExpression.then``.

    Parameters
    ----------
    *expressions : iterable of ObserverExpression

    Returns
    -------
    new_expression : ObserverExpression
        Joined expression.
    """
    def chain(first, second):
        return first.then(second)
    return functools.reduce(chain, expressions)
bigcode/self-oss-instruct-sc2-concepts
def ssh_auth(username, address):
    """Render the user@host part of an ssh target; host only if no user."""
    if username:
        return f'{username}@{address}'
    return f'{address}'
bigcode/self-oss-instruct-sc2-concepts
def round_int(value):
    """Cast the specified value to the nearest integer.

    Floats are rounded first; everything else is cast directly.
    """
    if not isinstance(value, float):
        return int(value)
    return int(round(value))
bigcode/self-oss-instruct-sc2-concepts
def accel_within_limits(v, a, v_range):
    """
    Accelerate the car while clipping to a velocity range.

    Args:
        v (int): starting velocity
        a (int): acceleration
        v_range (tuple): min and max velocity

    Returns:
        (int): velocity, clipped to min/max v_range
    """
    lo, hi = v_range
    return min(max(v + a, lo), hi)
bigcode/self-oss-instruct-sc2-concepts
from typing import Iterable


def check_type(data):
    """
    Check the type of an object.

    Returns False for non-string iterables (dictionaries, lists, ...),
    True otherwise (strings included).
    """
    if isinstance(data, str):
        return True
    return not isinstance(data, Iterable)
bigcode/self-oss-instruct-sc2-concepts
def get_ap_vel(df, time_step, scaling_factor):  # Calculates 'angular persistence', 'velocity', and 'directed velocity'
    """
    Primary function called by "get_chemotaxis_stats" and
    "get_chemotaxis_stats_by_interval". Calculates the
    'Angular_persistence', 'Velocity', and 'Directed_velocity' for each
    timepoint of each unique cell.

    Parameters
    ----------
    df: DataFrame
        Typically supplied by the calling function. Must include columns
        labeled 'Time', 'Experiment_number', 'Cell_line', 'Cell_number',
        'x', and 'y'. 'Time', 'Experiment_number', and 'Cell_number' must
        be series of integers; 'Cell_line' must be a series of strings;
        'x' and 'y' must be series of floats. IMPORTANT: Ensure that 'x'
        and 'y' are in units of pixels.
        NOTE(review): the code below also reads 'x_from_center' and
        'y_from_center' columns not listed here — confirm callers
        provide them.
    time_step: integer
        Typically supplied by the calling function. This value specifies
        the duration of the interval between each timepoint for a cell
        track.
    scaling_factor: float
        Typically supplied by the calling function. Factor for conversion
        of 'x' and 'y' series of 'df' from pixels to real units of
        length. IMPORTANT: If designing a pipeline with other functions
        in this toolbox, ensure that the same real units of length are
        used in all cases (e.g., everything is coverted to microns).

    Returns
    -------
    output: DataFrame
        This DataFrame contains all the original columns with the further
        addition of 'Velocity', 'Angular_persistence', and
        'Directed_velocity' columns.
        NOTE(review): the input ``df`` is mutated in place and returned;
        the first row of each diff-based column will be NaN.
    """
    # Frame-to-frame displacements of position and of the center-relative
    # coordinates.
    diff_df = df[['x', 'y', 'x_from_center', 'y_from_center']].diff()
    # Cosine of the angle between the center-direction and the step
    # direction, via the normalized dot product; negated so movement
    # toward the center scores positively.
    dot_product = df['x_from_center'] * diff_df['x_from_center'] + df['y_from_center'] * diff_df['y_from_center']
    magnitude = (df['x_from_center']**2 + df['y_from_center']**2)**0.5 * (diff_df['x_from_center']**2 + diff_df['y_from_center']**2)**0.5
    df['Angular_persistence'] = dot_product / magnitude * -1
    # Step length converted to real units per unit time.
    df['Velocity'] = (diff_df['x']**2 + diff_df['y']**2)**0.5 * scaling_factor / time_step
    df['Directed_velocity'] = df['Velocity'] * df['Angular_persistence']
    return df
bigcode/self-oss-instruct-sc2-concepts
def _num_tokens_of(rule): """Calculate the total number of tokens in a rule.""" total = len(rule.get("tokens")) for _ in ("prev_classes", "prev_tokens", "next_tokens", "next_classes"): val = rule.get(_) if val: total += len(val) return total
bigcode/self-oss-instruct-sc2-concepts
def sort_dataframe(dataframe, sort_column, order='ascending', nulls_position='last', inplace=True):
    """
    Sort ``dataframe`` by ``sort_column``.

    Optional arguments:
    - order ('ascending' by default, or 'descending'): sort direction.
    - nulls_position ('last' by default, or 'first'): where null (NaN)
      values are placed.
    - inplace (True by default): mutate the existing dataframe, or
      return a new sorted dataframe when False.
    """
    ascending = (order == 'ascending')
    result = dataframe.sort_values(sort_column,
                                   ascending=ascending,
                                   na_position=nulls_position,
                                   inplace=inplace)
    if not inplace:
        return result
bigcode/self-oss-instruct-sc2-concepts
def job_id() -> str:
    """Returns a mock job ID (the all-zero UUID string)."""
    mock_id = "00000000-0000-0000-0000-000000000000"
    return mock_id
bigcode/self-oss-instruct-sc2-concepts
import re def _split_text_by_opening(pattern, text): """ Splits text into parts identified by opening that matches `pattern`. For example, --pattern='\n\nCHAPTER \\d+\n\n' may be used to split text into chapters. """ openings = re.findall(pattern, text) if len(openings) == 0: print(f'\n❗ No text matching pattern "{pattern}". Splitting is not performed.\n') return [] texts = re.split(pattern, text) texts = [d + t for d, t in zip(openings, texts[1:])] return texts
bigcode/self-oss-instruct-sc2-concepts
import json


def format_json(data, default=None):
    """
    Pretty print JSON.

    Arguments:
        data (dict): JSON blob.
        default: optional serializer for objects json can't encode.

    Returns:
        str: Formatted JSON (sorted keys, 2-space indent)
    """
    kwargs = dict(sort_keys=True, indent=2, separators=(",", ": "), default=default)
    return json.dumps(data, **kwargs)
bigcode/self-oss-instruct-sc2-concepts
def oo_split(string, separator=','):
    """Split the input string into a list.

    If the input is already a list it is returned unchanged.
    """
    if not isinstance(string, list):
        return string.split(separator)
    return string
bigcode/self-oss-instruct-sc2-concepts
from functools import reduce
import operator


def MergeDicts(*dicts):
    """Construct a merged dictionary from the given dicts.

    If two dicts define the same key, the key from the dict later in the
    list is chosen.
    """
    # BUG FIX: the original reduced with operator.add over dict.items()
    # views, which raises TypeError on Python 3 (views don't support +).
    # A plain update loop preserves the later-dict-wins semantics.
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
bigcode/self-oss-instruct-sc2-concepts
def get_label_color(status):
    """
    Get the customized color for a status label.

    :param status: The requested status to get a customized color for
    :return: customized color
    :raises KeyError: for an unknown status (matching the original lookup)
    """
    status_colors = {
        'NEW': 'grey',
        'ASSIGNED': 'blue',
        'OPEN': 'orange',
        'FIXED': 'purple',
        'RETEST': 'cyan',
        'REOPENED': 'orange',
        'VERIFIED': 'green',
        'BLOCKED': 'red',
        'CLOSED': 'black',
    }
    return status_colors[status]
bigcode/self-oss-instruct-sc2-concepts
def remove_duplicates(seq):
    """
    Remove duplicates from a list while preserving first-seen order.

    Source of the underlying benchmark:
    http://www.peterbe.com/plog/uniqifiers-benchmark

    Input arguments:
    seq -- list from which we are removing duplicates

    Output:
    List without duplicates.

    Example:
    >>> seq = ['a', 'a', 'b', 'c', 'a']
    >>> remove_duplicates(seq)
    ['a', 'b', 'c']
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
bigcode/self-oss-instruct-sc2-concepts
import torch


def compute_bboxes_from_keypoints(keypoints):
    """
    keypoints: B x 68*2
    return value: B x 4 (t, b, l, r)

    Compute a very rough bounding box approximate from 68 keypoints.
    """
    # Reshape to (B, 68, 2) and transpose so x and y are each (68, B).
    x, y = keypoints.float().view(-1, 68, 2).transpose(0, 2)

    # NOTE(review): indices 8 and 27 presumably follow the standard
    # 68-landmark convention (8 = chin tip, 27 = nose bridge) — confirm
    # against the keypoint detector in use.
    face_height = y[8] - y[27]
    # Extend below the chin and well above the nose bridge to cover the
    # forehead; the 0.2/0.47 factors are empirical margins.
    b = y[8] + face_height * 0.2
    t = y[27] - face_height * 0.47

    # Center the box horizontally and make it square (width == height).
    midpoint_x = (x.min(dim=0)[0] + x.max(dim=0)[0]) / 2
    half_height = (b - t) * 0.5
    l = midpoint_x - half_height
    r = midpoint_x + half_height

    return torch.stack([t, b, l, r], dim=1)
bigcode/self-oss-instruct-sc2-concepts
def write_matlabbatch(template, nii_file, tpm_file, darteltpm_file, outfile):
    """ Complete matlab batch from template.

    Parameters
    ----------
    template: str
        path to template batch to be completed.
    nii_file: list
        the Nifti image(s) to be processed.
    tpm_file: str
        path to the SPM TPM file.
    darteltpm_file: str
        path to the CAT12 template file.
    outfile: str
        path to the generated matlab batch file that can be used to launch
        CAT12 VBM preprocessing.

    Returns 0 on success.
    """
    # Render each image path as a quoted MATLAB entry, one per line.
    nii_file_str = ""
    for i in nii_file:
        nii_file_str += "'{0}' \n".format(i)
    with open(template, "r") as of:
        stream = of.read()
    # NOTE(review): str.format will raise if the template contains literal
    # braces other than these three placeholders — confirm templates are
    # brace-escaped.
    stream = stream.format(anat_file=nii_file_str, tpm_file=tpm_file,
                           darteltpm_file=darteltpm_file)
    with open(outfile, "w") as of:
        of.write(stream)
    return 0
bigcode/self-oss-instruct-sc2-concepts
import html
import re


def reddit_sanitize(text):
    """
    Convert comments in the Reddit API format to actual plain-text likely
    constructed by the individual who posted it. HTML is unescaped, markup
    is removed, and quoted/preformatted lines are removed.
    """
    # Unescape HTML (IE, '&gt;' becomes '>')
    text = html.unescape(text)

    # Remove markdown markup, keeping the enclosed text (group 1).
    enclosed_text_regexes = [
        re.compile(r"\*\*(\S+[^*]*\S+|\S)\*\*"),       # Bold
        re.compile(r"\*(\S+[^*]*\S+|\S)\*"),           # Italic
        re.compile(r"_(\S+[^_]*\S+|\S)_"),             # Underline
        re.compile(r"\~\~(\S+[^\~]*\S+|\S)\~\~"),      # Strikethrough
        re.compile(r"\>\!(\S+[^(!<)]*\S+|\S)\!\<"),    # Spoilers
        re.compile(r"\^(\S+)"),                        # Superscript
        re.compile(r"\[([^\]]*)\]\([^\)]+\)"),         # Links, remove link but keep text.
    ]
    for rgx in enclosed_text_regexes:
        text = re.sub(rgx, r"\1", text)

    # Remove quoted (leading '>') and preformatted (4-space or tab
    # indented) lines entirely.
    quote_filter_pred = lambda line: len(line) <= 0 or line[0] != ">"
    pref_filter_pred = lambda line: ((len(line) <= 4 or line[:4] != "    ") and (len(line) <= 0 or line[0] != "\t"))
    lines = text.split("\n")
    return "\n".join([x for x in lines if quote_filter_pred(x) and pref_filter_pred(x)])
bigcode/self-oss-instruct-sc2-concepts
def config2object(config):
    """
    Convert dictionary into instance allowing access to dictionary keys
    using dot notation (attributes). Nested dicts are converted
    recursively; non-dict values are returned unchanged.
    """
    class ConfigObject(dict):
        """
        Represents configuration options' group, works like a dict
        """
        def __init__(self, *args, **kwargs):
            dict.__init__(self, *args, **kwargs)

        def __getattr__(self, name):
            # Attribute access delegates to the dict entry; missing keys
            # therefore raise KeyError rather than AttributeError.
            return self[name]

        def __setattr__(self, name, val):
            self[name] = val

    if isinstance(config, dict):
        result = ConfigObject()
        for key in config:
            result[key] = config2object(config[key])
        return result
    else:
        # Leaf value (str, int, list, ...): returned as-is.
        return config
bigcode/self-oss-instruct-sc2-concepts
import socket def _get_available_ports(n: int) -> list[int]: """ Get available ports. Parameters ---------- n : int number of ports to get. Returns ------- list[int] Available ports. """ socks: list[socket.socket] = [socket.socket() for _ in range(n)] list(map(lambda sock: sock.bind(("", 0)), socks)) ports: list[int] = [int(sock.getsockname()[1]) for sock in socks] for sock in socks: sock.close() return ports
bigcode/self-oss-instruct-sc2-concepts
def mean(sequence):
    """
    Calculate the arithmetic mean of a list / tuple.
    """
    total = sum(sequence)
    count = len(sequence)
    return total / float(count)
bigcode/self-oss-instruct-sc2-concepts
import textwrap


def proteins_to_fasta(proteins, seqids=[], use_safe_seqid=False, width=50):
    """
    Takes a proteins dictionary and returns a string containing all the
    sequences in FASTA format. Optional parameters are a list of seqids
    to output (seqids) and the line width (width).
    """
    idlist = seqids if seqids else proteins
    entries = []
    for seqid in idlist:
        record = proteins[seqid]
        wrapped_seq = textwrap.fill(record['seq'], width)
        header = record['safe_seqid'] if use_safe_seqid else record['name']
        entries.append(">%s\n%s\n" % (header, wrapped_seq))
    return "".join(entries)
bigcode/self-oss-instruct-sc2-concepts
def lowercase(data):
    """Lowercase text recursively.

    Args:
        data (list, str): Data to lowercase (a string, or a list/tuple or
            dict [of lists...] of strings)

    Returns:
        list, str: Lowercased data
    """
    if isinstance(data, str):
        return data.lower()
    if isinstance(data, (list, tuple)):
        return [lowercase(item) for item in data]
    if isinstance(data, dict):
        return {key: lowercase(value) for key, value in data.items()}
    raise ValueError("Can only lowercase strings or lists of strings")
bigcode/self-oss-instruct-sc2-concepts
def overlay_image(foreground_image, mask, background_image):
    """Blend the foreground onto the background according to a mask.

    :param foreground_image: foreground image points
    :param mask: per-pixel alpha values in [0-255]
    :param background_image: background image points; modified in place
    :returns: image with foreground (where mask > 0) overlaid on the
        background image
    """
    # Normalize the mask to [0, 1] and give it a trailing channel axis
    # so it broadcasts over the RGB channels.
    alpha = (mask / 255).reshape(
        background_image.shape[0], background_image.shape[1], 1)
    blended = (background_image[..., :3] * (1 - alpha)
               + foreground_image[..., :3] * alpha)
    background_image[..., :3] = blended
    return background_image
bigcode/self-oss-instruct-sc2-concepts
def eq_or_in(val, options):
    """Return True if options contains value or if value is equal to options.

    A tuple is treated as a set of alternatives; anything else is
    compared for plain equality.
    """
    if isinstance(options, tuple):
        return val in options
    return val == options
bigcode/self-oss-instruct-sc2-concepts
# NOTE(review): typing.Counter is a deprecated alias of
# collections.Counter since Python 3.9 — consider importing from
# collections directly.
from typing import Counter


def get_bow(tokenized_text):
    """
    Function to generate bow_list and word_freq from a tokenized_text
    -----PARAMETER-----
    tokenized_text should be in the form of [['a'], ['a', 'b'], ['b']] format,
    where the object is a list of survey response, with each survey response
    as a list of word tokens
    -----OUTPUT-----
    The function returns two objects
    bow_list: a list of Counter objects with word frequency of each response
    word_freq: a Counter object that summarizes the word frequency of the input tokenized_text
    """
    bow_list = []
    word_freq = Counter()
    for text in tokenized_text:
        # Per-response bag of words.
        bow = Counter(text)
        # Accumulate corpus-wide token frequencies.
        word_freq.update(text)
        bow_list.append(bow)
    # NOTE(review): prints a summary straight to stdout as a side
    # effect; the backslash line continuation keeps the literal text
    # exactly as written in the source line.
    print(f"This corpus has {len(word_freq.keys())} key words, and the 10 \
most frequent words are: {word_freq.most_common(10)}")
    return bow_list, word_freq
bigcode/self-oss-instruct-sc2-concepts
def is_same_py_file(file_1, file_2):
    """Compare two filenames, treating compiled Python files as equal
    to their source: 'x.pyc'/'x.pyo' matches 'x.py'.
    """
    def _source_name(name):
        # 'x.pyc' / 'x.pyo' -> 'x.py'; anything else is unchanged.
        if name.endswith(('.pyc', '.pyo')):
            return name[:-1]
        return name

    return _source_name(file_1) == _source_name(file_2)
bigcode/self-oss-instruct-sc2-concepts
import torch


def loglikelihood(w, weights=None):
    """
    Calculate the estimated log-likelihood given log-weights.

    :param w: The log weights, corresponding to likelihood
    :type w: torch.Tensor
    :param weights: Optional importance weights applied to the terms.
    :type weights: torch.Tensor
    :return: The log-likelihood
    :rtype: torch.Tensor
    """
    maxw, _ = w.max(-1)

    # Shift by the per-batch maximum for numerical stability
    # (log-sum-exp trick); keep dims aligned for broadcasting.
    shift = maxw.unsqueeze(-1) if maxw.dim() > 0 else maxw
    scaled = torch.exp(w - shift)

    if weights is None:
        inner = scaled.mean(-1).log()
    else:
        inner = (weights * scaled).sum(-1).log()

    return maxw + inner
bigcode/self-oss-instruct-sc2-concepts
def resolve_crop(im, crop):
    """Convert a crop (i.e. slice definition) to only positive values.

    Crop entries may be None (meaning the corresponding image edge) or
    negative (counted from the end, as in Python slicing).

    Unlike the historical version, the caller's nested lists are NOT
    mutated; a fully new structure is returned.

    :param im: image whose ``.shape`` supplies the bounds
    :param crop: two pairs of bounds, e.g. [[a0, a1], [b0, b1]]
    :return: crop with every entry resolved to a non-negative value
    """
    # only works for two dimensions
    assert len(crop) == 2
    resolved = []
    for i in (0, 1):
        assert len(crop[i]) == 2
        pair = []
        for j in (0, 1):
            value = crop[i][j]
            if value is None:
                # None -> 0 for the lower bound, full extent for upper.
                value = j * im.shape[1 - j]
            elif value < 0:
                value += im.shape[1 - j]
            pair.append(value)
        resolved.append(pair)
    return resolved
bigcode/self-oss-instruct-sc2-concepts
def normalize_spaces(s):
    """Collapse any run of whitespace characters into a single space.

    Leading/trailing whitespace disappears as a side effect of the
    split/join round-trip.
    """
    words = s.split()
    return ' '.join(words)
bigcode/self-oss-instruct-sc2-concepts
def checksum(digits):
    """
    Return the checksum of CPF digits.

    References to the algorithm:
    https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo
    https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm
    """
    # Weight each digit by a descending factor starting at len+1.
    top = len(digits) + 1
    total = sum(digit * (top - position)
                for position, digit in enumerate(digits))
    remainder = total % 11
    # A remainder of 0 or 1 maps to a zero check digit.
    return 0 if remainder in (0, 1) else 11 - remainder
bigcode/self-oss-instruct-sc2-concepts
def find_rlc(p_utility, q_utility, r_set, l_set, c_set):
    """
    Proportional controllers for adjusting the resistance and
    capacitance values in the RLC load bank.  The inductor setting is
    passed through unchanged.

    :param p_utility: utility/source active power in watts
    :param q_utility: utility/source reactive power in var
    :param r_set: prior resistor % change
    :param l_set: prior inductor % change
    :param c_set: prior capacitor % change
    :return: (resistor %, inductor %, capacitor %) tuple
    """
    # Only apply a fraction of the computed correction for stability.
    smoothing_factor = 0.50
    cap_gain = 6. / 1300.
    res_gain = 50.5 / 11700.
    new_cap = c_set + (cap_gain * q_utility) * smoothing_factor
    new_res = r_set + (res_gain * p_utility) * smoothing_factor
    return new_res, l_set, new_cap
bigcode/self-oss-instruct-sc2-concepts
def score1(rule, c=0):
    """
    Score a candidate by the rule's confidence: rule support divided by
    (smoothed) body support.

    Parameters:
        rule (dict): rule from rules_dict
        c (int): constant for smoothing

    Returns:
        score (float): candidate score
    """
    numerator = rule["rule_supp"]
    denominator = rule["body_supp"] + c
    return numerator / denominator
bigcode/self-oss-instruct-sc2-concepts
import logging
import json


def extract_english_corpus(json_str, verbose=False):
    """A helper function to extract English corpus from KPTimes dataset in json

    :param json_str: list of json strings, one document per entry
    :param verbose: bool, if logging the process of data processing
    :returns: the articles and keywords for each article
    :rtype: src (list of string), tgt (list of keyword list)
    """
    src = []
    tgt = []
    for idx, line in enumerate(json_str):
        if verbose and idx % 1000 == 0:
            # Lazy %-formatting: the old call passed idx as a format
            # argument with no placeholder in the message, which is a
            # logging formatting error.
            logging.info('processing idx: %d', idx)
        data = json.loads(line)
        src.append(data['abstract'])
        tgt.append(data['keyword'].split(';'))
    return src, tgt
bigcode/self-oss-instruct-sc2-concepts
import re


def humansorted_datasets(l, key=None):
    """Sort a list of datasets naturally (human order) by one of their keys.

    Parameters
    ----------
    l : list
        The list of datasets to be sorted
    key : str (optional)
        The key of the dataset the datasets should be sorted according
        to.  Defaults to 'name'.

    Returns
    -------
    list
        The sorted list of datasets.
    """
    sort_key = key or 'name'

    def natural_key(dataset):
        # Split into digit/non-digit chunks and compare the digit
        # chunks numerically, so e.g. 'a2' sorts before 'a10'.
        chunks = re.split(r'(\d+)', dataset[sort_key])
        chunks[1::2] = map(int, chunks[1::2])
        return chunks

    return sorted(l, key=natural_key)
bigcode/self-oss-instruct-sc2-concepts
def sum_multiples_three_five(number):
    """
    Return the sum of all multiples of 3 or 5 below ``number``.

    :param number: exclusive upper bound; non-positive bounds yield 0
    :return: the sum of every n in [0, number) divisible by 3 or 5
    """
    # Generator + sum replaces the old while-loop/append/list pattern.
    return sum(n for n in range(number) if n % 3 == 0 or n % 5 == 0)
bigcode/self-oss-instruct-sc2-concepts
def asURL(epsg):
    """
    convert EPSG code to OGC URL CRS
    ``http://www.opengis.net/def/crs/EPSG/0/<code>`` notation
    """
    code = int(epsg)
    return f"http://www.opengis.net/def/crs/EPSG/0/{code}"
bigcode/self-oss-instruct-sc2-concepts
import copy


def min_specializations(h, domains, x):
    """Return all minimal specializations of hypothesis ``h`` with
    respect to ``domains`` that are not fulfilled by example ``x``.

    ``h`` is a tuple of feature values where "?" is a wildcard;
    ``domains[i]`` lists the possible values of feature i.
    """
    specializations = []
    for position, feature in enumerate(h):
        if feature == "?":
            # Replace the wildcard with every domain value except the
            # one the example exhibits.
            remaining = copy.deepcopy(domains[position])
            remaining.remove(x[position])
            for value in remaining:
                candidate = list(h)
                candidate[position] = value
                specializations.append(tuple(candidate))
        else:
            candidate = list(h)
            candidate[position] = "T"
            specializations.append(tuple(candidate))
    return specializations
bigcode/self-oss-instruct-sc2-concepts
import random


def random_cell(grid, snake):
    """
    Generate a random position among the free (unoccupied) cells.

    :param grid: the grid; only ``grid.rows`` is consulted.
        NOTE(review): both coordinates draw from ``grid.rows`` — fine
        for a square grid, but confirm if non-square grids can occur.
    :param snake: the snake whose body segments (``.pos``) represent
        occupied cells
    :returns: (x, y) of a free cell
    """
    while True:
        x = random.randrange(grid.rows)
        y = random.randrange(grid.rows)
        # Retry until the cell is not covered by any body segment.
        if not any(segment.pos == (x, y) for segment in snake.body):
            return x, y
bigcode/self-oss-instruct-sc2-concepts
def point_in_polygon(point, polygon):
    """
    Determine whether a [x,y] point is strictly inside a convex polygon
    defined as an ordered list of [x,y] points (ray-casting algorithm).

    :param point: the point to check
    :param polygon: the polygon vertices, in order
    :return: True if point is inside polygon, False otherwise
    """
    x = point[0]
    y = point[1]
    n = len(polygon)
    inside = False

    xints = 0.0
    p1x, p1y = polygon[0]
    # Walk each edge (polygon[i-1], polygon[i]), wrapping back to the
    # first vertex.  The historical loop started at i=0 and processed a
    # degenerate (p0, p0) "edge" that could never toggle the flag; it
    # is skipped here, with identical results.
    for i in range(1, n + 1):
        p2x, p2y = polygon[i % n]
        if y > min(p1y, p2y):
            if y <= max(p1y, p2y):
                if x <= max(p1x, p2x):
                    if p1y != p2y:
                        # x of the edge's intersection with the ray.
                        xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                    if p1x == p2x or x <= xints:
                        inside = not inside
        p1x, p1y = p2x, p2y
    return inside
bigcode/self-oss-instruct-sc2-concepts
def tag(pages, tag):
    """Filter pages down to those carrying the given tag.

    An empty/None tag returns the page list unchanged.
    """
    if not tag:
        return pages
    matching = [page for page in pages if tag in page.tags]
    return matching
bigcode/self-oss-instruct-sc2-concepts
def get_speciesindices(specieslist):
    """
    Create a dictionary assigning an arbitrary index to each of the
    species in the kinetic scheme.

    Parameters
    ----------
    specieslist : list
        a list of all the species in the model

    Returns
    -------
    speciesindices : dict
        maps each species from specieslist to its index (position)
    indices_to_species : dict
        the reverse of speciesindices (index -> species)
    """
    # enumerate + comprehensions replace the manual index counter.
    speciesindices = {species: index
                      for index, species in enumerate(specieslist)}
    indices_to_species = {index: species
                          for species, index in speciesindices.items()}
    return speciesindices, indices_to_species
bigcode/self-oss-instruct-sc2-concepts
def dump_cookies(cookies_list):
    """Serialize cookie objects to a list of plain dicts
    (name / domain / value only).
    """
    return [
        {'name': cookie.name,
         'domain': cookie.domain,
         'value': cookie.value}
        for cookie in cookies_list
    ]
bigcode/self-oss-instruct-sc2-concepts
from typing import Dict, List


def divide_blocks(
        blocks: List[int],
        world_size: int) -> Dict[int, List[int]]:
    """
    Divide the blocks into world_size partitions with a greedy
    largest-first heuristic, and return the block indexes assigned to
    each world rank.

    :param blocks: the blocks; each item is the given block size
    :param world_size: total world size
    :return: a dict mapping world rank -> sorted list of block indexes
    :raises ValueError: if there are fewer blocks than ranks
    """
    if len(blocks) < world_size:
        # ValueError is a subclass of Exception, so callers catching
        # the old bare Exception keep working.
        raise ValueError("do not have enough blocks to divide")

    results: Dict[int, List[int]] = {rank: [] for rank in range(world_size)}
    loads: Dict[int, int] = {rank: 0 for rank in range(world_size)}

    # Largest blocks first (stable sort keeps index order on ties),
    # each assigned to the currently least-loaded rank.
    order = sorted(range(len(blocks)), key=lambda i: blocks[i], reverse=True)
    for index in order:
        # min() is O(k) per item vs the old sorted(...)[0] O(k log k);
        # ties resolve to the smallest rank, as before.
        rank = min(loads, key=loads.get)
        results[rank].append(index)
        loads[rank] += blocks[index]

    return {rank: sorted(indexes) for rank, indexes in results.items()}
bigcode/self-oss-instruct-sc2-concepts
def split_train_test(X, y, test_percentage):
    """
    Randomly split given dataset into training- and testing sets.

    :param X: Design matrix (pandas DataFrame) to split
    :param y: Response vector (pandas Series) to split
    :param test_percentage: Fraction of samples to use as test
    :return: Two tuples of: (train set X, train set y), (test set X, test set y)
    """
    X = X.sample(frac=1)
    y = y.reindex_like(X)
    n_test = round(test_percentage * len(y))
    n_train = len(y) - n_test
    # Split positionally: the historical X[:-n] slicing returned an
    # EMPTY train set whenever n_test was 0.
    return (X[:n_train], y[:n_train]), (X[n_train:], y[n_train:])
bigcode/self-oss-instruct-sc2-concepts
def p1_f_linear(x):
    """DocTest module

    Expected Output Test - don't change or delete these lines

    >>> x = [565, 872, 711, 964, 340, 761, 2, 233, 562, 854]
    >>> print("The minimum is: ",p1_f_linear(x))
    The minimum is:  2
    """
    # Linear scan: track the smallest value seen so far, starting with
    # the first element.
    smallest = x[0]
    for value in x[1:]:
        if value < smallest:
            smallest = value
    return smallest
bigcode/self-oss-instruct-sc2-concepts
def erroCsv(csvFile):
    """
    Rename the csv file with err notation (``x.csv`` -> ``x_err.csv``).

    Only a trailing '.csv' extension is rewritten; the historical
    str.replace() also rewrote '.csv' occurring in the middle of the
    name.  Names without the extension are returned unchanged.

    :param csvFile: input csv file name
    :return: new file name
    """
    if csvFile.endswith('.csv'):
        return csvFile[:-len('.csv')] + '_err.csv'
    return csvFile
bigcode/self-oss-instruct-sc2-concepts
from typing import List def _generate_sharded_filenames(filename: str) -> List[str]: """Generates filenames of the each file in the sharded filepath. Based on github.com/google/revisiting-self-supervised/blob/master/datasets.py. Args: filename: The sharded filepath. Returns: A list of filepaths for each file in the shard. """ base, count = filename.split('@') count = int(count) return ['{}-{:05d}-of-{:05d}'.format(base, i, count) for i in range(count)]
bigcode/self-oss-instruct-sc2-concepts
def drop_columns(tabular, n):
    """drops first n items from each row and returns new tabular data

    >>> drop_columns([[1, 2, 3], [21, 22, 23], [31, 32, 33]], 1)
    [[2, 3], [22, 23], [32, 33]]
    """
    trimmed = []
    for row in tabular:
        trimmed.append(row[n:])
    return trimmed
bigcode/self-oss-instruct-sc2-concepts
def clicked_quality_reward(responses):
    """Total quality and watch time over the clicked responses.

    Args:
        responses: A list of IEvResponse objects

    Returns:
        A two-element list [total_quality, total_watch_time] summed
        over the responses that were clicked (both floats).
    """
    clicked = [response for response in responses if response.clicked]
    # Explicit 0.0 start keeps the float result for empty input.
    total_quality = sum((float(r.quality) for r in clicked), 0.0)
    total_watch = sum((float(r.watch_time) for r in clicked), 0.0)
    return [total_quality, total_watch]
bigcode/self-oss-instruct-sc2-concepts
def get_nim_sum(state: tuple[int, ...]) -> int:
    """
    Get the nim sum (bitwise XOR of all pile sizes) of a position.

    See https://www.archimedes-lab.org/How_to_Solve/Win_at_Nim.html

    :param state: the state of the game
    :return: the nim sum of the current position
    """
    nim_sum = 0
    for pile in state:
        nim_sum = nim_sum ^ pile
    return nim_sum
bigcode/self-oss-instruct-sc2-concepts
def _prepare_shape_for_expand_dims(shape, axes):
    """
    Creates the expanded new shape based on the shape and given axes

    Args:
        shape (tuple): the shape of the tensor
        axes Union(int, tuple(int), list(int)): the axes with dimensions expanded.

    Returns:
        new_shape(tuple): the shape with dimensions expanded.
    """
    new_shape = []
    shape_idx = 0
    new_shape_length = len(shape)
    # Convert to set
    # Bounds are validated against the *expanded* rank
    # (len(shape) + number of inserted axes), not the original rank.
    if isinstance(axes, int):
        new_shape_length += 1
        if axes >= new_shape_length or axes < -new_shape_length:
            raise ValueError(
                f"axis {axes} is out of bounds for tensor of dimension {new_shape_length}")
        axes = {axes}
    elif isinstance(axes, (list, tuple)):
        new_shape_length += len(axes)
        for axis in axes:
            if axis >= new_shape_length or axis < -new_shape_length:
                raise ValueError(
                    f"axis {axis} is out of bounds for tensor of dimension {new_shape_length}")
        # NOTE(review): duplicate axes silently collapse in this set
        # conversion — confirm duplicates should not be an error.
        axes = set(axes)
    else:
        raise TypeError(
            f"only int, tuple and list are allowed for axes, but got {type(axes)}")
    for new_shape_idx in range(new_shape_length):
        # An inserted axis may be referenced by its positive index or
        # by its negative (from-the-end) index; check both forms.
        if new_shape_idx in axes or new_shape_idx - new_shape_length in axes:
            new_shape.append(1)
        else:
            # Non-inserted positions consume the original dims in order.
            new_shape.append(shape[shape_idx])
            shape_idx += 1
    return tuple(new_shape)
bigcode/self-oss-instruct-sc2-concepts