seed
stringlengths
1
14k
source
stringclasses
2 values
from typing import Callable from typing import Union import torch from typing import Dict from typing import List from typing import Tuple from typing import Set from typing import Any def _apply_to_tensors( fn: Callable, container: Union[torch.Tensor, Dict, List, Tuple, Set] ) -> Any: """Recursively apply to all tensor in different kinds of container types.""" def apply(x: Union[torch.Tensor, Dict, List, Tuple, Set]) -> Any: if torch.is_tensor(x): return fn(x) elif isinstance(x, dict): return {key: apply(value) for key, value in x.items()} elif isinstance(x, (list, tuple, set)): return type(x)(apply(el) for el in x) else: return x return apply(container)
bigcode/self-oss-instruct-sc2-concepts
def r10s(factor: float = 1) -> float:
    """Horizontal screw spacing on the mounting rails of a 10-inch half rack.

    :param factor: optional multiplier applied to the base spacing.
    """
    base_spacing = 236.525
    return factor * base_spacing
bigcode/self-oss-instruct-sc2-concepts
def unknown_labels(dim):
    """Labels for the "unknown" basis: always an empty list.

    Parameters
    ----------
    dim : int
        Dimension (accepted for interface symmetry; unused).

    Returns
    -------
    list
        Always empty.
    """
    return list()
bigcode/self-oss-instruct-sc2-concepts
import torch
from typing import Sequence
from typing import cast


def project_point_cloud_to_map(
    xyz_points: torch.Tensor,
    bin_axis: str,
    bins: Sequence[float],
    map_size: int,
    resolution_in_cm: int,
    flip_row_col: bool,
):
    """Bins an input point cloud into a map tensor with the bins equaling the channels.

    Adapted from https://github.com/devendrachaplot/Neural-SLAM.

    # Parameters
    xyz_points : (x,y,z) pointcloud(s) of shape (... x height x width x 3);
        vectorized across the leading `...` dimensions.
    bin_axis : "x", "y", or "z" — the axis binned by `bins` (usually "y",
        the default upwards dimension of the other point_cloud_utils helpers).
    bins : bin boundaries along `bin_axis`, as in `np.digitize`.
    map_size : the non-bin axes are divided by `resolution_in_cm / 100`,
        rounded, and expected to land in [0, map_size - 1].
    resolution_in_cm : edge length in cm of one map grid square.
    flip_row_col : if True the two non-bin axes are swapped in the output.

    # Returns
    Maps of shape (... x map_size x map_size x (len(bins)+1)); `bin_axis`
    is moved to the last index, and the other two axes keep (or reverse,
    under `flip_row_col`) their original order.
    """
    bin_dim = ["x", "y", "z"].index(bin_axis)
    start_shape = xyz_points.shape
    # Collapse all leading batch dims into one "cloud" dim.
    xyz_points = xyz_points.reshape([-1, *start_shape[-3:]])
    num_clouds, h, w, _ = xyz_points.shape
    # Reorder coordinates so the binned axis comes last ("w" of u,v,w).
    if not flip_row_col:
        new_order = [i for i in [0, 1, 2] if i != bin_dim] + [bin_dim]
    else:
        new_order = [i for i in [2, 1, 0] if i != bin_dim] + [bin_dim]
    uvw_points = cast(
        torch.Tensor, torch.stack([xyz_points[..., i] for i in new_order], dim=-1)
    )
    num_bins = len(bins) + 1
    # NaN in the x coordinate marks an invalid point; presumably NaNs occur
    # across all three coords together — TODO confirm upstream.
    isnotnan = ~torch.isnan(xyz_points[..., 0])
    # Quantize u,v into grid cells and bucketize w by `bins`.
    uvw_points_binned: torch.Tensor = torch.cat(
        (
            torch.round(100 * uvw_points[..., :-1] / resolution_in_cm).long(),
            torch.bucketize(
                uvw_points[..., -1:].contiguous(), boundaries=uvw_points.new(bins)
            ),
        ),
        dim=-1,
    )
    # Upper (exclusive) bounds for each of the three binned coordinates.
    maxes = (
        xyz_points.new()
        .long()
        .new([map_size, map_size, num_bins])
        .reshape((1, 1, 1, 3))
    )
    isvalid = torch.logical_and(
        torch.logical_and(
            (uvw_points_binned >= 0).all(-1),
            (uvw_points_binned < maxes).all(-1),
        ),
        isnotnan,
    )
    # Prefix each point's (u,v,bin) triple with its cloud index so all
    # clouds can share one flat bincount.
    uvw_points_binned_with_index_mat = torch.cat(
        (
            torch.repeat_interleave(
                torch.arange(0, num_clouds).to(xyz_points.device), h * w
            ).reshape(-1, 1),
            uvw_points_binned.reshape(-1, 3),
        ),
        dim=1,
    )
    # Route invalid points to flat index 0; they contribute weight 0 below.
    uvw_points_binned_with_index_mat[~isvalid.reshape(-1), :] = 0
    ind = (
        uvw_points_binned_with_index_mat[:, 0] * (map_size * map_size * num_bins)
        + uvw_points_binned_with_index_mat[:, 1] * (map_size * num_bins)
        + uvw_points_binned_with_index_mat[:, 2] * num_bins
        + uvw_points_binned_with_index_mat[:, 3]
    )
    ind[~isvalid.reshape(-1)] = 0
    # Weighted bincount: each valid point adds 1 to its (cloud,u,v,bin) cell.
    count = torch.bincount(
        ind.view(-1),
        isvalid.view(-1).long(),
        minlength=num_clouds * map_size * map_size * num_bins,
    )
    return count.view(*start_shape[:-3], map_size, map_size, num_bins)
bigcode/self-oss-instruct-sc2-concepts
def check_sorted(a):
    """Return True when `a` is in non-decreasing order (empty lists count)."""
    return all(prev <= cur for prev, cur in zip(a, a[1:]))
bigcode/self-oss-instruct-sc2-concepts
def extract_properties_values_from_json(data, keys):
    """Extract the values of the given property keys from JSON-like data.

    Iterates over ``keys`` and collects ``data[key]`` for every key that is
    present, preserving the order of ``keys``.  Missing keys are skipped.

    Example: data = {'verb': 'GET', 'endpoint': 'users'} with
    keys = ('verb', 'endpoint', 'host') yields ('GET', 'users').

    :param dict data: An arbitrary mapping.
    :param tuple|list|set keys: Iterable of `str` property names.
    :returns: Packaged values.
    :rtype: `tuple`
    """
    found = []
    for key in keys:
        if key in data:
            found.append(data[key])
    return tuple(found)
bigcode/self-oss-instruct-sc2-concepts
def tcl_str(string: str = '') -> str:
    """Return `string` wrapped in Tcl braces, padded by single spaces.

    :param string: Python string.
    """
    return ' {{{}}} '.format(string)
bigcode/self-oss-instruct-sc2-concepts
from datetime import datetime


def create_identifier(hint: str = '') -> str:
    """Build a unique-ish name from the current date and time.

    Useful for naming files.  Two identifiers created during the same
    second are equal!  Form: YYYY_MM_DD__hh_mm_ss[_{hint}].

    :return: YYYY_MM_DD__hh_mm_ss_{hint}
    """
    stamp = datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
    if hint:
        return "{}_{}".format(stamp, hint)
    return stamp
bigcode/self-oss-instruct-sc2-concepts
from typing import List


def sieve(n: int) -> List[int]:
    """Sieve of Eratosthenes (http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes).

    :param n: Maximum value to search up to, not included.
    :return: List of primes up to but not including n.
    """
    is_prime = [True] * n
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Start at p*p: smaller multiples were marked by smaller primes.
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
    return [p for p in range(2, n) if is_prime[p]]
bigcode/self-oss-instruct-sc2-concepts
def isValidID(id: str) -> bool:
    """Check for a valid ID: not None and containing no '/'.

    An empty string is deliberately accepted (an ID might be "").
    """
    if id is None:
        return False
    return '/' not in id
bigcode/self-oss-instruct-sc2-concepts
import math


def vershik_kerov_logan_shepp(n):
    """Asymptotic value of ℓn for large n: 2·√n.

    For σ∈Sn, ℓ(σ) is the maximal length of an increasing subsequence;
    ℓn is the average of ℓ(σ) over a uniformly random σ.

    Parameters
    ----------
    n : int
        The n in the Vershik–Kerov / Logan–Shepp formula.

    Returns
    -------
    float
        The asymptotic value of ℓn.

    Raises
    ------
    ValueError
        When n is not integral.
    """
    if int(n) != n:
        raise ValueError("n must be integer")
    return 2 * math.sqrt(n)
bigcode/self-oss-instruct-sc2-concepts
def flip_corner(corner: tuple) -> tuple:
    """Flip a tuple of side designators: N↔W and E↔S.

    :param corner: tuple of side letters ('N', 'E', 'S', 'W')
    :return: flipped tuple; unrecognised entries are dropped
    """
    flip_map = {'N': 'W', 'E': 'S', 'S': 'E', 'W': 'N'}
    return tuple(flip_map[side] for side in corner if side in flip_map)
bigcode/self-oss-instruct-sc2-concepts
def format_import(names):
    """Format an import line from (ignored, name, asname) triples."""
    rendered = []
    for _, name, alias in names:
        rendered.append(name if alias is None else "{} as {}".format(name, alias))
    return "import " + ", ".join(rendered) + "\n"
bigcode/self-oss-instruct-sc2-concepts
def count_leading_spaces(string: str) -> int:
    """Count the ' ' characters before any other character.

    :param string: input string
    :return: number of leading spaces
    """
    without_leading = string.lstrip(" ")
    return len(string) - len(without_leading)
bigcode/self-oss-instruct-sc2-concepts
def replace_special_quotes(html_str: str):
    """Replace typographic quote characters with their HTML named entities.

    :param html_str: text possibly containing curly or angle quotes
    :return: text with each special quote replaced by its entity

    Fixes: the original emitted &obquo;, &usbquo; and &ensquo;, which are
    not HTML entities, and mapped the double low-9 quote to the single
    low-9 entity.
    """
    entities = {
        '“': '&ldquo;',   # left double quotation mark
        '”': '&rdquo;',   # right double quotation mark
        '’': '&rsquo;',   # right single quotation mark
        '‘': '&lsquo;',   # left single quotation mark
        '„': '&bdquo;',   # double low-9 quotation mark
        '‚': '&sbquo;',   # single low-9 quotation mark
        '‹': '&lsaquo;',  # single left-pointing angle quotation mark
        '›': '&rsaquo;',  # single right-pointing angle quotation mark
    }
    for char, entity in entities.items():
        html_str = html_str.replace(char, entity)
    return html_str
bigcode/self-oss-instruct-sc2-concepts
def bash_array(lst):
    """Convert a python list [a, b, c] to a bash array literal "(a b c)"."""
    return '(' + ' '.join(map(str, lst)) + ')'
bigcode/self-oss-instruct-sc2-concepts
import re


def get_length(s):
    """Determine the trailing number of a name of the form ``"foobar%d" % N``.

    :param s: name string possibly ending in digits
    :return: the trailing integer, or 0 when there is no digit suffix

    Fix: the pattern is now a raw string — "[\\d]+$" in a plain literal is
    an invalid escape sequence (SyntaxWarning on modern Python).
    """
    match = re.search(r"\d+$", s)  # at most one match is possible at end-of-string
    if match:
        return int(match.group(0))
    return 0
bigcode/self-oss-instruct-sc2-concepts
from typing import Optional import re def _try_parse_port(port_str: str) -> Optional[int]: """Tries to extract the port number from `port_str`.""" if port_str and re.match(r"^[0-9]{1,5}$", port_str): return int(port_str) return None
bigcode/self-oss-instruct-sc2-concepts
def sum(n):
    """Return the sum of integers between 1 and `n` (inclusive).

    Uses the closed form n*(n+1)/2; the original recursive version hit the
    recursion limit for large `n` and returned None for n < 1 (now 0).

    NOTE(review): this shadows the builtin ``sum``; the name is kept for
    compatibility with existing callers.
    """
    if n < 1:
        return 0
    return n * (n + 1) // 2
bigcode/self-oss-instruct-sc2-concepts
from typing import Optional
import re


def read_commit(version_data: str) -> Optional[str]:
    """Parse the commit string from version data.

    @param version_data: Contents of version file
    @return: commit, or None if not found
    """
    pattern = re.compile('.*Commit: ([^\n\r]*)', re.DOTALL)
    found = pattern.match(version_data)
    return None if found is None else found.group(1)
bigcode/self-oss-instruct-sc2-concepts
def z2lin(array):
    """dB to linear values (for np.array or single number)."""
    exponent = array / 10.0
    return 10 ** exponent
bigcode/self-oss-instruct-sc2-concepts
def model_field_attr(model, model_field, attr):
    """Return attribute `attr` of the field named `model_field` on `model`."""
    field_by_name = {field.name: field for field in model._meta.fields}
    return getattr(field_by_name[model_field], attr)
bigcode/self-oss-instruct-sc2-concepts
def count_models(block):
    """Count models in structure file block.

    :param block: PDBx data block
    :type block: [str]
    :return: number of distinct models in block
    :rtype: int
    """
    atom_obj = block.get_object("atom_site")
    seen = []
    for row in range(atom_obj.row_count):
        model = atom_obj.get_value("pdbx_PDB_model_num", row)
        if model not in seen:
            seen.append(model)
    # Fix: the docstring promised an int but the original returned the list.
    return len(seen)
bigcode/self-oss-instruct-sc2-concepts
def hex_to_bin(txt: str) -> str:
    """Convert a hexadecimal string to a binary string (no '0b' prefix).

    Useful for preprocessing the key and plaintext in different settings.
    Note: leading zero bits are not preserved.
    """
    value = int(txt, 16)
    return bin(value)[2:]
bigcode/self-oss-instruct-sc2-concepts
def clean_lemma(lemma: str, pos: str) -> str:
    """Clean whitespace and special symbols from a lemma.

    Args:
        lemma: Raw token lemma.
        pos: Lemma POS; punctuation lemmas keep their symbols.

    Returns:
        Lowercased lemma without spaces/underscores; for non-punctuation,
        one leading/trailing guillemet and one trailing !?,. are dropped.
    """
    cleaned = lemma.strip().replace(" ", "").replace("_", "").lower()
    if pos == "PUNCT":
        return cleaned
    if cleaned.startswith(("«", "»")):
        cleaned = cleaned[1:]
    if cleaned.endswith(("«", "»")):
        cleaned = cleaned[:-1]
    if cleaned.endswith(("!", "?", ",", ".")):
        cleaned = cleaned[:-1]
    return cleaned
bigcode/self-oss-instruct-sc2-concepts
def to_byte(val):
    """Cast an int (0-255) to a single byte."""
    return int.to_bytes(val, 1, 'little')
bigcode/self-oss-instruct-sc2-concepts
from typing import Tuple
import re


def replace_code(
    begin_delim: str, end_delim: str, content: str, new_code: str
) -> Tuple[str, int]:
    """Replace every span delimited by `begin_delim`..`end_delim` in
    `content` (delimiters included) with `new_code`.

    Returns the new string and the number of replacements made.
    """
    pattern = fr"{re.escape(begin_delim)}([\s\S]*?){re.escape(end_delim)}"
    # Backslashes must be doubled because re interprets the replacement.
    replacement = new_code.replace("\\", "\\\\")
    return re.subn(pattern, replacement, content)
bigcode/self-oss-instruct-sc2-concepts
import requests


def get_root_domains(url, filename):
    """
    Updates root domain file.

    Downloads the document at `url` and overwrites `filename` with the
    response body.

    :param url: URL of the root domains list.
    :param filename: File name to write the list.
    :return: True unconditionally.  NOTE(review): HTTP errors are not
        checked — consider r.raise_for_status() before writing.
    """
    r = requests.get(url)
    with open(filename, 'w') as f:
        f.write(r.text)
    return True
bigcode/self-oss-instruct-sc2-concepts
def create_hf(geom):
    """Create header and footer for different types of geometries.

    Args:
        geom (str): geometry type, e.g., polygone

    Returns:
        tuple(str, str): GeoJSON FeatureCollection header and footer that
        bracket a polygon's coordinate list.

    Raises:
        ValueError: for geometry types other than "polygone".
    """
    if geom == "polygone":
        # NOTE(review): exact whitespace inside these literals could not be
        # recovered from the collapsed source; layout below is best-effort
        # (harmless for JSON, but confirm against the original file).
        header = """
        {
        "type": "FeatureCollection",
        "features": [
        {
        "type": "Feature",
        "geometry": {
        "type": "Polygon",
        "coordinates": [
        """
        footer = """
        ]
        }
        }
        ]
        }"""
    else:
        raise ValueError(f"{geom} is not implemented.")
    return header, footer
bigcode/self-oss-instruct-sc2-concepts
import math


def string_to_array(s):
    """Convert a pipe-separated string to a list of its fields.

    :param s: string like "a|b|c", or NaN (treated as empty).
    :return: list of substrings, or [] for NaN.
    :raises ValueError: for any other input.

    Fixes: error message typo ("string of nan" -> "string or nan"), and
    non-float inputs such as None now raise the documented ValueError
    instead of leaking a TypeError from math.isnan.
    """
    if isinstance(s, str):
        return s.split("|")
    if isinstance(s, float) and math.isnan(s):
        return []
    raise ValueError("Value must be either string or nan")
bigcode/self-oss-instruct-sc2-concepts
import math
import hashlib


def adventcoin_mine(salt, zeros, prob=0.99):
    """Find the smallest counter whose MD5(salt + counter) hex digest starts
    with `zeros` zero characters, or give up past a confidence-based cap.

    The cap is the number of attempts needed to succeed with probability
    `prob`; a prob within 1e-8 of 1 disables the cap entirely.

    :param salt: string to prepend to the counter
    :param zeros: number of leading '0' hex digits to search for
    :param prob: confidence level in (0, 1) used to derive the attempt cap
    :return: the first satisfying positive counter, or 0 when the cap was exceeded
    """
    target_prefix = '0' * zeros
    if 1 - prob > 1e-8:
        per_attempt_miss = 1 - (1 / 16) ** zeros
        limit = int(round(math.log(1 - prob, per_attempt_miss)))
    else:
        limit = 0  # limit == 0 means the search is unbounded
    counter = 0
    while True:
        if counter > limit > 0:
            return 0  # gave up within the requested confidence
        counter += 1
        digest = hashlib.md5((salt + str(counter)).encode('utf8')).hexdigest()
        if digest.startswith(target_prefix):
            return counter
bigcode/self-oss-instruct-sc2-concepts
def _format_cached_grains(cached_grains): """ Returns cached grains with fixed types, like tuples. """ if cached_grains.get("osrelease_info"): osrelease_info = cached_grains["osrelease_info"] if isinstance(osrelease_info, list): cached_grains["osrelease_info"] = tuple(osrelease_info) return cached_grains
bigcode/self-oss-instruct-sc2-concepts
def unfold_fields(lines):
    """Unfold fields that were split over multiple lines.

    A line starting with a space continues the previous field; blank lines
    are dropped.

    Args:
        lines: iterable of strings, one physical line each.

    Returns:
        A list of strings.  Each string represents one field (a name/value
        pair separated by a colon).

    >>> unfold_fields(["foo ", " bar ", " baz ", "biz ", "boz "])
    ['foo  bar  baz ', 'biz ', 'boz ']

    Fix: the original doctest passed a single string (which iterates
    character-by-character) and showed output the code cannot produce.
    """
    fields = []
    for line in lines:
        if line.startswith(" "):
            # NOTE(review): a continuation before any field raises IndexError.
            fields[-1] += line
        elif line.strip():
            fields.append(line)
    return fields
bigcode/self-oss-instruct-sc2-concepts
def analyzer(klass):
    """Return an instance of the CUT with some defaults."""
    return klass(
        start_states=["In Progress"],
        commit_states=["Selected", "Created"],
        end_states=["Done"],
    )
bigcode/self-oss-instruct-sc2-concepts
def map_symbols_to_currencies(currencies):
    """Build a dict mapping each currency's symbol to the currency itself.

    :param list currencies: list of dicts describing currencies
    :return: dictionary of symbol -> currency code ("cc")
    :rtype: dict
    :raises KeyError: when an entry lacks "symbol" or "cc"
    """
    return {entry["symbol"]: entry["cc"] for entry in currencies}
bigcode/self-oss-instruct-sc2-concepts
import yaml


def load_configs(s: str) -> dict:
    """Load config from string."""
    loader = yaml.FullLoader
    return yaml.load(s, Loader=loader)
bigcode/self-oss-instruct-sc2-concepts
def extract_text_body(parsed_email):
    """Extract the "text/plain" content of a parsed email.

    Parameters
    ----------
    parsed_email : email.message.Message
        Parsed email, as returned by download_email.

    Returns
    -------
    str or None
        The text/plain body decoded per its Content-Transfer-Encoding and
        charset, or None when no such part (or no charset) is found.
    """
    content = None
    charset = None
    if not parsed_email.is_multipart():
        content = parsed_email.get_payload(decode=True)
        charset = parsed_email.get_content_charset()
    else:
        # Walk the parts and pick the first non-attachment text/plain one.
        for part in parsed_email.walk():
            disposition = str(part.get_content_disposition())
            if part.get_content_type() == 'text/plain' and 'attachment' not in disposition:
                content = part.get_payload(decode=True)
                charset = part.get_content_charset()
                break
    if content and charset:
        return content.decode(charset)
    return None
bigcode/self-oss-instruct-sc2-concepts
from typing import List


def demand_satisfied(people_after: List) -> List[bool]:
    """Verify each person received the appointment count their cohort demands.

    Scheduling covers a single week: cohort '1x' gets one test, '2x' two,
    and the `None` cohort (incompatible people) none.  People in the `None`
    cohort must not be `assigned`; everyone else must be.

    Args:
        people_after: A roster of people with assigned schedules.

    Returns:
        One boolean per person indicating whether their appointment count
        is correct.
    """
    demands = {'1x': 1, '2x': 2, None: 0}
    results = []
    for person in people_after:
        booked = sum(len(day) for day in person['schedule'].values())
        if person['assigned']:
            ok = booked == demands[person['cohort']] and person['cohort'] is not None
        else:
            ok = booked == 0 and person['cohort'] is None
        results.append(ok)
    return results
bigcode/self-oss-instruct-sc2-concepts
def progress_to_dict(path: str) -> dict:
    """Convert a Delphin progress file into a dict of parallel lists.

    :param path: path to the folder containing progress.txt
    :return: dict with 'simulation_time' (int), 'real_time' (float) and
        'percentage' (float) lists, one entry per data row.

    Fix: the file is now opened with a context manager, so the handle is
    closed even when a row fails to parse.
    """
    with open(path + '/progress.txt', 'r') as file_obj:
        lines = file_obj.readlines()

    progress_dict = {'simulation_time': [],
                     'real_time': [],
                     'percentage': []}
    # The first line is a header; data rows are tab-separated.
    for line in lines[1:]:
        columns = line.split('\t')
        progress_dict['simulation_time'].append(int(columns[0].strip()))
        progress_dict['real_time'].append(float(columns[1].strip()))
        progress_dict['percentage'].append(float(columns[2].strip()))
    return progress_dict
bigcode/self-oss-instruct-sc2-concepts
import torch


def round(tensor, decimal_places):
    """Round floats to the given number of decimal places.

    NOTE: shadows the builtin ``round`` in this module.

    :param tensor: input tensor
    :type tensor: torch.Tensor
    :param decimal_places: number of decimal places
    :type decimal_places: int
    :return: rounded tensor
    :rtype: torch.Tensor
    """
    scale = 10 ** decimal_places
    scaled = torch.round(tensor * scale)
    return scaled / scale
bigcode/self-oss-instruct-sc2-concepts
def split_parts(msg):
    """Split a "key=value" string at the first '=' into (key, value).

    Fix: the original used str.find, which returns -1 when '=' is absent
    and silently produced the garbage pair (msg[:-1], msg); str.partition
    yields (msg, '') in that case.
    """
    key, _, value = msg.partition("=")
    return (key, value)
bigcode/self-oss-instruct-sc2-concepts
import mimetypes


def guess_extension(mime):
    """Shortcut for getting the extension of a mime string (may be None)."""
    mime_type = mime if mime else ""
    return mimetypes.guess_extension(type=mime_type)
bigcode/self-oss-instruct-sc2-concepts
import re


def idify(utext):
    """Make a string ID-friendly (but more unicode-friendly)."""
    slug = re.sub(r'[^\w\s-]', '', utext).strip().lower()
    slug = re.sub(r'[\s-]+', '-', slug)
    # Headers must be non-empty.
    return slug if slug else '_'
bigcode/self-oss-instruct-sc2-concepts
def my_func02(num01, num02):
    """Return the sum of the two arguments.

    :param num01: first number
    :param num02: second number
    :return: the sum of the two numbers
    """
    return num01 + num02
bigcode/self-oss-instruct-sc2-concepts
def map_msa_names(df, msa_lookup):
    """Helper to handle known MSA name changes/inconsistencies.

    :param df: pandas DataFrame (BLS OEWS data) with an 'area' column
    :param msa_lookup: dict containing an 'area_title' mapping of MSA code
        to title
    :return df: the same DataFrame with 'area_title' set from the lookup
    """
    title_map = msa_lookup['area_title']
    df['area_title'] = df['area'].map(title_map)
    return df
bigcode/self-oss-instruct-sc2-concepts
def legend(is_legend_show=True, legend_orient="horizontal", legend_pos="center",
           legend_top='top', legend_selectedmode='multiple', **kwargs):
    """Build an ECharts legend component option dict.

    The legend shows symbol, color and name of series; clicking legends
    toggles the series in the chart.

    :param is_legend_show: whether to show the legend component.
    :param legend_orient: layout orientation, 'horizontal' or 'vertical'.
    :param legend_pos: distance from the container's left side (pixels,
        percentage, or 'left'/'center'/'right').
    :param legend_top: distance from the container's top side (pixels,
        percentage, or 'top'/'middle'/'bottom').
    :param legend_selectedmode: selected-state mode, 'single' or 'multiple'.
    :param kwargs: accepted for interface compatibility; unused.
    :return: legend option dict.
    """
    return {
        "selectedMode": legend_selectedmode,
        "show": is_legend_show,
        "left": legend_pos,
        "top": legend_top,
        "orient": legend_orient,
    }
bigcode/self-oss-instruct-sc2-concepts
def get_average(numbers):
    """
    Args:
        numbers (list): A list of floats.

    Returns:
        float: The arithmetic mean of the values in `numbers`.
    """
    return sum(numbers) / len(numbers)
bigcode/self-oss-instruct-sc2-concepts
def set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv,
                                            list_bus_id_power_hiding_priority=None,
                                            list_bus_id_voltage_hiding_priority=None):
    """Return the lists of hidden power bus ids and hidden voltage bus ids.

    :param num_nodes: number of buses in the grid
    :param Ns: number of observable power measurements in the last time step
    :param Nv: number of observable voltage measurements in the last time step
    :param list_bus_id_power_hiding_priority: bus indices sorted by preferred
        hiding order (index 0 = most likely hidden); defaults to 0..num_nodes-1
    :param list_bus_id_voltage_hiding_priority: same, for voltage measurements
    :return: (hidden power bus ids, hidden voltage bus ids), each sorted ascending
    """
    if list_bus_id_power_hiding_priority is None:
        list_bus_id_power_hiding_priority = list(range(num_nodes))
    if list_bus_id_voltage_hiding_priority is None:
        list_bus_id_voltage_hiding_priority = list(range(num_nodes))

    # Hide the first (num_nodes - Ns) buses of the power priority list and
    # the first (num_nodes - Nv) of the voltage priority list.
    n_hidden_power = max(num_nodes - Ns, 0)
    n_hidden_voltage = max(num_nodes - Nv, 0)
    hidden_power_bus_id_list = sorted(list_bus_id_power_hiding_priority[:n_hidden_power])
    hidden_voltage_bus_id_list = sorted(list_bus_id_voltage_hiding_priority[:n_hidden_voltage])
    return hidden_power_bus_id_list, hidden_voltage_bus_id_list
bigcode/self-oss-instruct-sc2-concepts
def denormalize_m11(x):
    """Inverse of normalize_m11: map [-1, 1] back to [0, 255]."""
    return 127.5 * (x + 1)
bigcode/self-oss-instruct-sc2-concepts
def test_row(dataframe): """ test if dataframe contains at least one row Parameters ---------- dataframe: pandas dataframe Raises ------ ValueError If number of row is smaller than 1, raise ValueError Returns ------- is_valid: boolean True if greater than 1, False if lower than 1 """ is_valid = True if len(dataframe) < 1: is_valid = False raise ValueError("dataframe must has at least one row") return is_valid
bigcode/self-oss-instruct-sc2-concepts
def _get_elem_at_rank(rank, data, n_negative, n_zeros): """Find the value in data augmented with n_zeros for the given rank""" if rank < n_negative: return data[rank] if rank - n_negative < n_zeros: return 0 return data[rank - n_zeros]
bigcode/self-oss-instruct-sc2-concepts
def pythonize_yang_name(name):
    """Convert a YANG name like "interface-name" to "InterfaceName".

    Single-component names are simply capitalized ("interface" -> "Interface").
    """
    if '-' not in name:
        return name.capitalize()
    return ''.join(part.capitalize() for part in name.split('-'))
bigcode/self-oss-instruct-sc2-concepts
import requests


def get_tv_episode_detail(key, id, season, episode, language="en-US"):
    """Get the TV episode details by id from the TMDB API.

    inputs:
        key - TMDB API key.
        id - id of the movie
        season - Season of the tv series (INT)
        episode - Episode number of the series (INT)
        language - locale string (e.g. the runtime 'loc' variable);
            defaults to 'en-US'

    returns:
        status_code - HTTP status code; 200 is success, anything else is an error
        jdata - JSON structure with the tv-episode information, or None on error
    """
    # Episode endpoint with API key and locale baked into the query string.
    url = f"https://api.themoviedb.org/3/tv/{id}/season/{season}/episode/{episode}?api_key={key}&language={language}"
    resp = requests.get(url)
    print(f"StatusCode: {resp.status_code}")
    if resp.status_code == 200:
        jdata = resp.json()
    else:
        jdata = None
    return resp.status_code, jdata
bigcode/self-oss-instruct-sc2-concepts
import torch def _kl_divergence_q_prior_normal(mu, logvar, per_dim=False): """ Returns KL-divergence between the variational posterior $q_{\phi}(z|x)$ and the isotropic Gaussian prior $p(z)$. This forms the 'regularization' part of the ELBO. If the variational posterior is taken to be normal with diagonal covariance. Then: $ D_{KL}(q_{\phi(z|x)}||p(z)) = -1/2 * \sum_j (1 + log \sigma_j^2 - \mu_j^2 - \sigma_j^2) $ """ assert mu.shape == logvar.shape, 'Mean and log-variance must share shape (batch, latent_dim)' batch_size, latent_dim = mu.shape latent_kl = 0.5 * (-1 - logvar + mu.pow(2) + logvar.exp()).mean(dim=0) total_kl = torch.sum(latent_kl) # kl_div = -0.5 * (torch.sum(1 + logvar - mu*mu - torch.exp(logvar))) if per_dim: return total_kl, latent_kl else: return total_kl
bigcode/self-oss-instruct-sc2-concepts
def get_blue_green_from_app(app):
    """Return the app's blue_green object and its color field, when present.

    >>> get_blue_green_from_app({})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': {}})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': {'color': 'blue'}})
    ({'color': 'blue'}, 'blue')
    >>> get_blue_green_from_app({'blue_green': {'color': None}})
    ({'color': None}, None)
    """
    blue_green = app.get('blue_green')
    if not blue_green:
        return None, None
    return blue_green, blue_green.get('color', None)
bigcode/self-oss-instruct-sc2-concepts
def get_last_conv_layer_name(model_keras):
    """Search for the last layer with a 4D output shape (conv-like).

    Args:
        model_keras: A keras model object.

    Returns:
        Name of the layer (str).

    Raises:
        ValueError: when no layer has a 4D output shape.
    """
    # Walk from the output end toward the input.
    for layer in reversed(model_keras.layers):
        if len(layer.output_shape) == 4:
            return layer.name
    raise ValueError("Could not find a convolutional layer (layer with 4D).")
bigcode/self-oss-instruct-sc2-concepts
def flatten_args(args):
    """Produce a command-line string from a dictionary of arguments,
    such as "--name1 value1 --name2 value2".

    Fix: the original called dict.iteritems(), which was removed in
    Python 3 (AttributeError); dict.items() is the Python 3 equivalent.
    """
    return " ".join("%s %s" % (name, value) for name, value in args.items())
bigcode/self-oss-instruct-sc2-concepts
def _decimal_lshift_exact(n, e): """ Given integers n and e, return n * 10**e if it's an integer, else None. The computation is designed to avoid computing large powers of 10 unnecessarily. >>> _decimal_lshift_exact(3, 4) 30000 >>> _decimal_lshift_exact(300, -999999999) # returns None """ if n == 0: return 0 elif e >= 0: return n * 10 ** e else: str_n = str(abs(n)) val_n = len(str_n) - len(str_n.rstrip('0')) return None if val_n < -e else n // 10 ** -e
bigcode/self-oss-instruct-sc2-concepts
def calculate_polynomial_derivative_term(coefficient, variable, order):
    """Calculates the derivative of the nth order term of a polynomial.

    Args:
        coefficient (float): The coefficient of the nth order term.
        variable (float): Value to plug in for the variable.
        order (int): Order n of the term.

    Returns:
        float: n * coefficient * variable**(n-1).  The zeroth-order term
        differentiates to exactly 0.0.

    Raises:
        TypeError: A non-integer was passed as the order.

    Fix: the original claimed the n = 0 edge case was handled by the
    leading factor of `order`, but Python still evaluated variable**(-1),
    raising ZeroDivisionError when variable == 0.
    """
    if type(order) != int:
        raise TypeError('Non-integer order in polynomial term')
    if order == 0:
        # Derivative of a constant — never evaluate variable**(-1).
        return 0.0
    return order * coefficient * variable ** (order - 1)
bigcode/self-oss-instruct-sc2-concepts
def single_line(line, report_errors=True, joiner='+'):
    """Force a string onto a single line, joining physical lines with
    `joiner`, and print a warning when more than one line was present
    (unless `report_errors` is disabled)."""
    pieces = line.strip().splitlines()
    if len(pieces) > 1 and report_errors:
        print('multiline result:', pieces)
    return joiner.join(pieces)
bigcode/self-oss-instruct-sc2-concepts
import unittest


def combined_suites(*test_suites):
    """Combine several unittest suites into one."""
    return unittest.TestSuite(test_suites)
bigcode/self-oss-instruct-sc2-concepts
import unicodedata


def strip_diacritics_2(input_string: str) -> str:
    """Return a copy of `input_string` without diacritics, such that
    strip_diacritics_2('skříň') == 'skrin'.

    Non-ASCII characters are replaced by the base character of their
    canonical decomposition.

    Fixes: the original crashed on non-ASCII characters with an empty
    decomposition (e.g. '€' — IndexError on ''.split()[0]) and on
    compatibility decompositions whose first field is a tag such as
    '<compat>' (ValueError from int(..., 16)); those characters are now
    left unchanged.
    """
    trans_dict = {}
    for char in set(input_string):
        if ord(char) <= 0x7f:
            continue
        fields = unicodedata.decomposition(char).split()
        # Only use decompositions whose first field is a plain code point.
        if fields and not fields[0].startswith('<'):
            trans_dict[char] = int(fields[0], base=16)
    return input_string.translate(str.maketrans(trans_dict))
bigcode/self-oss-instruct-sc2-concepts
def liquidViscosity(T, lVP):
    """liquidViscosity(T, lVP)

    liquid viscosity (centipoise) = 10^(A + B/T + C*T + D*T^2)

    Parameters:
        T: temperature in K
        lVP: regression coefficients A=lVP[0], B=lVP[1], C=lVP[2], D=lVP[3]

    Returns:
        liquid viscosity in centipoise at T
    """
    A, B, C, D = lVP[0], lVP[1], lVP[2], lVP[3]
    exponent = A + B / T + C * T + D * T ** 2
    return 10 ** exponent
bigcode/self-oss-instruct-sc2-concepts
def valid_xml_char_ordinal(c):
    """Filter out characters that are invalid in XML documents.

    The XML standard defines a valid character as:
    Char ::= #x9 | #xA | #xD | [#x20 - #xD7FF] | [#xE000 - #xFFFD] |
             [#x10000 - #x10FFFF]

    Args:
        c: Character to be checked.

    Returns:
        True if the character's codepoint lies in a valid range.
    """
    cp = ord(c)
    if cp in (0x9, 0xA, 0xD):
        return True
    return (
        0x20 <= cp <= 0xD7FF
        or 0xE000 <= cp <= 0xFFFD
        or 0x10000 <= cp <= 0x10FFFF
    )
bigcode/self-oss-instruct-sc2-concepts
def add_vp_vs(df):
    """Add Vp, Vs and Vp_max columns computed from the sonic logs.

    Args:
        df (pandas.DataFrame): input dataframe, MUST CONTAIN `DTCO`
            and `DTSM`.

    Returns:
        pandas.DataFrame: the input dataframe with Vp, Vs and Vp_max added.
    """
    feet_per_metre = 3.281  # converts ft/s to m/s
    for slowness, velocity in (('DTCO', 'Vp'), ('DTSM', 'Vs')):
        df[velocity] = (1000000 / df[slowness]) / feet_per_metre
    df['Vp_max'] = df['Vp'].max() + 200
    return df
bigcode/self-oss-instruct-sc2-concepts
def calculate_simpson_index(set1, set2):
    """Calculates the Simpson (overlap) index of two sets.

    The index is |set1 ∩ set2| / min(|set1|, |set2|).

    Returns 0.0 when either set is empty, where the index is undefined
    (instead of raising ZeroDivisionError).
    """
    size_smaller_set = min(len(set1), len(set2))
    if size_smaller_set == 0:
        return 0.0
    return len(set1 & set2) / size_smaller_set
bigcode/self-oss-instruct-sc2-concepts
def return_label(file_path):
    """
    Extract label from filename

    Inputs:
    ---------------
    file_path: Source of raw wav signal
               str
    Outputs:
    ---------------
    y: target as string ('silence', 'singing' or 'speaking')
    """
    lowered = file_path.lower()  # lower-case once, test twice
    if "silence" in lowered:
        return 'silence'
    if "song" in lowered:
        return 'singing'
    return 'speaking'
bigcode/self-oss-instruct-sc2-concepts
from typing import List


def get_sum_of_elements(lst: List) -> int:
    """Return the sum of all elements of *lst* (0 for an empty list)."""
    total = 0
    for element in lst:
        total += element
    return total
bigcode/self-oss-instruct-sc2-concepts
from typing import Optional


def get_location_id(data: dict) -> Optional[str]:
    """
    Returns location_id from a data dictionary, or defaults to None

    :param dict data: The event data
    :return str|None: A string containing the location id, or None
    """
    node = data
    try:
        for key in ("event", "data", "new", "location_id"):
            node = node[key]
    except (TypeError, KeyError):
        # Missing key anywhere along the path, or a non-subscriptable
        # value (e.g. data is None) -> no location id.
        return None
    return node
bigcode/self-oss-instruct-sc2-concepts
import re


def get_cell_barcode(record, cell_barcode_pattern):
    """Return the cell barcode embedded in the record name.

    Parameters
    ----------
    record : screed record
        screed record containing the cell barcode under 'name'
    cell_barcode_pattern : regex pattern
        pattern with at least two groups; group 2 of the first match is
        the barcode

    Returns
    -------
    barcode : str or None
        The barcode from the name, or None when no match is found.
    """
    matches = re.findall(cell_barcode_pattern, record['name'])
    if not matches:
        return None
    return matches[0][1]
bigcode/self-oss-instruct-sc2-concepts
from typing import List


def spooler_pids() -> List[int]:
    """Returns a list of all spooler processes IDs.

    No spoolers are tracked, so the list is currently always empty.
    """
    return list()
bigcode/self-oss-instruct-sc2-concepts
import base64


def encode_file_to_base64(fpath_in, prefix):
    """
    encode_file_to_base64: gets base64 encoding of file

    Args:
        fpath_in (str): path to file to encode
        prefix (str): file data for encoding (e.g. 'data:image/png;base64,')

    Returns:
        base64 encoding of file, with *prefix* prepended
    """
    with open(fpath_in, 'rb') as file_obj:
        raw = file_obj.read()
    encoded = base64.b64encode(raw).decode('utf-8')
    return prefix + encoded
bigcode/self-oss-instruct-sc2-concepts
def le_bytes_to_int(as_bytes: bytes, signed: bool) -> int:
    """Decode a little-endian byte array into an integer.

    :param as_bytes: A little endian encoded byte array integer.
    :param signed: Flag indicating whether integer is signed
        (two's-complement).
    """
    return int.from_bytes(as_bytes, 'little', signed=signed)
bigcode/self-oss-instruct-sc2-concepts
import string


def base26(x, _alphabet=string.ascii_uppercase):
    """Return positive ``int`` ``x`` as string in bijective base26 notation.

    >>> [base26(i) for i in [0, 1, 2, 26, 27, 28, 702, 703, 704]]
    ['', 'A', 'B', 'Z', 'AA', 'AB', 'ZZ', 'AAA', 'AAB']

    >>> base26(344799)  # 19 * 26**3 + 16 * 26**2 + 1 * 26**1 + 13 * 26**0
    'SPAM'

    >>> base26(256)
    'IV'
    """
    digits = []
    while x:
        # Bijective numeration: shift by one so there is no zero digit.
        x, rem = divmod(x - 1, 26)
        digits.append(_alphabet[rem])
    digits.reverse()
    return ''.join(digits)
bigcode/self-oss-instruct-sc2-concepts
def pgcd(a, b):
    """Return the greatest common divisor of the integers ``a`` and ``b``.

    Arguments:
        a (int): an integer
        b (int): an integer

    Raises:
        ZeroDivisionError: when both ``a`` and ``b`` are zero, since the
            GCD is undefined in that case.
    """
    # Iterative Euclid on absolute values (GCD is sign-insensitive).
    a, b = abs(a), abs(b)
    while b:
        a, b = b, a % b
    if a == 0:
        raise ZeroDivisionError(
            "Le PGCD de deux nombres nuls n'existe pas")
    return a
bigcode/self-oss-instruct-sc2-concepts
from typing import Dict
from typing import Tuple
from typing import Set


def prepare_senses_index_for_search(senses_dict: Dict[str, Dict[str, Tuple[tuple, Tuple[int, int]]]]) -> \
        Dict[str, Set[str]]:
    """ Build an inverted index from main-phrase words to RuWordNet sense IDs.

    Scanning every RuWordNet term for each input sentence would be O(n) per
    sentence. Instead, candidate senses are selected in two steps:
    1) an O(1) hash-table lookup of single words picks a small subset of
       potentially matching senses;
    2) a full linear search runs only over that subset.
    This function builds the hash table for step 1.

    :param senses_dict: a dictionary with inflected terms
        (see `ruwordnet_parsing.load_and_inflect_senses` function).
    :return: dictionary mapping a single word to the set of sense IDs whose
        terms contain that word in their main span.
    """
    search_index: Dict[str, Set[str]] = dict()
    for sense_id, variants in senses_dict.items():
        for tokens, (span_start, span_end) in variants.values():
            for token in tokens[span_start:span_end]:
                # Only alphanumeric tokens are useful index keys.
                if token.isalnum():
                    search_index.setdefault(token, set()).add(sense_id)
    return search_index
bigcode/self-oss-instruct-sc2-concepts
from typing import Sequence


def to_sequence(obj):
    """Wrap *obj* in a sequence unless it already is one.

    ``None`` becomes the empty tuple, strings and non-sequence objects
    become a 1-tuple, and any other sequence is returned unchanged
    (identity preserved).

    Examples
    --------
    >>> to_sequence(None)
    ()
    >>> to_sequence(1)
    (1,)
    >>> to_sequence('str')
    ('str',)
    >>> x = [0, 1, 2]
    >>> to_sequence(x) is x
    True
    """
    if obj is None:
        return ()
    # Strings are sequences but are treated as scalars here.
    if isinstance(obj, Sequence) and not isinstance(obj, str):
        return obj
    return (obj,)
bigcode/self-oss-instruct-sc2-concepts
def select(t, *columns):
    """
    Select columns from table

    >>> t = Symbol('t', 'var * {x: int, y: int, z: int}')
    >>> select(t, t.x, t.z)
    t[['x', 'z']]
    """
    names = [column._name for column in columns]
    return t[names]
bigcode/self-oss-instruct-sc2-concepts
from typing import Dict import pickle def _from_checkpoint( fname: str='checkpoint.pkl') -> Dict: """ Load a checkpoint file """ with open(fname, 'rb') as f: checkpoint = pickle.load(f) return checkpoint
bigcode/self-oss-instruct-sc2-concepts
from typing import List from typing import Dict from typing import Any def _transform_dto_list_to_list_of_dicts(dto_list) -> List[Dict[str, Any]]: """ Given a list of DTO objects, this function returns a list of dicts, that can be passed to jsonify function. """ return [vars(dto_obj) for dto_obj in dto_list]
bigcode/self-oss-instruct-sc2-concepts
def join_rows(rows, joiner=' '):
    """
    Merge a series of rows into one row, gluing the inner edge cells
    together with *joiner*. By default joins with a single space
    character, but new-line, empty string, or anything else can be
    given via the 'joiner' kwarg.
    """
    rows = list(rows)
    merged = list(rows[0])
    for row in rows[1:]:
        if not row:
            row = ['']
        # Glue this row's first cell onto the previous row's last cell.
        merged[-1] += "%s%s" % (joiner, row[0])
        merged.extend(row[1:])
    return merged
bigcode/self-oss-instruct-sc2-concepts
def parse_float(val):
    """Parse *val* as a float; the placeholder '--' is treated as 0."""
    return 0 if val == '--' else float(val)
bigcode/self-oss-instruct-sc2-concepts
def running_mean(l, N):
    """Return the running mean of list ``l`` with a window of ``N`` items.

    The first ``N`` entries are averaged over all elements seen so far
    (the window grows from 1 to N); from index ``N`` onward a fixed
    N-wide sliding window is used. The larger ``N`` is, the smoother
    the resulting curve.
    """
    acc = 0  # running window sum; renamed so the builtin `sum` is not shadowed
    result = [0] * len(l)
    for i in range(N):
        acc += l[i]
        result[i] = acc / (i + 1)
    for i in range(N, len(l)):
        # Slide the window: drop the element leaving, add the one entering.
        acc += l[i] - l[i - N]
        result[i] = acc / N
    return result
bigcode/self-oss-instruct-sc2-concepts
def get_outbreaks(flowmat, incidence, R0=2.5, asymf=10, attenuate=1.0):
    """
    Calculate the probabilities of outbreak for all regions.

    :param flowmat: Arriving passengers row -> column
    :param incidence: fraction of infectious in the populations
    :param R0: Basic reproduction number
    :param asymf: how many asymptomatics per reported case
    :param attenuate: Attenuation factor for flow
    :return: per-region outbreak probabilities
    """
    # Expected infectious arrivals per region, adjusted by incidence.
    arrivals = (flowmat.T * attenuate) @ incidence
    # Probability that at least one introduction sparks an outbreak.
    return 1 - (1 / R0) ** (arrivals * 8 * asymf)
bigcode/self-oss-instruct-sc2-concepts
def bin_to_hex(x):
    """Convert a binary digit string to hex, zero-padded to 8 digits."""
    return format(int(x, 2), 'x').zfill(8)
bigcode/self-oss-instruct-sc2-concepts
import re


def process_tweets(text):
    """Remove mentions, urls, and html reference characters from a string
    using regular expressions."""
    mention_or_url = re.compile("(\@|https:\/\/)\S+")
    html_entity = re.compile(r"&[a-z]+;")
    return html_entity.sub("", mention_or_url.sub("", text))
bigcode/self-oss-instruct-sc2-concepts
def f90bool(s):
    """Convert string repr of Fortran logical to Python logical."""
    assert type(s) == str
    try:
        # Fortran logicals may be dotted ('.true.') or bare ('t'/'T').
        key = s[1] if s.startswith('.') else s[0]
    except IndexError:
        raise ValueError('{0} is not a valid logical constant.'.format(s))
    key = key.lower()
    if key == 't':
        return True
    if key == 'f':
        return False
    raise ValueError('{0} is not a valid logical constant.'.format(s))
bigcode/self-oss-instruct-sc2-concepts
from functools import cmp_to_key


def argsort(mylist, comp=None):
    """Returns the indices that sort a list.

    Parameters
    ----------
    mylist : list of objects
        List to sort.
    comp : function, optional
        A comparison function used to compare two objects in the list.
        Defaults to None (natural ordering).

    Returns
    -------
    list of int
        The permutation that sorts the list.
    """
    # Based on https://stackoverflow.com/questions/3382352/equivalent-of-numpy-argsort-in-basic-python
    if comp is None:
        return sorted(range(len(mylist)), key=mylist.__getitem__)
    # Wrap comp so it compares the list's *elements*; passing comp straight
    # to cmp_to_key would make it compare the integer indices instead.
    return sorted(range(len(mylist)),
                  key=cmp_to_key(lambda i, j: comp(mylist[i], mylist[j])))
bigcode/self-oss-instruct-sc2-concepts
from typing import Optional
import torch


def file2ckpt(path: str, device: Optional[str] = None) -> dict:
    """ Load a ckpt file into a dictionary to restart a past simulation.
    It is a thin wrapper around torch.load.

    Args:
        path: A string specifying the location of the ckpt file (required)
        device: A string, either "cuda" or "cpu", forcing the device the
            tensors are loaded onto (optional). Useful when a model trained
            on a GPU machine needs to be loaded on a CPU machine or
            vice versa.

    Examples:
        >>> # to load a ckpt of a model which was trained on GPU into a CPU machine.
        >>> ckpt = file2ckpt(path="pretrained_GPU_model.pt", device="cpu")
        >>> vae = CompositionalVae(params=ckpt.get("params"))
    """
    if device is None:
        return torch.load(path)
    if device == 'cuda':
        return torch.load(path, map_location="cuda:0")
    if device == 'cpu':
        return torch.load(path, map_location=torch.device('cpu'))
    raise Exception("device is not recognized")
bigcode/self-oss-instruct-sc2-concepts
from typing import Sequence
from typing import Sized


def are_none(sequences: Sequence[Sized]) -> bool:
    """ Return True iff *sequences* is empty or every entry is None. """
    if not sequences:
        return True
    for seq in sequences:
        if seq is not None:
            return False
    return True
bigcode/self-oss-instruct-sc2-concepts
def concat(str_one, str_two):
    """
    Returns the concatenation of 2 strings. A string with a null (or
    otherwise falsy) value is treated as an empty string.
    """
    return (str_one or "") + (str_two or "")
bigcode/self-oss-instruct-sc2-concepts
from shutil import which


def is_tool(name):
    """Check whether `name` is on PATH and marked as executable."""
    found = which(name)
    return found is not None
bigcode/self-oss-instruct-sc2-concepts
import functools


def cached(func):
    """Decorator to cache the result of a function call."""
    func.cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Keyword arguments make the key a (args, frozenset) pair so
        # unhashable orderings don't matter; positional-only calls keep
        # the plain args tuple.
        key = (args, frozenset(kwargs.items())) if kwargs else args
        try:
            return func.cache[key]
        except KeyError:
            result = func.cache[key] = func(*args, **kwargs)
            return result
    return wrapper
bigcode/self-oss-instruct-sc2-concepts
import re from datetime import datetime def _get_iso_date(date_string: str) -> str: """ convert date from the form 1/22/2021 13:28:27 to iso format """ regex = r'\d{1,2}/\d{1,2}/\d{4} \d{1,2}:\d{1,2}:\d{1,2}' found_list = re.findall(regex, date_string) if found_list: date_value = datetime.strptime(date_string, '%m/%d/%Y %H:%M:%S') return date_value.isoformat() return date_string
bigcode/self-oss-instruct-sc2-concepts
def remove_erroneous_blocks(blocks, delta_time=2.0, n_blocks=3): """ Remove sessions with erroneous data due to a NeuroPsy Research App malfunction. The error causes block data to be duplicated and the values for df1 & df2 multiplied again by 100. The duplicated blocks are identified by comparing their time stamps to the previous block (less than 2 seconds difference). If the error caused the session to end early, the whole session is removed. NeuroPsyResearchApp issue #1. :param pandas.DataFrame blocks: Data about blocks. :param float delta_time: Threshold in seconds for which a consecutive block in a session is considered invalid if it was completed within this period after the previous. Default is 2.0 seconds. :param int n_blocks: Required number of blocks per session. If a session doesn't have this many blocks, it gets removed. :returns: Cleaned block data. Number of errors found. List of sessions that were removed as a consequence. :rtype: tuple[pandas.DataFrame, int, list] """ # Identify duplicated blocks. Consecutive time stamps are usually less than 2 seconds apart. mask = blocks.groupby(['session_uid'])['time'].diff() < delta_time try: n_errors = mask.value_counts()[True] except KeyError: n_errors = 0 blocks = blocks.loc[~mask, :] # Now, after removal of erroneous data a session might not have all 3 blocks we expect. Exclude whole session. invalid_sessions = blocks['session_uid'].value_counts() != n_blocks invalid_sessions = invalid_sessions.loc[invalid_sessions].index.to_list() blocks = blocks.loc[~blocks['session_uid'].isin(invalid_sessions), :] return blocks, n_errors, invalid_sessions
bigcode/self-oss-instruct-sc2-concepts
def get_sitemap(app, excludes=("/", "/static/<path:filename>")):
    """Returns a sitemap for the given application.

    Args:
        app (flask.Flask): Application to be scanned.
        excludes (tuple): Tuple of endpoint rules to be hidden.

    Returns:
        list: dicts with "url" and a comma-joined "methods" string,
        sorted by url. Example:

            [{"url": "/username", "methods": "GET,POST"}]
    """
    endpoints = [
        {"url": str(rule), "methods": ",".join(rule.methods)}
        for rule in app.url_map.iter_rules()
        if str(rule) not in excludes
    ]
    return sorted(endpoints, key=lambda endpoint: endpoint["url"])
bigcode/self-oss-instruct-sc2-concepts
def estimate_infectious_rate_constant(events, t_start, t_end, kernel_integral,
                                      count_events=None):
    """
    Estimate a constant infectious rate for the given events on an interval.

    :param events: array of event tuples containing (event_time, follower_cnt)
    :param t_start: time interval start
    :param t_end: time interval end
    :param kernel_integral: integral function of kernel function
    :param count_events: count of observed events in interval (used for time
        window approach); defaults to len(events)
    :return: estimated value for infectious rate
    """
    total_integral = sum(
        followers * kernel_integral(t_start - when, t_end - when)
        for when, followers in events
    )
    numerator = len(events) if count_events is None else count_events
    return numerator / total_integral
bigcode/self-oss-instruct-sc2-concepts
def survey_media(instance, filename):
    """Return an upload path for survey media."""
    survey = instance.survey
    if not survey.id:
        # Persist the survey first so it has a primary key for the path.
        survey.save()
    return 'survey/{0}/{1}'.format(survey.id, filename)
bigcode/self-oss-instruct-sc2-concepts
import requests


def check_main_service_healthcheck(SVC_URL):
    """ Check the main service url health. Returns True or False based on
    the HTTP response code (True only for 200)."""
    try:
        response = requests.get(SVC_URL + '/NexTrip')
    except Exception:
        # Network failure, DNS error, timeout, ... -> unhealthy.
        return False
    return response.status_code == 200
bigcode/self-oss-instruct-sc2-concepts
from textwrap import dedent
import inspect


def get_func_code(f):
    """Get the code of function f without extra indents"""
    source = inspect.getsource(f)
    return dedent(source)
bigcode/self-oss-instruct-sc2-concepts