seed
stringlengths
1
14k
source
stringclasses
2 values
def get_list_index(str_list, selection):
    """Return the index of ``selection`` in ``str_list``.

    The comparison is case insensitive and any newline characters in the
    list entries are ignored.

    Parameters:
        str_list (list of str): candidate strings
        selection (str): string to locate

    Returns:
        int: index of the selected entry

    Raises:
        ValueError: if the selection cannot be found in the list
    """
    # Normalize entries: strip EOLs and lower-case for comparison.
    # (Removed dead commented-out second pass and the docstring's
    # reference to a nonexistent `default` parameter.)
    normalized = [entry.replace('\n', '').lower() for entry in str_list]
    return normalized.index(selection.lower())
bigcode/self-oss-instruct-sc2-concepts
import re def _clean_maxspeed(value, convert_mph=True): """ Clean a maxspeed string and convert mph to kph if necessary. Parameters ---------- value : string an OSM way maxspeed value convert_mph : bool if True, convert mph to kph Returns ------- value_clean : string """ MPH_TO_KPH = 1.60934 pattern = re.compile(r"[^\d\.,;]") try: # strip out everything but numbers, periods, commas, semicolons value_clean = float(re.sub(pattern, "", value).replace(",", ".")) if convert_mph and "mph" in value.lower(): value_clean = value_clean * MPH_TO_KPH return value_clean except ValueError: return None
bigcode/self-oss-instruct-sc2-concepts
def equalContents(arr1, arr2) -> bool:
    """Return True when arr1 and arr2 contain the same set of unique elements."""
    return set(arr1) == set(arr2)
bigcode/self-oss-instruct-sc2-concepts
def colorbar_extension(colour_min, colour_max, data_min, data_max):
    """Decide the `extend` keyword for a `matplotlib.pyplot.colorbar` call.

    Compares the data range against the current colour bar limits and
    reports whether the data lies inside, outside, or partially beyond them.

    Parameters
    ----------
    colour_min, colour_max : float
        Minimum and maximum value of the current colour bar limits.
    data_min, data_max : float
        Minimum and maximum value of the data limits.

    Returns
    -------
    extension : str
        'both', 'min', 'max' or 'neither' depending on which side(s) of the
        colour limits the data range exceeds.
    """
    exceeds_lower = data_min < colour_min
    exceeds_upper = data_max > colour_max
    if exceeds_lower and exceeds_upper:
        return 'both'
    if exceeds_lower:
        return 'min'
    if exceeds_upper:
        return 'max'
    return 'neither'
bigcode/self-oss-instruct-sc2-concepts
def publish_changes(isamAppliance, check_mode=False, force=False):
    """Publish configuration changes"""
    # In check mode just report that a change would occur without applying it.
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_put("Publishing configuration changes", "/docker/publish", {})
bigcode/self-oss-instruct-sc2-concepts
def toposort(graph, start):
    """Standard topological sort of graph from start nodes.

    Vertices are represented by integer ids.

    Args:
      graph: graph represented by dictionary keyed by vertex id, each
        value being a list of the connected vertex ids.
      start: list of starting vertices' ids for initializing topological
        sort (consumed in place).

    Returns:
      list of vertex ids giving a topological sort of vertices.
    """
    visited = set()
    chain = []      # current DFS chain of vertices
    finished = []   # vertices whose subtrees are fully explored
    pending = start
    while pending:
        vertex = pending.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        pending.extend(graph[vertex])
        # Unwind the chain back to an ancestor of this vertex.
        while chain and vertex not in graph[chain[-1]]:
            finished.append(chain.pop())
        chain.append(vertex)
    return chain + finished[::-1]
bigcode/self-oss-instruct-sc2-concepts
from typing import List
from typing import Tuple
import csv
import json


def load_tsv_data(path: str) -> Tuple[List[str], List[List[str]], List[str]]:
    """Load an arbitrary tsv file shaped like (id, dialogue, summary) with a header.

    Each `dialogue` cell should be a dumped JSON string holding a list of
    utterances, e.g. '["안녕", "잘가"]'.

    Args:
        path: path of tsv file

    Returns:
        tuple of (ids, dialogues, summaries)
    """
    ids: List[str] = []
    dialogues: List[List[str]] = []
    summaries: List[str] = []
    with open(path) as f:
        reader = csv.DictReader(f, delimiter="\t")
        for record in reader:
            ids.append(record["id"])
            dialogues.append(json.loads(record["dialogue"]))
            summaries.append(record.get("summary"))
    return ids, dialogues, summaries
bigcode/self-oss-instruct-sc2-concepts
def search(lst: list, target: int):
    """Return the sorted indices of every occurrence of target in a sorted list.

    Args:
        lst (list): sorted list of elements
        target (int): element to find

    Returns:
        list: sorted list of indices; empty if target is not present.
    """
    left, right = 0, len(lst) - 1
    hit = -1
    # Binary search for any one occurrence.
    while left <= right:
        mid = (left + right) // 2
        if lst[mid] == target:
            hit = mid
            break
        if lst[mid] < target:
            left = mid + 1
        else:
            right = mid - 1
    if hit == -1:
        # Bug fix: the original indexed index[0] here and raised IndexError
        # whenever the target was absent; it could also record duplicate
        # indices when the bisection visited the target more than once.
        return []
    index = [hit]
    # Expand to neighbouring equal elements on the left...
    i = hit - 1
    while i >= 0 and lst[i] == target:
        index.append(i)
        i -= 1
    # ...and on the right (the list is sorted, so stop at first mismatch).
    i = hit + 1
    while i < len(lst) and lst[i] == target:
        index.append(i)
        i += 1
    return sorted(index)
bigcode/self-oss-instruct-sc2-concepts
def make_reverse_dict(in_dict, warn=True):
    """Build a reverse dictionary from a cluster dictionary.

    Parameters
    ----------
    in_dict : dict(int:[int,])
        A dictionary of clusters. Each cluster is a source index and
        the list of other sources in the cluster.
    warn : bool
        Print a message when a source appears in more than one cluster.

    Returns
    -------
    out_dict : dict(int:int)
        A single valued dictionary pointing from source index to cluster
        key for each source in a cluster. Note that the key does not
        point to itself.
    """
    out_dict = {}
    for cluster_key, members in in_dict.items():
        for member in members:
            if member in out_dict and warn:
                print("Dictionary collision %i" % member)
            out_dict[member] = cluster_key
    return out_dict
bigcode/self-oss-instruct-sc2-concepts
def sum_total_emissions(emissions, areas, mask):
    """Sum total emissions across the region of interest.

    Arguments:
        emissions : xarray data array for emissions across inversion domain
        areas     : xarray data array for grid-cell areas across inversion domain
        mask      : xarray data array binary mask for the region of interest

    Returns:
        Total emissions in Tg/y
    """
    s_per_d = 86400
    d_per_y = 365
    tg_per_kg = 1e-9
    # Mask out cells outside the region, weight by cell area (kg/s),
    # then convert the annual total from kg to Tg.
    masked_flux = emissions * areas * mask
    total = masked_flux.sum() * s_per_d * d_per_y * tg_per_kg
    return float(total)
bigcode/self-oss-instruct-sc2-concepts
def upper_power_of_two(value) -> int:
    """Return the smallest power of two that is >= *value*."""
    power = 1
    while power < value:
        power *= 2
    return power
bigcode/self-oss-instruct-sc2-concepts
from datetime import datetime


def log(message, when=None):
    """Log a message with a timestamp.

    :param message: Message to print.
    :param when: datetime of when the message occurred. Defaults to the
        present time.
    :return: True
    """
    if when is None:
        when = datetime.now()
    print('{0}: {1}'.format(when, message))
    return True
bigcode/self-oss-instruct-sc2-concepts
def tkeo(x):
    """Calculate the TKEO of a given recording by using 2 samples.

    github.com/lvanderlinden/OnsetDetective/blob/master/OnsetDetective/tkeo.py

    Parameters
    ----------
    x : array_like
        Row vector of data.

    Returns
    -------
    a_tkeo : array_like
        Row vector containing the tkeo per sample.
    """
    # Square each interior sample and subtract the product of its
    # immediate neighbours: x[n]^2 - x[n+1]*x[n-1].
    squared_center = x[1:-1] * x[1:-1]
    neighbour_product = x[2:] * x[:-2]
    return squared_center - neighbour_product
bigcode/self-oss-instruct-sc2-concepts
def _get_winner(sgf_game): """Reads the games winner from an sgf. Args: sgf_game: from bytes parsed sgf Returns: 1 if BLACK won -1 if WHITE won 0 if it was a DRAW """ sgf_winner = sgf_game.get_winner() if sgf_winner == 'b': winner = 1 elif sgf_winner == 'w': winner = -1 else: winner = 0 return winner
bigcode/self-oss-instruct-sc2-concepts
def GetPlatformArchiveName(platform):
    """Get the basename of an archive given a platform string.

    Args:
      platform: One of ('win', 'mac', 'linux').

    Returns:
      The basename of the sdk archive for that platform.
    """
    return 'naclsdk_{}.tar.bz2'.format(platform)
bigcode/self-oss-instruct-sc2-concepts
def featurewise_norm(x, mean=None, std=None, epsilon=1e-7):
    """Normalize every pixel by the same given mean and std, which are
    usually computed from all examples.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    mean : float
        Value for subtraction.
    std : float
        Value for division.
    epsilon : float
        A small positive value added to std to avoid division by zero.

    Returns
    -------
    numpy.array
        A processed image.
    """
    # Bug fix: the original truthiness tests (`if mean:`) silently skipped
    # normalization when mean=0.0 or std=0.0 were explicitly supplied.
    if mean is not None:
        x = x - mean
    if std is not None:
        x = x / (std + epsilon)
    return x
bigcode/self-oss-instruct-sc2-concepts
def shapestring(array):
    """Return a compact 'AxBxC' string describing the shape of an array.

    Uses str.join instead of manual concatenation; unlike the original,
    a 0-dimensional array yields '' instead of raising IndexError.
    """
    return 'x'.join(str(dim) for dim in array.shape)
bigcode/self-oss-instruct-sc2-concepts
def waypoint_menu(waypoints_exist):
    """Ask the user what to do with waypoints and map the answer to an action."""
    print("\nWas moechtest du als naechstes tun?")
    print("1: Wegpunkte hinzufuegen")
    if waypoints_exist:
        print("2: Wegpunkte zu Geocaches zuordnen oder loeschen")
        print("3: nichts")
    else:
        print("2: nichts")
    choice = input(">> ")
    if choice == "1":
        return "add"
    if choice == "2" and waypoints_exist:
        return "assign"
    return "continue"
bigcode/self-oss-instruct-sc2-concepts
import copy


def clone_processor(p):
    """Return a new processor with the same properties as the original.

    Child processors are copied recursively.

    :param p: processor to clone
    :return: an independent deep copy of p
    """
    return copy.deepcopy(p)
bigcode/self-oss-instruct-sc2-concepts
def binary_search(sorted_list, target):
    """Find where a number lies in a sorted list.

    Return the index of the lowest item that is greater than or equal to
    target, or None if no item in the list is greater than or equal to
    target.
    """
    if not sorted_list:
        return None
    if len(sorted_list) == 1:
        return 0 if target <= sorted_list[0] else None
    mid_index = len(sorted_list) // 2 - 1
    if target <= sorted_list[mid_index]:
        return binary_search(sorted_list[:mid_index + 1], target)
    # Bug fix: the recursive call may return None (target greater than
    # every element in the right half); the original then computed
    # `int + None` and raised TypeError. Propagate None instead.
    sub = binary_search(sorted_list[mid_index + 1:], target)
    return None if sub is None else mid_index + 1 + sub
bigcode/self-oss-instruct-sc2-concepts
def findpeptide(pep, seq, returnEnd = False):
    """Find pep in seq ignoring gaps but returning a start position that
    counts gaps.

    pep must match seq exactly (otherwise you should be using pairwise
    alignment).

    Parameters
    ----------
    pep : str
        Peptide to be found in seq.
    seq : str
        Sequence to be searched.
    returnEnd : bool
        Flag to return the end position such that:
        seq[startPos:endPos] = pep

    Returns
    -------
    startPos : int
        Start position (zero-indexed) of pep in seq or -1 if not found"""
    ungapped = seq.replace('-', '')
    ungapped_start = ungapped.find(pep)
    # Walk the gapped sequence, counting non-gap characters until we have
    # passed ungapped_start of them; gaps seen along the way shift the
    # start position to the right.
    non_gaps_seen = 0
    pos = 0
    while non_gaps_seen < ungapped_start or seq[pos] == '-':
        if seq[pos] != '-':
            non_gaps_seen += 1
        pos += 1
    startPos = ungapped_start + (pos - non_gaps_seen)
    if not returnEnd:
        return startPos
    if startPos == -1:
        return startPos, -1
    # Advance until len(pep) non-gap characters have been consumed.
    matched = 0
    endPos = startPos
    while matched < len(pep):
        if seq[endPos] != '-':
            matched += 1
        endPos += 1
    return startPos, endPos
bigcode/self-oss-instruct-sc2-concepts
import re


def research(needle, haystack, result):
    """Look for regex *needle* in str *haystack*.

    Appends the first match to *result* and returns True when a match was
    found; otherwise clears *result* and returns False. Note that result
    must be a list.
    """
    matches = re.findall(needle, haystack)
    if not matches:
        result.clear()
        return False
    result.append(matches[0])
    return True
bigcode/self-oss-instruct-sc2-concepts
def bisect_right(a, x, lo=0, hi=None, *, key=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x.  So if x already appears in the list, a.insert(i, x)
    will insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # "<" matches the __lt__() ordering used by list.sort() and heapq.
    if key is None:
        probe = lambda i: a[i]
    else:
        probe = lambda i: key(a[i])
    while lo < hi:
        mid = (lo + hi) // 2
        if x < probe(mid):
            hi = mid
        else:
            lo = mid + 1
    return lo
bigcode/self-oss-instruct-sc2-concepts
import ast


def check_return(node):
    """
    Returns `False` if `ast.Return` only appears at the end of
    `ast.FunctionDef`.
    """
    class _ReturnFinder(ast.NodeVisitor):
        def __init__(self):
            self.found = False

        def visit_Return(self, node):
            self.found = True

        def visit_FunctionDef(self, node):
            body = node.body
            if not body:
                return
            # A 'return' anywhere before the last statement counts.
            for statement in body[:-1]:
                self.visit(statement)
            # The last statement is only inspected when it is not itself
            # a plain 'return' (a trailing return is allowed).
            if not isinstance(body[-1], ast.Return):
                self.visit(body[-1])

    finder = _ReturnFinder()
    finder.visit(node)
    return finder.found
bigcode/self-oss-instruct-sc2-concepts
def ndvi( redchan, nirchan ):
    """
    Normalized Difference Vegetation Index
    ndvi( redchan, nirchan )
    """
    red = 1.0 * redchan
    nir = 1.0 * nirchan
    total = nir + red
    # Dark pixels would divide by zero; -1.0 flags an undefined NDVI.
    if total == 0.0:
        return -1.0
    return (nir - red) / total
bigcode/self-oss-instruct-sc2-concepts
import json


def getDefaultColumnsForScope(syn, scope):
    """Fetch the columns which would be used in the creation of a file view
    with the given scope.

    Parameters
    ----------
    syn : synapseclient.Synapse
    scope : str, list
        The Synapse IDs of the entities to fetch columns for.

    Returns
    -------
    list of dict
    """
    if isinstance(scope, str):
        scope = [scope]
    body = json.dumps({'scope': scope, 'viewType': 'file'})
    response = syn.restPOST('/column/view/scope', body)
    return response['results']
bigcode/self-oss-instruct-sc2-concepts
def bootstrap_field(field, param1=''):
    """Take a FormField and produce nice HTML for its label, input, etc."""
    value = ''
    # Bound forms expose cleaned_data after validation; unbound ones do not.
    if hasattr(field.form, 'cleaned_data'):
        value = field.form.cleaned_data.get(field.name, '')
    return {
        'field': field,
        'type': type(field).__name__,
        'value': value,
        'param1': param1,
        'widget': type(field.field.widget).__name__,
    }
bigcode/self-oss-instruct-sc2-concepts
import requests


def icmp_echo(baseurl, host, cookie_header):
    """Test IP connectivity to a given host.

    :param baseurl: Imported from yaml
    :param host: IP address of destination
    :param cookie_header: Object created by loginOS.login_os()
    :return: REST call response JSON
    """
    url = baseurl + 'ping'
    headers = {'cookie': cookie_header}
    payload = {
        'destination': {
            'ip_address': {'version': 'IAV_IP_V4', 'octets': host}
        }
    }
    response = requests.post(url, headers=headers, json=payload, verify=False)
    return response.json()
bigcode/self-oss-instruct-sc2-concepts
def user_is_mozillian(user):
    """Check if a user belongs to Mozillians's group."""
    mozillians = user.groups.filter(name='Mozillians')
    return mozillians.exists()
bigcode/self-oss-instruct-sc2-concepts
def compute_age(date, dob):
    """
    Compute a victim's age.

    :param datetime.date date: crash date
    :param datetime.date dob: date of birth
    :return: the victim's age in whole years.
    :rtype: int
    """
    days_in_year = 365
    elapsed_days = (date - dob).days
    return elapsed_days // days_in_year
bigcode/self-oss-instruct-sc2-concepts
import io def _read_bytes(fp, size, error_template="ran out of data"): """ Read from file-like object until size bytes are read. Raises ValueError if not EOF is encountered before size bytes are read. Non-blocking objects only supported if they derive from io objects. Required as e.g. ZipExtFile in python 2.6 can return less data than requested. """ data = bytes() while True: # io files (default in python3) return None or raise on # would-block, python2 file will truncate, probably nothing can be # done about that. note that regular files can't be non-blocking try: r = fp.read(size - len(data)) data += r if len(r) == 0 or len(data) == size: break except io.BlockingIOError: pass if len(data) != size: msg = "EOF: reading %s, expected %d bytes got %d" raise ValueError(msg % (error_template, size, len(data))) else: return data
bigcode/self-oss-instruct-sc2-concepts
from pathlib import Path


def extract_extension_from_file(file: str) -> str:
    """Extract the extension from a file name.

    :param file: Path of file we want to extract the extension from.
    :return: Lower-cased extension without the leading dot (e.g., mp3)
    """
    suffix = Path(file).suffix
    return suffix[1:].lower()
bigcode/self-oss-instruct-sc2-concepts
def _is_model(layer): """Returns True if layer is a model. Args: layer: a dict representing a Keras model configuration. Returns: bool: True if layer is a model. """ return layer.get('config').get('layers') is not None
bigcode/self-oss-instruct-sc2-concepts
def check_blank_after_last_paragraph(docstring, context, is_script):
    """Multiline docstring should end with 1 blank line.

    The BDFL recommends inserting a blank line between the last
    paragraph in a multi-line docstring and its closing quotes,
    placing the closing quotes on a line by themselves.
    """
    if not docstring:
        return
    # NOTE(review): eval() assumes `docstring` is a string literal taken
    # from parsed source; never feed it untrusted input.
    lines = eval(docstring).split('\n')
    if len(lines) == 1:
        return
    blanks = [not line.strip() for line in lines]
    # Valid endings look like [..., content, blank, blank-closing-quotes].
    if blanks[-3:] != [False, True, True]:
        return True
bigcode/self-oss-instruct-sc2-concepts
import re


def normalize(tokens : list) -> str:
    """
    Removes non-english characters and returns lower case versions of
    words in string form.
    """
    cleaned = (re.sub("[^a-zA-Z]+", "", token).lower() for token in tokens)
    words = [word for word in cleaned if word]
    return " ".join(words)
bigcode/self-oss-instruct-sc2-concepts
def one_or_more(pattern, greedy=True):
    """one or more repeats of a pattern

    :param pattern: an `re` pattern
    :type pattern: str
    :param greedy: match as much as possible?
    :type greedy: bool
    :rtype: str
    """
    quantifier = '+' if greedy else '+?'
    return r'(?:{:s})'.format(pattern) + quantifier
bigcode/self-oss-instruct-sc2-concepts
def isstringlike(item):
    """Return True when the given term is a string instance."""
    return isinstance(item, str)
bigcode/self-oss-instruct-sc2-concepts
def _update_gn_executable_output_directory(commands): """Update the output path of executables and response files. The GN and ZN builds place their executables in different locations so adjust then GN ones to match the ZN ones. Args: commands: list of command strings from the GN build. Returns: A new list of command strings. """ replacements = { 'TOOLCHAIN/main_with_static': 'TOOLCHAIN/obj/public/canaries/main_with_static', 'TOOLCHAIN/main_with_shared': 'TOOLCHAIN/obj/public/canaries/main_with_shared', 'TOOLCHAIN_SHARED/libfoo_shared': 'TOOLCHAIN_SHARED/obj/public/canaries/libfoo_shared', } result = [] for cmd in commands: for key, val in replacements.items(): cmd = cmd.replace(key, val) result.append(cmd) return result
bigcode/self-oss-instruct-sc2-concepts
def innerscripts_to_array(p):
    """Extract inline (non-src) scripts from a page.

    Args:
      p: parsed page (lxml element tree) supporting xpath() queries.

    Returns:
      dict with 'total' (count) and 'list' of {'text': ..., 'type': ...}
      entries, 'type' present only when the tag declares one.
    """
    # Renamed the original `all` local, which shadowed the builtin, and
    # flattened the inverted if/else with a guard clause.
    scripts = []
    for tag in p.xpath('//script'):
        # Tags with a src attribute load external files; skip them.
        if 'src' in tag.attrib:
            continue
        item = {'text': tag.text_content()}
        if 'type' in tag.attrib:
            item['type'] = tag.attrib['type']
        scripts.append(item)
    return {'total': len(scripts), 'list': scripts}
bigcode/self-oss-instruct-sc2-concepts
def validate_int(arg):
    """Convert arg to int, returning 0 for null/empty values.

    Guards against errors when attempting to convert a null to int: the
    original called len(arg), which raised TypeError for None despite the
    docstring's stated intent.
    """
    # Falsy values (None, '', empty sequences) all map to 0.
    if not arg:
        return 0
    return int(arg)
bigcode/self-oss-instruct-sc2-concepts
def genome_size(peaks_file, haploid=True):
    """
    Finds the genome size of an organism, based on the peaks file created
    by kmercountexact.sh.

    :param peaks_file: Path to peaks file created by kmercountexact.
    :param haploid: Set to True if organism of interest is haploid,
        False if not. Default True.
    :return: size of genome, as an int. If size could not be found,
        return will be 0.
    """
    marker = '#haploid_genome_size' if haploid else '#genome_size'
    size = 0
    with open(peaks_file) as peaks:
        # The last matching line wins, matching the original behaviour.
        for line in peaks:
            if marker in line:
                size = int(line.split()[1])
    return size
bigcode/self-oss-instruct-sc2-concepts
import time


def issued_and_expiration_times(seconds_to_expire):
    """
    Return the times in unix time that a token is being issued and will be
    expired (the issuing time being now, and the expiration being
    ``seconds_to_expire`` seconds after that). Used for constructing JWTs.

    Args:
        seconds_to_expire (int): lifetime in seconds

    Return:
        Tuple[int, int]: (issued, expired) times in unix time
    """
    issued = int(time.time())
    expired = issued + int(seconds_to_expire)
    return (issued, expired)
bigcode/self-oss-instruct-sc2-concepts
def get_json(request_obj, remove_token=True):
    """
    Return the json data that was sent with a request, falling back to the
    request params, or an empty dict if neither is present.

    Args:
        request_obj: request object that data should be attached to
        remove_token: strip the 'token' key from the result when present

    Returns:
        dict
    """
    result = {}
    if getattr(request_obj, 'json', None):
        result = request_obj.json
    elif hasattr(request_obj, 'params'):
        # Bug fix: the original unconditionally overwrote this fallback
        # with `request_obj.json or {}` afterwards, losing params (and
        # raising AttributeError when no json attribute existed at all).
        result = request_obj.params
    if remove_token and 'token' in result:
        del result['token']
    return result
bigcode/self-oss-instruct-sc2-concepts
def parse_op_and_node(line):
    """Parse a line containing an op node followed by a node name.

    For example, if the line is "  [Variable] hidden/weights", this
    function will return ("Variable", "hidden/weights").

    Args:
      line: The line to be parsed, as a str.

    Returns:
      Name of the parsed op type.
      Name of the parsed node.
    """
    fields = line.strip().split(" ")
    op_type = fields[0].replace("[", "").replace("]", "")
    # fields[1] (not [-1]) tolerates extra items trailing the node name.
    node_name = fields[1]
    return op_type, node_name
bigcode/self-oss-instruct-sc2-concepts
from typing import List


def lines_to_list(lines: str) -> List[str]:
    """Split a string into a list of non-empty lines."""
    result = []
    for line in lines.strip().splitlines():
        if line:
            result.append(line)
    return result
bigcode/self-oss-instruct-sc2-concepts
def _invert(ax,x,y): """ Translate pixel position to data coordinates. """ try: xdata,ydata = ax.transAxes.inverted().transform((x,y)) except: # Support older matplot xdata,ydata = ax.transAxes.inverse_xy_tup((x,y)) return xdata,ydata
bigcode/self-oss-instruct-sc2-concepts
def r(line):
    """
    Selects rho from a given line.
    """
    rho, _theta = line
    return rho
bigcode/self-oss-instruct-sc2-concepts
def get_value_counts_categorical(df, column, alt_filter, ascending=False):
    """
    Count the number of rows in `df` where `alt_filter == True` for each
    unique value in `df.loc[alt_filter, column]`.

    Parameters
    ----------
    df : pandas DataFrame
        The dataframe containing the column that we wish to get
        item-counts of.
    column : str.
        Should be present in `df.columns`. Should denote the column
        item-counts should be computed on.
    alt_filter : 1D boolean ndarray.
        Denotes the rows of `df` that are to be counted when
        item-counting.
    ascending : bool, optional.
        Denotes whether the counts are to be returned in ascending order
        or not. Default == False (return the counts from largest to
        smallest).

    Returns
    -------
    value_counts : pandas Series
        The index will contain the unique values from
        `df.loc[alt_filter, column]`, and the values of the Series will
        be the count of how many times the corresponding index value was
        in `df.loc[alt_filter, column]`.
    """
    filtered_column = df.loc[alt_filter, column]
    counts = filtered_column.value_counts()
    # Order the counts as requested before returning.
    return counts.sort_values(ascending=ascending)
bigcode/self-oss-instruct-sc2-concepts
def canonicalShape(shape):
    """Calculates a *canonical* shape, how the given ``shape`` should
    be presented. The shape is forced to be at least three dimensions,
    with any other trailing dimensions of length 1 ignored.
    """
    canonical = list(shape)
    # Squeeze trailing length-1 dimensions — a 3D image can sometimes
    # be listed as having 4 or more dimensions.
    while len(canonical) > 1 and canonical[-1] == 1:
        canonical.pop()
    # But make sure the shape has at least 3 dimensions.
    while len(canonical) < 3:
        canonical.append(1)
    return canonical
bigcode/self-oss-instruct-sc2-concepts
def _to_long_sha_digest(digest, repo): """Returns the full 40-char SHA digest of a commit.""" return repo.git.rev_parse(digest) if len(digest) < 40 else digest
bigcode/self-oss-instruct-sc2-concepts
import torch


def tversky_index(yhat, ytrue, alpha=0.3, beta=0.7, epsilon=1e-6):
    """
    Computes Tversky index.

    Args:
        yhat (Tensor): predicted masks
        ytrue (Tensor): target masks
        alpha (Float): weight for False positive
        beta (Float): weight for False negative
            ``alpha and beta control the magnitude of penalties and
            should sum to 1``
        epsilon (Float): smoothing value to avoid division by 0

    output:
        tversky index value
    """
    reduce_dims = (1, 2, 3)
    true_pos = torch.sum(yhat * ytrue, reduce_dims)
    false_pos = torch.sum((1. - ytrue) * yhat, reduce_dims)
    false_neg = torch.sum((1. - yhat) * ytrue, reduce_dims)
    return true_pos / (true_pos + alpha * false_pos + beta * false_neg + epsilon)
bigcode/self-oss-instruct-sc2-concepts
def _is_plugable_7port_hub(node): """Check if a node is a Plugable 7-Port Hub (Model USB2-HUB7BC) The topology of this device is a 4-port hub, with another 4-port hub connected on port 4. """ if '1a40:0101' not in node.desc: return False if not node.HasPort(4): return False return '1a40:0101' in node.PortToDevice(4).desc
bigcode/self-oss-instruct-sc2-concepts
def load_file(filename):
    """
    Loads and returns the contents of filename.

    :param filename: A string containing the filepath of the file to be
        loaded.
    :type filename: str
    :return: Contents of the loaded file.
    :rtype: str
    """
    with open(filename, "r") as file_handle:
        contents = file_handle.read()
    return contents
bigcode/self-oss-instruct-sc2-concepts
def morphology_used_in_fitting(optimized_params_dict, emodel):
    """Returns the morphology name from finals.json used in model fitting.

    Args:
        optimized_params_dict (dict): contains the optimized parameters,
            as well as the original morphology path
        emodel (str): name of the emodel

    Returns:
        str: the original morphology name used in model fitting
    """
    morph_path = optimized_params_dict[emodel]["morph_path"]
    # The morphology name is the last component of the path.
    return morph_path.split("/")[-1]
bigcode/self-oss-instruct-sc2-concepts
def rivers_with_station(stations):
    """
    Args:
        stations: list of MonitoringStation objects

    Returns:
        A set of names (string) of rivers that have an associated
        monitoring station.
    """
    return {station.river for station in stations}
bigcode/self-oss-instruct-sc2-concepts
def f16(value):
    """Scale a fractional value to a 16-bit fixed-point integer.

    Note: the scale factor is 65536 (0x10000), not 0xFFFF as the original
    docstring claimed; behaviour is intentionally unchanged to preserve
    existing callers.

    :param value: Input value (typically in [0, 1])
    :return: int(round(65536 * value))
    """
    return int(round(65536 * value))
bigcode/self-oss-instruct-sc2-concepts
def sval(val):
    """
    Returns a string value for the given object. When the object is an
    instance of bytes, utf-8 decoding is used.

    Parameters
    ----------
    val : object
        The object to convert

    Returns
    -------
    string
        The input value converted (if needed) to a string
    """
    if isinstance(val, bytes):
        return val.decode('utf-8')
    return str(val)
bigcode/self-oss-instruct-sc2-concepts
def _superclasses(obj, cls): """return remaining classes in object's MRO after cls""" mro = type(obj).__mro__ return mro[mro.index(cls)+1:]
bigcode/self-oss-instruct-sc2-concepts
def _align_sentence_spans_for_long_sentences(original_sentence_spans, trimmed_sentence_spans): """Align new token spans after enforcing limits to the original locations in raw text. This is needed to keep track of each token's location in original document. After enforcing sentence limits, some sentences get split into multiple parts. The locations in original document don't change, but we need to maintain one list per sentence. So we regroup the spans into lists corresponding to the trimmed sentences. Args: original_sentence_spans: list of lists of (start, end) int tuples that came out of the tokenizer. All locations with respect to the beginning of the document. trimmed_sentence_spans: list of lists of (start, end) int tuples, each sentence starting at zero, after splitting any long sentences into multiple chunks. Returns: adjusted spans: spans pointing to the original document, but regrouped into new lists anytime a sentence was split. """ if len(original_sentence_spans) == 0: return [[]] original_sentence_index = 0 sentence_break = 0 adjusted_spans = [] for trimmed_sentence in trimmed_sentence_spans: original_sentence = original_sentence_spans[original_sentence_index] if len(trimmed_sentence) < len(original_sentence): new_sentence_break = sentence_break + len(trimmed_sentence) adjusted_spans.append(original_sentence[sentence_break:new_sentence_break]) if new_sentence_break == len(original_sentence): sentence_break = 0 original_sentence_index += 1 else: sentence_break = new_sentence_break else: adjusted_spans.append(original_sentence) original_sentence_index += 1 return adjusted_spans
bigcode/self-oss-instruct-sc2-concepts
def _strip_mongodb_id(x): """ Rename the ``_id`` key from a dict as ``id``, if the latter doesn't already exist. If that's the case, remove the key. Update the object in-place. """ if "_id" in x: if "id" not in x: x["id"] = x.pop("_id") else: del x["_id"] return x
bigcode/self-oss-instruct-sc2-concepts
def phony(params: dict) -> str:
    """
    Build phony rules according to 42 rules
    """
    targets = ["all re clean fclean norm bonus"]
    if params["library_libft"]:
        targets.append("libft")
    if params["library_mlx"] and params["compile_mlx"]:
        targets.append("minilibx")
    return " ".join(targets)
bigcode/self-oss-instruct-sc2-concepts
def next_power_of_2(v):
    """
    Returns the next power of 2, or the argument if it's already a power
    of 2. Values <= 0 return 0, matching the original bit-twiddling
    behaviour.
    """
    if v <= 0:
        return 0
    # int.bit_length() works for arbitrarily large ints; the original
    # shift/or cascade only propagated 32 bits and silently returned
    # wrong results for v > 2**32.
    return 1 << (v - 1).bit_length()
bigcode/self-oss-instruct-sc2-concepts
import requests
import json


def edit_link(token, link, title):
    """
    Edits an already existing Bitly link's title.

    Args:
        token (str): Bitly access token.
        link (str): Shortened URL to be edited by Bitly.
        title (str): Updated Bitly link title.

    Returns:
        Bitly status information and the returned Bitly link on success.
    """
    url = ("https://api-ssl.bitly.com/v3/user/link_edit"
           "?access_token={}&link={}&edit=title&title={}").format(token, link, title)
    response = requests.get(url)
    return json.loads(response.content.decode("utf-8"))
bigcode/self-oss-instruct-sc2-concepts
def check_lat(lat):
    """
    Checks whether the input latitude is within range and correct type.

    Parameters
    ----------
    lat : float or int
        latitude (-90 to 90) in degrees

    Returns
    -------
    None. Raises an exception on invalid input.

    Raises
    ------
    ValueError
        latitude outside [-90, 90]
    TypeError
        latitude not an int or float
    """
    # bool is a subclass of int, so exclude it explicitly — the original
    # isinstance check silently accepted True/False as latitudes.
    if isinstance(lat, bool) or not isinstance(lat, (int, float)):
        raise TypeError('latitude should be "float" or "int"')
    if abs(lat) > 90:
        raise ValueError('latitude should be -90 <= latitude <= 90')
    return None
bigcode/self-oss-instruct-sc2-concepts
def merge_p(bi, p, bigrams):
    """
    Calculates the merge probability by combining the prob of the bigram
    of words with the prob of merge.

    Arguments:
        bi : bigram (w1, w2)
        p : p(MG->mg) from grammar (should be 1)
        bigrams : nested dict of bigram probabilities

    Returns:
        combined probability of merge op and bigram
    """
    first_word, second_word = bi
    bigram_prob = bigrams[first_word][second_word]
    return bigram_prob * p
bigcode/self-oss-instruct-sc2-concepts
def combine(a, b):
    """ sandwiches b in two copies of a and wrap by double quotes"""
    return '"{}{}{}"'.format(a, b, a)
bigcode/self-oss-instruct-sc2-concepts
def _find_get(root, path, value, defa=False): """ Error catching of things required to be set in xml. Gives useful errormessage instead of stuff like "AttributeError: 'NoneType' object has no attribute 'get'" Parameters ---------- root : Element Element in xml-tree to find parameter in path : str What path in the xml-tree the wanted value should be at value : str Name of the value to be extracted, e.g. "value", "path", "error" defa : str, int, None Default to return if not set Returns ------- val : str Extracted value upon location in xml """ tag = path.split("/")[-1] place = root.find(path) if place == None: if defa != False: return defa raise KeyError("Missing tag '{0}' in input!".format(tag)) val = place.get(value) if val == None: if defa != False: return defa raise ValueError("Missing '{0}' in tag '{1}'!".format(value, tag)) return val
bigcode/self-oss-instruct-sc2-concepts
from typing import List


def find_string_anagrams(str1: str, pattern: str) -> List[int]:
    """Find the starting index of every anagram of ``pattern`` inside ``str1``.

    Sliding-window approach: keep a frequency map of the pattern's characters
    and slide a window of ``len(pattern)`` characters over the string,
    tracking how many distinct characters are currently fully matched.

    Time Complexity: O(N)
    Space Complexity: O(N)

    Parameters
    ----------
    str1 : str
        input string
    pattern : str
        input pattern

    Returns
    -------
    List[int]
        starting indices of the anagrams of the pattern in the given string
    """
    freq = {}
    for ch in pattern:
        freq[ch] = freq.get(ch, 0) + 1

    start = 0
    fully_matched = 0
    hits: List[int] = []
    for end, right in enumerate(str1):
        # Consume the character entering the window on the right.
        if right in freq:
            freq[right] -= 1
            if freq[right] == 0:
                fully_matched += 1
        # All distinct characters satisfied -> the window is an anagram.
        if fully_matched == len(freq):
            hits.append(start)
        # Once the window reaches pattern length, release the leftmost char.
        if end >= len(pattern) - 1:
            left = str1[start]
            start += 1
            if left in freq:
                if freq[left] == 0:
                    fully_matched -= 1
                freq[left] += 1
    return hits
bigcode/self-oss-instruct-sc2-concepts
def create_suffix(suffix, index):
    """Create a suffix using an index.

    Args:
        suffix (string): Base suffix
        index (int/string): Zero-based index; rendered one-based, zero-padded

    Returns:
        string: Suffix such as "base-01"
    """
    rendered = "{:02d}".format(int(index) + 1)
    return "-".join((suffix, rendered))
bigcode/self-oss-instruct-sc2-concepts
def is_substring(text: str, elements: set) -> bool:
    """
    Check whether ``text`` occurs as a substring of any string in ``elements``.

    Args:
        text (str): text to be tested
        elements (set(str)): set of strings to test against

    Return:
        (bool): True if ``text`` is a substring of at least one element
    """
    return any(text in candidate for candidate in elements)
bigcode/self-oss-instruct-sc2-concepts
def _format_error(error: list) -> dict: """ Convert the error type list to a dict. Args: error (list): a two element list with the error type and description Returns: dict: explicit names for the list elements """ return {'error_type': error[0], 'description': error[1]}
bigcode/self-oss-instruct-sc2-concepts
def token_identity(it, token_payload=None):
    """Identity passthrough: return the input together with the payload."""
    result = (it, token_payload)
    return result
bigcode/self-oss-instruct-sc2-concepts
def first(s):
    """Return the first element from an ordered collection or an
    arbitrary element from an unordered collection.

    Raise StopIteration if the collection is empty.
    """
    iterator = iter(s)
    return next(iterator)
bigcode/self-oss-instruct-sc2-concepts
def auth_headers(token):
    """Return a list with the Authorization (Bearer) header for *token*."""
    header_value = 'Bearer %s' % token
    return [('Authorization', header_value)]
bigcode/self-oss-instruct-sc2-concepts
import json


def burn_in_info(skeleton, info):
    """Burn model info into the HTML skeleton.

    The result will render the hard-coded model info and have no external
    network dependencies for code or data.
    """
    # Python's json serializer does not escape slashes, and this JSON is
    # inlined directly into a <script> tag: a string containing "</script>"
    # would end the script prematurely.  Unconditionally escaping every "/"
    # avoids that.
    payload = json.dumps(info).replace("/", "\\/")
    placeholder = "BURNED_IN_MODEL_INFO = null"
    return skeleton.replace(placeholder, "BURNED_IN_MODEL_INFO = " + payload)
bigcode/self-oss-instruct-sc2-concepts
import requests


def make_request(url):
    """
    Make a GET request to a URL.

    :param url: any url
    :return: ('success', response) when the request completes, otherwise
             ('connection error', None)
    """
    try:
        response = requests.get(url)
    except requests.exceptions.ConnectionError:
        return 'connection error', None
    return 'success', response
bigcode/self-oss-instruct-sc2-concepts
def quotes_inner(quoted: str) -> str:
    """
    For a string containing a quoted part, return the inner part
    (the text between the first and the last double quote).
    """
    opening = quoted.find('"')
    closing = quoted.rfind('"')
    if closing < 0:
        # No quote at all: take everything (opening is -1, so slice from 0).
        closing = len(quoted)
    return quoted[opening + 1:closing]
bigcode/self-oss-instruct-sc2-concepts
def guess_lon_lat_columns(colnames):
    """
    Given column names in a table, return the columns to use for lon/lat,
    or None/None if there is no high-confidence match.
    """
    # Compare case-insensitively
    lowered = [name.lower() for name in colnames]

    for lon_name, lat_name in [('ra', 'dec'), ('lon', 'lat'), ('lng', 'lat')]:
        # Pass 1: exact matches; pass 2: prefix matches.  Either way we only
        # accept a pair when each side matches exactly one column.
        for matcher in (
            lambda col, target: col == target,
            lambda col, target: col.startswith(target),
        ):
            lon_hits = [i for i, col in enumerate(lowered) if matcher(col, lon_name)]
            lat_hits = [i for i, col in enumerate(lowered) if matcher(col, lat_name)]
            if len(lon_hits) == 1 and len(lat_hits) == 1:
                return colnames[lon_hits[0]], colnames[lat_hits[0]]

    # Names merely *containing* lon/lat are deliberately not checked since
    # those might be e.g. proper motions (pm_ra) or errors (dlat).
    return None, None
bigcode/self-oss-instruct-sc2-concepts
def get_pkt_data(il, offset, use_index=False, size=4):
    """
    Build an llil expression that loads data from the packet at ``offset``.

    :param il: llil function to generate the expression with
    :param offset: packet offset to retrieve
    :param use_index: when True, add the index register 'x' to the offset
    :param size: number of bytes to retrieve
    :return: llil load expression for the packet data
    """
    index_expr = il.const(4, offset)
    if use_index:
        index_expr = il.add(4, index_expr, il.reg(4, 'x'))
    address = il.add(4, il.reg(4, 'pkt'), index_expr)
    return il.load(size, address)
bigcode/self-oss-instruct-sc2-concepts
def copy_to_len_sliced(s, l):
    """Return copies of `s` concatenated and cut to length exactly `l`.

    If `l` is not a multiple of ``len(s)``, the result ends with the
    beginning slice of `s`.

    Parameters
    ----------
    s : string
        String to be copied and sliced.
    l : int
        The length of `output`.  Non-positive values yield "".

    Returns
    -------
    output : string
        Copies of `s` stuck together, truncated to length `l`.

    Raises
    ------
    ValueError
        If `s` is empty and `l` > 0 (the previous implementation entered an
        infinite loop in this case).
    """
    if l <= 0:
        # Previously a negative l produced a garbage partial copy via a
        # negative slice; an empty result is the only sensible output.
        return ""
    if not s:
        raise ValueError("cannot build a non-empty string from an empty source")
    # Repeat enough whole copies to cover l, then truncate.
    repeats = -(-l // len(s))  # ceiling division
    return (s * repeats)[:l]
bigcode/self-oss-instruct-sc2-concepts
import math


def t_test(image1, image2):
    """Performs a Student's t-test for the provided images.

    Uses each image's first-channel rms, stddev and count; returns the
    absolute value of the t statistic, or 0 when the denominator is zero.
    """
    mean_diff = image1.rms[0] - image2.rms[0]
    var1 = (image1.stddev[0] ** 2) / image1.count[0]
    var2 = (image2.stddev[0] ** 2) / image2.count[0]
    denom = math.sqrt(var1 + var2)
    if denom == 0:
        return 0
    return abs(mean_diff / denom)
bigcode/self-oss-instruct-sc2-concepts
def normalize(grid):
    """Normalize the grid's values to the range (0, 1).

    Transposes the grid (a DataFrame-like object with ``.T.values``) and
    rescales it linearly so the minimum maps to 0 and the maximum to 1.
    """
    values = grid.T.values
    lowest = values.min()
    highest = values.max()
    return (values - lowest) / (highest - lowest)
bigcode/self-oss-instruct-sc2-concepts
def fastaParserSpectraClusterPy(header):
    """Custom parser for fasta headers adapted from
    https://github.com/spectra-cluster/spectra-cluster-py

    :param header: str, protein entry header from a fasta file
    :returns: dict, parsed header
    """
    # UniProt-style headers start with a known database prefix.
    if header[0:3] in ('sp|', 'tr|', 'up|'):
        start = 3
        end = header.find('|', start)
    else:
        start = 0
        # The id ends at the first space or pipe; a position of -1 (not
        # found) or 0 is treated as end-of-string, matching the original.
        candidates = [header.find(' '), header.find('|')]
        candidates = [pos if pos > 0 else len(header) for pos in candidates]
        end = min(candidates)
    return {'id': header[start:end]}
bigcode/self-oss-instruct-sc2-concepts
def column_check(df, column):
    """Return True when `column` is one of `df`'s columns."""
    found = column in df.columns
    return found
bigcode/self-oss-instruct-sc2-concepts
def paths_same_disk(photos_path: str, export_path: str) -> bool:
    """
    Check whether the photos path and the export path are located on the
    same disk (compared by drive letter, i.e. the first character,
    case-insensitively).

    :param photos_path: path to photos
    :type photos_path: str
    :param export_path: path to the directory where the photo folder
        structure will be created
    :type export_path: str
    :return: True when both paths start with the same drive letter
    :rtype: bool
    """
    # The comparison already yields a bool; `True if ... else False`
    # was redundant.
    return photos_path[0].lower() == export_path[0].lower()
bigcode/self-oss-instruct-sc2-concepts
def get_handcrafted_feature_names(platform):
    """
    Returns a set of feature names to be calculated.

    Output:
        - names: A set of strings, corresponding to the features to be calculated.
    """
    # Basic discussion-tree features.
    basic_tree = ("comment_count",
                  "max_depth", "avg_depth",
                  "max_width", "avg_width",
                  "max_depth_over_max_width", "avg_depth_over_width")

    # Branching discussion-tree features.
    branching = ("comment_tree_hirsch", "comment_tree_wiener", "comment_tree_randic")

    # User-graph features.
    user_graph = ("user_count",
                  "user_graph_hirsch", "user_graph_randic",
                  "outdegree_entropy", "norm_outdegree_entropy",
                  "indegree_entropy", "norm_indegree_entropy")

    # Temporal features.
    temporal = ("avg_time_differences_1st_half",
                "avg_time_differences_2nd_half",
                "time_differences_std")

    # NOTE(review): platform-specific (YouTube/Reddit) author features are
    # currently disabled; `platform` is accepted but unused.
    names = set()
    for group in (basic_tree, branching, user_graph, temporal):
        names.update(group)
    return names
bigcode/self-oss-instruct-sc2-concepts
from pathlib import Path


def make_path_like(path_like):
    """Attempt to convert the argument to a Path instance.

    :param path_like: a Path (returned unchanged), or anything ``Path()``
        accepts, e.g. a string
    :return: a ``pathlib.Path``
    :raises TypeError: if the value cannot be converted to a Path
    """
    if isinstance(path_like, Path):
        return path_like
    try:
        return Path(path_like)
    except TypeError as err:
        # Chain the original error so the real cause stays visible
        # in the traceback.
        raise TypeError(f'could not convert to Path: {path_like}') from err
bigcode/self-oss-instruct-sc2-concepts
import tempfile
import json


def write_json_temp_file(data):
    """Write `data` as JSON to a new temp file and return the filename.

    The file is created with ``delete=False``: it persists after closing
    and the caller is responsible for removing it.
    """
    # UTF-8 text mode produces the same bytes as the old
    # dumps().encode('utf-8') approach (json escapes non-ASCII by default).
    with tempfile.NamedTemporaryFile(delete=False, mode='w',
                                     encoding='utf-8') as handle:
        json.dump(data, handle)
    return handle.name
bigcode/self-oss-instruct-sc2-concepts
def read_input(path: str) -> list:
    """
    Read game board file from path. Return list of str.

    >>> read_input("check.txt")
    ['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***']
    """
    with open(path, 'r') as board_file:
        return [row.strip() for row in board_file]
bigcode/self-oss-instruct-sc2-concepts
def pairs(clustergenerator, labels):
    """Create an iterable of (N, {label1, label2 ...}) for each cluster in a
    ClusterGenerator, where N is "1", "2", "3", etc.

    Useful to pass to e.g. vambtools.writer_clusters.

    Inputs:
        clustergenerator: A ClusterGenerator object
        labels: List or array of cluster labels

    Output: Generator yielding ("1", {label1, label2 ... }) for each cluster
    """
    # Highest point index the generator references; each such index must
    # have a corresponding entry in `labels`.
    maxindex = clustergenerator.indices.max()
    # NOTE(review): if `indices` are 0-based, labels[maxindex] requires
    # len(labels) > maxindex, so this should arguably be
    # `len(labels) <= maxindex` — confirm against ClusterGenerator's contract.
    if len(labels) < maxindex:
        raise ValueError("Cluster generator contains point no {}, "
                         "but was given only {} labels".format(maxindex, len(labels)))

    # Clusters are numbered from "1".  as_tuple(labels)[1] is presumably the
    # set of member labels for the cluster — verify against Cluster.as_tuple.
    return ((str(i+1), c.as_tuple(labels)[1]) for (i, c) in enumerate(clustergenerator))
bigcode/self-oss-instruct-sc2-concepts
def cubec_to_cubei(cubec):
    """Convert cubic centimetres to cubic inches.

    :param cubec: volume in cubic centimetres
    :return: volume in cubic inches
    """
    # 1 inch = 2.54 cm exactly, so 1 cubic inch = 2.54**3 = 16.387064 cm^3.
    # The previous constant 0.061 lost accuracy (1/16.387064 ~= 0.06102374).
    return cubec / 16.387064
bigcode/self-oss-instruct-sc2-concepts
import math


def determine_padding(filter_shape, output_shape="same"):
    """Calculate padding from the desired output shape and the filter shape.

    :param filter_shape: (filter_height, filter_width)
    :param output_shape: "valid" (no padding) or "same" (output size equals
        input size at stride 1)
    :return: ((pad_h1, pad_h2), (pad_w1, pad_w2))
    :raises ValueError: if output_shape is neither "valid" nor "same"
    """
    if output_shape == "valid":
        return (0, 0), (0, 0)
    if output_shape == "same":
        filter_height, filter_width = filter_shape
        # Derived from: output_height = (height + pad_h - filter_height) / stride + 1
        # For odd filter sizes both sides get equal padding; for even sizes
        # the extra pixel goes on the second side (ceil).
        pad_h1 = int(math.floor((filter_height - 1) / 2))
        pad_h2 = int(math.ceil((filter_height - 1) / 2))
        pad_w1 = int(math.floor((filter_width - 1) / 2))
        pad_w2 = int(math.ceil((filter_width - 1) / 2))
        return (pad_h1, pad_h2), (pad_w1, pad_w2)
    # Previously an unknown value silently returned None; fail loudly instead.
    raise ValueError('output_shape must be "same" or "valid", got %r' % (output_shape,))
bigcode/self-oss-instruct-sc2-concepts
from typing import Sequence
from typing import Optional
from typing import Callable


def make_add_field_names_preprocessor(
    field_names: Sequence[str],
    field_indices: Optional[Sequence[int]] = None,
) -> Callable:
    """Make a preprocessor that attaches field names to a dataset.

    The returned callable converts a dataset of tensor tuples into a
    dataset of dictionaries keyed by ``field_names``.

    Parameters
    ----------
    field_names : Sequence[str], required
        Field names for the new dictionaries.
    field_indices : Optional[Sequence[int]], optional (default=None)
        Index of each field name's tensor within a row.  When ``None``,
        each name's index is its position in the sequence.

    Returns
    -------
    Callable
        A function taking a ``tf.data.Dataset`` and returning a
        ``tf.data.Dataset`` that performs the conversion.
    """
    indices = field_indices if field_indices is not None else range(len(field_names))
    name_index_pairs = list(zip(field_names, indices))

    def add_field_names_preprocessor(dataset):
        return dataset.map(
            lambda *row: {name: row[idx] for name, idx in name_index_pairs}
        )

    return add_field_names_preprocessor
bigcode/self-oss-instruct-sc2-concepts
def strip_parentheses(string):
    """
    Remove parentheses from a string, leaving parentheses between <tags> in place.

    Args:
        string: the string to remove parentheses from

    Returns:
        the processed string after removal of parentheses
    """
    paren_depth = 0  # depth of '(' nesting outside of tags
    tag_depth = 0    # depth of '<' tag nesting
    kept = []
    for ch in string:
        # Track tag depth only while not inside stripped parentheses.
        if paren_depth < 1:
            if ch == '<':
                tag_depth += 1
            if ch == '>':
                tag_depth -= 1
        if tag_depth < 1:
            # Outside <tags>: swallow parentheses and their content.
            if ch == '(':
                paren_depth += 1
            if paren_depth < 1:
                kept.append(ch)
            if ch == ')':
                paren_depth -= 1
        else:
            # Inside <tags>: keep everything, parentheses included.
            kept.append(ch)
    return ''.join(kept)
bigcode/self-oss-instruct-sc2-concepts
def parse_multplt(path):
    """
    Parses a multplt.def file and returns a list of entries like:
    [station, channel type (e.g. SH), channel (E, N or Z)].
    """
    prefix = "DEFAULT CHANNEL"
    entries = []
    with open(path, "r") as f:
        for line in f:
            if line.startswith(prefix):
                # entry[0] - station, ..[1] - type, ..[2] - channel
                entries.append(line[len(prefix):].split())
    return entries
bigcode/self-oss-instruct-sc2-concepts
def run_query(client, query):
    """
    Run a BigQuery query job.

    :param client: BigQuery client object
    :param query: Query to run, as a string
    :return: QueryJob object
    """
    job = client.query(query)
    return job
bigcode/self-oss-instruct-sc2-concepts
def add_scalebar(image_obj, scalebar_px):
    """Draw a white scalebar onto the image and return the edited image."""
    # Offset the bar ~2.5% from the bottom-left corner; stroke is ~0.5% of
    # the image height with a minimum of 1 px.
    indent_px = int(image_obj.height * 0.025)
    stroke = max(1, int(image_obj.height * 0.005))
    print("Scale bar info: (offset px, stroke) = (%s, %s)" % (indent_px, stroke))

    # Pixel span of the bar: scalebar_px wide, `stroke` tall, indented from
    # the bottom-left corner by `indent_px` in both directions.
    x_start = indent_px
    x_stop = indent_px + scalebar_px
    y_start = image_obj.height - indent_px - stroke
    y_stop = image_obj.height - indent_px

    # Paint the bar white, pixel by pixel.
    for col in range(x_start, x_stop):
        for row in range(y_start, y_stop):
            image_obj.putpixel((col, row), (255, 255, 255))
    return image_obj
bigcode/self-oss-instruct-sc2-concepts
def lerp(a, b, x):
    """Linearly interpolate from `a` to `b` by weight `x` (0 -> a, 1 -> b)."""
    delta = b - a
    return a + x * delta
bigcode/self-oss-instruct-sc2-concepts
import copy


def update_categories(desired_name2id: dict, coco_dict: dict) -> dict:
    """
    Rearranges category mapping of given COCO dictionary based on given
    category mapping. Can also be used to filter some of the categories.

    Arguments:
    ---------
        desired_name2id : dict
            {"big_vehicle": 1, "car": 2, "human": 3}
        coco_dict : dict
            COCO formatted dictionary.
    Returns:
    ---------
        coco_target : dict
            COCO dict with updated/filtered categories.
    """
    # Deep-copy so the caller's dict is not affected: annotations are
    # mutated in place further down.
    coco_source = copy.deepcopy(coco_dict)

    # init target coco dict
    coco_target = {"images": [], "annotations": [], "categories": []}

    # init vars
    currentid2desiredid_mapping = {}

    # Build old-id -> new-id mapping; categories absent from desired_name2id
    # map to the sentinel -1 and their annotations are dropped below.
    for category in coco_source["categories"]:
        current_category_id = category["id"]
        current_category_name = category["name"]
        if current_category_name in desired_name2id.keys():
            currentid2desiredid_mapping[current_category_id] = desired_name2id[current_category_name]
        else:
            # ignore categories that are not included in desired_name2id
            currentid2desiredid_mapping[current_category_id] = -1

    # update annotations
    for annotation in coco_source["annotations"]:
        current_category_id = annotation["category_id"]
        desired_category_id = currentid2desiredid_mapping[current_category_id]
        # append annotations with category id present in desired_name2id
        if desired_category_id != -1:
            # update category id (in place, on the deep copy)
            annotation["category_id"] = desired_category_id
            # append updated annotation to target coco dict
            coco_target["annotations"].append(annotation)

    # Rebuild the categories list from desired_name2id; supercategory is
    # set equal to the category name.
    categories = []
    for name in desired_name2id.keys():
        category = {}
        category["name"] = category["supercategory"] = name
        category["id"] = desired_name2id[name]
        categories.append(category)

    # update categories
    coco_target["categories"] = categories

    # Images are carried over unchanged (images whose annotations were all
    # filtered out are NOT removed).
    coco_target["images"] = coco_source["images"]

    return coco_target
bigcode/self-oss-instruct-sc2-concepts
def _EdgeStyle(data): """Helper callback to set default edge styles.""" flow = data.get("flow") if flow in {"backward_control", "backward_data", "backward_call"}: return "dashed" else: return "solid"
bigcode/self-oss-instruct-sc2-concepts