content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import re

def adjust_name_for_printing(name):
    """
    Make sure a name can be printed, alongside used as a variable name.

    Spaces and punctuation are replaced with mnemonic underscore tokens
    (e.g. '-' -> '_m_', '+' -> '_p_', '*' -> '_x_', '(' -> '_of_').

    Args:
        name: raw name string, or None.

    Returns:
        The sanitized name, or '' when *name* is None.

    Raises:
        NameError: if the converted name is still not a valid Python identifier.
    """
    if name is None:
        return ''
    name2 = name
    name = name.replace(" ", "_").replace(".", "_").replace("-", "_m_")
    name = name.replace("+", "_p_").replace("!", "_I_")
    name = name.replace("**", "_xx_").replace("*", "_x_")
    name = name.replace("/", "_l_").replace("@", '_at_')
    name = name.replace("(", "_of_").replace(")", "")
    # Bug fix: the original class '[a-zA-Z0-9-_]' contained a stray literal
    # '-', so names containing '-' would pass validation even though they are
    # not valid Python identifiers.
    if re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name) is None:
        raise NameError("name {} converted to {} cannot be further converted to valid python variable name!".format(name2, name))
    return name
931df7fd3f6f456ead9b62692dea6bae31cf736f
691,625
import pickle

def from_pickle(input_path):
    """Read a pickled network object from *input_path* and return it."""
    with open(input_path, 'rb') as handle:
        return pickle.Unpickler(handle).load()
9fd24fc422b9b15d831d9411ef9ba5537cf2c90d
691,626
def filter_breadcrumbs(_logger, level, msg, *args, **kwargs):  # pylint: disable=unused-argument
    """
    Intercepts logging messages

    Args:
        _logger: originating logger
        level: record level
        msg: record message
        *args: logging args
        **kwargs: logging kwargs

    Returns:
        bool: False when the record should be dropped, True otherwise.
    """
    blocked_levels = []
    blocked_messages = []
    if level in blocked_levels:
        return False
    if msg in blocked_messages:
        return False
    # Drop everything coming from the 'requests' logger
    return _logger != 'requests'
d32d81d87c7e83e76e091835b8dd2ed430e1fcae
691,627
def _y2num(y):
    """theano-friendly class-to-score conversion

    Weighted sum of the one-hot class columns 1..5 of *y*.
    """
    score = y[:, 1]
    for weight in range(2, 6):
        score = score + weight * y[:, weight]
    return score
bbdf72206bfa73dc5aca098b383eeea5fe781b90
691,628
def pres(gamma, dens, eint):
    """
    Return the pressure from density and specific internal energy
    (ideal-gas equation of state: p = rho * e * (gamma - 1)).

    Parameters
    ----------
    gamma : float
        The ratio of specific heats
    dens : float
        The density
    eint : float
        The specific internal energy

    Returns
    -------
    out : float
        The pressure
    """
    gm1 = gamma - 1.0
    return dens * eint * gm1
3bdcdfd1dd280d9cfd397ba0c21bffce0d68bcb6
691,629
def publish_to_stream(s, base_url, app_id, stream_id):
    """Publish an app to a target stream via the QRS API.

    Returns the HTTP status code of the PUT request.
    """
    url = (base_url + "/qrs/app/" + app_id + "/publish?stream="
           + stream_id + "&xrfkey=abcdefg123456789")
    response = s.put(url)
    return response.status_code
8f1dd12ad7f273c0486d38ac92405aca96f3629a
691,630
import numpy

def create_blank(width, height, rgb_color=(0, 0, 0)):
    """Create a new image (numpy uint8 array) filled with a given RGB color."""
    canvas = numpy.zeros((height, width, 3), numpy.uint8)
    # OpenCV images are BGR, so reverse the RGB triple before filling
    canvas[:] = rgb_color[::-1]
    return canvas
269db31556d17cf84a74a226be5415bc5673f09a
691,631
import logging
import os
import subprocess

def RunGit(command):
    """Run a git subcommand, returning its stripped stdout."""
    full_cmd = ['git'] + command
    logging.info(' '.join(full_cmd))
    # On Windows, shell=True is needed so PATH lookup resolves the git binary.
    use_shell = (os.name == 'nt')
    proc = subprocess.Popen(full_cmd, shell=use_shell, stdout=subprocess.PIPE)
    return proc.communicate()[0].strip()
eb4f836e0a1edb24b7aa2d89f05dfa8db7e4b1a8
691,632
def compressed(x, selectors):
    """ compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F """
    kept = []
    for item, keep in zip(x, selectors):
        if keep:
            kept.append(item)
    return kept
349ea3ffe35b135b5393650b5aff3d9a981c35d8
691,633
from typing import Any def flatten_dict(data: dict[str, Any], keys: list[str] = []) -> dict: """ Takes a dictionary containing key-value pairs where all values are of type other than `list` and flattens it such that all key-value pairs in nested dictionaries are now at depth 1. Args: data (dict): Dictionary containing non-list values keys (list[str], optional): Keys of `data` if `data` is a nested `dict` (`len(keys)` == depth of `data`). Defaults to []. Returns: dict: Flat dictionary containing all key-value pairs in `data` and its nested dictionaries """ flat_dict: dict[str, Any] = {} for key, value in data.items(): match value: case dict(): flat_dict = flat_dict | flatten_dict(value, [*keys, key]) case value: flat_dict['_'.join([*keys, key])] = value return flat_dict
1552225dd436e882685b90e237c761f2526b9244
691,634
import random

def processslice6():
    """Return the length of the part of the ruler containing 6.

    The process is to choose a random initial cut and then choose to cut the
    remaining segment containing the 6 inch mark.

    Returns:
        float: length of the final segment containing the 6 inch mark.
    """
    # First random cut on the 12-inch ruler.
    x = random.uniform(0, 12)
    if x >= 6:
        # 6 lies in [0, x]: cut that segment; keep the old cut as y.
        x, y = random.uniform(0, x), x
    else:
        # 6 lies in [x, 12]: cut that segment.
        y = random.uniform(x, 12)
    # At this point the cut points are x < y; decide which piece holds 6 and
    # cut that piece once more before measuring.
    if x >= 6:
        # NOTE(review): this third cut re-cuts [0, x]; the docstring describes
        # only two cuts — confirm the intended number of cuts.
        x, y, z = random.uniform(0, x), x, y
        if x >= 6:
            return x
        else:
            return y - x
    elif y >= 6:
        # 6 lies in the middle piece [x, y].
        y, z = random.uniform(x, y), y
        if y >= 6:
            return y - x
        else:
            return z - y
    else:
        # 6 lies in the right piece [y, 12].
        z = random.uniform(y, 12)
        if z >= 6:
            return z - y
        else:
            return 12 - z
628a6b77196c0aef763083a1006bc46172ac7a57
691,635
import fnmatch

def _table_matches_any_pattern(schema, table, patterns):
    """Test if the table `<schema>.<table>` matches any of the provided patterns.

    Will attempt to match both `schema.table` and just `table` against each
    pattern.

    Params:
    - schema. Name of the schema the table belongs to.
    - table. Name of the table.
    - patterns. The patterns to try.
    """
    qualified = '{}.{}'.format(schema, table)
    for pattern in patterns:
        if fnmatch.fnmatch(qualified, pattern) or fnmatch.fnmatch(table, pattern):
            return True
    return False
d59e654b52f61e3b5335c2bee5dfdb960c0d8060
691,636
def match_mapped_ids(id, mapping_struct):
    """helper function to fetch an old id from a mapping structure to get its new id

    Returns the new id for the first pair whose old id equals *id*, or None.
    """
    matches = (new_id for new_id, old_id in mapping_struct if old_id == id)
    return next(matches, None)
987852683bfc30f9c5609312f1f92e85dc1109bf
691,638
import sys

def get_user_inputs():
    """
    tries to get user's inputs
    :return: list of inputs from the user (list of strings)
    """
    input_lst = []
    attempts = 0
    # NOTE(review): 'attempts <= 5' allows six prompts, not five — confirm.
    while attempts <= 5:
        attempts += 1
        try:
            value = input("enter a url or 'quit': ")
        except ValueError:
            # NOTE(review): input() does not raise ValueError; this handler
            # appears unreachable — confirm the intended exception.
            print("You entered an invalid input.")
            continue
        if (value.lower() == 'quit') or (value.lower() == 'q'):
            sys.exit(0)  # Exit the interpreter by raising SystemExit(status).
        else:
            input_lst.append(value)
    return input_lst
535436b97dcbadc8af4e450fff92f2451b2dc6eb
691,639
import datetime

def skpDate(date):
    """Return a datetime.date for the day the unit generated the frame.

    *date* is a 'DDMMYY' string; the year is assumed to be 2000 + YY.

    >>> skpDate('041212')
    datetime.date(2012, 12, 4)
    """
    day = int(date[0:2])
    month = int(date[2:4])
    year = 2000 + int(date[4:6])
    return datetime.date(year, month, day)
d3b0eaae00f091542af8d6e9ac2b83f8e684dc97
691,640
import torch

def BinaryDiceLoss(predict, target, smooth, p, weight=None):
    """
    Description: Dice loss for binary classification.

    Flattens each sample, optionally applies an element-wise weight, and
    returns 1 - (2*|P∩T| + smooth) / (|P|^p + |T|^p + smooth).
    """
    assert predict.shape[0] == target.shape[0]
    if weight is not None:
        predict = torch.mul(predict, weight)
        target = torch.mul(target, weight)
    flat_pred = predict.contiguous().view(predict.shape[0], -1)
    flat_tgt = target.contiguous().view(target.shape[0], -1)
    numerator = torch.sum(torch.mul(flat_pred, flat_tgt)) * 2 + smooth
    denominator = torch.sum(flat_pred.pow(p) + flat_tgt.pow(p)) + smooth
    return 1 - numerator / denominator
e9cd80f2dd8b2ca2d5496b966e2c1a20584c8913
691,641
import typing
import enum

def _enum_labels(
    value: typing.Union[int, str, enum.Enum],
    enum_type: typing.Optional[typing.Type] = None,
) -> typing.Dict[int, str]:
    """
    Gets the human friendly labels of a known enum and what value they map to.

    Prefers *enum_type* when given, otherwise falls back to *value*; objects
    without a ``native_labels`` attribute yield an empty mapping.
    """
    source = enum_type if enum_type else value
    return getattr(source, 'native_labels', lambda: {})()
c48dece92922044050ad35f066bf303d2b7b9ac1
691,642
def conditional(b, x, y):
    """Conditional operator for PyTorch.

    Args:
        b (FloatTensor): with values that are equal to 0 or 1
        x (FloatTensor): of same shape as b
        y (FloatTensor): of same shape as b

    Returns:
        z (FloatTensor): of same shape as b. z[i] = x[i] if b[i] == 1 else y[i]
    """
    keep = b * x
    drop = (1 - b) * y
    return keep + drop
f04765e080abe118e73831c99942f81e86fffb96
691,643
def logPlusOne(x):
    """
    compute log(x + 1), numerically stable for small |x|

    Args:
        x: Tensor

    Returns:
        Tensor log(x+1)
    """
    threshold = 1e-4
    # 1.0 where |x| <= threshold, else 0.0 — selects the Taylor branch
    small = x.abs().le(threshold).type_as(x)
    # Second-order expansion x - x^2/2 avoids cancellation near zero
    taylor = x.mul(x.mul(-0.5) + 1.0)
    exact = (x + 1.0).log()
    return taylor * small + exact * (1.0 - small)
5c5e621418490904323d7ca40213df66a23b6076
691,644
def calculate_directions(directions):
    """
    Input: An iterable of (direction, unit)
    Output: A summarized collection of directions (x, y)
    """
    x = y = 0
    deltas = {'forward': (1, 0), 'down': (0, 1), 'up': (0, -1)}
    for direction, unit in directions:
        assert direction in ['forward', 'down', 'up']
        dx, dy = deltas[direction]
        x += dx * unit
        y += dy * unit
    return x, y
f3b076f8cbf9e0559c418909132b37ee79f908c2
691,645
def inc(x):
    """Return *x* incremented by one."""
    return 1 + x
5720c02c0d8c2f10249a13951d420d8a958466a6
691,646
def calc_a(dert__):
    """
    Compute vector representation of gradient angle by normalizing (dy, dx)
    with g.

    Numpy-broadcasted; the first dimension of dert__ is a list of parameters:
    g, dy, dx.

    Example
    -------
    >>> dert1 = np.array([0, 5, 3, 4])
    >>> calc_a(dert1)
    array([0.6, 0.8])
    """
    dy_dx = dert__[[2, 3]]
    g = dert__[1]
    return dy_dx / g
d1be9a8eb2985776af45380248910d70ae09c840
691,647
def replace_core_mol_dummy_atoms(mol, mcs, replace_core_mol):
    """
    Replace the isotope label of each dummy atom (*) in replace_core_mol with
    the isotope label of the corresponding core anchor atom (anchor idx + 10000).

    example:
        replace_core = '[3*][2004CH]1...' becomes '[10003*][2004CH]1...'

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: an rdkit molecule (unused; kept for
        interface compatibility)
    :param rdkit.Chem.rdchem.Mol mcs: shared common core (unused; kept for
        interface compatibility)
    :param rdkit.Chem.rdchem.Mol replace_core_mol: mol with MCS anchors
        labeled with * and an isotope label of the idx of the core anchor atom

    Returns:
    :returns: rdkit.Chem.rdchem.Mol replace_core_mol: the same mol, with its
        * atoms relabeled; usable for building R-group lists.
    """
    # Cleanup vs the original: an unused deepcopy of replace_core_mol and an
    # unused neighbor-isotope dict were removed; neither affected the result.
    anchor_to_set_dict = {}
    for atom in replace_core_mol.GetAtoms():
        if atom.GetAtomicNum() == 0:  # dummy atom (*)
            anchor_to_set_dict[atom.GetIdx()] = atom.GetIsotope() + 10000
    for idx, anchor_iso in anchor_to_set_dict.items():
        replace_core_mol.GetAtomWithIdx(idx).SetIsotope(anchor_iso)
    return replace_core_mol
2687df5e709e7cdf671d59b887878e2f4240d863
691,649
def total_characters(word_list):
    """
    Counts the total number of characters in a word list.

    Accepts a word list where each element is a word; non-string elements are
    ignored.

    :param word_list: string word list
    :return: total number of characters
    :raises: TypeError
    :rtype: int
    """
    # Bug fix: the docstring declares TypeError, but the original raised a
    # generic Exception("ParameterError", ...). TypeError is also a subclass
    # of Exception, so existing broad handlers still catch it.
    if word_list is None or not isinstance(word_list, list):
        raise TypeError("Expect input to be a list")
    # Sum lengths of string elements only; everything else contributes 0.
    return sum(len(word) for word in word_list if isinstance(word, str))
a1f6e41db38a3492ec39cfb631f723e16ed82c32
691,650
def slide_window(img, x_start_stop=(None, None), y_start_stop=(None, None),
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """
    Generate a list of sliding windows (bounding boxes) for an image.

    Args:
        img: Input image (height x width x channels array).
        x_start_stop: (start, stop) on the x axis; None entries default to the
            image bounds.
        y_start_stop: (start, stop) on the y axis; None entries default to the
            image bounds.
        xy_window: Window size for x and y.
        xy_overlap: Percentage overlap between windows for the x and y axis.

    Returns:
        A list of windows ((x_start, y_start), (x_end, y_end)).
    """
    image_width, image_height = (img.shape[1], img.shape[0])
    # Bug fix: the original wrote resolved bounds back into its [None, None]
    # mutable default lists, so the first image's dimensions leaked into every
    # later call. Resolve the bounds into locals instead.
    x_min = x_start_stop[0] if x_start_stop[0] is not None else 0
    x_max = x_start_stop[1] if x_start_stop[1] is not None else image_width
    y_min = y_start_stop[0] if y_start_stop[0] is not None else 0
    y_max = y_start_stop[1] if y_start_stop[1] is not None else image_height
    # Span of the region to be searched
    x_span = x_max - x_min
    y_span = y_max - y_min
    # Pixels per step in x/y
    x_step = int(xy_window[0] * xy_overlap[0])
    y_step = int(xy_window[1] * xy_overlap[1])
    # Number of windows in x/y
    windows_x = int(1 + (x_span - xy_window[0]) / (xy_window[0] * xy_overlap[0]))
    windows_y = int(1 + (y_span - xy_window[1]) / (xy_window[1] * xy_overlap[1]))
    window_list = []
    for x_window in range(windows_x):
        for y_window in range(windows_y):
            x_start = x_min + x_window * x_step
            y_start = y_min + y_window * y_step
            window_list.append(((x_start, y_start),
                                (x_start + xy_window[0], y_start + xy_window[1])))
    return window_list
39856ae488cbfb74f1865992e7ab7f5ffd971c62
691,651
def set_config_defaults(data_extraction_dict: dict) -> dict:
    """Set default values for some data configs if they are not defined.

    Note: 'skip' is not currently used in derivation

    Args:
        data_extraction_dict (dict): Dict from the benchmark definition dict
            that defined how data will be extraced from stdout/stderr

    Returns:
        data_extraction_dict (dict): Updated data extraction specification dict
    """
    data_extraction_dict.setdefault("skip", 0)
    data_extraction_dict.setdefault("reduction_type", "mean")
    return data_extraction_dict
9f1c6be57fd89184c6268faca48c52db386545f9
691,652
def util_key_new(schema, keys):
    """Returns list of keys not in schema"""
    return [key for key in keys if key not in schema]
3c856516bb6d20da865ac749e35de5f05c9dff66
691,653
import re

def remove_redundant_path_slashes(path):
    """
    If a relative filename contains multiple consecutive / characters (except
    at the beginning, in case of //server/host paths), remove them.

    >>> remove_redundant_path_slashes('/test//test2')
    '/test/test2'
    >>> remove_redundant_path_slashes('//test///test2')
    '//test/test2'
    >>> remove_redundant_path_slashes('')
    ''
    """
    # Cleanup: the original assigned path[1:] to path_suffix and immediately
    # overwrote it with the re.sub result; the dead assignment is removed.
    # Collapsing starts at index 1 so a leading '//' survives intact.
    return path[0:1] + re.sub(r'//+', '/', path[1:])
5cad7b7bf7d15b2d99894e3ef365c2b18d776c9b
691,654
import math

def pose_dist(pose1, pose2):
    """Return Euclidean distance between two ROS poses (x/y plane only)."""
    dx = pose1.position.x - pose2.position.x
    dy = pose1.position.y - pose2.position.y
    return math.sqrt(dx ** 2 + dy ** 2)
59a884f078debee6a266612b3a5c480950df19eb
691,655
import itertools
import random

def shuffle_simple(input):
    """
    Take a list (array) in python and permute it. The permutation is a uniform
    random selection of all possible permutations, returned as a tuple.

    Performance fix: the original materialized all len(input)! permutations
    via itertools.permutations before choosing one — O(n!) time and memory.
    random.sample draws a uniform random permutation directly in O(n), with
    the same distribution and the same tuple return type.
    """
    return tuple(random.sample(input, len(input)))
478826df74ccb68fe16d73a0bbda97e8bca1c96f
691,656
def _user_can_manage(user, partner):
    """ Whether this user can manage the given partner org """
    if user.can_administer(partner.org):
        return True
    if user.get_partner() != partner:
        return False
    return partner.org.editors.filter(pk=user.pk).exists()
33160eff14687921c127fb2ab199d8b1a406d90d
691,657
def average(sequence):
    """Calculate the mean across an array of e.g. read depths.

    Defaults to the mean calculated using numpy and falls back to the naive
    Python solution.

    Args:
        sequence (list): ``numpy.array`` or list of values

    Returns:
        float: calculated average value
    """
    try:
        # fastest path: numpy arrays expose .mean()
        return sequence.mean()
    except AttributeError:
        pass
    # plain Python sequence
    return sum(sequence) / len(sequence)
eb66b74166040b9556f2af3f4ef8c60d0fa91a97
691,658
def cver_t(verstr):
    """Converts a version string into a tuple.

    A leading 'b' (beta) prefixes the result with four zeros.
    """
    if verstr.startswith("b"):
        return (0, 0, 0, 0) + cver_t(verstr[1:])
    return tuple(int(part) for part in verstr.split("."))
73b4abd456551e678f44c0f940ad8d055993a345
691,660
def remove_markdown(body):
    """Remove the simple markdown used by Google Groups."""
    # Strip every asterisk (bold/emphasis marker)
    return ''.join(ch for ch in body if ch != '*')
0510a76c1ce1ac68a684f954400ea3e162e2e529
691,661
def IsHostMockup(si):
    """
    Checks if host is a mockup

    @type si : ServiceInstance ManagedObject
    @param si : service instance to query
    @rtype : boolean
    @return : True if host is running in mockup mode, False otherwise
    """
    content = si.RetrieveContent()
    instance_uuid = content.GetAbout().GetInstanceUuid()
    # Idiom fixes: 'is not None' instead of '!= None', and substring test via
    # 'in' instead of .find() != -1.
    return instance_uuid is not None and "Mockup" in instance_uuid
78d28ad1726431f67ce11f8f7a4a03b0c6c239df
691,662
def _():
    """ Default view """
    return dict(title='Test Bootstrap with Templates')
907a59b7ed3a06e0268e2b742751f229be3d066d
691,663
import os

def correct_spacing(filename):
    """ Don't allow any more than 1 consecutive space in the text."""
    stem, ext = os.path.splitext(filename)
    # split()/join collapses every whitespace run to a single space
    collapsed = ' '.join(stem.split())
    return collapsed.strip() + ext.strip()
97774633bd610bd190eba44a0710b3e6447a86a9
691,664
from typing import List
import collections

def maxResult(self, nums: List[int], k: int) -> int:
    """Maximum score reachable on the last index, jumping at most k forward.

    Mono-deque DP: best[i] is the maximum score ending at index i, equal to
    nums[i] plus the best score within the previous k indices. A deque of
    indices with non-increasing best[] values keeps that window maximum at
    its front in O(1).
    """
    n = len(nums)
    best = [0] * n
    best[0] = nums[0]
    window = collections.deque([0])
    for i in range(1, n):
        best[i] = best[window[0]] + nums[i]
        # Drop indices whose score can never be the window maximum again
        while window and best[i] > best[window[-1]]:
            window.pop()
        window.append(i)
        # Shrink the window to the last k indices
        while (i - window[0]) >= k:
            window.popleft()
    return best[-1]
6ccf14651fd7ea3169edc78109c5311ca4d0d6f7
691,665
from datetime import datetime

def _user_time_zone_date(dt, user_time_zone, utc_time_zone):
    """ Convert given datetime string into a tz-aware datetime taking into
    account the user time zone

    Habit list stores timestamps in UTC, so the string is parsed as naive
    UTC and then converted into the user's timezone. This may shift the
    calendar day, which is correct: the user entered the habit in their own
    timezone while the app stored (and exported) UTC, so the conversion
    recovers when the user actually entered it.
    """
    # Stored in UTC — discard any trailing '+offset' part before parsing
    naive = dt.split('+')[0].strip()
    parsed = datetime.strptime(naive, '%Y-%m-%d %H:%M:%S')
    # Attach UTC, then convert BEFORE any date extraction so year/month/day
    # reflect the user's zone.
    return parsed.replace(tzinfo=utc_time_zone).astimezone(user_time_zone)
3323a7875c932b17c60af45da8ac950523a400cf
691,667
def txt2list(fname):
    """
    Read a comma-separated text file into a list of row lists.

    (The original docstring claimed this *writes* a text file from a list;
    it actually reads one.)

    Parameters
    ----------
    fname: name of the input file

    Returns
    -------
    list of lists: one list per line, split on ',' (newlines are retained
    on each line's final field, matching the original behavior)
    """
    # Bug fix: the original opened the file without closing it; 'with'
    # guarantees the handle is released.
    with open(fname, 'r') as handle:
        return [line.split(',') for line in handle.readlines()]
9b6878f4dd823510ed9365eaa94b48d056533645
691,668
import ipaddress

def int2ip(int_num):
    """Convert an integer to an IP address object.

    Returns an ipaddress.IPv4Address/IPv6Address, or None when *int_num* is
    not a valid address value.
    """
    try:
        return ipaddress.ip_address(int_num)
    except (ValueError, TypeError):
        # Narrowed from 'except BaseException', which would also swallow
        # KeyboardInterrupt and SystemExit.
        return None
7cf84a3cac74996657dca16a942f8ac23c36200d
691,669
def is_hot(G):
    """
    Determine if G is hot.

    A Game is hot if it is not numberish; that is, its left and right stops
    do not coincide.

    Parameters
    ----------
    G : Game
        The Game of interest.

    Returns
    -------
    hot : bool
        Whether the Game is hot or not.
    """
    hot = not G.is_numberish
    return hot
1bc5b87d3f14be8496987cadb0bda533a44a1e8a
691,670
import os

def get_subdirectories(path):
    """Get a list of all the child directories of the given path.

    Args:
        path: the path who's child directories are to be returned.

    Returns:
        the paths of the child directories, relative to the given path.
    """
    # os.walk yields (dirpath, dirnames, filenames); the first tuple covers
    # *path* itself, so its dirnames are the immediate children.
    _, child_dirs, _ = next(os.walk(path))
    return child_dirs
8f06edf3200cbd99acba4a6639b4e5e87e55c7f9
691,671
def some_date():
    """ Return answer to ultimate question (the constant 42).

    NOTE(review): the name suggests a date, but the function returns 42 —
    confirm callers expect this placeholder value.
    """
    return 42
9db7ff77bb52c3de508d52e686499c6c920ed534
691,672
def get_decades(year):
    """
    Return 2 digit and 4 digit decades given 'year'.

    e.g. '1985' -> ('80', '1980'). Returns ('', '') when *year* is falsy or
    not a sliceable/concatenable string-like value.
    """
    if not year:
        return "", ""
    try:
        decade = year[2:3] + "0"
        decade2 = year[:3] + "0"
    except TypeError:
        # Narrowed from a bare 'except': only non-string inputs (e.g. int
        # years) are expected to fail the slicing/concatenation above.
        decade = ""
        decade2 = ""
    return decade, decade2
fa5d84466e0b61e4dc88d157dbc22f1a61f231ad
691,673
import random

def heads_or_tails() -> str:
    """
    054 Randomly choose either heads or tails (“h” or “t”). Ask the user to
    make their choice. If their choice is the same as the randomly selected
    value, display the message “You win”, otherwise display “Bad luck”. At the
    end, tell the user if the computer selected heads or tails.
    """
    coin_sides = ("h", "t")
    user_pick = input("'h' or 't'?: ")
    rand_select = random.choice(coin_sides)
    # NOTE(review): 'in' is substring membership, not equality — an empty
    # input ('') is "in" any choice and always wins; confirm '==' was intended.
    if user_pick in rand_select:
        print("You win!")
    else:
        # NOTE(review): the docstring spec says “Bad luck”; message differs.
        print("You lose :(")
    return f"Computer choice was: {rand_select}"
ecd2d0d47238837145101f8a366017155ebcde77
691,674
def apply_weighting(object_to_be_weighted, weights):
    """ Replicate the components of object_to_be_weighted using the
    corresponding weights to define the number of replicates.

    Args:
        object_to_be_weighted: could be a list or an array
        weights: a list of integers
    Returns:
        the transformed object
    """
    return [component
            for component, count in zip(object_to_be_weighted, weights)
            for _ in range(count)]
51161b4ed6e6540390487c40613838083f00fd3b
691,675
def parse_timeout(arg):
    """Parse timeout argument; falsy values (None, '', 0) yield None."""
    return int(arg) if arg else None
e39af5bc323bb0ea32f1438a2459274627fd2f12
691,676
def doubleStuff(a_list):
    """ Return a new list which contains doubles of the elements in a_list. """
    return [2 * value for value in a_list]
f34d76eb05fc8a07fb0bee41c701e18542d6554a
691,677
import statistics

def median_of_counter_obj(counter):
    """
    Calculate the weighted median of a counter obj

    :param counter: A counter obj
    """
    # Expand each value according to its count, then take the plain median
    expanded = []
    for value, count in counter.items():
        expanded.extend([value] * int(count))
    return statistics.median(expanded)
35327a66182e4482511b5dc511dd00bf45a9350a
691,678
import re

def to_camel_case(string):
    """Give the camelCase representation of a snake_case string."""
    def _capitalize(match):
        return match.group(1).upper()
    return re.sub(r"_(\w)", _capitalize, string)
8f037411fa44cbc50ce6db26d87728a1c1ae6658
691,679
def vowels_loop(value):
    """Count the number of vowels in a string, using a loop."""
    total = 0
    for ch in value:
        if ch.lower() in "aeiou":
            total += 1
    return total
6f55e30283e9873933265568b5434cd014f3c825
691,681
def cd_features_length(self):
    """Features length of the first member dataset.

    Args:
        self (ConcatenatedDataset): dataset whose first member is queried.
    """
    first_dataset = self._datasets[0]
    return first_dataset.features.features_length()
1e8f8aa959d9dd925bb2b047f9e7f51887199367
691,682
def construct_table(lines, separate_head=True):
    """Build a formatted text table from a 2 dimensional array of strings.

    Columns are left-aligned to the widest cell and separated by ' | '; when
    *separate_head* is True a dashed rule follows the first row. Returns the
    table string, or None when there are no columns.
    """
    # Widest cell per column
    widths = []
    for row in lines:
        for col, cell_len in enumerate(len(cell) for cell in row):
            while col >= len(widths):
                widths.append(0)
            widths[col] = max(widths[col], cell_len)
    # Row format: "{0:w0} | {1:w1} | ..."
    fmt = " | ".join(
        "{" + str(col) + ":" + str(width) + "}" for col, width in enumerate(widths)
    )
    if len(fmt) == 0:
        return
    out = ''
    for row_idx, row in enumerate(lines):
        out += fmt.format(*row) + '\n'
        if row_idx == 0 and separate_head:
            out += "-" * (sum(widths) + 3 * (len(widths) - 1)) + '\n'
    return out
8f7af75070912c287328c512547d3936127cbc37
691,683
def check_brackets(input):
    """Check a string for correctly balanced parentheses.

    Returns True when every '(' has a matching ')' and no ')' appears before
    its opening partner.
    """
    depth = 0  # number of currently open brackets
    for char in input:
        if char == "(":
            depth += 1
        elif char == ")":
            if depth == 0:
                # closing bracket with no open partner
                return False
            depth -= 1
    # balanced only if nothing is left open
    return depth == 0
925962e615f94f82d244288d92ee3edef056b0d3
691,684
from typing import OrderedDict
import os
import re

def edit_tcin(fin=None, fout=None, options=None, defaults=None, reqxyz=True, ignore_sections=True):
    """
    Parse, modify, and/or create a TeraChem input file.

    Parameters
    ----------
    fin : str, optional
        Name of the TeraChem input file to be read
    fout : str, optional
        Name of the TeraChem output file to be written, if desired
    options : dict, optional
        Dictionary of options to overrule TeraChem input file. Pass None as value to delete a key.
    defaults : dict, optional
        Dictionary of options to add to the end
    reqxyz : bool, optional
        Require .xyz file to be present in the current folder
    ignore_sections : bool, optional
        Do not parse any blocks delimited by dollar signs (not copied to output and not returned)

    Returns
    -------
    dictionary
        Keys mapped to values as strings. Certain keys will be changed to integers (e.g. charge, spinmult).
        Keys are standardized to lowercase.
    """
    if defaults is None:
        defaults = {}
    if options is None:
        options = {}
    if not ignore_sections:
        raise RuntimeError("Currently only ignore_constraints=True is supported")
    intkeys = ['charge', 'spinmult']
    Answer = OrderedDict()
    # Read from the input if provided
    if fin is not None:
        tcin_dirname = os.path.dirname(os.path.abspath(fin))
        section_mode = False
        for line in open(fin).readlines():
            line = line.split("#")[0].strip()
            if len(line) == 0:
                continue
            if line == '$end':
                section_mode = False
                continue
            elif line.startswith("$"):
                section_mode = True
            if section_mode:
                continue
            if line == 'end':
                break
            s = line.split(' ', 1)
            k = s[0].lower()
            try:
                v = s[1].strip()
            except IndexError:
                raise RuntimeError("%s contains an error on the following line:\n%s" % (fin, line))
            if k == 'coordinates' and reqxyz:
                if not os.path.exists(os.path.join(tcin_dirname, v.strip())):
                    raise RuntimeError("TeraChem coordinate file does not exist")
            if k in intkeys:
                v = int(v)
            if k in Answer:
                raise RuntimeError("Found duplicate key in TeraChem input file: %s" % k)
            Answer[k] = v
    # Replace existing keys with ones from options
    for k, v in options.items():
        Answer[k] = v
    # Append defaults to the end
    for k, v in defaults.items():
        if k not in Answer.keys():
            Answer[k] = v
    # Bug fix: deleting from a dict while iterating its live .items() view
    # raises "dictionary changed size during iteration" in Python 3 —
    # snapshot the items first.
    for k, v in list(Answer.items()):
        if v is None:
            del Answer[k]
    # Print to the output if provided
    havekeys = []
    if fout is not None:
        with open(fout, 'w') as f:
            # If input file is provided, try to preserve the formatting
            if fin is not None:
                for line in open(fin).readlines():
                    # Find if the line contains a key
                    haveKey = False
                    uncomm = line.split("#", 1)[0].strip()
                    # Don't keep anything past the 'end' keyword
                    if uncomm.lower() == 'end':
                        break
                    if len(uncomm) > 0:
                        haveKey = True
                        comm = line.split("#", 1)[1].replace('\n', '') if len(line.split("#", 1)) == 2 else ''
                        s = line.split(' ', 1)
                        w = re.findall('[ ]+', uncomm)[0]
                        k = s[0].lower()
                        if k in Answer:
                            line_out = k + w + str(Answer[k]) + comm
                            havekeys.append(k)
                        else:
                            line_out = line.replace('\n', '')
                    else:
                        line_out = line.replace('\n', '')
                    print(line_out, file=f)
            for k, v in Answer.items():
                if k not in havekeys:
                    print("%-25s %s" % (k, str(v)), file=f)
    return Answer
537d2ba56df43e23fb3b08c1b6845b73c1e1e95d
691,685
def loss_cpu(elements):
    """
    Directly-optimized loss over the whole matrix.

    Sums (i - j) * elements[i, j] / l^2 over the strictly lower triangle,
    counting only positive entries.

    @return at the scale: Loss / 2
    """
    size = elements.shape[0]
    inv_sq = 1.0 / (size * size)
    total = 0
    for row in range(size):
        for col in range(row):
            weight = elements[row, col]
            if weight > 0:
                # col < row, so (row - col) is non-negative — abs() not needed
                total += (row - col) * inv_sq * weight
    return total
81c3155380b50ee2bf9fd768344a217d8dee19f5
691,686
def fixture_ref_applied_doubled():
    """Doubled version of `ref` with all values multiplied by `2`."""
    return {"a": 2, "b": {"c": 4}, "d": {"e": {"f": 6}}}
228db12c1c0e1f224a7ea1ae1e572a746f250d17
691,687
def fixup(line):
    """Account for misformatted data from FFIEC with one-off fixups"""
    is_known_bad_row = (line[0] == "2016"
                        and line[1] == "0000021122"
                        and len(line) == 23)
    if not is_known_bad_row:
        return line
    # Drop the spurious 7th field from that specific 2016 record
    return line[:6] + line[7:]
eccb0cb2afb34ec613efdf1272a6d884ddea2581
691,688
def make_region(chromosome, begin, end):
    """ Create region string from coordinates.
    takes 2 (1 for human 1-9) digit chromosome,
    begin and end positions (1 indexed)"""
    return "chr{}:{}-{}".format(chromosome, begin, end)
5108f01bd1ab49770073036e2ad1106e7da354dd
691,689
import os

def find_file(filename, rootdir):
    """ Finds a file with filename located in some subdirectory of *rootdir*.

    Returns the full path of the first match, or None when absent.
    """
    for dirpath, _dirnames, filenames in os.walk(rootdir):
        if filename in filenames:
            return os.path.join(dirpath, filename)
    return None
e91e6b0cda3a0e948eccec0269ccf1ad1db21ed6
691,690
def _xml_tag_filter(s: str, strip_namespaces: bool) -> str:
    """
    Returns tag name and optionally strips namespaces.

    When *strip_namespaces* is True, drops a '{uri}' prefix if present;
    otherwise a 'prefix:' one.

    :param s: Tag name
    :param strip_namespaces: Strip namespace prefix
    :return: str
    """
    separator = "}" if strip_namespaces else ":"
    cut = s.find(separator)
    if cut != -1:
        s = s[cut + 1:]
    return s
647ee8e2b1aca898b625b00e0e366e1292ddbed6
691,691
def format_actions(action_list):
    """
    Returns the action list, initially a list with elements "[op][val]"
    like /2.0, -3.0, +1.0, formatted as a dictionary.

    The dictionary keys are the unique indices (to retrieve the action) and
    the values are lists ['op', val], such as ['+', '2.0'].
    """
    formatted = {}
    for idx, action in enumerate(action_list):
        operator_char = action[0]
        magnitude = float(action[1:])
        formatted[idx] = [operator_char, magnitude]
    return formatted
e2c0c15f19184d021fd09b9c93ae89e1aef4efae
691,693
def interpolate_temperature(temperature):
    """Transform temperature from degree celsius to 0.0 - 1.0 range

    0.0 is -10 degrees (very cold)
    1.0 is 35 degrees (very hot)

    Parameters: temperature - float in degrees celsius
    Returns: float normalized temperature
    """
    normalized = (10 + temperature) / 45
    # clamp to [0.0, 1.0]
    if normalized < 0.0:
        return 0.0
    if normalized > 1.0:
        return 1.0
    return normalized
b4807e24b6119d70bfbdf31bd19c5777f512773d
691,694
def lead(x, n=1, default=None):
    """Return an array with each value replaced by the next (or further
    forward) value in the array.

    Arguments:
        x: a pandas Series object
        n: number of next values forward to replace each value with
        default: what to replace the n final values of the array with

    Example:
        >>> lead(pd.Series([1,2,3]), n=1, default=99)
        0     2
        1     3
        2    99
        dtype: int64
    """
    shifted = x.shift(-n, fill_value=default)
    return shifted
c7c41355008c6691a01bcae31130ab0469543480
691,695
def get_and_check_size(iterator, n):
    """Check whether `iterator` yields exactly `n` items.

    Consumes up to n+1 elements (up to the end of the iterator).

    Returns (is_length_n, consumed_elements): True only when the iterator
    produced exactly n items and then stopped.
    """
    consumed = []
    # Pull the first n items; stopping early means the iterator is short.
    while len(consumed) < n:
        try:
            consumed.append(next(iterator))
        except StopIteration:
            return False, consumed
    # Exactly n so far; one more item means the iterator is too long.
    try:
        consumed.append(next(iterator))
    except StopIteration:
        return True, consumed
    return False, consumed
e8a7f61f5346cdeccf0e3b67debabf6b9d20eae8
691,696
def square(x):
    """Return the square of x.

    >>> square(2)
    4
    >>> square(-2)
    4
    """
    squared = x * x
    return squared
a0cf408826163a0e3a123ff0b71330e09dd59286
691,697
def valid_ui_tabs(tab=None, preferred=False):
    """
    List of valid UI tabs in browser.

    With preferred=True, returns the list of preferred tab names.
    With tab=None, returns all accepted tab aliases (a keys view).
    Otherwise resolves `tab` to its canonical name, raising ValueError
    for an unknown tab.
    """
    preferred_names = [
        "assets", "audio", "charts", "code", "confusion-matrices",
        "histograms", "images", "installed-packages", "metrics",
        "notes", "parameters", "system-metrics", "text",
    ]
    # Alias -> canonical tab name.
    mappings = {
        "asset": "assetStorage",
        "assetStorage": "assetStorage",
        "assets": "assetStorage",
        "audio": "audio",
        "chart": "chart",
        "charts": "chart",
        "code": "code",
        "confusion-matrices": "confusionMatrix",
        "confusion-matrix": "confusionMatrix",
        "confusionMatrix": "confusionMatrix",
        "graphics": "images",
        "histograms": "histograms",
        "images": "images",
        "installed-packages": "installedPackages",
        "installedPackages": "installedPackages",
        "metrics": "metrics",
        "notes": "notes",
        "parameters": "params",
        "params": "params",
        "system-metrics": "systemMetrics",
        "systemMetrics": "systemMetrics",
        "text": "text",
    }
    if preferred:
        return preferred_names
    if tab is None:
        return mappings.keys()
    if tab in mappings:
        return mappings[tab]
    raise ValueError("invalid tab name; tab should be in %r" % preferred_names)
366f75702b3b5777b2d218d71169bab02ac14ed9
691,698
from pathlib import Path


def get_msd_song_info():
    """Read the MSD unique-tracks index.

    Parses data/million_songs_dataset/extra/unique_tracks.txt ("<SEP>"
    separated fields: hash, track id, artist, title) and returns a dict
    mapping each song hash to an (artist, title) tuple.
    """
    index_path = Path('data/million_songs_dataset/extra/unique_tracks.txt')
    songs_info = {}
    with open(index_path, 'r', encoding='utf8') as handle:
        for line in handle:
            song_hash, _track, artist, title = line.rstrip('\n').split(
                sep='<SEP>')
            songs_info[song_hash] = (artist, title)
    return songs_info
f1520035c4ddd3d4433b30ca7251ff99dbea5213
691,699
def is_right_bracket(token):
    """Return True when `token` is the closing parenthesis ")"."""
    return ")" == token
0ce454bf48b1473e50f69ab3a6b44a8ceef5a081
691,700
def _make_CSV_line(username, language): """Return a WikiMetrics compatible CSV line.""" return "%s, %swiki" % (username, language)
6dc01ea0e225f19ca88e33ff77757e8cd047408e
691,701
import os
from collections import OrderedDict


def posix_uname(space):
    """
    posix_uname - Get system name

    Builds an ordered PHP-style associative array from the fields of
    ``os.uname()``.

    :param space: interpreter object space providing ``newstr`` and
        ``new_array_from_rdict``
    :return: array with keys sysname/nodename/release/version/machine
    """
    sysname, nodename, release, version, machine = os.uname()
    # BUG FIX: OrderedDict was imported from `typing`, which is only a
    # deprecated generic alias; the concrete class lives in `collections`.
    rdct_w = OrderedDict()
    rdct_w['sysname'] = space.newstr(sysname)
    rdct_w['nodename'] = space.newstr(nodename)
    rdct_w['release'] = space.newstr(release)
    rdct_w['version'] = space.newstr(version)
    rdct_w['machine'] = space.newstr(machine)
    return space.new_array_from_rdict(rdct_w)
24fa0a849ae580839894a7423a943cbdcf73fe9e
691,702
def compare2float_relative(x_base, y_check, relative_error):
    """Check that two float-convertible values agree within a relative error.

    The error is measured relative to `x_base`, which must therefore be
    nonzero.
    """
    base = float(x_base)
    check = float(y_check)
    return abs(base - check) / abs(base) <= relative_error
c1b15333a3e4689d94c1e2da921276e41546d16a
691,703
def _build_sample_embedded_list(): """Helper function to create embedded list for sample.""" return [ # File linkTo "files.status", "files.file_format.file_format", "files.accession", # File linkTo "cram_files.status", "cram_files.accession", "cram_files.file_format.file_format", # File linkTo "processed_files.accession", "processed_files.file_format.file_format", "processed_files.workflow_run_outputs.@id" ]
176831d26c7b7f9a731deacb12055fe098635dee
691,704
import torch


def pixelshuffle(x: torch.Tensor, dimensions: int, scale_factor: int) -> torch.Tensor:
    """
    Apply pixel shuffle to the tensor `x` with spatial dimensions `dimensions` and
    scaling factor `scale_factor`.

    See: Shi et al., 2016, "Real-Time Single Image and Video Super-Resolution
    Using a nEfficient Sub-Pixel Convolutional Neural Network."

    See: Aitken et al., 2017, "Checkerboard artifact free sub-pixel convolution".

    Args:
        x: Input tensor
        dimensions: number of spatial dimensions, typically 2 or 3 for 2D or 3D
        scale_factor: factor to rescale the spatial dimensions by, must be >=1

    Returns:
        Reshuffled version of `x`.

    Raises:
        ValueError: When input channels of `x` are not divisible by (scale_factor ** dimensions)
    """
    dim, factor = dimensions, scale_factor
    input_size = list(x.size())
    batch_size, channels = input_size[:2]
    # factor**dim input channels collapse into each output channel.
    scale_divisor = factor ** dim
    if channels % scale_divisor != 0:
        raise ValueError(
            f"Number of input channels ({channels}) must be evenly "
            f"divisible by scale_factor ** dimensions ({factor}**{dim}={scale_divisor})."
        )
    # Channels remaining after distributing factor**dim of them into space;
    # every spatial extent grows by `factor`.
    org_channels = channels // scale_divisor
    output_size = [batch_size, org_channels] + [d * factor for d in input_size[2:]]
    # After the reshape below, axes 2..2+dim-1 are the per-dimension factor
    # sub-axes and axes 2+dim..2+2*dim-1 are the original spatial axes.
    indices = tuple(range(2, 2 + 2 * dim))
    indices_factor, indices_dim = indices[:dim], indices[dim:]
    # Interleave (spatial, factor) pairs so each spatial axis is immediately
    # followed by its factor sub-axis; the final reshape then merges each
    # pair into one enlarged spatial axis.
    permute_indices = (0, 1) + sum(zip(indices_dim, indices_factor), ())
    x = x.reshape(batch_size, org_channels, *([factor] * dim + input_size[2:]))
    x = x.permute(permute_indices).reshape(output_size)
    return x
be1617e01eb43e958fcc5c38c4378ac9dede98ac
691,705
from pathlib import Path


def ensure_parent(*args) -> Path:
    """
    Ensure that the parent directory of a file exists.

    Joins `args` into a single Path, creates any missing parent directories
    (no error if they already exist), and returns the assembled path itself.
    """
    target = Path(*args)
    target.parent.mkdir(parents=True, exist_ok=True)
    return target
83ba38618ed7e3ddc790904824842a167d8e28d3
691,706
from subprocess import Popen, PIPE


def process(command):
    """
    Executes a command and returns the process

    :param command: the command to be executed (a shell command string)
    :return: returns the process off the executed command
    """
    # SECURITY NOTE(review): shell=True runs `command` through the shell, so
    # this must never receive untrusted input (shell-injection risk).
    # stdout/stderr are PIPEd; the caller is responsible for draining and
    # waiting on the returned Popen to avoid deadlocks/zombies.
    return Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
14fd0d38d39aa6ccad466f289fb2c55109ddd2e1
691,707
def running_under_ipython():
    """Returns true if we appear to be in an IPython session."""
    try:
        # `get_ipython` is injected into builtins only by IPython itself.
        get_ipython()  # type: ignore
    except NameError:
        return False
    return True
435c5fc060397fcd855735b4c673a043f8c5696d
691,708
def check_bgp_enable_args(**kwargs):
    """Validate the `as_number` module parameter for enabling BGP.

    :param kwargs: must contain `module`, an object exposing
        ``params['as_number']`` and ``fail_json``.
    :return: True when a valid as_number is present and configuration is
        needed, False when as_number is absent/empty.
    """
    module = kwargs["module"]
    need_cfg = False
    as_number = module.params['as_number']
    if as_number:
        # BUG FIX: the original also tested len(as_number) == 0 here, which
        # is unreachable inside `if as_number:` (a truthy sized value is
        # never empty); only the upper bound needs checking.
        if len(as_number) > 11:
            module.fail_json(
                msg='Error: The len of as_number %s is out of [1 - 11].' % as_number)
        else:
            need_cfg = True
    return need_cfg
290bc4f30fab1074c22ad868f4ad5bee2507b11a
691,709
import os
import time
import hashlib
import queue
import threading


def _hashfile(file_path, blocksize=2**20, fast_hash=False):
    """Hash file with memory efficiency as a priority.

    Parameters:
        file_path (string): path to file to hash
        blocksize (int): largest memory block to hold in memory at once in
            bytes
        fast_hash (boolean): if True, hashes the first and last `blocksize`
            of `file_path`, the file_size, file_name, and file_path which
            takes less time on large files for a full hash. Full hash is
            done if this parameter is true

    Returns:
        sha1 hash of `file_path` if fast_hash is False, otherwise sha1 hash
        of first and last memory blocks, file size, file modified, file
        name, and appends "_fast_hash" to result.
    """
    def _read_file(file_path, file_buffer_queue, blocksize, fast_hash=False):
        """Read one blocksize at a time and adds to the file buffer queue."""
        # NOTE(review): the file is opened in text mode ('r'); blocksize and
        # the seek offset are byte counts, so seeking into a multi-byte
        # encoded file may land mid-character — confirm inputs are
        # ASCII/single-byte text.
        with open(file_path, 'r') as file_to_hash:
            if fast_hash:
                # fast hash reads the first and last blocks and uses the
                # modified stamp and filesize
                buf = file_to_hash.read(blocksize)
                file_buffer_queue.put(buf)
                file_size = os.path.getsize(file_path)
                if file_size - blocksize > 0:
                    file_to_hash.seek(file_size - blocksize)
                    buf = file_to_hash.read(blocksize)
                    file_buffer_queue.put(buf)
                # Mix path, size and mtime into the digest so renamed/moved
                # or touched files hash differently.
                file_buffer_queue.put(file_path)
                file_buffer_queue.put(str(file_size))
                file_buffer_queue.put(time.ctime(os.path.getmtime(file_path)))
            else:
                buf = file_to_hash.read(blocksize)
                while len(buf) > 0:
                    file_buffer_queue.put(buf)
                    buf = file_to_hash.read(blocksize)
        # Sentinel telling the hashing thread to stop consuming.
        file_buffer_queue.put('STOP')

    def _hash_blocks(file_buffer_queue):
        """Process file_buffer_queue one buf at a time."""
        hasher = hashlib.sha1()
        # Consume until the 'STOP' sentinel produced by _read_file.
        for row_buffer in iter(file_buffer_queue.get, "STOP"):
            hasher.update(row_buffer.encode('utf-8'))
        # Publish the (truncated, 16-hex-char) digest back on the same queue.
        file_buffer_queue.put(hasher.hexdigest()[:16])

    # Bounded queue caps memory: the reader blocks once 100 blocks are
    # outstanding, so at most ~100 * blocksize is held at a time.
    file_buffer_queue = queue.Queue(100)
    read_file_process = threading.Thread(
        target=_read_file, args=(
            file_path, file_buffer_queue, blocksize, fast_hash))
    read_file_process.daemon = True
    read_file_process.start()
    hash_blocks_process = threading.Thread(
        target=_hash_blocks, args=(file_buffer_queue,))
    hash_blocks_process.daemon = True
    hash_blocks_process.start()
    read_file_process.join()
    hash_blocks_process.join()
    # After both threads have joined, the only remaining queue item is the
    # digest pushed by _hash_blocks.
    file_hash = str(file_buffer_queue.get())
    if fast_hash:
        # Tag fast hashes so they are never confused with full digests.
        file_hash += '_fast_hash'
    return file_hash
b9b1078b55c61bad4244e56186f1e89b8706d423
691,710
import math


def get_utm_zone(lon: float, lat: float) -> str:
    """
    Return the EPSG code of the corresponding wGS84 UTM zone given (lon, lat)
    coordinates.

    Args:
        lon (float): Longitude (in WGS84)
        lat (float): Latitude (in WGS84)

    Returns:
        str: The EPSG code of the corresponding UTM zone. Can be used
        directly to set crs in geopandas.
    """
    # UTM zones are 6 degrees wide, numbered 1..60 starting at lon -180.
    zone_number = (math.floor((lon + 180) / 6) % 60) + 1
    zone = str(zone_number).zfill(2)
    # 326xx = northern hemisphere, 327xx = southern.
    hemisphere_prefix = "326" if lat >= 0 else "327"
    return f"EPSG:{hemisphere_prefix}{zone}"
45de051564738674d834273b9498e88f5500c7fe
691,711
import json


def combine_api_shard_files(input_files, output_file=None):
    """
    Merges the list of .json-formatted API shard files *input_files* into a
    single list of dictionaries, optionally writing the result to
    *output_file*.

    Each shard must be a JSON list of detection dicts containing the keys
    'file', 'max_detection_conf' and 'detections'.
    """
    input_lists = []
    print('Loading input files')
    for fn in input_files:
        # BUG FIX: json.load(open(fn)) leaked file handles; use a context
        # manager so each shard file is closed promptly.
        with open(fn) as shard_file:
            input_lists.append(json.load(shard_file))

    detections = []
    for detection_list in input_lists:
        assert isinstance(detection_list, list)
        for d in detection_list:
            assert 'file' in d
            assert 'max_detection_conf' in d
            assert 'detections' in d
            # append(d) instead of extend([d]): same effect, no throwaway list
            detections.append(d)

    print('Writing output')
    if output_file is not None:
        with open(output_file, 'w') as f:
            json.dump(detections, f, indent=1)
    return detections
ffc8f694ca38b077835b8c80071ce402ceaf6002
691,712
def get_worker_id_from_tf_config(tf_config_json: dict) -> str:
    """Build a "<type>_<index>" worker id from a TF_CONFIG dict.

    Valid roles in a cluster is "chief", "worker", "ps" and "evaluator".
    """
    task_info = tf_config_json["task"]
    return f'{task_info["type"]}_{task_info["index"]}'
90091241fc302a7dc471baad6102032884cb74b8
691,713
from typing import Literal


def dummy() -> Literal[True]:
    """Dummy function that always returns the literal True."""
    result: Literal[True] = True
    return result
dfc7b2d0da10d65987273f1ffc0b18e44c0ca95f
691,714
def check_db_for_feature(feature, db_features=None):
    """
    Args:
        feature: A feature to be checked for.
        db_features: All of the db features (see get_all_db_features).

    Returns:
        "reject" when the stored version is newer than the incoming one,
        the feature (annotated with its ogc_fid) when a matching record
        exists, otherwise None.
    """
    fulcrum_id = feature.get('properties').get('fulcrum_id')
    if not db_features:
        return None
    existing = db_features.get(fulcrum_id)
    if not existing:
        return None
    # A stored version newer than the incoming one means the incoming
    # feature is stale and must not overwrite the database copy.
    if existing.get('version') > feature.get('properties').get('version'):
        return "reject"
    feature['ogc_fid'] = existing.get('ogc_fid')
    return feature
21bfabb26533638cbd616d4b9d1d543679878885
691,715
def _GetHeuristicSuspectedCLs(analysis): """Gets revisions of suspected cls found by heuristic approach.""" if analysis and analysis.suspected_cls: return [(cl['repo_name'], cl['revision']) for cl in analysis.suspected_cls] return []
bf678079f10e8fa14874b451cfb5a8e7f76e1878
691,716
def sliding_window_regions(start, end, window_size, step_size):
    """
    sliding_window_regions
    ======================
    Split the inclusive span [start, end] of a gene into overlapping,
    fixed-size regions.

    A window of `window_size` bp is slid along the gene in increments of
    `step_size` bp. The first region begins at the gene start; a final
    region is always appended so that the last region ends exactly at the
    gene end.

    Parameters:
    -----------
    1) start: (int) The genomic start position of a gene
    2) end: (int) The genomic end position of a gene
    3) window_size: (int) Region size in bp
    4) step_size: (int) Step in bp between consecutive windows

    Returns:
    ++++++++
    (list) A 2d list of [region_start, region_end] pairs.
    """
    start, end = int(start), int(end)
    window_size, step_size = int(window_size), int(step_size)

    # Regions are inclusive: a window covering window_size positions ends
    # at lo + window_size - 1 (avoids an off-by-one).
    span = window_size - 1
    regions = []
    lo = start
    while lo + span < end:
        regions.append([lo, lo + span])
        lo += step_size

    # Anchor the final region to the gene end, clamped to the gene start.
    final_lo = end - span if end - span > start else start
    regions.append([final_lo, end])
    return regions
775e7a8cfe79239465608133d4ad62d147502fab
691,717
from typing import Optional


def dict_diff(d1: dict, d2: dict, no_key: Optional[str] = '<KEYNOTFOUND>') -> dict:
    """Compares two dictionaries

    Keys present in both dicts with differing values map to
    (d1_value, d2_value); keys missing from one side use `no_key` as the
    placeholder on that side.

    Args:
        d1 (dict): First dictionary to compare
        d2 (dict): Second dictionary to compare
        no_key (Optional[str]): Placeholder for a missing key.
            Defaults to '<KEYNOTFOUND>'.

    Returns:
        dict: Comparison dictionary
    """
    shared = d1.keys() & d2.keys()
    result = {}
    for key in shared:
        if d1[key] != d2[key]:
            result[key] = (d1[key], d2[key])
    for key in d1.keys() - shared:
        result[key] = (d1[key], no_key)
    for key in d2.keys() - shared:
        result[key] = (no_key, d2[key])
    return result
2213b4f0a2b6da52220005aa1b9ed9f42cfa4075
691,718
def resolution_from_fsc(bins, fsc, value=0.5):
    """
    Compute the resolution from the FSC curve

    The resolution is taken at the first bin (scanning from low to high
    resolution) where the FSC drops below `value`; if the curve never
    crosses the threshold, the last bin is returned.

    Args:
        bins (array): The resolution bins (ordered from low resolution to high)
        fsc (array): The fsc in that resolution bin
        value (float): The FSC threshold (default 0.5)

    Returns:
        (bin index, bin value, fsc value)
    """
    assert len(bins) == len(fsc)
    # Default to the last bin when the curve never crosses the threshold.
    bin_index = len(bins) - 1
    bin_value = bins[bin_index]
    fsc_value = fsc[bin_index]
    for i, (b, f) in enumerate(zip(bins, fsc)):
        # BUG FIX: the original compared against a hard-coded 0.5 here,
        # silently ignoring the `value` parameter.
        if f < value:
            bin_index = i
            bin_value = b
            fsc_value = f
            break
    return bin_index, bin_value, fsc_value
33ebfcb3d03703d6a5ceb5fe8d0220a28eafdb46
691,719
from pathlib import Path
import json


def Load_User_File(github_user):
    """ Load the contents of a JSON file

    Keyword arguments:
    github_user -- name of the file in the form <username>.json

    Returns the parsed JSON when a cached file exists under
    scripts/files/users/, otherwise None.
    """
    user_path = "scripts/files/users/%s" % github_user
    # Are results cached ?
    if not Path(user_path).exists():
        return None
    print("Cached : ", user_path)
    with open(user_path, "r") as input_file:
        return json.load(input_file)
a67a9993b4e512c46a1183c6f2b9e5a81ea162ed
691,720
import binascii
import base64


def lock_4(js, token):
    """Local storage

    Extracts the hex-encoded, base64-encoded secret embedded in the script
    body `js` at characters 12443-12491: literal "\\x" escape prefixes are
    stripped, the trailing "=" padding byte (hex "3d") is restored, and the
    result is hex- then base64-decoded to UTF-8 text.
    """
    hex_payload = js[12443:12491].replace('\\x', '') + '3d'
    b64_payload = binascii.unhexlify(hex_payload)
    # %cCYDSHREW %
    secret = base64.b64decode(b64_payload).decode('utf-8')
    return ('4', secret)
295bd9e3275347f27e2b27c36fe014673fcac4c8
691,721
from typing import Tuple


def split_array(array_length: int, num_splits: int, split_id: int) -> Tuple[int, int]:
    """Split array into parts.

    Computes the half-open index range [start, end) owned by chunk
    `split_id` when an array of `array_length` items is divided into
    `num_splits` roughly equal contiguous chunks.

    Args:
        array_length: total number of elements
        num_splits: number of chunks
        split_id: which chunk to compute

    Returns:
        start and end indices of the chunk.

    Raises:
        ValueError: when split_id is outside [0, num_splits).
    """
    if not 0 <= split_id < num_splits:
        raise ValueError(f"gpu_id should be 0 <= {split_id} < {num_splits}")
    # Round the chunk size up unless the split is exact.
    if array_length % num_splits == 0:
        step = int(array_length / num_splits)
    else:
        step = int(array_length / num_splits) + 1
    begin = split_id * step
    return begin, min(begin + step, array_length)
988460b6bf8f16143da3f2f4f01acb336cd0490b
691,722
import os


def get_entity_and_relation(data_path):
    """
    Get the map of entity and relation

    Reads `entities.dict` and `relations.dict` (tab-separated
    "<id><TAB><name>" lines) from `data_path`.

    Args:
        data_path: data file path

    Returns:
        entity2id (dict): entity <--> entity ID
        relation2id (dict): relation <--> relation ID
    """
    def _load_mapping(filename):
        # Each line is "<numeric id>\t<name>"; map name -> int id.
        mapping = {}
        with open(os.path.join(data_path, filename)) as fin:
            for line in fin:
                idx, name = line.strip().split('\t')
                mapping[name] = int(idx)
        return mapping

    return _load_mapping('entities.dict'), _load_mapping('relations.dict')
47d1260ce039ff18130126a7d0aa0ba44161b367
691,723
def get_interface_bind_to_vrf(config_db, vrf_name):
    """Get interfaces belong to vrf

    Args:
        config_db: config DB connector exposing ``get_table(name)``.
        vrf_name (str): VRF name to match.

    Returns:
        list: names of interfaces bound to ``vrf_name``.
    """
    tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
    data = []
    for table_name in tables:
        interface_dict = config_db.get_table(table_name)
        if interface_dict:
            for interface in interface_dict:
                # BUG FIX: dict.has_key() was removed in Python 3 and raised
                # AttributeError at runtime; use .get() / `in` instead.
                if vrf_name == interface_dict[interface].get('vrf_name'):
                    data.append(interface)
    return data
23eb680916e06b7c332951a1f22628fd868ee060
691,724
import os
import csv
import json


def tsv_to_json(result_directory):
    """
    Convert every ``.tsv`` file in ``result_directory`` to a JSON file.

    Each TSV is parsed with its header row and written as
    ``{"data": [row_dict, ...]}`` to ``<name>.tsv.json`` inside the fixed
    working directory ``/opt/work/json_files_dir``.

    :param result_directory: output directory of checkv end_to_end command,
        containing the output tsv files to convert
    :return: path of the directory holding the generated JSON files
    """
    json_files_directory = os.path.join("/opt/work/", "json_files_dir")
    # BUG FIX: os.mkdir raised FileExistsError on repeat runs; makedirs with
    # exist_ok=True is idempotent (and also creates missing parents).
    os.makedirs(json_files_directory, exist_ok=True)
    for file in os.listdir(result_directory):
        if file.endswith(".tsv"):
            json_file_path = os.path.join(json_files_directory, file + ".json")
            tsv_file_path = os.path.join(result_directory, file)
            # Parse the tab-separated file using its header row as keys.
            with open(tsv_file_path) as tsv_file:
                reader = csv.DictReader(tsv_file, delimiter="\t")
                data = list(reader)
            with open(json_file_path, "w") as jsonfile:
                json.dump({"data": data}, jsonfile)
    return json_files_directory
9ea523e2f6a8e8dc1e5374e29ba27232f9f8d0cf
691,725
def process_tags(tags):
    """Process the provided tag information to correct format.

    Converts the list of information to a dictionary to be able to pass to
    the geometry creation step.

    Input:
    -----
    tags: list of lists, [[tagname1, tagval1], [tagname2, tagval2], ...]

    Return:
    -------
    tagdict: dict, key=TAGNAME, value=TAGVALUE (converted to float)
    """
    return {tagset[0]: float(tagset[1]) for tagset in tags}
95f78cdfb3632f705e6403fe04a78746c6cfa7d0
691,726
def friends(data):
    """ Event triggered on connection to an userstream

    For more information:
    https://dev.twitter.com/streaming/overview/messages-types#friends-lists-friends
    """  # noqa: E501
    return any(key in data for key in ("friends", "friends_str"))
a44ece86d830e920b26db6fe14c9c66d8168b453
691,727
def onsegment(p, q, r):
    """
    Returns true if point q lies on segment pr, given three collinear
    points p, q, r.
    """
    within_x = min(p[0], r[0]) <= q[0] <= max(p[0], r[0])
    within_y = min(p[1], r[1]) <= q[1] <= max(p[1], r[1])
    return within_x and within_y
1c48c9cfeddf00b155ddc63ae81c386ec1105d36
691,728
def _format_rotator_mode(value): """Format rotator mode, and rais appropriate error if it can't be formatted.""" modes = set(['pa', 'vertical', 'stationary']) if value.lower() not in modes: raise ValueError("Rotator mode must be in {!r}".format(modes)) return value.lower()
f56bcccaccfa4d6e68783c68b8cd32676b6b6584
691,729
def flag_nonsingletons(df, avar, sample):
    """Boolean flag for 'not from a singleton `avar` group.'

    Groups the `sample` selection of `df` by `avar`, counts group sizes,
    and flags each row of `df` whose group has more than one member (rows
    outside `sample` count as size 0 via the fillna).
    """
    group_sizes = df[sample].groupby(avar).size()
    with_totals = df[[avar]].join(group_sizes.to_frame('_T'), on=avar).fillna(0)
    return with_totals['_T'] > 1
187c4e6deaf65b6b2d539400420a6022ad984692
691,730