content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import random


def llenar_lista_principal(n=20):
    """Build a list of ``n`` random integers in the range [0, 90].

    :param n: number of elements (default 20, the original fixed size —
        parameterized for reuse, backward compatible)
    :return: list of random ints
    """
    return [random.randint(0, 90) for _ in range(n)]
4d8732f244449c95307e61c29343d6ade7db7c8c
692,391
import sys


def analysisFinder(analysis):
    """Normalize an analysis-type string to one of the canonical names:
    'bolt', 'torque', 'thread' or 'joint'.

    Filler words ('analysis', 'initial', 'stripping', 'size') and
    separators are removed before matching. An unrecognized type prints
    an error and aborts via ``sys.exit``.
    """
    cleaned = str(analysis).lower()
    # strip noise words first, then separators (order matters)
    for noise in ('analysis', 'initial', 'stripping', 'size', ' ', '_', '-'):
        cleaned = cleaned.replace(noise, '')
    cleaned = cleaned.strip()
    #
    for candidate in ('bolt', 'torque', 'thread', 'joint'):
        if cleaned == candidate:
            return candidate
    print('error analysis type {} no available'.format(analysis))
    sys.exit('error analysis type {} no available'.format(analysis))
374147573179618c421150047f23b68b6356edfe
692,393
def wsgiapptest(environ, start_response):
    """Minimal WSGI test stub; always returns the string '123'.

    NOTE(review): a conforming WSGI app would call ``start_response`` and
    return an iterable of bytes — this stub does neither (kept as-is).
    """
    return '123'
105743238af0a6d20bfba36f89438dc351d3067f
692,394
def parse_fp(file):
    """Parse a .fp file into a 3-D nested list: year x stock x fishery.

    Blocks are separated by blank lines; each block line is a row of
    whitespace-separated floats. (The original base.fp yields 39x30x25.)
    """
    text = file.read().strip().replace("\r", "")
    result = []
    for block in text.split("\n\n"):
        rows = [[float(tok) for tok in row.split()] for row in block.splitlines()]
        result.append(rows)
    return result
d77ff4874e8887ef67081b701f807bb31af3d327
692,395
def has_transition_perm(bound_method, user):
    """
    Return True if the model's current state allows calling
    ``bound_method`` and ``user`` has the rights to do so.

    Raises TypeError when the bound method is not an FSM transition.
    """
    if not hasattr(bound_method, "_django_fsm"):
        im_func = getattr(bound_method, "im_func", getattr(bound_method, "__func__"))
        raise TypeError("%s method is not transition" % im_func.__name__)
    meta = bound_method._django_fsm
    instance = getattr(bound_method, "im_self", getattr(bound_method, "__self__"))
    state = meta.field.get_state(instance)
    # short-circuit exactly like the original and-chain
    if not meta.has_transition(state):
        return False
    if not meta.conditions_met(instance, state):
        return False
    return meta.has_transition_perm(instance, state, user)
deb8e5de0318e43890acdbd2d96f00ae5ee73f2c
692,396
from functools import reduce


def excel_col_letter_to_index(x):
    """Convert an 'A', 'C', 'AH'-style Excel column reference to its
    1-based integer equivalent.

    @param x (str) letter-style Excel column reference
    @return (int) integer column index
    """
    index = 0
    for ch in x:
        index = index * 26 + (ord(ch) - ord('A') + 1)
    return index
800f650118c1216727be18722aa4b435ec34b56f
692,397
def _get_option_count():
    """
    Prompt the user for how many options they would like to decide
    between.

    :return: the option count as int, or None if input could not be parsed.
    """
    try:
        print("Hello, I am the oracle of fate. The deliverer of destiny. I have a few questions for you... \n ")
        option = int(input("How many options do you have? (Input answer as nonzero integer) \n"))
        print("\nThat's {} options? Got it! Now let's see...".format(option))
        return option
    except ValueError:
        # narrowed from a bare except, which also swallowed
        # KeyboardInterrupt / SystemExit
        print(" Unanticipated Problem get_option_count(). Try again? ")
3eea3fb9ac05324a2071ed4b3d7fee72e0ec23c2
692,398
def xml_filename(subject):
    """Return the atlas-definition XML file name for ``subject``."""
    return f"AtlasDefinition_{subject}.xml"
7eb3105c897a6ca4236b1ca0d02bac0820c21d04
692,399
import os


def get_files_in_dir(root, verbose=False, returnsorted=True):
    """Recursive walk down from ``root``, returning the list of files
    (end nodes).

    Parameters
    ----------
    root : str
        Path to the root directory holding the files to be scanned.
    verbose : bool
        If True reports on progress.
    returnsorted : bool
        If True sorts the returned list of files found in root.

    Returns
    -------
    list of str
        Full paths of all files below ``root``.
    """
    files = []
    for dirname, subdirlist, filelist in os.walk(root):
        if verbose:  # fixed `== True` anti-idiom
            print("Entering dir ", dirname)
        for fname in filelist:
            if verbose:
                print("Adding file ", fname)
            # os.path.join instead of manual separator concatenation
            files.append(os.path.join(dirname, fname))
    if returnsorted:
        files = sorted(files)
    return files
0172dfdff1b6540b5ebc726574efcf28b2ff6e55
692,400
def __checklimit__(limit):
    """
    Build a string to inject into SPARQL queries to limit the number of
    returned results.

    Returns '' (no limit clause) when ``limit`` is not a positive
    integer. The original returned the literal string
    'limit not an integer' on ValueError, which would have been injected
    verbatim into the query; it also let TypeError (e.g. ``None``)
    propagate.
    """
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        return ''
    return 'limit ' + str(limit) if limit > 0 else ''
ca993263b27ebaca3322e74105462a1cb796987b
692,403
def _max(*args):
    """Thin wrapper around the builtin ``max``: accepts either a single
    iterable or several positional values, exactly like ``max`` itself."""
    return max(*args)
92e25771df097b8d01692011b5a87560c83055c8
692,404
def get_average(pixels):
    """
    Given a list of pixels, find the average red, green, and blue values.

    Input:
        pixels (List[Pixel]): pixels to be averaged (must be non-empty)
    Returns:
        List[int]: integer-floored averages in the order [red, green, blue]
    """
    n = len(pixels)
    # generator expressions instead of throwaway list literals (C4xx)
    red_avg = sum(p.red for p in pixels) // n
    green_avg = sum(p.green for p in pixels) // n
    blue_avg = sum(p.blue for p in pixels) // n
    return [red_avg, green_avg, blue_avg]
2cae3992f3a7a3d0d9ef3feb84221a4964221bbf
692,405
def print_help():
    """Print the detailed help text and return 1 (handled status)."""
    for line in ("", "DETAILED HELP GOES HERE", ""):
        print(line)
    return 1
6e5772701ce8e5ac220dcfdd3265666f19d37a8b
692,406
import os
import shutil


def del_dir(path):
    """Delete the directory tree at ``path``.

    :param path: directory path
    :return: True if it existed and was removed, False otherwise
    """
    if not os.path.exists(path):
        return False
    shutil.rmtree(path)
    return True
74ca41f9288250764a1f854dad18529ea6831a5e
692,407
def time_into_string(time):
    """Convert a time in seconds into a 'Dd-H:MM' title string.

    Bug fix: the original built the day/hour/minute string and then
    immediately overwrote it with ``'%05.3f' % time``, making the whole
    computation dead code; the stray overwrite is removed.
    """
    t = time / 86400.
    day = int(t)
    t1 = (t - day) * 24
    hour = int(t1)
    t2 = (t1 - hour) * 60
    minute = int(t2)
    return '%id-%i:%02i' % (day, hour, minute)
15b1a8ce7cc3a5978b4eb22fb58cc1c2cb18d807
692,408
def distribute_atoms(atoms, n):
    """Split the 1-D list ``atoms`` into ``n`` nearly-even contiguous
    chunks; the first ``len(atoms) % n`` chunks get one extra element."""
    size, extra = divmod(len(atoms), n)
    chunks = []
    start = 0
    for i in range(n):
        stop = start + size + (1 if i < extra else 0)
        chunks.append(atoms[start:stop])
        start = stop
    return chunks
12a21245f2e1cb412bdb35aadf3c7d130d11107f
692,409
import math


def calc_SE(estimate_list, actual_list):
    """Root-mean-square error between two lists.

    Precondition (as documented originally): both lists have the same
    length.
    """
    n = len(actual_list)
    total = 0
    for est, act in zip(estimate_list, actual_list):
        total += math.pow(act - est, 2)
    return math.sqrt(total / n)
94049032a1797224190c585e5d2a90a26c1f5579
692,410
def get_perm_indices_path(data_dir, data_fn):
    """Return the path of the pickled perm_indices file for ``data_fn``."""
    return f'{data_dir}/{data_fn}_perm_indices.pkl'
efbb539cdb1c5bc9ad5e00560a135cd0c7608741
692,411
import os


def runsh(text, print_text=True):
    """Run ``text`` as a shell command via ``os.system``, discarding output.

    :param text: shell command string
    :param print_text: echo the command before running it
    :return: the raw ``os.system`` exit status
    """
    if print_text:
        print("RUNNING:", text)
    return os.system(text)
f998fd5e34fb4f6b6bcc5e443cd9d25002949a8a
692,412
def chiffre1(nb):
    """Return the tens digit of tooth number ``nb`` (valid range 11-48),
    or -1 when ``nb`` is out of range."""
    if 11 <= nb <= 48:
        return nb // 10
    return -1
72883bcd26030e07219c01e6b2639193284a7152
692,413
def pmf2disp(pmf1, pmf2):
    """Kolmogorov-Smirnov statistic between two empirical PMF vectors
    with the same support: max |CDF1 - CDF2|."""
    # cumsum turns each PMF into its CDF; compare pointwise
    gap = abs(pmf1.cumsum() - pmf2.cumsum())
    return max(gap)
10a30d6c4632e35ef07efd7d84481fba92b2c16e
692,414
def bytes2MB(bytes):
    """
    Convert bytes to megabytes (decimal MB = 10**6 bytes — not MiB; the
    original docstring said MiB but the code divides by 1000*1000).

    :param bytes: number of bytes
    :type bytes: int
    :return: MB
    :rtype: float
    """
    return bytes / (1000 * 1000)
33a3ff86cb8422914ed08437521c1fe0b7d49610
692,415
import torch


def collate_fn_customer(batch):
    """Collate a batch of (image, bbox) samples into a single tuple.

    Stacks the images into one tensor (adding a batch dimension) and
    keeps the bboxes as a plain Python list.
    """
    # sample[0] is the image, sample[1] the bbox annotation
    images = torch.stack([sample[0] for sample in batch])
    bboxes = [sample[1] for sample in batch]
    return (images, bboxes)
482d8d5a5405d2413d0caa8faf2877138d7fd2a0
692,416
def get_course_info_usage_key(course, section_key):
    """Return the usage key for the course-info module named
    ``section_key`` of the given course."""
    usage_key = course.id.make_usage_key('course_info', section_key)
    return usage_key
7a9b661241dcfa428e7f4f19b3db0bc162fea041
692,418
import io


def optimize_lossless_webp(image):
    """Encode ``image`` to lossless WEBP using Pillow.

    :param PIL.Image image: The image to encode.
    :returns: The encoded image's bytes.
    """
    buffer = io.BytesIO()
    image.save(
        buffer,
        format="WEBP",
        lossless=True,
        quality=100,
        method=6,
    )
    # getvalue() returns the full buffer contents without the seek/read dance
    return buffer.getvalue()
11f5d9b9f2240e463494b9d395079a8884a04cc9
692,419
def plot_landmarks(axis, landmarks, **kwargs):
    """Plot one marker per (x, y) landmark on ``axis``; returns ``axis``.

    ``color`` (default 'k'), ``label`` (default '') and ``marker``
    (default '^') may be overridden via keyword arguments; remaining
    kwargs are forwarded to ``axis.plot``.
    """
    face = kwargs.pop('color', 'k')
    text = kwargs.pop('label', '')
    glyph = kwargs.pop('marker', '^')
    for xpos, ypos in landmarks:
        axis.plot([xpos], [ypos], marker=glyph, color=face, label=text, **kwargs)
    return axis
9128afc62918fe0523312156e3618576c1e236d8
692,420
def degree_to_n_coeffs(degree):
    """Number of coefficients of a 2-D polynomial of the given degree:
    (degree+1)(degree+2)/2.

    Uses exact integer arithmetic; the original float expression
    ``int((d+1)*(d+2)/2. + 0.5)`` relies on rounding and can lose
    precision for very large degrees.
    """
    return (degree + 1) * (degree + 2) // 2
3558e2d68d16ffce0df31a3cc494c160f965a7ed
692,422
from typing import Tuple
import colorsys


def get_rgb_from_value(v: float) -> Tuple[int, int, int]:
    """Return an (r, g, b) 3-tuple of 0-255 ints for the input float.

    ``v`` should be between 0 and 1 and is interpreted as the hue in an
    HLS -> RGB conversion (lightness 0.5, saturation 1).
    """
    # colorsys yields channel values in [0, 1]; scale to [0, 255]
    channels = colorsys.hls_to_rgb(v, 0.5, 1)
    red, green, blue = (round(c * 255) for c in channels)
    return red, green, blue
3ad5bdf262717f3a77fdeeaa1d4cb98fd5f5f0bb
692,423
def response_handler(response):
    """
    Parse the response from a REST API request and return the result in a
    common format; falls back to returning the raw response when it does
    not expose the expected ``.response().result`` interface.

    Args:
        response: response object from a REST request
    """
    try:
        return response.response().result
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return response
5dd30b4aebfacd5a12d067e9f1e3c62b53a47b6c
692,424
def __get_type_NHC(code):
    """
    Get the intensity category according to the status of system defined
    by the "National Hurricane Center".

    Reference: https://www.nhc.noaa.gov/data/hurdat/hurdat2-format-nov2019.pdf

    0 - Subtropical cyclone of depression intensity;
        Subtropical cyclone of storm intensity;
        A low that is neither a TC, a subtropical cyclone, nor an EC;
        Tropical wave; Disturbance (OTHERS, unknown intensity)
    1 - Tropical depression (TD, <34 knots)
    2 - Tropical storm (TS, 34-63 knots)
    3 - Hurricane (HU, >64 knots)
    4 - Extratropical cyclone (EC, any intensity)

    Parameters
    ----------
    code : str
        A string code that represents the type.

    Returns
    -------
    str
        One of ['TD', 'TS', 'HU', 'EC', 'OTHERS'].

    NOTE(review): this is currently an identity mapping — ``code`` is
    returned unchanged, so callers rely on the HURDAT2 codes already
    matching the documented categories. Confirm whether a real
    translation table was intended here.
    """
    return code
a59d97fa473dc5faea7d8d7c61a5d5766500e6dd
692,425
def data_type(value):
    """Custom template filter: return the type name of ``value`` as a
    string (e.g. 'int', 'list')."""
    # __name__ is already a str, so no extra str() wrapping is needed
    return type(value).__name__
8457cc6366e5e159892972c306fec23b61d1d26b
692,426
import os


def kojak_files():
    """Locations of the Kojak test fixture files.

    :return: tuple of (kojak, inter, intra) paths under tests/data
    """
    data_dir = os.path.join("tests", "data")
    kojak = os.path.join(data_dir, "test.kojak.txt")
    intra = os.path.join(data_dir, "test.perc.intra.txt")
    inter = os.path.join(data_dir, "test.perc.inter.txt")
    return (kojak, inter, intra)
c32d7b8f4860130ff5f38e0c290ce1a7a592ff20
692,427
import hashlib


def get_file_hash(filename):
    """
    Return a str with the hex representation of the SHA-256 hash of the
    file ``filename``.

    Raises the usual OSError if the file does not exist or is unreadable.
    """
    READ_SIZE = 8192 * 4
    hash_func = hashlib.sha256()
    # context manager fixes the original's leaked file handle
    with open(filename, 'rb') as srcfile:
        while buf := srcfile.read(READ_SIZE):
            hash_func.update(buf)
    return hash_func.hexdigest()
fc3267259cb5515961c2730e09b30b9423bd2b00
692,428
def get_bezier4_point(point_a, control_a, control_b, point_b, t):
    """Evaluate the cubic Bezier curve defined by the four control points
    at parameter ``t`` (between 0 and 1)."""
    u = 1.0 - t
    return u**3*point_a + 3*u**2*t*control_a + 3*u*t**2*control_b + t**3*point_b
1c879a9cbb89986a03871682dbbb4fe3cf753c63
692,429
def get_model_data(wb_data, model):
    """
    Convert worksheet rows into dicts keyed by the model's field names.

    :param wb_data: xlrd-style sheet (exposes ``nrows`` and ``row_values``)
    :param model: Django model class; field order must match the columns
    :return: list of row dicts (the 'id' field is skipped; row 0 is the
        header and is skipped too)
    """
    keys = [field.attname for field in model._meta.fields]
    keys.remove('id')
    result = []
    for row in range(1, wb_data.nrows):
        # dict(zip(...)) instead of a loop that shadowed the builtin `dict`
        result.append(dict(zip(keys, wb_data.row_values(row))))
    return result
8aa14409e55e7169c07cc4661782daac3a9d3c8a
692,430
def get_version_name(config):
    """Build the model-version name string and store it in
    ``config['version']``; returns the (mutated) config dict.

    When ``config['read_best']`` is set, the version is cleared instead so
    the best checkpoint is looked up. Otherwise the name is assembled from
    epochs, LSTM/GNN settings, data flags and training hyper-parameters —
    but only when no explicit version was supplied.
    """
    if config['read_best']:
        # reading the best checkpoint: drop any explicit version
        config['version'] = None
        config['verbose'] = True
    else:
        # data part: optional flat-feature tag
        if config['add_flat']:
            fv = 'flat' + str(config['flat_nhid']) + '_'
        else:
            fv = ''
        # model part: LSTM name (optionally bidirectional) + hidden size
        if 'lstm' in config['model']:
            lstm_nm = 'LSTM'
            if config['bilstm']:
                lstm_nm = 'bi' + lstm_nm
            lstm_nm += str(config['lstm_nhid'])
        else:
            lstm_nm = ''
        # model part: GNN name + hidden size + output dim
        if 'gnn' in config['model']:
            gnn_nm = config['gnn_name'] + str(config[config['gnn_name'] + '_nhid']) + 'out' + str(config['gnn_outdim'])
        else:
            gnn_nm = ''
        if config['version'] is None:
            # first about the model
            version = 'e{}{}{}'.format(config['epochs'], lstm_nm, gnn_nm)
            # then about the data
            version += fv
            # finally about training
            version += 'lr' + str(config['lr']) + ('cw_' if config['class_weights'] else '') + ('cos' if config['sch'] == 'cosine' else '')
            version += 'l2' + str(config['l2'])
            # non-default neighbour-sampling sizes are recorded too
            if config['ns_sizes'] != '25_10':
                version += 'ns' + config['ns_sizes'].replace('_', ':')
            if config['tag'] is not None:
                version += 'tag_' + config['tag']
            config['version'] = version
    return config
355c875362bcf365142ae5d8dc2d3d02571cf7c3
692,431
import os


def get_data_abs_path():
    """Return the absolute path of the "data" folder that sits next to
    this module."""
    module_dir = os.path.split(os.path.abspath(__file__))[0]
    return os.path.join(module_dir, "data")
aae7c4a9f930215683f23a45dca5f8e8bd0778cf
692,432
def _parse_roles(roles):
    """
    Parse 'Name:SkuName:Capacity:Tier' role strings into sku dicts.

    Example input entry: ContosoFrontend:Standard_D1_v2:1:Standard

    :param roles: iterable of colon-separated role strings (falsy -> None)
    :return: list of role dicts, or None for empty input
    """
    if not roles:
        return None
    parsed = []
    for entry in roles:
        parts = entry.split(':')
        parsed.append({
            'sku': {
                'name': parts[1],
                'capacity': parts[2],
                'tier': parts[3]
            },
            'name': parts[0]
        })
    return parsed
011e29113be05d401b00dfc03f39605ddc9ad574
692,433
def averageMonth(inputDataFrame, year, month):
    """Average trip length (total duration / total count) for the given
    year and month; returns 0 when no trips were counted."""
    assert 'duration' in inputDataFrame.columns
    assert 'count' in inputDataFrame.columns
    mask = (inputDataFrame.index.month == month) & (inputDataFrame.index.year == year)
    monthly = inputDataFrame[mask]
    total_count = monthly['count'].sum()
    if total_count == 0:
        return 0
    return monthly['duration'].sum() / total_count
c918a1de13416e4ce7a1fe217e90aac4f7a65312
692,434
def r(b, p, alpha):
    """
    Calculate the ``r`` coefficient of the Massman frequency correction.
    """
    # hoist the two powers; same multiplication order as the original
    ba = b ** alpha
    pa = p ** alpha
    return (ba / (ba + 1)) * (ba / (ba + pa)) * (1 / (pa + 1))
ddffac7b5af40147d6653501a72fd7c5c501a2fa
692,435
def generate_roman_number(n: int) -> str:
    """Convert ``n`` (at most 4000) to a Roman numeral using standard
    subtractive notation (IV, IX, XL, XC, CD, CM).

    :raises ValueError: if ``n`` exceeds 4000.
    """
    if n > 4000:
        raise ValueError(f'Input too big: {n}')
    parts = []
    remaining = n
    for value, symbol in ((1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
                          (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
                          (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')):
        if remaining >= value:
            count, remaining = divmod(remaining, value)
            parts.append(symbol * count)
    return ''.join(parts)
d77ea3e9d1f025f4bfad38668b649a7d53471c39
692,437
def get_value(value, value_type=None):
    """
    Return ``value`` unless it is the "default" (unset) value for its
    type, in which case return None.

    Mirrors protobuf default-value semantics: the empty string and the
    integer 0 (including False, since bool is an int subclass) are
    treated as unset. Any type other than str/int also yields None.

    NOTE(review): ``value_type`` is currently unused, kept for interface
    compatibility.
    """
    if isinstance(value, (str, int)):
        # '' and 0/False are falsy -> None; everything else passes through
        return value or None
    return None
d45359e21114f84a15d6589605bf7e317f1c425d
692,438
def delete_zero_amount_exchanges(data, drop_types=None):
    """Drop all zero-amount exchanges from a list of datasets.

    ``drop_types`` is an optional list of exchange type strings to
    restrict the dropping to; by default all types are dropped.
    Returns the modified data.
    """
    if drop_types:
        def keep(exc):
            # exchanges of other types survive regardless of amount
            return exc["type"] not in drop_types or exc["amount"]
    else:
        def keep(exc):
            return exc["amount"]
    for ds in data:
        ds["exchanges"] = [exc for exc in ds["exchanges"] if keep(exc)]
    return data
8e9c214826398959b74e7bffc1e41ea635172d0a
692,439
def line_from_two_points(x1, y1, x2, y2):
    """
    Equation of the line passing through two points.

    :Parameters:
        x1, y1: float — first point
        x2, y2: float — second point
    :Returns:
        (slope, intercept), or (None, x1) when the slope is infinite
        (vertical line).
    """
    run = x2 - x1
    # guard clause for the vertical-line case (zero horizontal run)
    if not abs(run) > 0.0:
        return (None, x1)
    slope = (y2 - y1) / run
    return (slope, y1 - slope * x1)
8496f9b9116676d9b91bdf9277f2c1e0db34c491
692,440
def extract_objective(objective_field):
    """Extract the objective field id from the model structure: lists
    yield their first element, anything else is passed through."""
    return objective_field[0] if isinstance(objective_field, list) else objective_field
cc7ebd22ed4bd5619506461ce97f0d9329911881
692,441
from typing import List
from typing import Dict


def make_property_dict(key: str, data: List) -> Dict:
    """Organize real-estate data for the debtor in form 106 A/B.

    :param key: the ID for the property in the form
    :param data: extracted fields in fixed positional order; an optional
        10th element (index 9) is the property id
    :return: organized property information
    """
    field_names = ("address", "city", "state", "zip", "property_value",
                   "your_property_value", "other", "ownership_interest", "county")
    prop = {"key": key}
    for idx, name in enumerate(field_names):
        prop[name] = data[idx]
    prop["property_id"] = data[9] if len(data) == 10 else ""
    return prop
c41c5e11e07719bf9b60b37ddc379731c77d7492
692,442
from typing import Counter


def read_kmers(self, kmerFile):
    """Read a file of "<kmer> <count>" lines into a Counter.

    The kmer length is inferred from the first entry; every kmer must
    share that length. Unseen kmers default to 0 via the Counter.

    :param kmerFile: file-like object, one space-separated "kmer count"
        pair per line
    :raises RuntimeError: on a malformed line (wrong field count,
        inconsistent kmer length, or non-integer count)
    """
    kmer_counts = Counter()
    k = None
    for line in kmerFile:
        fields = line.rstrip().split()
        if not fields:
            # skip blank lines (the original crashed on fields[0])
            continue
        if len(fields) != 2:
            raise RuntimeError(
                "Invalid kmerFile! Expected 'kmer count' on line {}".format(line))
        kmer, count_str = fields
        if k is None:
            k = len(kmer)
        elif len(kmer) != k:
            # the original raised here with a copy-pasted message about the
            # count field; report the actual problem (length mismatch)
            raise RuntimeError(
                "Invalid kmerFile! kmer length differs from previous kmers on line {}".format(line))
        if not count_str.isdigit():
            raise RuntimeError(
                "Invalid kmerFile! Count must be an int on line {}".format(line))
        kmer_counts[kmer] = int(count_str)
    return kmer_counts
c8c2ff50bfe9df509b8908358c1bdb32354de819
692,443
from typing import Iterable
import itertools


def take(bits: Iterable[str], number: int) -> str:
    """Join the first ``number`` characters of ``bits`` into a string.

    >>> take('11000001', 2)
    '11'
    >>> take('110', 4)
    '110'
    """
    prefix = itertools.islice(bits, number)
    return "".join(prefix)
83c96f8e69c29e09f30f4be186159e9504da41b5
692,445
def re_run_game() -> bool:
    """Ask whether to play again; True iff the answer starts with 'y'/'Y'."""
    reply = input("Would you like to play another game 'Y' or 'N'? ")
    return reply.lower().startswith("y")
f5f0a54e1ac08242dd08afc4612f5ad3f4630a7e
692,446
from typing import Tuple


def tokenize_version(version_string: str) -> Tuple[int, int, int]:
    """Tokenize a version string to a (major, minor, patch) tuple,
    truncating qualifiers like ``-dev``.

    :param version_string: A version string
    :return: A tuple representing the version string

    >>> tokenize_version('0.1.2-dev')
    (0, 1, 2)
    """
    # everything before the first dash; partition never raises
    release = version_string.partition('-')[0]
    # only the first three dotted components count (guards against -dev.0)
    major, minor, patch = release.split('.')[:3]
    return int(major), int(minor), int(patch)
2dbca80d7fbd0e504adbdd6901f42a87452116ac
692,447
def _phony_callable(*args, **kwargs):
    """No-op stand-in callable: echoes back its (args, kwargs) tuple,
    useful as a placeholder or in tests."""
    return (args, kwargs)
ac585094b274eca6964bbb69fc557f648d43aa9d
692,448
def _gr_div_(graph, scale):
    """Divide a graph by a constant via multiplication:

    graph = ...
    newg  = graph / 10
    """
    inv = 1.0 / scale
    return graph * inv
f930e50416bf7ad1397f035e4658b069a8fb6505
692,449
def process_data(df):
    """Melt the production dataframe so every measurement column becomes a
    row in a single 'variable' column (one row per carrier/year/variable)."""
    id_cols = ['prfInformationCarrier', 'prfYear', 'prfNpdidInformationCarrier']
    value_cols = ['prfPrdOilNetMillSm3', 'prfPrdGasNetBillSm3', 'prfPrdNGLNetMillSm3',
                  'prfPrdCondensateNetMillSm3', 'prfPrdOeNetMillSm3',
                  'prfPrdProducedWaterInFieldMillSm3']
    return df.melt(id_vars=id_cols, value_vars=value_cols)
672604a726b97b8944f658824e8b6b19f21812a3
692,451
def indicator_contral_vars(slab_ubr,slab_index,slab,key): """ Note! slab is str,not a list indicator value """ # indicators for slab, keys and variables indicator_slab = 0 indicator_keys = 0 indicator_ubrs = [0,0] # used to break outer loop exit_flag = 0 for slab_part in slab_index: if slab in slab_part: # if ture, slab is indicated, indicator for slab = 0 indicator_slab = 0 # keep on indicate the keys for key_part in slab_part[1]: # if ture, key is indicated, indicator for key = 0 if key in key_part: # the postion of ubrs: first place = 0 # second place = first place + variable number -1(start from zeros) indicator_keys = 0 indicator_ubrs[1] = indicator_ubrs[0] + key_part[1] - 1 # since all the vars have been indicated, break out from the loop # and set exit _flag = 1 to break outer loop exit_flag = 1 break else: # if key haven't been found, key indicator + 1, search for next key indicator_keys = indicator_keys + 1 # the start postion of ubrs should change + variable number indicator_ubrs[0] = indicator_ubrs[0] + key_part[1] #v if exit_flag = 1, variable have been indcated, break out from the loop if exit_flag: break else: # slab haven't been found, slab indicator + 1, search for next slab indicator_slab = indicator_slab + 1 # indicator keys count indicator_keys = indicator_keys + len(slab_part[1]) - 1 # indicator ubrs count for key_part in slab_part[1]: indicator_ubrs[0] = indicator_ubrs[0] + key_part[1] #v if exit_flag = 1, variable have been indcated, break out from the loop if exit_flag: break # indicated_ubrs = slab_ubr[indicator_ubrs[0]:indicator_ubrs[1]+1] return [indicator_ubrs[0],indicator_ubrs[1]+1]
bf2f340a35fbcbe70af20e945a042d6abab9177b
692,452
def MVP_T2D(x, t, p):
    """Right-hand side of the 4-compartment Kandarian (MVP) model
    modified for T2D (Aradottir et al. 2019).

    Arguments:
        x : state vector [I_s, I_p, I_e, G]
            I_s - subcutaneous insulin [U/day]
            I_p - plasma insulin [U/day]
            I_e - insulin effect on glucose [U/day]
            G   - plasma glucose concentration [mmol/L]
        t : time (unused; kept for the ODE-solver signature)
        p : parameter vector [u, SI, pEGP, B, tau1, p2, pGEZI]
            u     - exogenous insulin input [U/day]
            SI    - insulin sensitivity [1/U]
            pEGP  - endogenous glucose production rate [mmol/L day]
            B     - endogenous insulin production coeff beta [U L/mmol day]
            tau1  - time constant [day]
            p2    - delay in insulin action [1/day]
            pGEZI - glucose elimination rate from plasma [1/day]

    Returns the list of the four state derivatives.
    """
    I_s, I_p, I_e, G = x
    u, SI, pEGP, B, tau1, p2, pGEZI = p
    d_Is = (u - I_s) / tau1
    d_Ip = (I_s - I_p) / tau1
    d_Ie = p2 * (I_p + B * G) - p2 * I_e
    d_G = -(pGEZI + SI * I_e) * G + pEGP
    return [d_Is, d_Ip, d_Ie, d_G]
eb3f54ff363629d90752512a2e4ac0ecff9ef14a
692,453
def add_thermo_data(thermo_data1, thermo_data2, group_additivity=False, verbose=False):
    """
    Add the thermodynamic data `thermo_data2` to the data `thermo_data1`,
    and return `thermo_data1` (which is mutated in place).

    If `group_additivity` is True, append comments related to group additivity estimation
    If `verbose` is False, omit the comments from a "zero entry", whose H298, S298, and Cp are all 0.
    If `verbose` is True, or thermo_data2 is not a zero entry, add thermo_data2.comment to thermo_data1.comment.

    Raises ValueError if the two objects are not tabulated at identical
    temperature points.
    """
    # both datasets must share the exact same temperature grid
    if (len(thermo_data1.Tdata.value_si) != len(thermo_data2.Tdata.value_si) or
            any([T1 != T2 for T1, T2 in zip(thermo_data1.Tdata.value_si, thermo_data2.Tdata.value_si)])):
        raise ValueError('Cannot add these ThermoData objects due to their having different temperature points.')
    # accumulate heat capacities pointwise, then enthalpy and entropy
    for i in range(thermo_data1.Tdata.value_si.shape[0]):
        thermo_data1.Cpdata.value_si[i] += thermo_data2.Cpdata.value_si[i]
    thermo_data1.H298.value_si += thermo_data2.H298.value_si
    thermo_data1.S298.value_si += thermo_data2.S298.value_si
    test_zero = sum(abs(value) for value in
                    [thermo_data2.H298.value_si, thermo_data2.S298.value_si] + thermo_data2.Cpdata.value_si.tolist())
    # Used to check if all of the entries in thermo_data2 are zero
    if group_additivity:
        if verbose or test_zero != 0:
            # If verbose==True or test_zero!=0, add thermo_data2.comment to thermo_data1.comment.
            if thermo_data1.comment:
                thermo_data1.comment += ' + {0}'.format(thermo_data2.comment)
            else:
                thermo_data1.comment = 'Thermo group additivity estimation: ' + thermo_data2.comment
    return thermo_data1
615cc306d2536e24f71b18c3adbf90e5948c0160
692,455
def calcMean(data):
    """Calculate the statistical mean.

    :param data: list of values (must be non-empty)
    :returns: the arithmetic mean of the values
    """
    total = sum(data)
    return total / float(len(data))
e4d97d6bd23eb5b2b4a1568a95d51f8eda7ad75e
692,456
def getCellDict(self, pos):
    """
    Get a dictionary representing the cell at grid position ``pos``
    (row i, column j).

    Keys: "color", "strength", "resources", "type", "isdisabled",
    "adjacent"
    """
    self.__checkIndices__(pos)
    i, j = pos
    return {
        'color': self.__colors__[i][j],
        'strength': self.__strengths__[i][j],
        'resources': self.__resources__[i][j],
        'type': self.__types__[i][j],
        'isdisabled': self.__isdisabled__[i][j],
        'adjacent': self.__adjacent__[i][j],
    }
c9c9a029b0f1464f7e8f61d12faa7c966d826740
692,457
def check_true(string):
    """
    Check whether an English string seems to express truth; defaults to
    False unless a recognized affirmative word is found.
    """
    return string.lower() in ('true', 'yes', 'y', '1', 'yep', 'yeah')
bb6d9a4e3b1d92216a67187e69973b59bb67f83d
692,458
def enumerate_keyed_param(param, values):
    """
    Given a param string and a list of value dicts, return a flat dict of
    keyed, enumerated params. Each dict in `values` must pertain to a
    single item and its data points.

    Example:
        param = "InboundShipmentPlanRequestItems.member"
        values = [{'SellerSKU': 'Football2415', 'Quantity': 3}, ...]
    yields keys like 'InboundShipmentPlanRequestItems.member.1.SellerSKU'.

    :raises ValueError: when any element of `values` is not a dict.
    """
    if not values:
        # Shortcut for empty values
        return {}
    if not param.endswith('.'):
        # Ensure the enumerated param ends in '.'
        param += '.'
    if not isinstance(values, (list, tuple, set)):
        # If it's a single value, convert it to a list first
        values = [values, ]
    for val in values:
        if not isinstance(val, dict):
            raise ValueError((
                "Non-dict value detected. "
                "`values` must be a list, tuple, or set; containing only dicts."
            ))
    # flatten to '<param><1-based idx>.<key>': value
    return {
        '{param}{idx}.{key}'.format(param=param, idx=idx + 1, key=k): v
        for idx, val_dict in enumerate(values)
        for k, v in val_dict.items()
    }
20d3120e45db04c30ae44e4c97440ea6b7757b86
692,459
import socket
import sys


def ad_server_address_list(ad_domain):
    """Resolve ``ad_domain`` (LDAP port 389, TCP) to a de-duplicated,
    order-preserving list of server IP addresses.

    Exits the process with status 1 on name-service failure.
    """
    try:
        infos = socket.getaddrinfo(ad_domain, 389, 0, socket.SOCK_STREAM)
    except socket.gaierror:
        print("Name service failure For Domain: " + ad_domain + "\nExiting....!")
        sys.exit(1)
    addresses = []
    for info in infos:
        ip = info[4][0]  # sockaddr tuple's first element is the IP
        if ip not in addresses:
            addresses.append(ip)
    return addresses
a30c4852523d1a0c992a9f8cd6fd8ea0598aa69b
692,460
import re


def is_phone_number(input_string):
    """
    Validate a U.S. phone number; the format standard is from
    https://en.wikipedia.org/wiki/North_American_Numbering_Plan
    """
    pattern = re.compile(r'''
    ^\(?[\d]{3}\)?[\s-]? #area code
    [\d]{3}[\s-]? #central office code
    [\d]{4}$ #subscriber number
    ''', re.X)
    return pattern.search(input_string) is not None
1a43610f4c9aec4b682a6d16a1edc3a21ee6686a
692,462
def get_universal_deps_indices():
    """
    Combined, lower-cased, de-duplicated set of dependency labels from
    Universal Dependencies, spaCy and Stanford NLP.

    See `https://spacy.io/api/annotation#dependency-parsing` for the list
    :return: list of unique lower-cased labels
    """
    universal = ["acl", "advcl", "advmod", "amod", "appos", "aux", "case", "cc",
                 "ccomp", "clf", "compound", "conj", "cop", "csubj", "dep", "det",
                 "discourse", "dislocated", "expl", "fixed", "flat", "goeswith",
                 "iobj", "list", "mark", "nmod", "nsubj", "nummod", "obj", "obl",
                 "orphan", "parataxis", "punct", "reparandum", "root", "vocative",
                 "xcomp"]
    spacy_deps = ['', 'ROOT', 'acl', 'acomp', 'advcl', 'advmod', 'agent', 'amod',
                  'appos', 'attr', 'aux', 'auxpass', 'case', 'cc', 'ccomp',
                  'compound', 'conj', 'csubj', 'csubjpass', 'dative', 'dep',
                  'det', 'dobj', 'expl', 'intj', 'mark', 'meta', 'neg', 'nmod',
                  'npadvmod', 'nsubj', 'nsubjpass', 'nummod', 'oprd', 'parataxis',
                  'pcomp', 'pobj', 'poss', 'preconj', 'predet', 'prep', 'prt',
                  'punct', 'quantmod', 'relcl', 'xcomp']
    snlp_deps = ['compound:prt', 'nmod:poss', 'tmod', 'pass', 'O']
    merged = universal + spacy_deps + snlp_deps
    # lower-case everything, then drop duplicates
    return list({label.lower() for label in merged})
69e0c9433331ade6dc50378db03d52be45cdd969
692,464
import string
import secrets


def gen_rand_password(length):
    """
    Return a random alphanumeric password of ``length`` characters with
    at least one lowercase letter, one uppercase letter and three digits.

    :raises ValueError: if length < 5 — the constraints are then
        unsatisfiable and the original rejection loop would never
        terminate.
    """
    if length < 5:
        raise ValueError("length must be >= 5 to satisfy the character constraints")
    alphabet = string.ascii_letters + string.digits
    while True:
        password = ''.join(secrets.choice(alphabet) for i in range(length))
        if (any(c.islower() for c in password)
                and any(c.isupper() for c in password)
                and sum(c.isdigit() for c in password) >= 3):
            break
    return password
c6d216a2842e53ccea3d1f3ddb9f6174f284c6d8
692,466
def error_in_query(q, task, log):
    """Call after every ``q.exec_`` to check for errors.

    Logs the error and the problematic query. Returns an error message
    suitable for a QMessageBox when an error is found, False otherwise.
    """
    err = q.lastError()
    if not err.isValid():
        return False
    msg = "An error occurred while {}:".format(task)
    log.error(msg)
    log.error('FAILED QUERY: "{}"'.format(q.lastQuery()))
    return msg + "\n\n{}".format(err.text())
44701d315d37630940b167810c9e7de562ad4e37
692,467
def platform(request):
    """Fixture to change platform based on pytest parameter"""
    load = request.getfixturevalue
    # apply the homedir-changing fixture's side effect first
    load("change_homedir")
    return load(request.param)
e3d896414157a200b0786a88f06d2592093febcf
692,468
def let(__context__, *args, **kwargs):
    """:yaql:let

    Returns context object where args are stored with 1-based indexes
    and kwargs values are stored with appropriate keys.

    :signature: let([args], {kwargs})
    :arg [args]: values to be stored under appropriate numbers $1, $2, ...
    :argType [args]: chain of any values
    :arg {kwargs}: values to be stored under appropriate keys
    :argType {kwargs}: chain of mappings
    :returnType: context object

    .. code::

        yaql> let(1, 2, a => 3, b => 4) -> $1 + $a + $2 + $b
        10
    """
    # Positional values become keys "1", "2", ... so they are addressable
    # as $1, $2, ... in yaql expressions.
    for index, value in enumerate(args, 1):
        __context__[str(index)] = value
    # kwargs.items() replaces six.iteritems(); the six compatibility shim is
    # unnecessary on Python 3 and the dependency is dropped here.
    for key, value in kwargs.items():
        __context__[key] = value
    return __context__
c1c1e55b6b514ea88594f8126c7ced7aa8b1d2e5
692,469
def outlier_removal_null(dataframe, colname, low_cut, high_cut):
    """Replace outliers with empty values on dataframe[colname].

    A cell counts as an outlier when it is numeric and falls strictly below
    ``low_cut`` or strictly above ``high_cut``; non-numeric cells are left
    untouched. The dataframe is modified in place and also returned.
    """
    def _is_outlier(value):
        # Only int/float cells can be outliers; anything else is kept.
        return isinstance(value, (int, float)) and (
            value < low_cut or value > high_cut
        )

    outlier_mask = dataframe[colname].apply(_is_outlier)
    dataframe.loc[outlier_mask, colname] = None
    return dataframe
ace03738c7d2e3482a8473e0632a65f428b0f4fd
692,470
def get_date_shortcode(date_str):
    """
    Get shortcode for the standard date strings, to use in submodel names.
    Unknown date strings are returned unchanged.
    """
    shortcodes = {
        "std_contest": "SC",
        "std_contest_daily": "SCD",
        "std_future": "SF",
        "std_test": "ST",
        "std_val": "SV",
        "std_contest_eval": "SCE",
        "std_contest_eval_daily": "SCED",
        "std_paper": "SP",
    }
    # Fall back to the input itself for non-standard date strings.
    return shortcodes.get(date_str, date_str)
af0aaa57e40972d6f6ea6b1a7960f3ae8d0cbbdf
692,471
import requests


def check_ssl(url):
    """Check if the ssl certificate is valid.

    ``verify=True`` makes requests validate the certificate chain; any
    failure (SSL error, timeout, DNS failure, ...) counts as invalid.
    """
    try:
        requests.get(url, verify=True, timeout=3)
    except Exception:
        return False
    return True
3f0e2015d9f11f3f49b83d9799cd511deabd7c51
692,472
from collections import Counter


def count_words(words, strip_chars=None, word_delimiter=" ", make_lower=True):
    """Count token frequencies in *words*.

    The text is optionally lower-cased, the characters in *strip_chars* are
    removed, and the remainder is split on *word_delimiter*; empty tokens
    are discarded.

    :param words: input text
    :param strip_chars: iterable of characters to delete before tokenizing
    :param word_delimiter: delimiter used to split tokens
    :param make_lower: lower-case the text first when True
    :return: ``collections.Counter`` mapping token -> occurrence count
    """
    # Counter now comes from ``collections``: the previous
    # ``from typing import Counter`` only worked because the typing generic
    # alias proxies instantiation to collections.Counter (deprecated usage).
    if make_lower:
        words = words.lower()
    if strip_chars:
        # maketrans(dict.fromkeys(...)) maps each strip char to None,
        # i.e. deletes it during translate().
        char_strip_map = "".maketrans(dict.fromkeys(strip_chars))
        words = words.translate(char_strip_map)
    word_counts = Counter(w for w in words.split(word_delimiter) if w)
    return word_counts
9bb068c9fdb95838c65f822c4f955ed11c191396
692,473
def cal_confidence(antecedents_support, combination_support):
    """
    calculate confidence of antecedents and consequents

    Parameters
    ----------
    antecedents_support : float
        support of antecedents.
        for example :
        - 0.43
    combination_support : float
        support of combination.
        for example :
        - 0.35

    Returns
    -------
    confidence of antecedents and combination, rounded to 3 decimals.
        for example :
        = 0.35 / 0.43
        = 0.814

    Raises
    ------
    ValueError
        if ``antecedents_support`` is zero.
    """
    # The previous docstring example said 0.813, but round(0.35/0.43, 3)
    # actually yields 0.814.
    try:
        confidence = combination_support / antecedents_support
    except ZeroDivisionError as err:
        # Surface a domain-level error instead of the raw arithmetic one,
        # keeping the original cause attached for debugging.
        raise ValueError("antecedents support supposed not be zero !") from err
    return round(confidence, 3)
998d517448139e94658fd3b327e6e3b9303c32ee
692,474
import re


def is_ice(structure):
    """
    Returns True if pure ice is suspected.

    Args
    ----
    structure: pymatgen.core.Structure
        Pymatgen Structure object.
    """
    # Collapse the spaces pymatgen inserts between species ("H2 O" -> "H2O")
    # before comparing against the water formula.
    formula = re.sub(" ", "", structure.composition.alphabetical_formula)
    return formula == "H2O"
5b6a812ad410497628c3d955addf2f62d40e2a27
692,476
def multi_pMethod(args):
    """
    Runs the pMethod function and returns the results plus the id of the node.

    :param args: tuple ``(id, pMethod, dataset1, dataset2)`` where
        ``pMethod(dataset1, dataset2)`` returns the 8-tuple
        (worst_pvalue, best_pvalue, worst_sim_score, best_sim_score,
        worst_rep_1, worst_rep_2, best_rep_1, best_rep_2)
    :return: 9-tuple of the node id followed by the 8 pMethod results
    """
    node_id, pMethod, dataset1, dataset2 = args
    (w_pval, b_pval, w_sim, b_sim,
     w_rep1, w_rep2, b_rep1, b_rep2) = pMethod(dataset1, dataset2)
    return (node_id, w_pval, b_pval, w_sim, b_sim,
            w_rep1, w_rep2, b_rep1, b_rep2)
70677e23cd24f869ee3943265c0aa4aa7f2c421e
692,477
def lensort(lst):
    """Sort strings by length (stable: equal lengths keep input order).

    >>> lensort(['python', 'perl', 'java', 'c', 'haskell', 'ruby'])
    ['c', 'perl', 'java', 'ruby', 'python', 'haskell']
    """
    # ``len`` itself is the key function; no lambda needed.
    return sorted(lst, key=len)
f0116b5b2d7735c1bd39481ff333e3c7a3d60474
692,478
def get_hemisphere(lat):
    """For a given latitude, return N or S."""
    # Latitude 0.0 (the equator) is treated as northern.
    return 'S' if lat < 0.0 else 'N'
7a70a73e41927c286dfc6514061d1fccadadaabe
692,479
from typing import List
import re


def purify(lines: List[str]) -> List[str]:
    """Remove redundant white spaces from list of lines.

    Collapses every whitespace run to a single space, trims the ends and
    drops lines that become empty.
    """
    cleaned = []
    for line in lines:
        normalized = re.sub(r'\s+', ' ', line).strip()
        if normalized:
            cleaned.append(normalized)
    return cleaned
f3791adccd60092d2449578e14dadccade7f9d00
692,481
def get_all_trace_filenames(variables: list):
    """Create a list of ALL original TraCE-21ka NetCDF filenames.

    Args:
        variables: List with the CAM variable names.

    Returns:
        A list of strings with the TraCE filenames (all 36 segment files per
        variable, variables in input order).
    """
    # One filename template per TraCE-21ka segment; ``%s`` is the CAM
    # variable name. Segments are ordered chronologically (22000 BP -> 1990 CE).
    templates = [
        'trace.01.22000-20001BP.cam2.h0.%s.0000101-0200012.nc',
        'trace.02.20000-19001BP.cam2.h0.%s.0200101-0300012.nc',
        'trace.03.19000-18501BP.cam2.h0.%s.0300101-0350012.nc',
        'trace.04.18500-18401BP.cam2.h0.%s.0350101-0360012.nc',
        'trace.05.18400-17501BP.cam2.h0.%s.0360101-0450012.nc',
        'trace.06.17500-17001BP.cam2.h0.%s.0450101-0500012.nc',
        'trace.07.17000-16001BP.cam2.h0.%s.0500101-0600012.nc',
        'trace.08.16000-15001BP.cam2.h0.%s.0600101-0700012.nc',
        'trace.09.15000-14901BP.cam2.h0.%s.0700101-0710012.nc',
        'trace.10.14900-14351BP.cam2.h0.%s.0710101-0765012.nc',
        'trace.11.14350-13871BP.cam2.h0.%s.0765101-0813012.nc',
        'trace.12.13870-13101BP.cam2.h0.%s.0813101-0890012.nc',
        'trace.13.13100-12901BP.cam2.h0.%s.0890101-0910012.nc',
        'trace.14.12900-12501BP.cam2.h0.%s.0910101-0950012.nc',
        'trace.15.12500-12001BP.cam2.h0.%s.0950101-1000012.nc',
        'trace.16.12000-11701BP.cam2.h0.%s.1000101-1030012.nc',
        'trace.17.11700-11301BP.cam2.h0.%s.1030101-1070012.nc',
        'trace.18.11300-10801BP.cam2.h0.%s.1070101-1120012.nc',
        'trace.19.10800-10201BP.cam2.h0.%s.1120101-1180012.nc',
        'trace.20.10200-09701BP.cam2.h0.%s.1180101-1230012.nc',
        'trace.21.09700-09201BP.cam2.h0.%s.1230101-1280012.nc',
        'trace.22.09200-08701BP.cam2.h0.%s.1280101-1330012.nc',
        'trace.23.08700-08501BP.cam2.h0.%s.1330101-1350012.nc',
        'trace.24.08500-08001BP.cam2.h0.%s.1350101-1400012.nc',
        'trace.25.08000-07601BP.cam2.h0.%s.1400101-1440012.nc',
        'trace.26.07600-07201BP.cam2.h0.%s.1440101-1480012.nc',
        'trace.27.07200-06701BP.cam2.h0.%s.1480101-1530012.nc',
        'trace.28.06700-06201BP.cam2.h0.%s.1530101-1580012.nc',
        'trace.29.06200-05701BP.cam2.h0.%s.1580101-1630012.nc',
        'trace.30.05700-05001BP.cam2.h0.%s.1630101-1700012.nc',
        'trace.31.05000-04001BP.cam2.h0.%s.1700101-1800012.nc',
        'trace.32.04000-03201BP.cam2.h0.%s.1800101-1880012.nc',
        'trace.33.03200-02401BP.cam2.h0.%s.1880101-1960012.nc',
        'trace.34.02400-01401BP.cam2.h0.%s.1960101-2060012.nc',
        'trace.35.01400-00401BP.cam2.h0.%s.2060101-2160012.nc',
        'trace.36.400BP-1990CE.cam2.h0.%s.2160101-2204012.nc',
    ]
    # Variable-major order matches the original: all 36 segments for the
    # first variable, then all 36 for the next, etc.
    return [template % v for v in variables for template in templates]
1bf1ed3bca0e66cc6f6a171f91e2e2752968eee8
692,482
import os


def get_generic_intensity_QC_TSV_path(cli_args, contrast):
    """
    :param cli_args: Dictionary containing all command-line arguments from user
    :param contrast: Integer, the contrast number to get the path for
    :return: String, the path to a .tsv file for one contrast and one path,
        but with a unique part of that .tsv filename replaced by '{}'
    """
    # Only the contrast number is filled in here; the '{{}}' stays a literal
    # '{}' placeholder so the caller can format the unique part later.
    filename = '{}_contrast_{}_Intensity_QC{{}}.tsv'.format(
        cli_args['task'], contrast)
    return os.path.join(cli_args['output'], filename)
bf8b2b8339ff3d74d491678fab1430bef91c6f2e
692,483
def list_manipulation(s):
    """Return the characters of the input word *s* as a list.

    :param s: input string
    :return: list with one element per character of ``s``
    """
    # list() iterates the string directly -- equivalent to the previous
    # manual character-append loop, but idiomatic and faster.
    return list(s)
bc4a5bf4c2133acc711f025b1b7287b93817d191
692,484
import os


def relative_filename_within_dir(filename: str, directory: str) -> str:
    """
    Starting with a (typically absolute) ``filename``, returns the part of
    the filename that is relative to the directory ``directory``.
    If the file is *not* within the directory, returns an empty string.
    """
    abs_file = os.path.abspath(filename)
    abs_dir = os.path.abspath(directory)
    # The file lies inside the directory exactly when their common path
    # prefix is the directory itself.
    if os.path.commonpath([abs_dir, abs_file]) == abs_dir:
        return os.path.relpath(abs_file, start=abs_dir)
    return ""
1a2001d684fab4b78f5fed9354939d2b2cb9c6cb
692,485
import os


def xnormpath(path):
    """
    Cross-platform version of os.path.normpath
    """
    # Normalize first, then force forward slashes so results are identical
    # on Windows and POSIX.
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
4a1ec76725780b955438884c0daa16f916dac23d
692,486
import hashlib
from datetime import datetime


def get_unique_job_id() -> str:
    """
    Returns a 64 hex-character (lowercase) string.
        e.g., 'e2cddf55dc410ec584d647157388e96f22bf7b60d900e79afd1c56e27aa0e417'
    :return string:
    """
    # SHA-256 of a microsecond-resolution timestamp; collisions are
    # effectively impossible within a single deployment.
    timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
    job_id = hashlib.sha256(timestamp.encode('utf-8')).hexdigest()
    # We no longer use TxJob so can't check it for duplicates
    #   (but could theoretically check the preconvert bucket
    #       since job_id.zip is saved there).
    return job_id
3ca19bcc3ee819f0f58629149399103603a15741
692,487
import os
import sys


def validate_args(args):
    """Do sanity checking and validation on provided input arguments.

    Exits the process with an explanatory message when a check fails.

    :param args: parsed argument namespace with ``input_dir``,
        ``output_dir``, ``overwrite`` and ``property_order_increment``
        attributes
    :return: ``args`` unchanged when all checks pass
    """
    if not os.path.isdir(os.path.expanduser(args.input_dir)):
        sys.exit("Input path provided is not a directory: %s" % args.input_dir)
    if os.path.exists(os.path.expanduser(args.output_dir)):
        if args.overwrite:
            print("WARNING: Will overwrite output dir: %s" % args.output_dir)
        else:
            sys.exit(
                "Output path already exists: %s\nUse --overwrite to replace contents "
                "of output path with converted files." % args.output_dir
            )
    try:
        int(args.property_order_increment)
    except (TypeError, ValueError):
        # int() raises ValueError for non-numeric strings (e.g. "abc") and
        # TypeError for non-stringy objects; the original only caught
        # TypeError, so bad string input crashed with a traceback instead
        # of this clean exit.
        sys.exit("Property order increment must be a positive integer or 0.")
    return args
6fd8ebcad555538fc1b36fd676d8337f9a91b5ce
692,488
def ones(n):
    """
    Returns a sequence of ones with n elements.

    @type n: number
    @param n: length of sequence
    @rtype: list
    @return: sequence
    """
    return [1.0 for _ in range(n)]
fa1674f272e2e573e0dd41bd8ca6746b8fb4af69
692,489
def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
    """Recursive function that sorts protein group by a number of
    sorting functions.

    Each sorting function splits a group into ordered subgroups; subgroups
    that are still ambiguous (more than one member) are resolved by the
    next function in ``sortfunctions``.
    """
    sorted_out = []
    subgroups = sortfunctions[sortfunc_index](pgroup)
    next_index = sortfunc_index + 1
    for subgroup in subgroups:
        # Recurse only while the subgroup is ambiguous and tie-breaking
        # functions remain.
        if len(subgroup) > 1 and next_index < len(sortfunctions):
            sorted_out.extend(
                sort_protein_group(subgroup, sortfunctions, next_index))
        else:
            sorted_out.extend(subgroup)
    return sorted_out
f5ab92363c5a8406421470a817f49a16c43b7a90
692,490
import unicodedata


def standardize_cinema_name(cinema_name):
    """
    standardize cinema name with related info like screen count and county
    """
    # NFKC folds full-width characters to their half-width equivalents,
    # so names extracted from different sources compare equal.
    normalized = unicodedata.normalize('NFKC', cinema_name)
    # Drop every space (including those NFKC just produced).
    return normalized.replace(' ', '')
dcce8c9898a2476a084e9092b18ef74b546923e1
692,491
def returns(df):
    """daily return

    Simple return: today's value over yesterday's, minus one.
    The first row has no predecessor and comes out as NaN.
    """
    previous = df.shift(1)
    return df / previous - 1
6f55bd6de52d3d348d418c578b5b049c56dc9079
692,492
def _str_to_type(obj, strtype):
    """Helper for ansiformat and colorize.

    Returns *obj* unchanged when it already has the same type as
    *strtype*; otherwise ascii-encodes it (str -> bytes).
    """
    target_type = type(strtype)
    if isinstance(obj, target_type):
        return obj
    return obj.encode('ascii')
353cc93d260c47a3a6dd12a9159d4691731d07a3
692,493
def GetScraper(version):
    """Returns the scraper module for the given version.

    Args:
        version: version string of IE, or None for most recent

    Returns:
        scrape module for given version

    NOTE(review): the docstring mentions IE but the module fetched is the
    Firefox scraper -- presumably legacy wording; confirm against callers.
    """
    # Pychecker will warn that the parameter is unused; we only
    # support one version of Firefox at this time.
    scraper_module = __import__("firefox2", globals(), locals(), [''])
    return scraper_module
fc0ab42a4b36d32bbb6c7779a3d3aa400143ed21
692,494
def get_median_two_sorted_arrays_merge_sort(arr1, arr2):
    """Return the median of two individually sorted arrays.

    Merges the inputs (O(m+n) time, O(m+n) space) and picks the middle
    element, or the mean of the two middle elements for an even total count.

    Args:
        arr1: first sorted sequence
        arr2: second sorted sequence

    Returns:
        The median value (a float when the merged length is even).

    Raises:
        ValueError: if both inputs are empty.
    """
    import heapq

    # heapq.merge performs the classic two-pointer merge of sorted inputs,
    # replacing the hand-rolled while loops of the original.
    merged = list(heapq.merge(arr1, arr2))
    total = len(merged)
    if total == 0:
        # The original crashed with an IndexError on two empty inputs.
        raise ValueError("cannot take the median of two empty arrays")
    mid = total // 2
    if total % 2 == 0:
        return (merged[mid] + merged[mid - 1]) / 2
    return merged[mid]
42e4d5793f3b3ee9ac1989326a480b6c439e84fd
692,495
def unpack_forestryDB(objects):
    """unpacks hierarchical structure of Forestry DB objects into a easily
    savable format

    Args:
        objects (dict): Forestry DB nested dict object

    Returns:
        values (list): list of values saved in dict object
    """
    props = objects['properties']
    geometry = objects['geometry']
    longitude, latitude = geometry['coordinates'][0], geometry['coordinates'][1]
    # Flatten in a fixed column order: properties, coordinates, geometry
    # type, then the top-level object type.
    return [
        props['ig_test'],
        props['ig_date'],
        props['created'],
        props['id'],
        props['ig_time'],
        props['ig_confidence'],
        props['ig_identity'],
        longitude,
        latitude,
        geometry['type'],
        objects['type'],
    ]
7bd18a16de489a3ff39d6d207f26f5b25d8359a1
692,496
def getDictionary(file_name):
    """Build an entity -> index mapping from a file with one entity per line.

    :param file_name: path to the file to read
    :return: tuple ``(dictionary, num_entries)`` where ``dictionary`` maps
        each entity to its 0-based line index and ``num_entries`` is the
        total number of lines read (duplicate entities still count once
        per line, matching the original behaviour)
    """
    # ``with`` guarantees the handle is closed; the original opened the
    # file and never closed it.
    with open(file_name, 'r') as file_object:
        data = file_object.read().splitlines()

    dictionary = {}
    for index, entity in enumerate(data):
        # Later duplicates overwrite earlier indices, as before.
        dictionary[entity] = index

    return dictionary, len(data)
2fdc0f6412ff61b8f96835e2e60edad0255c42ac
692,497
import math
import itertools


def _get_aline_range(aline, per_val, section_stride=1):
    """Split inline/crossline section indices into train and validation ranges.

    Validation sections are taken symmetrically from both ends of the
    volume; the middle is used for training.

    Args:
        aline (int): number of seismic sections in the inline or crossline
            directions
        per_val (float): the fraction of the volume to use for validation.
            Defaults to 0.2.
        section_stride (int): the stride of the sections in the training
            data. If greater than 1, (section_stride - 1) sections are
            skipped between consecutive training sections. Defaults to 1.

    Returns:
        tuple: ``(train_range, val_range)`` iterables of section indices.

    Raises:
        ValueError: if ``section_stride`` < 1 or ``per_val`` is outside [0, 1).
    """
    # NOTE: the original wrapped this body in ``try: ... except (Exception,
    # ValueError): raise`` which re-raised every exception unchanged -- a
    # no-op wrapper, removed here.
    if section_stride < 1:
        raise ValueError("section_stride cannot be zero or a negative number")
    if per_val < 0 or per_val >= 1:
        raise ValueError("Validation percentage (per_val) should be a number in the range [0,1).")
    # Half of the validation share comes from each end of the volume.
    val_aline = math.floor(aline * per_val / 2)
    val_range = itertools.chain(range(0, val_aline), range(aline - val_aline, aline))
    train_range = range(val_aline, aline - val_aline, section_stride)
    return train_range, val_range
87bacc83011feda24c1eb56a148bf6364f4e9fd6
692,498
import os


def picklepaths(root: str) -> list:
    """
    Create a list of file paths to files that end in .p in the given
    directory.

    Args:
        root: directory path

    Returns:
        sorted list of pickle file paths
    """
    pickle_files = []
    for name in sorted(os.listdir(root)):
        if name.endswith('.p'):
            pickle_files.append(os.path.join(root, name))
    return pickle_files
83e6f600370f3eaeaa2ee43bd8767e12dddc423f
692,499
def can_convert(s1, s2):
    """Convert 2 strings of same length by doing zero or more conversions.

    Returns True when ``s1`` can be transformed into ``s2`` by repeatedly
    replacing all occurrences of one character with another.
    """
    if s1 == s2:
        return True
    # Every character of s1 must map to exactly one character of s2.
    mapping = {}
    for src, dst in zip(s1, s2):
        if mapping.setdefault(src, dst) != dst:
            return False
    # A spare character is needed as scratch space, so s2 may not already
    # use all 26 letters.
    return len(set(s2)) < 26
a726acddc068299b92a77e7677224ee53d8fc8a2
692,500
def CT_to_class(CT):
    """
    Converts a CT layer from raw sea ice concentrations (0-100)
    to class ids (0-10), in place; the mutated array is also returned.
    """
    # Each raw concentration (a multiple of 10) maps to concentration // 10.
    # Rewritten cells are never matched again, because every class id
    # assigned so far is smaller than the concentrations still to come.
    for concentration in range(0, 110, 10):
        CT[CT == concentration] = concentration // 10
    return CT
7d212e89bea2c3b4603018e065a826a78a2d66fe
692,502
import copy
from typing import DefaultDict, List, Tuple


def laplace_smooth_cmd_counts(
    seq1_counts: DefaultDict[str, int],
    seq2_counts: DefaultDict[str, DefaultDict[str, int]],
    start_token: str,
    end_token: str,
    unk_token: str,
) -> Tuple[DefaultDict[str, int], DefaultDict[str, DefaultDict[str, int]]]:
    """
    Apply laplace smoothing to the input counts for the cmds.

    In particular, add 1 to each of the counts, including the unk_token. By
    including the unk_token, we can handle unseen commands.

    Parameters
    ----------
    seq1_counts: DefaultDict[str, int]
        individual command counts
    seq2_counts: DefaultDict[str, DefaultDict[str, int]]
        sequence command (length 2) counts
    start_token: str
        dummy command to signify the start of a session (e.g. "##START##")
    end_token: str
        dummy command to signify the end of a session (e.g. "##END##")
    unk_token: str
        dummy command to signify an unseen command (e.g. "##UNK##")

    Returns
    -------
    tuple of laplace smoothed counts:
        individual command counts,
        sequence command (length 2) counts
    """
    # Work on copies so the caller's counts are untouched.
    smoothed1 = copy.deepcopy(seq1_counts)
    smoothed2 = copy.deepcopy(seq2_counts)
    # The vocabulary is every seen command plus the unknown-command token.
    vocab: List[str] = list(smoothed1.keys()) + [unk_token]
    for first in vocab:
        if first == end_token:
            # Nothing can follow the end-of-session token.
            continue
        for second in vocab:
            if second == start_token:
                # Nothing can precede the start-of-session token.
                continue
            smoothed1[first] += 1
            smoothed2[first][second] += 1
            smoothed1[second] += 1
    return smoothed1, smoothed2
3ca9dbc3da5418944d66e9c8da8c770da6b30a05
692,503
def fully_qualified_symbol_name(name) -> bool:
    """
    Checks if `name` is a fully qualified symbol name.
    """
    if not isinstance(name, str):
        return False
    # A qualified name has an interior backtick separator, but no leading,
    # trailing or doubled (empty-component) backticks.
    return (
        "`" in name
        and not name.startswith("`")
        and not name.endswith("`")
        and "``" not in name
    )
11fdb71d9a733c5669618dd2fde44d53379f6b54
692,504
import os
import torch


def resumecheckpoint(resume, net, optimizer):
    """Optionally resume from a checkpoint.

    :param resume: path to a checkpoint file; falsy to start from scratch
    :param net: model whose ``state_dict`` will be restored
    :param optimizer: optimizer whose ``state_dict`` will be restored
    :return: tuple ``(start_epoch, prec)`` -- both 0 when nothing was loaded
    """
    start_epoch = 0
    prec = 0
    if not resume:
        return start_epoch, prec
    if not os.path.isfile(resume):
        print("=> no checkpoint found at '{}'".format(resume))
        return start_epoch, prec
    print("=> loading checkpoint '{}'".format(resume))
    checkpoint = torch.load(resume)
    start_epoch = checkpoint['epoch']
    prec = checkpoint['prec']
    net.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(resume, checkpoint['epoch']))
    return start_epoch, prec
0eb20c00389e935ce55ad3301e3d17b27014e2aa
692,505