content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import re


def available_mem(cores, mem, fmtstring=True):
    """Calculate available memory for a process

    Params:
      cores (int): number of cores
      mem (str): set memory as string with conversion (M, G, g)
      fmtstring (bool): return memory as formatted string
    """
    prefix = "G"
    m = re.match("[0-9]+([a-zA-Z]*)", str(mem))
    if m:
        prefix = m.groups()[0]
    requested_mem_per_core = int(re.sub("[a-zA-Z]*", "", str(mem)))
    core_mem = mem_per_core(prefix)
    requested_cores = min(cores, available_cpu_count())
    mem = min(requested_cores * core_mem,
              requested_cores * requested_mem_per_core)
    if fmtstring:
        return "{}{}".format(mem, prefix)
    else:
        return mem
241ad5133917d84b78bc0670fc974184b88e3978
3,640,411
def normalize_v(v):
    """
    Normalize velocity to [-1, 1].
    Ref: https://github.com/microsoft/AirSim-Drone-Racing-VAE-Imitation/blob/e651be52ff8274c9f595e88b13fe42d51302403d/racing_utils/dataset_utils.py#L20
    """
    # normalization of velocities from whatever to [-1, 1] range
    v_x_range = [-1, 7]
    v_y_range = [-3, 3]
    v_z_range = [-3, 3]
    v_yaw_range = [-1, 1]
    if len(v.shape) == 1:
        # means that it's a 1D vector of velocities
        v[0] = 2.0 * (v[0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0
        v[1] = 2.0 * (v[1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0
        v[2] = 2.0 * (v[2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0
        v[3] = 2.0 * (v[3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0
    elif len(v.shape) == 2:
        # means that it's a 2D vector of velocities
        v[:, 0] = 2.0 * (v[:, 0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0
        v[:, 1] = 2.0 * (v[:, 1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0
        v[:, 2] = 2.0 * (v[:, 2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0
        v[:, 3] = 2.0 * (v[:, 3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0
    else:
        raise Exception('Error in data format of V shape: {}'.format(v.shape))
    return v

# Note: The version used in Shuang's code base is below, which should be equivalent to the above version.
# self.targets[:, 0] = 2. * (self.targets[:, 0] + 1.) / (7. + 1.) - 1.
# self.targets[:, 1] = 2. * (self.targets[:, 1] + 3.) / (3. + 3.) - 1.
# self.targets[:, 2] = 2. * (self.targets[:, 2] + 3.) / (3. + 3.) - 1.
# self.targets[:, 3] = 2. * (self.targets[:, 3] + 1.) / (1. + 1.) - 1.
cd47c8d3498e677a1f566b64199224f23a4b5896
3,640,412
def allele_counts_dataframe(read_evidence_generator):
    """
    Creates a DataFrame containing number of reads supporting the
    ref vs. alt alleles for each variant.
    """
    return dataframe_from_generator(
        element_class=ReadEvidence,
        variant_and_elements_generator=read_evidence_generator,
        rename_dict={
            "ref_reads": "num_ref_reads",
            "alt_reads": "num_alt_reads",
            "other_reads": "num_other_reads",
        },
        # DataFrameBuilder will take the length of these fields' values
        extra_column_fns={
            "num_ref_fragments": lambda _, x: len(x.ref_read_names),
            "num_alt_fragments": lambda _, x: len(x.alt_read_names),
            "num_other_fragments": lambda _, x: len(x.other_read_names)
        })
dd76b3b168977eb185ba1205a75ba4642becc913
3,640,413
def validate_rule_paths(sched: schedule.Schedule) -> schedule.Schedule:
    """A validator to be run after schedule creation to ensure each path
    contains at least one rule with an expression or value.

    A ValueError is raised when this check fails."""
    for path in sched.unfold():
        if path.is_final and not list(path.rules_with_expr_or_value):
            raise ValueError(
                "No expression or value specified along the path {}."
                .format(path)
            )
    return sched
2ddfd6f9607687f6a3e3c955ed3470913fdf14bd
3,640,414
def spiralcontrolpointsvert(
        x: int, y: int, step: int, growthfactor: float, turns: int):
    """Returns a list[(int, int)] of 2D vertices along a path defined by a
    square spiral

    Args:
        x, y: int centerpoint coordinates
        step: int step increment
        growthfactor: float multiplier to step increment to make exponential spirals
        turns: number of turns of the spiral

    Returns:
        list of vertices of the spiral list[[x: int, y: int]]
    """
    v = [[x, y]]
    inc = step
    while turns > 0:
        x += step
        v.append([x, y])
        step += inc
        y += step
        v.append([x, y])
        step += inc
        x -= step
        v.append([x, y])
        step += inc
        y -= step
        v.append([x, y])
        turns -= 1
        step += inc
        inc *= growthfactor
    return v
de9f577ed826b227d44b69e638dd33e08ea9c430
3,640,415
import os


def validate_dependencies():
    """Validate external dependencies.

    This function does NOT have to exist. If it does exist the runtime will
    call and execute it during api initialization.

    The purpose of this function is to verify that external dependencies
    required to auto-generate a problem are properly installed and configured
    on this system. Some common tasks that may be performed are checking that
    a certain program is installed (such as 'javac') and that it is
    executable. You may also want to verify that template files that the
    generator modifies exist in the templates/ directory.

    If any dependency check fails the function should print out the
    respective error message and return False. If all checks pass correctly
    the function should return True.

    If the function does not exist the API initializer will assume that all
    dependencies are met and will add the generator to the pre-fetched
    generator list assuming there is an auto-generated problem in the
    database that has the given generator set for its 'generator' field.

    The following code demonstrates how to check that the java compiler
    (javac) is present on the system and can be executed by the current user.
    """
    print("DEPENDENCY CHECK - TEMPLATE.py (TEMPLATE)")

    # This should have scope across the entire module but doesn't for template purposes
    javac_path = "/usr/bin/javac"

    if not os.path.exists(javac_path):
        print("ERROR - TEMPLATE - The specified java compiler (%s) does not appear to exist." % javac_path)
        return False

    if not os.access(javac_path, os.X_OK):
        print("ERROR - TEMPLATE - javac is not executable by the python runtime.")
        return False

    return True
0bea4c7bbf6198bf18514233c902f5bfa62dc8f8
3,640,417
import numpy as np


def find_closest_vertex(desired_hop, available_vertices):
    """
    Find the closest downstream (greater than or equal) vertex in available
    vertices. If nothing exists, then return -1.

    Keyword arguments:
    desired_hop -- float representing the desired hop location
    available_vertices -- np array of available vertices in model

    Returns:
    vertex -- the closest available vertex that is >= desired_hop
    """
    available_vertices = np.sort(available_vertices)
    forward_vertices = available_vertices[available_vertices >= desired_hop]
    if forward_vertices.size < 1:
        vertex = -1
    else:
        vertex = forward_vertices[0]
    return vertex
c0aa85238faf58ff30cc937bf73b53ce2cc0ee48
3,640,418
def second_smallest(numbers):
    """Find second smallest element of numbers."""
    m1, m2 = float('inf'), float('inf')
    for x in numbers:
        if x <= m1:
            m1, m2 = x, m1
        elif x < m2:
            m2 = x
    return m2
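A quick sanity check (values hypothetical); note that a repeated minimum counts as the second smallest under this implementation:

second_smallest([5, 2, 8, 1])  # -> 2
second_smallest([1, 1, 3])     # -> 1, because the duplicated minimum fills both slots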
0ca7b297da68651e4a8b56377e08f09d4d82cfb7
3,640,419
import numpy


def calc_sft_ccs_by_dictionary(dict_crystal, dict_in_out, flag_use_precalculated_data: bool = False):
    """Calculate structure factor tensor in CCS (X||a*, Z||c) based on the
    information given in dictionary. Output information is written in the
    same dictionary.
    """
    dict_crystal_keys = dict_crystal.keys()
    dict_in_out_keys = dict_in_out.keys()

    necessary_crystal_keys = set(["unit_cell_parameters", ])
    diff_set_crystal = necessary_crystal_keys.difference(set(dict_crystal_keys))
    if len(diff_set_crystal) != 0:
        raise AttributeError(f"The following attributes have to be defined {diff_set_crystal:}")

    flag_reduced_symm_elems = len(set(["reduced_symm_elems", "centrosymmetry", "translation_elems"]).difference(set(dict_crystal_keys))) == 0
    flag_full_symm_elems = len(set(["full_symm_elems", ]).difference(set(dict_crystal_keys))) == 0
    flag_full_mcif_elems = len(set(["full_mcif_elems", ]).difference(set(dict_crystal_keys))) == 0

    if not (flag_reduced_symm_elems or flag_full_symm_elems or flag_full_mcif_elems):
        raise AttributeError("The symmetry elements have to be defined.")

    necessary_in_out_keys = set(["index_hkl", ])
    diff_set_in_out = necessary_in_out_keys.difference(set(dict_in_out_keys))
    if len(diff_set_in_out) != 0:
        raise AttributeError(f"The following attributes have to be defined {diff_set_in_out:}")
    index_hkl = dict_in_out["index_hkl"]

    non_zero_keys = set(["mag_atom_lande_factor", "mag_atom_kappa",
                         "mag_atom_j0_parameters", "mag_atom_j2_parameters"])
    diff_set_crystal = non_zero_keys.difference(set(dict_crystal_keys))
    if len(diff_set_crystal) != 0:
        sft_ccs = numpy.zeros((9, index_hkl.shape[-1]), dtype=complex)
        dder = {}
        return sft_ccs, dder

    if "flag_only_orbital" in dict_in_out_keys:
        flag_only_orbital = dict_in_out["flag_only_orbital"]
    else:
        flag_only_orbital = False

    if flag_reduced_symm_elems:
        reduced_symm_elems = dict_crystal["reduced_symm_elems"]
        centrosymmetry = dict_crystal["centrosymmetry"]
        if centrosymmetry:
            centrosymmetry_position = dict_crystal["centrosymmetry_position"]
        else:
            centrosymmetry_position = None
        translation_elems = dict_crystal["translation_elems"]
    elif flag_full_symm_elems:
        full_symm_elems = dict_crystal["full_symm_elems"]
        reduced_symm_elems = full_symm_elems
        centrosymmetry = False
        centrosymmetry_position = None
        translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
    elif flag_full_mcif_elems:
        full_mcif_elems = dict_crystal["full_mcif_elems"]
        reduced_symm_elems = full_mcif_elems[:13]
        centrosymmetry = False
        centrosymmetry_position = None
        translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)

    unit_cell_parameters = dict_crystal["unit_cell_parameters"]
    atom_para_index = dict_crystal["atom_para_index"]
    atom_para_fract_xyz = dict_crystal["atom_fract_xyz"][:, atom_para_index]
    atom_para_sc_fract = dict_crystal["atom_site_sc_fract"][:, atom_para_index]
    atom_para_sc_b = dict_crystal["atom_site_sc_b"][:, atom_para_index]
    atom_para_fract_xyz = calc_m_v(
        atom_para_sc_fract, numpy.mod(atom_para_fract_xyz, 1),
        flag_m=False, flag_v=False)[0] + atom_para_sc_b

    atom_para_occupancy = dict_crystal["atom_occupancy"][atom_para_index]
    atom_para_b_iso = dict_crystal["atom_b_iso"][atom_para_index]
    atom_beta = dict_crystal["atom_beta"]
    if "atom_site_aniso_sc_beta" in dict_crystal_keys:
        atom_site_aniso_sc_beta = dict_crystal["atom_site_aniso_sc_beta"]
        atom_site_aniso_index = dict_crystal["atom_site_aniso_index"]
        atom_sc_beta = numpy.zeros((6,) + atom_beta.shape, dtype=float)
        atom_sc_beta[:, :, atom_site_aniso_index] = atom_site_aniso_sc_beta
        atom_beta = (atom_sc_beta * numpy.expand_dims(atom_beta, axis=0)).sum(axis=1)
    atom_para_beta = atom_beta[:, atom_para_index]

    mag_atom_para_index = dict_crystal["mag_atom_para_index"]
    atom_para_lande_factor = dict_crystal["mag_atom_lande_factor"][mag_atom_para_index]
    atom_para_kappa = dict_crystal["mag_atom_kappa"][mag_atom_para_index]
    atom_para_j0_parameters = dict_crystal["mag_atom_j0_parameters"][:, mag_atom_para_index]
    atom_para_j2_parameters = dict_crystal["mag_atom_j2_parameters"][:, mag_atom_para_index]
    atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
    atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]

    flag_unit_cell_parameters = numpy.any(dict_crystal["flags_unit_cell_parameters"])
    flag_atom_para_fract_xyz = numpy.any(dict_crystal["flags_atom_fract_xyz"][:, atom_para_index])
    flag_atom_para_occupancy = numpy.any(dict_crystal["flags_atom_occupancy"][atom_para_index])
    flag_atom_para_b_iso = numpy.any(dict_crystal["flags_atom_b_iso"][atom_para_index])
    flag_atom_para_beta = numpy.any(dict_crystal["flags_atom_beta"][:, atom_para_index])
    flag_atom_para_susceptibility = numpy.any(dict_crystal["flags_atom_para_susceptibility"])
    flag_atom_para_lande_factor = numpy.any(dict_crystal["flags_mag_atom_lande_factor"][mag_atom_para_index])
    flag_atom_para_kappa = numpy.any(dict_crystal["flags_mag_atom_kappa"][mag_atom_para_index])

    sft_ccs, dder = calc_sft_ccs(
        index_hkl, reduced_symm_elems, centrosymmetry, centrosymmetry_position,
        translation_elems, unit_cell_parameters, atom_para_fract_xyz,
        atom_para_occupancy, atom_para_susceptibility, atom_para_b_iso,
        atom_para_beta, atom_para_lande_factor, atom_para_kappa,
        atom_para_j0_parameters, atom_para_j2_parameters, atom_para_sc_chi,
        dict_in_out=dict_in_out, flag_only_orbital=flag_only_orbital,
        flag_unit_cell_parameters=flag_unit_cell_parameters,
        flag_atom_para_fract_xyz=flag_atom_para_fract_xyz,
        flag_atom_para_occupancy=flag_atom_para_occupancy,
        flag_atom_para_susceptibility=flag_atom_para_susceptibility,
        flag_atom_para_b_iso=flag_atom_para_b_iso,
        flag_atom_para_beta=flag_atom_para_beta,
        flag_atom_para_lande_factor=flag_atom_para_lande_factor,
        flag_atom_para_kappa=flag_atom_para_kappa,
        flag_use_precalculated_data=flag_use_precalculated_data)
    return sft_ccs, dder
6905b0c1b4d4b3b65099600822ca0f6077fe1393
3,640,420
import numpy as np


def normalize_mesh(mesh, in_place=True):
    """Rescales vertex positions to lie inside unit cube."""
    scale = 1.0 / np.max(mesh.bounds[1, :] - mesh.bounds[0, :])
    centroid = mesh.centroid
    scaled_vertices = (mesh.vertices - centroid) * scale
    if in_place:
        scaled_mesh = mesh
    else:
        scaled_mesh = mesh.copy()
    scaled_mesh.vertices = scaled_vertices
    scaled_mesh.fix_normals()
    return scaled_mesh
e70ff4dea9173a541267c2a5bd040f12de4499c8
3,640,421
from datetime import datetime
import socket


def get_run_name():
    """ A unique name for each run """
    return datetime.now().strftime(
        '%b%d-%H-%M-%S') + '_' + socket.gethostname()
26f57e72912e896fe192de61b6477ef65051fccd
3,640,422
import traceback

import pandas as pd
from selenium.common.exceptions import NoSuchElementException


def process_request(identifier, browser, document_type='Annual Return',
                    num_doc=1, status_df=None):
    """
    Search ICRIS for the passed identifier, analyze the returned documents,
    and cart the documents depending on whether we purchased the document
    before.

    Parameters
    ----------
    identifier : str
        Name or Companies Registry Number of the company to purchase documents for
    browser : selenium.webdriver.remote.webdriver.WebDriver
        An instance of Selenium WebDriver
    document_type : str, optional
        Type of document to be purchased, default `Annual Return`
    num_doc : int, optional
        Number of documents of type `document_type` to be purchased
    status_df : pandas.DataFrame
        Dataframe object to append data related to the status of the operations to

    Returns
    -------
    status_df : pandas.DataFrame
        Dataframe object containing information about the status of the
        carting operations with the following columns
    """
    if status_df is None:
        status_df = pd.DataFrame()
    cart_number = 0
    try:
        try:
            # Check if there were no matches for the passed identifier
            companies = CompaniesIndexPage(browser)
            companies.NO_MATCHES()
            raise Exception(f"No matches found for identifier: {identifier}")
        except NoSuchElementException:
            pass

        main_menu, search, companies, info, doc_index = init_webpages(browser)
        exception = 'None'
        main_menu.navigate_to_search_page()

        if identifier.isdigit():
            search.crNo_search(identifier)
        else:
            search.name_search(identifier)

        if identifier.isdigit():
            try:
                companies.choose_number(identifier)
            except TimeoutError:
                raise Exception(f"No companies found for company number {identifier}")
        else:
            try:
                companies.choose_name(identifier)
            except TimeoutError:
                raise Exception(f"No companies found for company name {identifier}")

        info.proceed()
        doc_index.list_documents()
        cart_status, cart_number = doc_index.index_and_cart(document_type, num_doc)

        row = pd.Series([identifier, document_type, str(cart_status).upper(),
                         cart_number, exception])
        status_df = status_df.append(row, ignore_index=True)
        return status_df
    except Exception:
        exception = traceback.format_exc(7)
        try:
            cart_status
        except NameError:
            cart_status = False
        row = pd.Series([identifier, document_type, str(cart_status).upper(),
                         cart_number, exception])
        status_df = status_df.append(row, ignore_index=True)
        return status_df
7a6071488aba447959264d80d5bb201deb9a2339
3,640,423
def create_blackboard():
    """
    Create a blackboard with a few variables.

    Fill with as many different types as we need to get full coverage on
    pretty printing blackboard tests.
    """
    Blackboard.clear()
    blackboard = Client(name="Tester")
    for key in {"foo", "some_tuple", "nested", "nothing"}:
        blackboard.register_key(
            key=key,
            access=py_trees.common.Access.READ
        )
    for key in {"foo", "some_tuple", "nested", "nothing"}:
        blackboard.register_key(
            key=key,
            access=py_trees.common.Access.WRITE
        )
    blackboard.foo = "bar"
    blackboard.some_tuple = (1, "bar")
    blackboard.nested = Nested()
    blackboard.nothing = None
    return blackboard
776129ba57a545ef3bcfca75c99816fd198bfc3d
3,640,424
def adminRoomDelete(*args, **kwargs):
    """ Delete a room """
    params = kwargs['params']
    filters = {
        Room.room_uuid == params['room_uuid']
    }
    Room().delete(filters)
    filters = {
        UserRoomRelation.room_uuid == params['room_uuid']
    }
    UserRoomRelation().delete(filters)
    return BaseController().successData()
d8f9a29b46aac908eda9e5fedbc113ce93bc2bf4
3,640,425
import numpy as np


def who_is_it(image_path, database, model):
    """
    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of the person on the image
    model -- your Inception model instance in Keras

    Returns:
    min_dist -- the minimum distance between image_path encoding and the encodings from the database
    identity -- string, the name prediction for the person on image_path
    """
    ### START CODE HERE ###

    ## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)
    encoding = create_encoding(image_path, model)

    ## Step 2: Find the closest encoding ##
    # Initialize "min_dist" to a large value, say 100 (≈1 line)
    min_dist = 100

    # Loop over the database dictionary's names and encodings.
    for (name, db_enc) in database.items():
        # Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
        dist = np.linalg.norm(encoding - db_enc)
        # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
        if dist < min_dist:
            min_dist = dist
            identity = name

    ### END CODE HERE ###

    if min_dist > 0.85:
        print("Not in the database.")
        print("distance", min_dist)
        identity = "Unknown"
    else:
        print("it's " + str(identity) + ", the distance is " + str(min_dist))

    return min_dist, identity
e2301b2504cc7bbb4c362b98541cad325b4b8587
3,640,426
def compute_win_state_str_row(n_rows, n_cols, n_connects):
    """Each win state will be a string of 0s and 1s which can then be
    converted into an integer in base 2. I assume that at the maximum
    n_rows = n_cols = 5, which means that a 31 bit integer (since in Python
    it's always signed) should be more than enough for a 25 bit string.
    """
    n_cells = n_rows * n_cols
    win_states = list()

    # each iteration in the for loop computes the possible
    # winning states for a particular row, e.g.,
    # - if n_connects == n_cols, there's just one winning state
    for row_ind in range(n_rows):
        prefix = '0' * (row_ind * n_cols)
        row_end = (row_ind * n_cols) + n_cols
        win_start_ind = row_ind * n_cols
        win_end_ind = win_start_ind + n_connects

        while win_end_ind <= row_end:
            # save the winning state
            suffix = '0' * (n_cells - win_end_ind)
            win_state = prefix + '1' * n_connects + suffix
            win_states.append(win_state)
            # update for the next possible win state of the row
            win_start_ind = win_start_ind + 1
            win_end_ind = win_start_ind + n_connects
            prefix += '0'

    return win_states
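As a worked example, on a 3x3 board with 3-in-a-row each row contributes exactly one winning bitstring:

compute_win_state_str_row(3, 3, 3)
# -> ['111000000', '000111000', '000000111']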
b0f0f2846de7506b4b69f90bb8a0b1641a421659
3,640,427
import hmac
import hashlib


def hmac_sha512(key: bytes, data: bytes) -> bytes:
    """
    Return the SHA512 HMAC for the byte sequence ``data`` generated with the
    secret key ``key``.

    Corresponds directly to the "HMAC-SHA512(Key = ..., Data = ...)" function
    in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param key: The secret key used for HMAC calculation.
    :param data: The data for which an HMAC should be calculated.
    :return: A byte sequence containing the HMAC of ``data`` generated with
        the secret key ``key``.
    """
    h = hmac.new(key, data, hashlib.sha512)
    return h.digest()
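A minimal usage sketch mirroring the BIP32 master-key derivation the docstring references (the seed below is the BIP32 test vector 1 seed):

seed = bytes.fromhex("000102030405060708090a0b0c0d0e0f")
I = hmac_sha512(b"Bitcoin seed", seed)
master_secret_key, master_chain_code = I[:32], I[32:]  # left half = key, right half = chain code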
64850ea2d5e921138d8e0ebc2d021f8eaf5a7357
3,640,428
def __scheduler_trigger(cron_time_now, now_sec_tuple, crontask, deltasec=2):
    """
    SchedulerCore logic
    actual time: cron_time_now format: (WD, H, M, S)
    actual time in sec: now_sec_tuple: (H sec, M sec, S)
    crontask: ("WD:H:M:S", "LM FUNC")
    deltasec: sample time window: +/- sec: -sec--|event|--sec-
    """
    # Resolve "normal" time
    check_time = tuple(int(t.strip()) if t.isdigit() else t.strip()
                       for t in crontask[0].split(':'))
    # Resolve "time tag" to "normal" time
    if len(check_time) < 3:
        tag = crontask[0].strip()
        value = Sun.TIME.get(tag, None)
        if value is None or len(value) < 3:
            errlog_add('cron syntax error: {}:{}'.format(tag, value))
            return False
        check_time = ('*', value[0], value[1], value[2])

    # Cron actual time (now) parts summary in sec
    check_time_now_sec = now_sec_tuple[0] + now_sec_tuple[1] + now_sec_tuple[2]
    # Cron overall requested time in sec - hour in sec, minute in sec, sec
    check_time_scheduler_sec = int(now_sec_tuple[0] if check_time[1] == '*' else check_time[1] * 3600) \
        + int(now_sec_tuple[1] if check_time[2] == '*' else check_time[2] * 60) \
        + int(now_sec_tuple[2] if check_time[3] == '*' else check_time[3])
    # Time frame +/- corrections
    tolerance_min_sec = 0 if check_time_now_sec - deltasec < 0 else check_time_now_sec - deltasec
    tolerance_max_sec = check_time_now_sec + deltasec
    task_id = "{}:{}|{}".format(check_time[0], check_time_scheduler_sec,
                                str(crontask[1]).replace(' ', ''))

    # Check WD - WEEK DAY
    if check_time[0] == '*' or check_time[0] == cron_time_now[0]:
        # Check H, M, S in sec format between tolerance range
        if tolerance_min_sec <= check_time_scheduler_sec <= tolerance_max_sec:
            __cron_task_cache_manager(check_time_now_sec, deltasec)
            if check_time[3] == '*' or task_id not in LAST_CRON_TASKS:
                lm_state = False
                if isinstance(crontask[1], str):
                    # [1] Execute Load Module as a string (user LMs)
                    lm_state = exec_lm_core_schedule(crontask[1].split())
                else:
                    try:
                        # [2] Execute function reference (built-in functions)
                        console_write("[builtin cron] {}".format(crontask[1]()))
                        lm_state = True
                    except Exception as e:
                        errlog_add("[cron] function exec error: {}".format(e))
                if not lm_state:
                    console_write("[cron]now[{}] {} <-> {} conf[{}] exec[{}] LM: {}".format(
                        cron_time_now,
                        __convert_sec_to_time(tolerance_min_sec),
                        __convert_sec_to_time(tolerance_max_sec),
                        crontask[0], lm_state, crontask[1]))
                # SAVE TASK TO CACHE
                if check_time[3] != '*':
                    # SAVE WHEN SEC not *
                    LAST_CRON_TASKS.append(task_id)
                return True
    return False
8c3cc2f23bf94bfe7f817db542f50345be8f1a20
3,640,429
def get13FAmendmentType(accNo, formType=None):
    """
    Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or NEW
    HOLDINGS. This turned out to be unreliable (often missing or wrong), so I
    don't use it to get the combined holdings for an investor. Instead I just
    look at the number of holdings in an amendment compared to the previous
    filing, and treat it as a restatement if the new number of holdings is
    more than half the old number.
    """
    info = basicInfo.getSecFormInfo(accNo, formType)
    xmlUrls = [l[-1] for l in info['links'] if l[0].lower().endswith('xml')]
    xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
    coverPage = findChildSeries(xmlSummTab, ['formdata', 'coverpage'])
    isAmendment = findChildEndingWith(coverPage, 'isamendment')
    if isAmendment is None or isAmendment.text.strip().lower() not in ['true', 'yes']:
        return None
    return findChildSeries(coverPage, ['amendmentinfo', 'amendmenttype']).text.strip()
a8ff184b4d3eb43ea8da75a64e83cb136908364e
3,640,431
import numpy as np


def tabs_to_cover_string(string):
    """
    Get the number of tabs required to be at least the same length as a given
    string.

    :param string: The string
    :return: The number of tabs to cover it
    :rtype: int
    """
    num_tabs = int(np.floor(len(string) / 8) + 1)
    return num_tabs
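For example, assuming the conventional 8-character tab stops this implies:

tabs_to_cover_string("name")       # len 4 -> floor(4/8) + 1 = 1 tab
tabs_to_cover_string("monospace")  # len 9 -> floor(9/8) + 1 = 2 tabs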
242496271bafc78a2180c8e8798b9ed4892afb29
3,640,432
import numpy as np


def kl(p, q):
    """
    Kullback-Leibler divergence for discrete distributions

    Parameters
    ----------
    p: ndarray
        probability mass function
    q: ndarray
        probability mass function

    Returns
    -------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i)))
        Discrete probability distributions.
    """
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
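A small worked example; note this implementation uses the natural logarithm, so the result is in nats:

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
kl(p, q)  # 0.5*ln(0.5/0.9) + 0.5*ln(0.5/0.1) ≈ 0.511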
905903324958414972329e32becf9c8848f54029
3,640,433
def map_uris(uris):
    """Map URIs from external URI to HDFS

    :return:
    """
    pkgs_path = __pillar__['hdfs']['pkgs_path']
    ns = nameservice_names()
    return map(lambda x: 'hdfs://{0}{1}/{2}'.format(
        ns[0], pkgs_path, __salt__['system.basename'](x)), uris)
8dd682e932c2ac4dd495cfd11e88abdf58e78800
3,640,434
def get_body(name):
    """Retrieve the Body structure of a JPL .bsp file object

    Args:
        name (str)
    Return:
        :py:class:`~beyond.constants.Body`
    """
    return Pck()[name]
d9949d9638c27b77f0bff203d2015aeb7af8c389
3,640,436
def is_available():
    """
    Convenience function to check if the current platform is supported by
    this module.
    """
    return ProcessMemoryInfo().update()
d7d1d842009b39f79c650d54f776db664b30ea14
3,640,437
import numpy as np


def render_path_spiral(c2w, up, rads, focal, zrate, rots, N):
    """
    enumerate list of poses around a spiral used for test set visualization
    """
    render_poses = []
    rads = np.array(list(rads) + [1.])
    for theta in np.linspace(0., 2. * np.pi * rots, N + 1)[:-1]:
        c = np.dot(c2w[:3, :4],
                   np.array([np.cos(theta), -np.sin(theta),
                             -np.sin(theta * zrate), 1.]) * rads)
        z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))
        render_poses.append(viewmatrix(z, up, c))
    return render_poses
0eb608be5f29425cca62b15ce48db02e2297be17
3,640,438
from collections import defaultdict
from typing import Dict
from typing import List
from typing import Any


def placeAnchorSourceToLagunaTX(
    common_anchor_connections: Dict[str, List[Dict[str, Any]]]
) -> List[str]:
    """
    The anchors are placed on the Laguna RX registers.
    We move the source cell of the anchor onto the corresponding TX registers.
    """
    anchor_to_source_cell = _getAnchorToSourceCell(common_anchor_connections)

    slr_to_source_cell_to_loc = defaultdict(dict)
    for anchor, loc in anchor_2_loc.items():
        assert 'LAGUNA' in loc and 'RX_REG' in loc
        source_cell = anchor_to_source_cell[anchor]

        # if two anchor registers are connected
        if 'q0_reg' in source_cell:
            assert False, source_cell

        target_tx = getPairingLagunaTXOfRX(loc)
        slr_index = getSLRIndexOfLaguna(target_tx)
        slr_to_source_cell_to_loc[slr_index][source_cell] = target_tx

    script = []
    for slr_index, source_cell_to_loc in slr_to_source_cell_to_loc.items():
        script.append('catch { place_cell { \\')
        for source_cell, loc in source_cell_to_loc.items():
            script.append(f'  {source_cell} {loc} \\')
        script.append('} }')

    # if both the TX and the RX lagunas are in the FIXED state, the router
    # will not perform hold violation fix
    script.append('catch { set_property IS_LOC_FIXED 0 '
                  '[get_cells -hierarchical -filter { BEL =~ *LAGUNA*TX* }] }')

    open('place_laguna_anchor_source_cells.tcl', 'w').write('\n'.join(script))
    return script
d30cef6a42b846a3d82467eabebce1275a3d81ed
3,640,439
def post_example_form():
    """Example of a post form."""
    return render_template("post-form.html")
e5e934dfe0d2b81081cda5deeca483f46fae89fe
3,640,440
def validate(data):
    """Validates incoming data

    Args:
        data(dict): the incoming data

    Returns:
        True if the data is valid

    Raises:
        ValueError: the data is not valid
    """
    if not isinstance(data, dict):
        raise ValueError("data should be dict")

    if "text" not in data or not isinstance(data["text"], str) or len(data["text"]) < 1:
        raise ValueError("text field is required and should not be empty")

    if "markdown" in data and not isinstance(data["markdown"], bool):
        raise ValueError("markdown field should be bool")

    if "attachments" in data:
        if not isinstance(data["attachments"], list):
            raise ValueError("attachments field should be list")
        for attachment in data["attachments"]:
            if "text" not in attachment and "title" not in attachment:
                raise ValueError("text or title is required in attachment")

    return True
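A usage sketch with hypothetical payloads:

validate({"text": "deploy finished", "markdown": True,
          "attachments": [{"title": "build", "text": "#142 passed"}]})  # -> True
validate({"markdown": True})  # raises ValueError: text field is required and should not be empty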
ae8b7e74bd7607a7c8f5079014a0f5e3af5bc011
3,640,441
def stripExtra(name):
    """This function removes parentheses from a string

    *Can later be implemented for other uses like removing other characters
    from string

    Args:
        name (string): character's name

    Returns:
        string: character's name without parentheses
    """
    startIndexPer = name.find('(')
    start = 0
    if startIndexPer != -1:
        start = startIndexPer
    if start == 0:
        return name
    else:
        return name[0:start - 1]
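For example (hypothetical name); the slice name[0:start - 1] also drops the space before the parenthesis, and a string that starts with '(' is returned unchanged because start stays 0:

stripExtra("Walter White (Heisenberg)")  # -> 'Walter White'
stripExtra("(unnamed)")                  # -> '(unnamed)'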
fd9b8c2d6f513f06d8b1df067520c7f05cff023d
3,640,442
def google_maps(maiden: str) -> str:
    """
    generate Google Maps URL from Maidenhead grid

    Parameters
    ----------
    maiden : str
        Maidenhead grid

    Results
    -------
    url : str
        Google Maps URL
    """
    latlon = toLoc(maiden)
    url = ("https://www.google.com/maps/@?api=1&map_action=map"
           "&center={},{}".format(latlon[0], latlon[1]))
    return url
04c2a6d730831746dc63ce6c733b16322d0696da
3,640,443
def format_signed(feature,        # type: Dict[str, Any]
                  formatter=None, # type: Callable[..., str]
                  **kwargs
                  ):
    # type: (...) -> str
    """
    Format unhashed feature with sign.

    >>> format_signed({'name': 'foo', 'sign': 1})
    'foo'
    >>> format_signed({'name': 'foo', 'sign': -1})
    '(-)foo'
    >>> format_signed({'name': ' foo', 'sign': -1}, lambda x: '"{}"'.format(x))
    '(-)" foo"'
    """
    txt = '' if feature['sign'] > 0 else '(-)'
    name = feature['name']  # type: str
    if formatter is not None:
        name = formatter(name, **kwargs)
    return '{}{}'.format(txt, name)
4adeecb92b0d102ae512c2c8acf89d38454b4e4e
3,640,444
from rdkit import Chem


def load_ligand(sdf):
    """Loads a ligand from an sdf file and fragments it.

    Args:
        sdf: Path to sdf file containing a ligand.
    """
    lig = next(Chem.SDMolSupplier(sdf, sanitize=False))
    frags = generate_fragments(lig)
    return lig, frags
984ba4bf61af6f8197f96a80f0f493b7dae84f08
3,640,445
def CMDpending(parser, args):
    """Lists pending jobs."""
    parser.add_option(
        '-b', '--builder', dest='builders', action='append', default=[],
        help='Builders to filter on')
    options, args, buildbot = parser.parse_args(args)
    if args:
        parser.error('Unrecognized parameters: %s' % ' '.join(args))
    if not options.builders:
        options.builders = buildbot.builders.keys
    for builder in options.builders:
        builder = buildbot.builders[builder]
        pending_builds = builder.data.get('pendingBuilds', 0)
        if not pending_builds:
            continue
        print('Builder %s: %d' % (builder.name, pending_builds))
        if not options.quiet:
            for pending in builder.pending_builds.data:
                if 'revision' in pending['source']:
                    print('  revision: %s' % pending['source']['revision'])
                for change in pending['source']['changes']:
                    print('  change:')
                    print('    comment: %r' % unicode(change['comments'][:50]))
                    print('    who:     %s' % change['who'])
    return 0
a9d56333fa84f2c92a969135c0dcc02bf94b972f
3,640,446
import numpy as np


def numpy_translation(xyz):
    """Returns the dual quaternion for a pure translation.
    """
    res = np.zeros(8)
    res[3] = 1.0
    res[4] = xyz[0] / 2.0
    res[5] = xyz[1] / 2.0
    res[6] = xyz[2] / 2.0
    return res
8180449ec6128237f63b4519117553e85a2d1369
3,640,447
def sort_car_models(car_db):
    """return a copy of the cars dict with the car models (values)
    sorted alphabetically"""
    sorted_db = {}
    for model in car_db:
        sorted_db[model] = sorted(car_db[model])
    return sorted_db
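A quick example with a hypothetical database; note sorted() orders by code point, so uppercase letters come before lowercase:

car_db = {"Ford": ["Mustang", "Fiesta"], "BMW": ["i3", "M3", "X5"]}
sort_car_models(car_db)
# -> {'Ford': ['Fiesta', 'Mustang'], 'BMW': ['M3', 'X5', 'i3']}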
a478f16ece83058ba411480b91584e4c61026141
3,640,448
import requests


def test_module(params) -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed
    to. Connection to the service is successful. Raises exceptions if
    something goes wrong.

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    try:
        url = params.get('url')[:-1] if str(params.get('url')).endswith('/') \
            else params.get('url')
        credentials = params.get('apikey')
        creds = "Bearer " + credentials
        headers = {"Authorization": creds}
        url = urljoin(url, '/customer/getSubmission/7bf5ba92-30e1-4d42-821f-6d4ac94c3be1')
        response = requests.request("GET", url, headers=headers)
        status = response.status_code
        if status != 200:
            if 'UnauthorizedError' in str(response.content):
                return 'Authorization Error: make sure API Key is correctly set'
            else:
                return str(status)
    except Exception as e:
        raise e
    return 'ok'
64d45742c82854a9eb3d4e04aadbe05d458f9aca
3,640,449
from typing import Tuple
from typing import Container
import asyncio


def create(auto_remove: bool = False) -> Tuple[str, str]:
    """
    Creates a database inside a docker container

    :return: container name, database name
    :rtype: Tuple[str, str]
    """
    piccolo_docker_repository = PiccoloDockerRepository(auto_remove=auto_remove)
    piccolo_docker_repository.create_container()
    container: Container = piccolo_docker_repository.container
    database_name: str = ""
    if container:
        loop = asyncio.get_event_loop()
        database_name = loop.run_until_complete(
            piccolo_docker_repository.create_database()
        )
    return container.name, database_name
c875b034b564d85e104655086b7faa1ed7a2df2c
3,640,450
import re


def load_expected_results(file, pattern):
    """Reads the file, named file, which contains test results separated by
    the regular expression pattern. The test results are returned as a
    dictionary.
    """
    expected = {}
    compiled_pattern = re.compile(pattern)
    # Open in text mode with an explicit encoding; calling .decode() on the
    # already-decoded str (as the Python 2 version did) would fail here.
    with open(file, encoding='utf-8') as f:
        test = None
        for line in f:
            line = line.rstrip()
            match = compiled_pattern.search(line)
            if match:
                test = match.groups()[0]
                expected[test] = ''
            else:
                expected[test] += line + '\n'
    return expected
05e20e2e6932c2a4db634f48046ea3e3f6e5dedc
3,640,454
def following(request):
    """View all posts from followed users"""
    if request.method == "GET":
        user = User.objects.get(pk=request.user.id)
        following = user.follow_list.following.all()
        # Post pagination: https://docs.djangoproject.com/en/3.1/topics/pagination/
        posts = Post.objects.filter(user__in=following).order_by("-date")
        following_paginator = Paginator(posts, 10)
        following_page = request.GET.get('page')
        page_obj = following_paginator.get_page(following_page)
    else:
        return redirect("index")
    context = {"page_obj": page_obj}
    return render(request, "network/following.html", context)
1b350c835d6bbf6d51c1a99d56d405a989f681a2
3,640,455
import numpy as np


def create_random_polygon(min_x, min_y, max_x, max_y, vertex_num):
    """Create a random polygon with the passed x and y bounds and the passed
    number of vertices; code adapted from:
    https://stackoverflow.com/a/45841790"""
    # generate the point coordinates within the bounds
    x = np.random.uniform(min_x, max_x, vertex_num)
    y = np.random.uniform(min_y, max_y, vertex_num)
    # determine the center of all points
    center = (sum(x) / vertex_num, sum(y) / vertex_num)
    # find the angle of each point from the center
    angles = np.arctan2(x - center[0], y - center[1])
    # sort points by their angle from the center to avoid self-intersections
    points_sorted_by_angle = sorted([(i, j, k) for i, j, k in zip(x, y, angles)],
                                    key=lambda t: t[2])
    # the process fails if there are duplicate points
    if len(points_sorted_by_angle) != len(set(points_sorted_by_angle)):
        return None
    # structure points as x-y tuples
    points = [(x, y) for (x, y, a) in points_sorted_by_angle]
    # create the polygon
    return Polygon(points)
ad04591daf524bd0c97890a36722233cd08c4e5e
3,640,456
def document_hidden(session):
    """Polls for the document to become hidden."""
    def hidden(session):
        return session.execute_script("return document.hidden")
    return Poll(session, timeout=3, raises=None).until(hidden)
21376291398aea859ed0f4d080a7bf617d93521f
3,640,457
def create_blueprint():
    """Creates a Blueprint"""
    blueprint = Blueprint('Tasks Blueprint', __name__, url_prefix='/tasks')
    blueprint.route('/', methods=['POST'])(tasks.create)
    blueprint.route('/', methods=['PATCH'])(tasks.patch)
    blueprint.route('/', methods=['GET'])(tasks.list)
    return blueprint
c0e0412e599e4f4378efa2e2749f957a8b58043b
3,640,458
import gzip


def estimate_null_variance_gs(gs_lists, statslist, Wsq, single_gs_hpo=False, n_or_bins=1):
    """
    Estimates null variance from the average of a list of known causal windows
    """
    statspaths = {h: p for h, p in [x.rstrip().split('\t')[:2]
                                    for x in open(statslist).readlines()]}
    with gzip.open(list(statspaths.values())[0], 'rt') as ex_statfile:
        statscols = ex_statfile.readline().rstrip().split('\t')

    # Estimate null variance for each entry in gs_lists
    for gspath in gs_lists:
        for hpo, statspath in statspaths.items():
            # Intersect sumstats for phenotype with GS regions
            gsdf = pbt.BedTool(statspath).\
                intersect(pbt.BedTool(gspath), u=True, f=1.0).\
                to_dataframe(names=statscols)
            gsdf['window'] = gsdf[['#chr', 'start', 'end']].astype(str).\
                aggregate('_'.join, axis=1)
            # Read effect sizes per window and convert to mean variance
            stats = gsdf.loc[:, 'window meta_lnOR'.split()].\
                rename(columns={'meta_lnOR': 'lnOR'})
            gs_var = np.nanmean((stats.lnOR.astype(float) / 1.96) ** 2)
            # Update Wsq estimates for all sig. and effect size quantiles
            if single_gs_hpo:
                for hpo in Wsq.keys():
                    for sig in 'gw fdr'.split():
                        for i in range(n_or_bins):
                            Wsq[hpo][sig][i].append(gs_var)
                break
            else:
                for sig in 'gw fdr'.split():
                    for i in range(n_or_bins):
                        Wsq[hpo][sig][i].append(gs_var)
    return Wsq
755f35c108b9045d237a6cbfa442e7b6b0c24829
3,640,459
import torch


def create_model(config):
    """Create the score model."""
    model_name = config.model.name
    score_model = get_model(model_name)(config)
    score_model = score_model.to(config.device)
    score_model = torch.nn.DataParallel(score_model)
    return score_model
ca0b9fa1c68d83c1697d82273fc740ba35826c87
3,640,460
import gc


def at(addr):
    """Look up an object by its id."""
    for o in gc.get_objects():
        if id(o) == addr:
            return o
    return None
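Usage sketch; this relies on CPython, where id() is the object's address and gc.get_objects() only tracks container objects:

x = [1, 2, 3]
at(id(x)) is x  # -> True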
f408b9a63afad1638f156163c6249e0e8095bff4
3,640,461
import warnings

import numpy as np


def money_flow_index(close_data, high_data, low_data, volume, period):
    """
    Money Flow Index.

    Formula:
    MFI = 100 - (100 / (1 + PMF / NMF))
    """
    catch_errors.check_for_input_len_diff(
        close_data, high_data, low_data, volume
    )
    catch_errors.check_for_period_error(close_data, period)
    mf = money_flow(close_data, high_data, low_data, volume)
    tp = typical_price(close_data, high_data, low_data)

    flow = [tp[idx] > tp[idx - 1] for idx in range(1, len(tp))]
    pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))]
    nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))]

    pmf = [sum(pf[idx + 1 - period:idx + 1]) for idx in range(period - 1, len(pf))]
    nmf = [sum(nf[idx + 1 - period:idx + 1]) for idx in range(period - 1, len(nf))]

    # Dividing by 0 is not an issue, it turns the value into NaN which we
    # would want in that case
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        money_ratio = np.array(pmf) / np.array(nmf)

    mfi = 100 - (100 / (1 + money_ratio))
    mfi = fill_for_noncomputable_vals(close_data, mfi)

    return mfi
7c122ae10ef406fabf56f63ac80a35557999c2ee
3,640,462
def Get_Weights(dict_rank):
    """Converts rankings into weights."""
    Weights = adapt.create_Weightings(dict_rank)
    return Weights
d50b9dcad7803a34c969f357cd4659ffe49ab740
3,640,463
def psisloo(log_likelihood):
    """
    Summarize the model fit using Pareto-smoothed importance sampling (PSIS)
    and approximate Leave-One-Out cross-validation (LOO).

    Takes as input an ndarray of posterior log likelihood terms
    [ p( y_i | theta^s ) ] per observation unit.

    e.x. if using pystan:
        loosummary = stanity.psisloo(stan_fit.extract()['log_lik'])

    Returns a Psisloo object. Useful methods such as print_summary() & plot().

    References
    ----------
    Aki Vehtari, Andrew Gelman and Jonah Gabry (2015). Efficient
    implementation of leave-one-out cross-validation and WAIC for evaluating
    fitted Bayesian models. arXiv preprint arXiv:1507.04544.

    Aki Vehtari and Andrew Gelman (2015). Pareto smoothed importance
    sampling. arXiv preprint arXiv:1507.02646.
    """
    return Psisloo(log_likelihood)
5500ebd85eb9ac796b0410756e4b51f674890ca6
3,640,465
from typing import Tuple
from typing import Optional
from typing import Pattern

import magic


def rxdelim(content: str) -> Tuple[Optional[Pattern], Optional[Pattern]]:
    """
    Return suitable begin and end delimiters for the content `content`.
    If no matching delimiters are found, return `None, None`.
    """
    tp = magic.from_buffer(content).lower()
    for rxtp, rxbegin, rxend in DELIMITERS:
        if rxtp.match(tp):
            return rxbegin, rxend
    return None, None
884efcd13846da9938b6f120cb3d8963addd0b42
3,640,466
def GenKeyOrderAttrs(soappy_service, ns, type_name):
    """Generates the order and attributes of keys in a complex type.

    Args:
        soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object
            encapsulating the information stored in the WSDL.
        ns: string The namespace the given WSDL-defined type belongs to.
        type_name: string The name of the WSDL-defined type to search for.

    Returns:
        list A list of dictionaries containing the attributes of keys within
        a complex type, in order.
    """
    complex_type = soappy_service.wsdl.types[ns].types[type_name]
    if IsASubType(type_name, ns, soappy_service):
        # This is an extension of another type.
        key_order = GenKeyOrderAttrs(
            soappy_service,
            complex_type.content.derivation.attributes['base'].getTargetNamespace(),
            complex_type.content.derivation.attributes['base'].getName())
        if hasattr(complex_type.content.derivation.content, 'content'):
            key_order.extend([element.attributes for element in
                              complex_type.content.derivation.content.content])
        return key_order
    else:
        # This is a base type.
        return [element.attributes for element in complex_type.content.content]
794f74502b5db305f51bd560b388c64d305cd4e2
3,640,467
import numpy as np


def read_binary_stl(filename):
    """Reads a 3D triangular mesh from an STL file (binary format).

    :param filename: path of the stl file
    :type filename: str
    :return: The vertices, normals and index array of the mesh
    :rtype: Mesh
    :raises: ValueError
    """
    with open(filename, 'rb') as stl_file:
        stl_file.seek(80)
        face_count = np.frombuffer(stl_file.read(4), dtype=np.int32)[0]
        record_dtype = np.dtype([
            ('normals', np.float32, (3,)),
            ('vertices', np.float32, (3, 3)),
            ('attr', '<i2', (1,)),
        ])
        data = np.fromfile(stl_file, dtype=record_dtype)

    if face_count != data.size:
        raise ValueError('stl data has incorrect size')

    vertices = data['vertices'].reshape(-1, 3)
    indices = np.arange(face_count * 3).astype(np.uint32)
    normals = np.repeat(data['normals'], 3, axis=0)

    return Mesh(vertices, indices, normals, clean=True)
53ae2a1806413280719286813e091392d965ce76
3,640,468
import numpy as np


def monotonise_tree(tree, n_feats, incr_feats, decr_feats):
    """Helper to turn a tree into a set of rules
    """
    PLUS = 0
    MINUS = 1
    mt_feats = np.asarray(list(incr_feats) + list(decr_feats))

    def traverse_nodes(node_id=0, operator=None, threshold=None,
                       feature=None, path=None):
        if path is None:
            path = np.zeros([n_feats, 2])
        else:
            path[feature, PLUS if operator[0] == '>' else MINUS] = 1
        if not node_is_leaf(tree, node_id):
            feature = tree.feature[node_id]
            threshold = tree.threshold[node_id]
            left_node_id = tree.children_left[node_id]
            traverse_nodes(left_node_id, "<=", threshold, feature, path.copy())
            right_node_id = tree.children_right[node_id]
            traverse_nodes(right_node_id, ">", threshold, feature, path.copy())
        else:  # a leaf node
            if np.sum(path) > 0:
                # check if all increasing
                all_increasing = np.sum(np.asarray(
                    [path[i_feat, MINUS] if i_feat + 1 in incr_feats
                     else path[i_feat, PLUS] for i_feat in mt_feats - 1])) == 0
                all_decreasing = np.sum(np.asarray(
                    [path[i_feat, MINUS] if i_feat + 1 in decr_feats
                     else path[i_feat, PLUS] for i_feat in mt_feats - 1])) == 0
                counts = np.asarray(tree.value[node_id][0])
                probs = counts / np.sum(counts)
                predicted_value = np.sign(probs[1] - 0.5)
                if predicted_value >= 0 and all_increasing:
                    pass  # ok
                elif predicted_value <= 0 and all_decreasing:
                    pass  # ok
                else:
                    # not a valid rule
                    tree.value[node_id][0] = [0., 0.]
            else:
                print('Tree has only one node (i.e. the root node!)')
        return None

    if len(mt_feats) > 0:
        traverse_nodes()
    return tree
78405b1d2bf5c2617b248210058caca0b062b668
3,640,469
def pythonify_and_pickle(file, out_filename):
    """Convert all the data in the XML file and save as pickled files for
    nodes, ways, relations and tags separately.

    :param file: Filename (the file will be opened 4 times, so passing a file
        object will not work). Can be anything which :module:`digest` can parse.
    :param out_filename: If is `test` then writes files `test_nodes.pic.xz`
        through `test_tags.pic.xz`
    :return: A tuple of the 4 output filenames for nodes, ways, relations and tags.
    """
    # Note: `pickle` here is assumed to be a project-local helper with the
    # signature pickle(obj, filename) that writes an xz-compressed pickle;
    # it is not the stdlib pickle module, whose API differs.
    obj = NodesPacked(file)
    out = [out_filename + "_nodes.pic.xz"]
    pickle(obj, out[0])
    for typpe, name in [(Ways, "ways"), (Relations, "relations"), (Tags, "tags")]:
        obj = None
        obj = typpe(file)
        name = "{}_{}.pic.xz".format(out_filename, name)
        pickle(obj, name)
        out.append(name)
    return out
4140c9e66b9a43b6880b152c50facf89ba723339
3,640,470
import numpy as np
import pandas as pd


def compute_inverse_volatility_weights(df: pd.DataFrame) -> pd.Series:
    """
    Calculate inverse volatility relative weights.

    :param df: cols contain log returns
    :return: series of weights
    """
    dbg.dassert_isinstance(df, pd.DataFrame)
    dbg.dassert(not df.columns.has_duplicates)
    # Compute inverse volatility weights.
    # The result of `compute_volatility_normalization_factor()`
    # is independent of the `target_volatility`.
    weights = df.apply(
        lambda x: compute_volatility_normalization_factor(
            x, target_volatility=0.1
        )
    )
    # Replace inf's with NaN's in weights.
    weights.replace([np.inf, -np.inf], np.nan, inplace=True)
    # Rescale weights to percentages.
    weights /= weights.sum()
    weights.name = "weights"
    # Replace NaN with zero for weights.
    weights = hdataf.apply_nan_mode(weights, mode="fill_with_zero")
    return weights
347343729dc271dd161f419a394151b99f1ce876
3,640,471
def resattnet164(**kwargs):
    """
    ResAttNet-164 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(blocks=164, model_name="resattnet164", **kwargs)
35023591d70a577526280489c19f47fffb0800a2
3,640,472
def stype(obj):
    """
    Return string shape representation of structured objects.

    >>> import numpy as np
    >>> a = np.zeros((3,4), dtype='uint8')
    >>> b = np.zeros((1,2), dtype='float32')

    >>> stype(a)
    '<ndarray> 3x4:uint8'
    >>> stype(b)
    '<ndarray> 1x2:float32'

    >>> stype([a, (b, b)])
    '[<ndarray> 3x4:uint8, (<ndarray> 1x2:float32, <ndarray> 1x2:float32)]'

    >>> stype([1, 2.0, [a], [b]])
    '[<int> 1, <float> 2.0, [<ndarray> 3x4:uint8], [<ndarray> 1x2:float32]]'

    >>> stype({'a':a, 'b':b, 'c':True})
    '{a:<ndarray> 3x4:uint8, b:<ndarray> 1x2:float32, c:<bool> True}'

    :param object obj: Any object
    :return: String representation of object where arrays are replace by
        their shape and dtype descriptions
    :rtype: str
    """
    typestr = lambda obj: '<' + type(obj).__name__ + '> '
    mklist = lambda obj: ', '.join(stype(o) for o in obj)
    mkset = lambda obj: ', '.join(stype(o) for o in sorted(obj))
    mkdict = lambda obj: ', '.join(
        str(k) + ':' + stype(v) for k, v in sorted(obj.items()))
    if istensor(obj, ['shape', 'dtype']):
        return typestr(obj) + shapestr(obj, True)
    if isinstance(obj, list):
        return '[' + mklist(obj) + ']'
    if isinstance(obj, tuple):
        return '(' + mklist(obj) + ')'
    if isinstance(obj, set):
        return '{' + mkset(obj) + '}'
    if isinstance(obj, dict):
        return '{' + mkdict(obj) + '}'
    return typestr(obj) + str(obj)
76b805684361a13f03955692dacd02c045c43bd9
3,640,473
def board2key(Z):
    """ Turn a "Game of Life" board into a key. """
    return bin2hex(array2string(Z[1:-1, 1:-1].reshape((1, 512 * 512 * 4))[0]))
101b4ecdf03e9a9a832434d59e2eaa9a6bed2ef5
3,640,474
def CipherArray(Array=[[" "], " "], Random=1):
    """
    Array - array to encode
    Key - key number used for encoding

    This function encodes elements.
    Returns an array consisting of encoded elements.
    """
    if type(Array) != list:
        raise TypeError("Invalid array format")
    if type(Random) != int:
        raise TypeError("Invalid key format")
    for i in range(len(Array)):
        for j in range(len(Array[i])):
            Array[i][j] = chr(ord(Array[i][j]) * Random)
    return Mover(Array)
a2d472cd49a803a4a08f0fba5362b54cce24c37a
3,640,475
def voltage(raw_value, v_min=0, v_max=10, res=32760, gain=1):
    """Converts a raw value to a voltage measurement.

    ``V = raw_value / res * (v_max - v_min) * gain``
    """
    return (float(raw_value) / res * (v_max - v_min) * gain, "V")
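For instance, with the defaults (10 V span over a resolution of 32760):

voltage(16380)  # -> (5.0, 'V'), since 16380 / 32760 * 10 = 5.0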
b4ea7d2521e1fa856a21b98ace2a9490f8a3b043
3,640,476
def extract_characteristics_from_string(species_string):
    """
    Species are named for the SBML as
    species_name_dot_characteristic1_dot_characteristic2
    So this transforms them into a set

    Parameters:
        species_string (str) = species string in MobsPy for SBML format
            (with _dot_ instead of .)
    """
    return set(species_string.split('_dot_'))
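For example (hypothetical species string):

extract_characteristics_from_string('Ecoli_dot_young_dot_fast')
# -> {'Ecoli', 'young', 'fast'}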
abfcc0d3e425e8f43d776a02254a04b0e85dc6d1
3,640,477
def _get_bzr_version():
    """Looks up bzr version by calling bzr --version.

    :raises: VcsError if bzr is not installed"""
    try:
        value, output, _ = run_shell_command('bzr --version',
                                             shell=True,
                                             us_env=True)
        if value == 0 and output is not None and len(output.splitlines()) > 0:
            version = output.splitlines()[0]
        else:
            raise VcsError("bzr --version returned %s,"
                           " maybe bzr is not installed" % value)
    except VcsError as e:
        raise VcsError("Could not determine whether bzr is installed: %s" % e)
    return version
fb0171fe286e6251b25536dd40323c4af73a1255
3,640,478
def C(source):
    """Compile at runtime and run code in-line"""
    return _embed_or_inline_c(source, True)
d1bd11370a1df3c93209b8d60046c077ac872d3e
3,640,479
import numpy as np


def normalize(x):
    """Standardize the original data set."""
    max_x = np.max(x, axis=0)
    min_x = np.min(x, axis=0)
    x = (x - min_x) / (max_x - min_x)
    return x
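Note that this performs per-column min-max scaling to [0, 1] rather than z-score standardization; for example:

x = np.array([[1., 2.], [3., 4.], [5., 6.]])
normalize(x)
# -> [[0. , 0. ], [0.5, 0.5], [1. , 1. ]]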
eba8ff32dca072b134c689d727e0246d7563a95d
3,640,480
def _diff_bearings(bearings, bearing_thresh=40):
    """
    Identify kinked nodes (nodes that change direction of an edge) by diffing

    Args:
        bearings (list(tuple)): containing (start_node, end_node, bearing)
        bearing_thresh (int): threshold for identifying kinked nodes (range 0, 360)

    Returns:
        list[str] of kinked nodes
    """
    kinked_nodes = []

    # diff bearings
    nodes = [b[0] for b in bearings]
    bearings_comp = [b[2] for b in bearings]
    bearing_diff = [y - x for x, y in zip(bearings_comp, bearings_comp[1:])]
    node2bearing_diff = list(zip(nodes[1:-1], bearing_diff))

    # id nodes to remove
    for n in node2bearing_diff:
        # controlling for differences on either side of 360
        if min(abs(n[1]), abs(n[1] - 360)) > bearing_thresh:
            kinked_nodes.append(n[0])

    return kinked_nodes
a29c3cdd009065d7a73dd993ae66f81853d5e2bc
3,640,481
from typing import Any
import json
import re

import aiohttp


async def request(method: str,
                  url: str,
                  params: dict = None,
                  data: Any = None,
                  credential: Credential = None,
                  no_csrf: bool = False,
                  json_body: bool = False,
                  **kwargs):
    """
    Send a request to the API.

    Args:
        method     (str)                 : request method.
        url        (str)                 : request URL.
        params     (dict, optional)      : query parameters.
        data       (Any, optional)       : request payload.
        credential (Credential, optional): Credential object.
        no_csrf    (bool, optional)      : do not add the CSRF token automatically.
        json_body  (bool, optional)      : whether the payload is JSON.

    Returns:
        None if the API returned no data, otherwise the content of the
        `data` or `result` field provided by the API.
    """
    if credential is None:
        credential = Credential()

    method = method.upper()
    # Non-GET requests require bili_jct unless no_csrf is True
    if method != 'GET' and not no_csrf:
        credential.raise_for_no_bili_jct()

    # Use Referer and UA request headers to bypass the anti-crawler mechanism
    DEFAULT_HEADERS = {
        "Referer": "https://www.bilibili.com",
        "User-Agent": "Mozilla/5.0"
    }
    headers = DEFAULT_HEADERS

    if params is None:
        params = {}

    # Add the CSRF token automatically
    if not no_csrf and method in ['POST', 'DELETE', 'PATCH']:
        if data is None:
            data = {}
        data['csrf'] = credential.bili_jct
        data['csrf_token'] = credential.bili_jct

    # jsonp
    if params.get("jsonp", "") == "jsonp":
        params["callback"] = "callback"

    config = {
        "method": method,
        "url": url,
        "params": params,
        "data": data,
        "headers": headers,
        "cookies": credential.get_cookies()
    }
    config.update(kwargs)

    if json_body:
        config["headers"]["Content-Type"] = "application/json"
        config["data"] = json.dumps(config["data"])

    # Set a proxy if the user provided one
    if settings.proxy:
        config["proxy"] = settings.proxy

    session = get_session()
    async with session.request(**config) as resp:
        # Check the status code
        try:
            resp.raise_for_status()
        except aiohttp.ClientResponseError as e:
            raise NetworkException(e.status, e.message)

        # Check the Content-Length response header
        content_length = resp.headers.get("content-length")
        if content_length and int(content_length) == 0:
            return None

        # Check the Content-Type response header; use find() rather than
        # index(), which raises ValueError instead of returning -1
        content_type = resp.headers.get("content-type")
        if content_type.lower().find("application/json") == -1:
            raise ResponseException("Response is not of type application/json")

        raw_data = await resp.text()
        resp_data: dict

        if 'callback' in params:
            # JSONP request
            resp_data = json.loads(
                re.match("^.*?({.*}).*$", raw_data, re.S).group(1))
        else:
            # JSON
            resp_data = json.loads(raw_data)

        # Check the code field
        code = resp_data.get("code", None)
        if code is None:
            raise ResponseCodeException(-1, "API response contains no code field", resp_data)
        if code != 0:
            msg = resp_data.get('msg', None)
            if msg is None:
                msg = resp_data.get('message', None)
            if msg is None:
                msg = "The API returned no error message"
            raise ResponseCodeException(code, msg, resp_data)

        real_data = resp_data.get("data", None)
        if real_data is None:
            real_data = resp_data.get("result", None)
        return real_data
68b68df293f474ecfff5fdd7ae93f0431d700d50
3,640,482
def InflRate():
    """Inflation rate"""
    return asmp.InflRate()
33fcaa24cc00875e059850574469a95bcab3b469
3,640,483
def author_single_view(request, slug):
    """
    Render Single User
    :param request:
    :param slug:
    :return:
    """
    author = get_object_or_404(Profile, slug=slug)
    author_forum_list = Forum.objects.filter(forum_author=author.id).order_by("-is_created")[:10]
    author_comments = Comment.objects.filter(comment_author=author.id).order_by("-is_created")[:10]
    total_forums = Forum.objects.filter(forum_author=author.id).annotate(num_comments=Count('forum_author'))
    total_comments = Comment.objects.filter(comment_author=author.id).annotate(num_comments=Count('comment_author'))
    template = 'app_author/author_single.html'
    context = {
        'author': author,
        'author_forum_list': author_forum_list,
        'author_comments': author_comments,
        'total_forums': total_forums,
        'total_comments': total_comments
    }
    return render(request, template, context)
506ec5f980d5ee59809358ac2add7cfcd0327a60
3,640,484
def get_predefined(schedule):
    """
    Predefined learn rate changes at specified epochs

    :param schedule: dictionary that maps epochs to learn rate values.
    """
    def update(lr, epoch):
        if epoch in schedule:
            return floatX(schedule[epoch])
        else:
            return floatX(lr)
    return update
5cb9fab3bb3b4b4d868504953d78e3f93f5a7198
3,640,485
def launch_ec2_instances(config, nb=1):
    """
    Launch new ec2 instance(s)
    """
    conf = config[AWS_CONFIG_SECTION]
    ami_image_id = conf.get(AMI_IMAGE_ID_FIELD)
    ami_name = conf.get(AMI_IMAGE_NAME_FIELD)
    if ami_image_id and ami_name:
        raise ValueError('The fields ami_image_id and ami_image_name cannot be both '
                         'specified at the same time. Please specify either ami_image_id '
                         'or ami_image_name')
    if ami_name:
        ami_image_id = _get_image_id(config, ami_name)

    instance_type = conf[INSTANCE_TYPE_FIELD]
    key_name = conf[KEY_NAME_FIELD]
    security_group = conf[SECURITY_GROUP_FIELD]
    logger.info('Launching {} new ec2 instance(s)...'.format(nb))

    # tag all instances using RAMP_AWS_BACKEND_TAG to be able
    # to list all instances later
    tags = [{
        'ResourceType': 'instance',
        'Tags': [
            {'Key': RAMP_AWS_BACKEND_TAG, 'Value': '1'},
        ]
    }]
    sess = _get_boto_session(config)
    resource = sess.resource('ec2')
    instances = resource.create_instances(
        ImageId=ami_image_id,
        MinCount=nb,
        MaxCount=nb,
        InstanceType=instance_type,
        KeyName=key_name,
        TagSpecifications=tags,
        SecurityGroups=[security_group],
    )
    return instances
27b3aa745021f3a1516b09746db1c8111b8905d2
3,640,486
from sklearn.linear_model import LinearRegression, LogisticRegression
import matplotlib.pyplot as plt


def residual_error(X_train, X_test, y_train, y_test, reg="linear"):
    """
    Plot the residual error of the Regression model for the input data,
    and return the fitted Regression model.
    -------------------------------------------------------------------
    # Parameters

    # X_train, X_test, y_train, y_test (np.arrays):
    Given X, a 2-D array of Data, and y, an array of target data, we can use:
    sklearn.model_selection.train_test_split(X, y)
    to obtain X_train, X_test, y_train, and y_test.

    # reg (string):
    Whether the regression model is linear or logistic (default="linear").
    """
    if reg.lower() == "linear":
        reg = LinearRegression()
        reg.fit(X_train, y_train)
    elif reg.lower() == "logistic":
        reg = LogisticRegression()
        reg.fit(X_train, y_train)

    ## setting plot style
    plt.style.use('fivethirtyeight')

    ## plotting residual errors in training data
    plt.scatter(reg.predict(X_train), reg.predict(X_train) - y_train,
                color="green", s=10, label='Train data')

    ## plotting residual errors in test data
    plt.scatter(reg.predict(X_test), reg.predict(X_test) - y_test,
                color="blue", s=10, label='Test data')

    ## plotting line for zero residual error
    plt.hlines(y=0, xmin=0, xmax=50, linewidth=2)

    ## plotting legend
    plt.legend(loc='upper right')

    ## plot title
    plt.title("Residual errors")

    return reg
38513473122ff1430f6f6cf44eb984086bcdda72
3,640,487
from PIL import Image


def centerSquare(pil_img: Image.Image):
    """Adds padding on both sides to make an image square. (Centered)"""
    pil_img = pil_img.convert('RGBA')  # ensure transparency
    background_color = (0, 0, 0, 0)
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result
a991fa4a7a877334bba9e93126e4c8561c27e6f7
3,640,488
def _convert_steplist_to_string(step_data):
    """Converts list of step data into a single string.

    Parameters
    ----------
    step_data : list
        List of step data

    Returns
    -------
    str
        A space delimited string where every 6th value is followed by a newline.
    """
    text = ''
    for i, datum in enumerate(step_data):
        if i == 0:
            text += f'\n{datum}\n'
        else:
            if i % 6 == 0:
                text += f'{datum}\n'
            else:
                text += f'{datum} '
    return text
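A quick example (hypothetical data): the first value gets its own line, then every sixth subsequent value ends a line:

_convert_steplist_to_string([1, 2, 3, 4, 5, 6, 7])
# -> '\n1\n2 3 4 5 6 7\n'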
112495edbafc3db39946d7abeefff6466e2dff94
3,640,489
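Tracing the helper above on eight values shows the layout: a leading newline, the first value alone, then six values per line:

s = _convert_steplist_to_string([1, 2, 3, 4, 5, 6, 7, 8])
print(repr(s))  # '\n1\n2 3 4 5 6 7\n8 '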
from typing import Optional def get_global_public_delegated_prefix(project: Optional[str] = None, public_delegated_prefix: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGlobalPublicDelegatedPrefixResult: """ Returns the specified global PublicDelegatedPrefix resource. """ __args__ = dict() __args__['project'] = project __args__['publicDelegatedPrefix'] = public_delegated_prefix if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('google-native:compute/beta:getGlobalPublicDelegatedPrefix', __args__, opts=opts, typ=GetGlobalPublicDelegatedPrefixResult).value return AwaitableGetGlobalPublicDelegatedPrefixResult( creation_timestamp=__ret__.creation_timestamp, description=__ret__.description, fingerprint=__ret__.fingerprint, ip_cidr_range=__ret__.ip_cidr_range, is_live_migration=__ret__.is_live_migration, kind=__ret__.kind, name=__ret__.name, parent_prefix=__ret__.parent_prefix, public_delegated_sub_prefixs=__ret__.public_delegated_sub_prefixs, region=__ret__.region, self_link=__ret__.self_link, status=__ret__.status)
7c48f4ccce1fb1640d3e3851d9e5481a9dd6a281
3,640,490
def has_conformer(molecule, check_two_dimension=False):
    """
    Check if a conformer exists for molecule. Return True or False.

    Parameters
    ----------
    molecule
    check_two_dimension: bool, optional. Default False
        If True, will also check if conformation is a 2D conformation (all
        z coordinates are zero) and return False if conformation is 2D

    Returns
    -------
    bool
        True if the molecule has a (non-trivial) conformer
    """
    conformer_bool = True
    try:
        if molecule.NumConfs() <= 1:
            # Check that the xyz coordinates are not all zero
            for conf in molecule.GetConfs():
                values = np.asarray(
                    [conf.GetCoords()[i] == (0.0, 0.0, 0.0) for i in conf.GetCoords()]
                )
                if values.all():
                    conformer_bool = False
    except AttributeError:
        conformer_bool = False

    if conformer_bool and check_two_dimension:
        for conf in molecule.GetConfs():
            values = np.asarray(
                [conf.GetCoords()[i][-1] == 0.0 for i in conf.GetCoords()]
            )
            if values.all():
                conformer_bool = False
    return conformer_bool
fd0501a70f3ad002612be7d0625678ccc9f24dc9
3,640,491
def pad_sequence(yseqs, batch_first=False, padding_value=0):
    """Numpy implementation of torch.pad_sequence

    Args:
        yseqs (np.ndarray): List of arrays. (B, *)
        batch_first (bool):
        padding_value (int, optional): Padding value. Defaults to 0.

    Returns:
        np.ndarray

    Examples:
        >>> a = np.ones((25, 300))
        >>> b = np.ones((22, 300))
        >>> c = np.ones((15, 300))
        >>> pad_sequence([a, b, c]).shape
        (25, 3, 300)
        >>> pad_sequence([a, b, c], batch_first=True).shape
        (3, 25, 300)
    """
    # Note: the single-sequence shortcut was dropped because it returned a
    # batch-first array regardless of the batch_first flag.
    max_idx = np.argmax([y.shape[0] for y in yseqs])
    max_shape = yseqs[max_idx].shape
    base = np.ones((len(yseqs), *max_shape)) * padding_value
    for i, y in enumerate(yseqs):
        base[i][:y.shape[0]] = y
    if batch_first:
        return base
    else:
        return base.transpose(1, 0, *np.arange(2, len(base.shape)))
42fe65a15b39227a31b6022f8dae84cabd1888fb
3,640,492
import re def parse_transceiver_dom_sensor(output_lines): """ @summary: Parse the list of transceiver from DB table TRANSCEIVER_DOM_SENSOR content @param output_lines: DB table TRANSCEIVER_DOM_SENSOR content output by 'redis' command @return: Return parsed transceivers in a list """ result = [] p = re.compile(r"TRANSCEIVER_DOM_SENSOR\|(Ethernet\d+)") for line in output_lines: m = p.match(line) assert m, "Unexpected line %s" % line result.append(m.group(1)) return result
367d6a744add04e7649c971ef8fec3788ed8db88
3,640,495
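Illustrative input for the parser above, modeled on redis key listings; the two lines below are made up, not captured from a device:

lines = [
    "TRANSCEIVER_DOM_SENSOR|Ethernet0",
    "TRANSCEIVER_DOM_SENSOR|Ethernet4",
]
assert parse_transceiver_dom_sensor(lines) == ["Ethernet0", "Ethernet4"]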
import math def superimposition_matrix( v0: np.ndarray, v1: np.ndarray, scaling: bool = False, usesvd: bool = True ) -> np.ndarray: """ Return matrix to transform given vector set into second vector set. Args: ---- v0: shape (3, *) or (4, *) arrays of at least 3 vectors. v1: shape (3, *) or (4, *) arrays of at least 3 vectors. scaling: True scaling is desired. usesvd: True if SVD decomposition is used. If usesvd is True, the weighted sum of squared deviations (RMSD) is minimized according to the algorithm by W. Kabsch [8]. Otherwise the quaternion based algorithm by B. Horn [9] is used (slower when using this Python implementation). The returned matrix performs rotation, translation and uniform scaling (if specified). """ v0 = np.array(v0, dtype=np.float64, copy=False)[:3] v1 = np.array(v1, dtype=np.float64, copy=False)[:3] if v0.shape != v1.shape or v0.shape[1] < 3: raise ValueError('Vector sets are of wrong shape or type.') # move centroids to origin t0 = np.mean(v0, axis=1) t1 = np.mean(v1, axis=1) v0 = v0 - t0.reshape(3, 1) v1 = v1 - t1.reshape(3, 1) if usesvd: # Singular Value Decomposition of covariance matrix u, s, vh = np.linalg.svd(np.dot(v1, v0.T)) # rotation matrix from SVD orthonormal bases R = np.dot(u, vh) if np.linalg.det(R) < 0.0: # R does not constitute right handed system R -= np.outer(u[:, 2], vh[2, :]*2.0) s[-1] *= -1.0 # homogeneous transformation matrix M = np.identity(4) M[:3, :3] = R else: # compute symmetric matrix N xx, yy, zz = np.sum(v0 * v1, axis=1) xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1) xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1) N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx), (yz-zy, xx-yy-zz, xy+yx, zx+xz), (zx-xz, xy+yx, -xx+yy-zz, yz+zy), (xy-yx, zx+xz, yz+zy, -xx-yy+zz)) # quaternion: eigenvector corresponding to most positive eigenvalue l, V = np.linalg.eig(N) q = V[:, np.argmax(l)] q /= vector_norm(q) # unit quaternion q = np.roll(q, -1) # move w component to end # homogeneous transformation matrix M = quaternion_matrix(q) # scale: ratio of rms deviations from centroid if scaling: v0 *= v0 v1 *= v1 M[:3, :3] *= math.sqrt(np.sum(v1) / np.sum(v0)) # translation M[:3, 3] = t1 T = np.identity(4) T[:3, 3] = -t0 M = np.dot(M, T) return M
a83fb9532a59cffdd986c364825c32fa682a45dc
3,640,496
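A round-trip check for superimposition_matrix under an assumed noiseless rigid transform: rotate and translate a point cloud, recover the 4x4 matrix, and verify it maps v0 onto v1:

import numpy as np

rng = np.random.default_rng(0)
v0 = rng.random((3, 10))
R = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])  # 90 deg about z
v1 = R @ v0 + np.array([[1.0], [2.0], [3.0]])
M = superimposition_matrix(v0, v1)
v0_h = np.vstack([v0, np.ones((1, 10))])  # homogeneous coordinates
assert np.allclose((M @ v0_h)[:3], v1)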
def get_graph_metadata(graph_id: int): """Returns the metadata for a single graph. This is automatically generated by the datasource classes. Parameters ---------- graph_id : int Graph ID. Returns 404 if the graph ID is not found Returns ------- Dict A dictionary representing the metadata of the current graph. """ graph_obj = Graph.query.filter_by(id=graph_id).first() if not graph_obj: return make_response(jsonify({"message": "Graph not found"}), 404) response = jsonify(graph_obj.meta) return response
a3eb61fcaf901d8caa47da345a8279e4e7058a84
3,640,497
def username_exists(username, original=""):
    """Returns true if the given username exists."""
    return username != original and User.objects.filter(username=username).exists()
16f9a53922d0141459327e79aba5678af9446536
3,640,498
def set_n_jobs(n_jobs: int, x_df: pd.DataFrame) -> int: """ Sets the number of n_jobs, processes to run in parallel. If n_jobs is not specified, the max number of CPUs is used. If n_jobs is set to a higher amount than the number of observations in x_df, n_jobs is rebalanced to match the length of x_df. :param n_jobs: number of jobs to run in parallel :param x_df: x dataframe :return: number of jobs to run in parallel, using the above logic """ if not n_jobs: n_jobs = mp.cpu_count() if n_jobs > len(x_df): n_jobs = len(x_df) return n_jobs
e081f0f2ee6ceeac7587cb362c62ffef0a114a56
3,640,499
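Sketch of the clamping behavior of set_n_jobs: a request for 16 workers is cut down to the 4 rows available, and a falsy n_jobs falls back to the CPU count (assuming fewer CPUs than rows):

import multiprocessing as mp
import pandas as pd

small_df = pd.DataFrame({"a": range(4)})
assert set_n_jobs(16, small_df) == 4
assert set_n_jobs(None, pd.DataFrame({"a": range(1000)})) == mp.cpu_count()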
def group_node_intro_times(filt, groups, n_sents): """ Returns lists of addition times of nodes into particular groups """ devs = [[] for _ in range(len(set(groups)))] for i in range(len(groups)): intro = int(filt[i, i]) devs[groups[i]].append(intro/n_sents) # still normalize addition time return devs
b5da0e97c76683201a9b81fce1b1f1c7f25e4d6d
3,640,500
def svn_client_version(): """svn_client_version() -> svn_version_t const *""" return _client.svn_client_version()
2ffab063bce4e32010eb1f3aa57306e60a0f2417
3,640,501
def getportnum(port):
    """
    Accepts a port name or number and returns the port number as an int.
    Returns -1 in case of an invalid port name.
    """
    try:
        portnum = int(port)
        if portnum < 0 or portnum > 65535:
            logger.error("invalid port number: %s" % port)
            portnum = -1
    except ValueError:
        try:
            p = socket.getservbyname(port)
            portnum = int(p)
        except socket.error as e:
            logger.error("%s: %s" % (e, port))
            portnum = -1
    return portnum
7a5e287a0014afc1fa2933fcaae389a3c8aa50e8
3,640,504
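Illustrative lookups for getportnum, assuming the module's logger and socket import are in place; 'http' resolves through the local services database:

assert getportnum("8080") == 8080
assert getportnum("http") == 80            # via socket.getservbyname
assert getportnum("no-such-service") == -1
assert getportnum("70000") == -1           # out of the valid port range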
def getParafromMinibatchModel(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,
                              num_epochs=1500, minibatch_size=32, print_cost=True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
    X_test -- test set, of shape (input size = 12288, number of test examples = 120)
    Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    (n_x, m) = X_train.shape   # (n_x: input size, m: number of examples in the train set)
    n_y = Y_train.shape[0]     # n_y: output size
    costs = []                 # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters()

    # Forward propagation: Build the forward propagation in the tensorflow graph
    z3 = forward_propagation(X, parameters)

    # Cost function: Add cost function to tensorflow graph
    cost = compute_cost(z3, Y)

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):
            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size)

            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost;
                # the feed dict should contain a minibatch for (X, Y).
                _, temp_cost = sess.run([optimizer, cost],
                                        feed_dict={X: minibatch_X, Y: minibatch_Y})
                minibatch_cost += temp_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(minibatch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(z3), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
2716d4a02b0ce5a0a7a640f80a51f180d280eb0d
3,640,505
def add_land( ax=None, scale="10m", edgecolor=None, facecolor=None, linewidth=None, **kwargs ): """Add land to an existing map Parameters ---------- ax : matplotlib axes object, optional scale : str, optional Resolution of NaturalEarth data to use ('10m’, ‘50m’, or ‘110m'). edgecolor : str or tuple, optional Color to use for the landmass edges. facecolor : str or tuple, optional Color to use for the landmass faces. linewidth : float, optional Width of land edge in points Other Parameters ---------------- Keyword args are passed on to NaturalEarthFeature. Returns ------- FeatureArtist """ if ax is None: ax = plt.gca() edgecolor = edgecolor or plt.rcParams.get( "pyseas.border.color", props.dark.border.color ) facecolor = facecolor or plt.rcParams.get( "pyseas.land.color", props.dark.land.color ) linewidth = linewidth or plt.rcParams.get("pyseas.border.linewidth", 0.4) land = cfeature.NaturalEarthFeature( "physical", "land", scale, edgecolor=edgecolor, facecolor=facecolor, linewidth=linewidth, **kwargs, ) return ax.add_feature(land)
c2a5e97a7e6cb76ffe4a70b754fe86c59b71eb05
3,640,506
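A hedged usage sketch for add_land: draw a global map and fill in the land; the PlateCarree projection and colors are example choices, not defaults from the snippet:

import matplotlib.pyplot as plt
import cartopy.crs as ccrs

ax = plt.axes(projection=ccrs.PlateCarree())
add_land(ax, scale="110m", facecolor="tan", edgecolor="black")
ax.set_global()
plt.show()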
import CLSIDToClass def EnsureDispatch(prog_id, bForDemand = 1): # New fn, so we default the new demand feature to on! """Given a COM prog_id, return an object that is using makepy support, building if necessary""" disp = win32com.client.Dispatch(prog_id) if not disp.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it. try: ti = disp._oleobj_.GetTypeInfo() disp_clsid = ti.GetTypeAttr()[0] tlb, index = ti.GetContainingTypeLib() tla = tlb.GetLibAttr() mod = EnsureModule(tla[0], tla[1], tla[3], tla[4], bForDemand=bForDemand) GetModuleForCLSID(disp_clsid) # Get the class from the module. disp_class = CLSIDToClass.GetClass(str(disp_clsid)) disp = disp_class(disp._oleobj_) except pythoncom.com_error: raise TypeError("This COM object can not automate the makepy process - please run makepy manually for this object") return disp
9f9ed2d87ab5c0329ce729a4fca3078daf6e8d17
3,640,507
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job."""
    n_samples = X.shape[0]
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=int)  # np.int was removed in NumPy 1.24

    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])

        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)

        else:
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))])

            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
                                                 -np.inf)

    return log_proba
a42510017d8b14ddf8f97de5902f6e1fb223da0d
3,640,508
def sort_coords(coords: np.ndarray) -> np.ndarray:
    """Sort coordinates based on the angle with the first coord from the center.

    Args:
        coords (np.ndarray):
            Coordinates to be sorted. The format of coords is as follows.
            np.array([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]])

    Returns:
        np.ndarray of sorted coordinates.
    """
    if len(coords[0]) != 3:
        raise ValueError("Only valid for 3D vector")

    center = np.average(coords, axis=0)
    relative_coords = coords - center
    external_prod = np.cross(relative_coords[0], relative_coords[1])
    if abs(np.linalg.norm(external_prod)) < 1e-8:  # Skip parallel vectors.
        external_prod = np.cross(relative_coords[0], relative_coords[2])
    normal_to_12_plane = external_prod / np.linalg.norm(external_prod)

    v0 = relative_coords[0] / np.linalg.norm(relative_coords[0])

    def angle_between_v0(index: int) -> float:
        """
        Args:
            index (int): index of coords.

        Returns (float):
            Angle between rays from the center to rel_coords[0] and
            rel_coords[index].
        """
        v = relative_coords[index] / np.linalg.norm(relative_coords[index])
        matrix = np.concatenate(([v0], [v], [normal_to_12_plane]), axis=0)
        determinant = np.linalg.det(matrix)
        angle = np.arctan2(np.clip(np.dot(v0, v), -1.0, 1.0), determinant)
        return angle

    indices = list(range(len(coords)))
    indices.sort(key=angle_between_v0)

    return coords[indices]
a05390e56e57e66e6f288096fd4583bee26da88f
3,640,510
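Quick demonstration of sort_coords on the corners of a unit square given out of order; they come back ordered by the function's angular sort key about the centroid:

import numpy as np

pts = np.array([[1., 0., 0.], [-1., 0., 0.], [0., 1., 0.], [0., -1., 0.]])
print(sort_coords(pts))
# [[-1.  0.  0.]
#  [ 0.  1.  0.]
#  [ 1.  0.  0.]
#  [ 0. -1.  0.]]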
import warnings def to_fraction(value, den_limit=65536): """ Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a (numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting the denominator to the range 0 < n <= *den_limit* (which defaults to 65536). """ try: # int, long, or fraction n, d = value.numerator, value.denominator except AttributeError: try: # float n, d = value.as_integer_ratio() except AttributeError: try: n, d = value.num, value.den except AttributeError: try: # tuple n, d = value warnings.warn( PiCameraDeprecated( "Setting framerate or gains as a tuple is " "deprecated; please use one of Python's many " "numeric classes like int, float, Decimal, or " "Fraction instead")) except (TypeError, ValueError): # try and convert anything else to a Fraction directly value = Fraction(value) n, d = value.numerator, value.denominator # Ensure denominator is reasonable if d == 0: raise PiCameraValueError("Denominator cannot be 0") elif d > den_limit: return Fraction(n, d).limit_denominator(den_limit) else: return Fraction(n, d)
6ee8c13ab17e08480f13012a834d5d928a7d4f51
3,640,511
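Two simple conversions through to_fraction: an int passes through exactly, and a float is reduced until the denominator fits the 65536 limit:

from fractions import Fraction

assert to_fraction(30) == Fraction(30, 1)
assert to_fraction(0.1) == Fraction(1, 10)  # limit_denominator(65536) kicks in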
from bisect import bisect_left


def find_closest(myList, myNumber):
    """
    Returns the closest value to myNumber. If two numbers are equally close,
    return the smallest number.

    # adapted from
    # https://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-value
    """
    sortList = sorted(myList)
    pos = bisect_left(sortList, myNumber)
    if pos == 0:
        return sortList[0]
    if pos == len(sortList):
        return sortList[-1]
    before = sortList[pos - 1]
    after = sortList[pos]
    if after - myNumber < myNumber - before:
        return after
    else:
        return before
0e4b6e2932aa4bb1886627e831d90d3a339b73b6
3,640,512
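Two quick checks for find_closest, including the documented tie-break toward the smaller number:

assert find_closest([10, 1, 4], 6) == 4
assert find_closest([10, 1, 4], 7) == 4  # tie between 4 and 10 -> smaller wins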
from pathlib import Path def remove_uuid_file(file_path, dry=False): """ Renames a file without the UUID and returns the new pathlib.Path object """ file_path = Path(file_path) name_parts = file_path.name.split('.') if not is_uuid_string(name_parts[-2]): return file_path name_parts.pop(-2) new_path = file_path.parent.joinpath('.'.join(name_parts)) if not dry and file_path.exists(): file_path.replace(new_path) return new_path
f2c8aa77595081ff968596340b45f61c490d16ec
3,640,513
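Dry-run sketch for remove_uuid_file on a made-up path; this assumes the is_uuid_string helper used by the snippet is importable alongside it:

p = remove_uuid_file(
    'data/spikes.0cc4f2c6-1c4e-4a6b-9e1a-3d2f1a0b9c8d.npy', dry=True)
print(p)  # data/spikes.npy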
def get_9x9x9_scramble(n=120): """ Gets a random scramble (SiGN notation) of length `n` for a 9x9x9 cube. """ return _MEGA_SCRAMBLER.call("megaScrambler.get999scramble", n)
7f5d11ad8cec05de5165fa0f90da40a7d9f17d97
3,640,514
import re def youku(link): """Find youku player URL.""" pattern = r'http:\/\/v\.youku\.com\/v_show/id_([\w]+)\.html' match = re.match(pattern, link) if not match: return None return 'http://player.youku.com/embed/%s' % match.group(1)
efcf1394cc02503a1ae18d91abee34777958e545
3,640,515
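Match and non-match checks against the youku pattern above; the video id is made up:

assert youku('http://v.youku.com/v_show/id_XMTMwNzQwMTcy.html') == \
    'http://player.youku.com/embed/XMTMwNzQwMTcy'
assert youku('https://example.com/video') is None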
from typing import Optional def get_default_service_account(project: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDefaultServiceAccountResult: """ Use this data source to retrieve default service account for this project :param str project: The project ID. If it is not provided, the provider project is used. """ __args__ = dict() __args__['project'] = project if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('gcp:compute/getDefaultServiceAccount:getDefaultServiceAccount', __args__, opts=opts, typ=GetDefaultServiceAccountResult).value return AwaitableGetDefaultServiceAccountResult( display_name=__ret__.display_name, email=__ret__.email, id=__ret__.id, name=__ret__.name, project=__ret__.project, unique_id=__ret__.unique_id)
0d0c859771a11fe9a0772b9dd4aa2597a9081bd3
3,640,516
def combinationSum(candidates, target): """ :type candidates: List[int] :type target: int :rtype: List[List[int]] """ result = [] candidates = sorted(candidates) def dfs(remain, stack): if remain == 0: result.append(stack) return for item in candidates: if item > remain: break if stack and item < stack[-1]: continue else: dfs(remain - item, stack + [item]) dfs(target, []) return result
e8739c196c84aa7d15712ba1007e602a330fd625
3,640,517
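The classic LeetCode 39 example for combinationSum; candidates may repeat, and the stack[-1] guard keeps each combination non-decreasing so no permutations are emitted:

assert combinationSum([2, 3, 6, 7], 7) == [[2, 2, 3], [7]]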
import re

import numpy as np


def get_Xy(sentence):
    """Split `sentence` into ([word1, w2, ..wn], [tag1, t2, ...tn])."""
    words_tags = re.findall('(.)/(.)', sentence)
    if words_tags:
        words_tags = np.asarray(words_tags)
        words = words_tags[:, 0]
        tags = words_tags[:, 1]
        return words, tags  # all characters and tags are kept as data / label
    return None
9d850f74af6417c0172cb944b0e1ce4e3d931a96
3,640,519
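Illustrative tagged sentence in the char/tag format the regex expects; the BMES-style tags are an assumption about the upstream corpus:

words, tags = get_Xy('我/B 爱/M 你/E')
print(list(words))  # ['我', '爱', '你']
print(list(tags))   # ['B', 'M', 'E']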
def create_gap_token(rowidx=None): """returns a gap Token Parameters ---------- rowidx: int (Optional) row id Returns ------- Token """ return TT.Token(token_type=SupportedDataTypes.GAP, value='', rowidx=rowidx)
08f18bfcbf54e8861c684943111e22c33af2c69f
3,640,520
def get_local_bricks(volume: str) -> Result:
    """
    Return all bricks that are being served locally in the volume

    volume: Name of the volume to get local bricks for
    """
    vol_info = volume_info(volume)
    if vol_info.is_err():
        return Err(vol_info.value)
    local_ip = get_local_ip()
    local_brick_list = []
    for vol in vol_info.value:  # avoid shadowing the `volume` parameter
        for brick in vol.bricks:
            if brick.peer.hostname == local_ip:
                local_brick_list.append(brick)
    return Ok(local_brick_list)
d49db6aac12d976a1cfbd72540862be6406f85c9
3,640,521
def unionWCT(m=6, n=6):
    """Worst-case family for union, where m >= 2, n >= 2 and k = 3.

    :arg m: number of states
    :arg n: number of states
    :type m: integer
    :type n: integer
    :returns: two dfas
    :rtype: (DFA, DFA)"""
    if n < 2 or m < 2:
        raise TestsError("the numbers of states must both be greater than 1")
    d1, d2 = DFA(), DFA()
    d1.setSigma(["a", "b", "c"])
    d1.States = list(range(m))
    d1.setInitial(0)
    d1.addFinal(m - 1)
    d1.addTransition(0, "a", 1)
    d1.addTransition(0, "c", 0)
    for i in range(1, m):
        d1.addTransition(i, "a", (i + 1) % m)
        d1.addTransition(i, "b", 0)
        d1.addTransition(i, "c", i)
    d2.setSigma(["a", "b", "c"])
    d2.States = list(range(n))
    d2.setInitial(0)
    d2.addFinal(n - 1)
    d2.addTransition(0, "a", 0)
    d2.addTransition(0, "b", 1)
    for i in range(1, n):
        d2.addTransition(i, "b", (i + 1) % n)
        d2.addTransition(i, "a", i)
        d2.addTransition(i, "c", 1)
    return d1, d2
2b93c22e380c0ed52db2c9dfb9042785541b5885
3,640,522
def week_changes (after, before, str_dates, offset = 0, limit = 3) : """Yield all elements of `str_dates` closest to week changes.""" return unit_changes (after, before, str_dates, "week", offset, limit)
0b744db3f2cc581ea1fd2c59bbdc569339b88737
3,640,523