Dataset schema: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M). Rows below alternate content, sha1, id.
def histtab(items, headers=None, item="item", count="count", percent="percent", cols=None):
    """Make a histogram table."""
    if cols is not None:
        # items is a Table.
        items = items.as_tuples(cols=cols)
        if headers is None:
            headers = cols + [count, percent]
    if headers is None:
        headers = [item, count, percent]
    h = util.hist_dict(items)
    tab = Table(headers=headers)
    tot = float(sum(h.values()))  # h.itervalues() is Python 2 only
    hist_items = list(h.items())
    if cols is not None:
        for key, val in hist_items:
            row = dict(zip(cols, key))
            row[count] = val
            tab.append(row)
    else:
        for key, val in hist_items:
            tab.append({item: key, count: val})
    if percent is not None:
        for i, (key, val) in enumerate(hist_items):
            tab[i][percent] = val / tot
    tab.sort(col=count, reverse=True)
    return tab
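A minimal usage sketch (hypothetical: `util.hist_dict` and `Table` are project-internal helpers, assumed here to count occurrences and to act like a list of row dicts):

# Hypothetical usage of histtab, assuming util.hist_dict/Table behave as above.
words = ["a", "b", "a", "c", "a", "b"]
tab = histtab(words)
for row in tab:
    print(row)  # e.g. {'item': 'a', 'count': 3, 'percent': 0.5}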
ca6bc51d80a179f693ca84fa27753908dcf30ca8
3,647,184
from rdkit import Chem


def read_sdf_to_mol(sdf_file, sanitize=False, add_hs=False, remove_hs=False):
    """Reads a list of molecules from an SDF file.

    :param add_hs: Specifies whether to add hydrogens. Defaults to False
    :type add_hs: bool
    :param remove_hs: Specifies whether to remove hydrogens. Defaults to False
    :type remove_hs: bool
    :param sanitize: Specifies whether to sanitize the molecule. Defaults to False
    :type sanitize: bool
    :return: list of molecules in RDKit format.
    :rtype: list[rdkit.Chem.rdchem.Mol]
    """
    suppl = Chem.SDMolSupplier(sdf_file, sanitize=sanitize, removeHs=remove_hs)
    molecules = [mol for mol in suppl]
    if add_hs:
        # Reassign by index: rebinding the loop variable would discard the result.
        molecules = [Chem.AddHs(mol, addCoords=True) if mol is not None else None
                     for mol in molecules]
    return molecules
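A short usage sketch; the file name is hypothetical and RDKit must be installed:

# Hypothetical usage: parse an SDF file, sanitizing and adding hydrogens.
mols = read_sdf_to_mol("ligands.sdf", sanitize=True, add_hs=True)
print(sum(m is not None for m in mols), "molecules parsed")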
c1917b5dcdad3a88bfd2b181e6c1e757393a4de8
3,647,185
import math


def search_and_score(milvus_collection_name, mongo_name, field_name, vectors,
                     topk, nprobe, inner_score_mode: str):
    """Search vectors from milvus and score by inner field score mode.

    :param milvus_collection_name: collection name to be searched
    :param mongo_name: mongo collection name to be selected from
    :param field_name: field name for searching from mongodb
    :param vectors: vectors which will be searched in milvus
    :param topk: milvus topk number
    :param nprobe: milvus nprobe number
    :param inner_score_mode: inner field score mode
    :return: image id of entity
    """
    result_dbs = []
    MAX_TOPK = 2048
    magic_number = 60
    increase_rate = 0.1
    query_topk = topk + magic_number
    end_flag = False
    try:
        inner_score_mode = InnerFieldScoreMode(inner_score_mode)
    except Exception as e:
        raise WrongInnerFieldModeError("Unsupported inner field mode", e)
    while (len(result_dbs) < topk) and (not end_flag):
        # check query topk max value
        query_topk = min(query_topk, MAX_TOPK)
        vids = MilvusIns.search_vectors(milvus_collection_name, vectors,
                                        topk=query_topk, nprobe=nprobe)
        if len(vids) == 0:
            raise NoneVectorError("milvus search result is None", "")
        # filter -1; if -1 exists or fewer than query_topk hits, stop after this pass
        if (-1 in vids.id_array[0]) or len(vids[0]) < query_topk:
            end_flag = True
        # inner field score function here
        res_vids = get_inner_field_score_result(vids, query_topk, inner_score_mode)
        if len(res_vids) < topk:
            if query_topk < MAX_TOPK:
                # calc a new query_topk; no need to query mongo yet
                query_topk += math.ceil(query_topk * increase_rate)
                increase_rate *= 2
                if not end_flag:
                    continue
            end_flag = True
        result_dbs = MongoIns.search_by_vector_id(mongo_name, field_name, res_vids)
        # calc a new query_topk if len(result_dbs) < topk
        query_topk += math.ceil(query_topk * increase_rate)
    return result_dbs[:topk]
885d0e912e76a379dbb55b15945f898c419254bc
3,647,186
def fix_simulation():
    """Create instance of Simulation class."""
    return Simulation()
fc36a880342bb6e6be6e6735c3ebd09891d09502
3,647,187
def build_tree(vectors, algorithm='kd_tree', metric='minkowski', **kwargs):
    """Build NearestNeighbors tree."""
    kwargs.pop('algorithm', None)
    kwargs.pop('metric', None)
    return NearestNeighbors(algorithm=algorithm, metric=metric, **kwargs).fit(vectors)
42df608eb0e4e5f420bd0a8391ad748b18eb5f4f
3,647,188
def _expectedValues():
    """
    These values are expected for well exposed spot data. The dictionary has
    a tuple for each wavelength. Note that for example focus is data set
    dependent and should be used only as an indicator of a possible value.

    keys: l600, l700, l800, l890
    tuple = [radius, focus, widthx, widthy]
    """
    out = dict(l600=(0.45, 0.40, 0.34, 0.32),
               l700=(0.47, 0.40, 0.32, 0.31),
               l800=(0.49, 0.41, 0.30, 0.30),
               l800l=(0.49, 0.41, 0.27, 0.27),
               l800m=(0.49, 0.41, 0.30, 0.30),
               l800h=(0.49, 0.41, 0.31, 0.31),
               l890=(0.54, 0.38, 0.29, 0.29))
    return out
7ddd7031313ac5c90f022a6a60c81ad12b4d5dac
3,647,189
import time


def storyOne(player):
    """First Story Event"""
    player.story += 1
    clear()
    print("The dust gathers around, swirling, shaking, taking some sort of shape.")
    time.sleep(2)
    print("It's the bloody hermit again!")
    time.sleep(2)
    clear()
    print("Hermit: Greetings, " + str(player.name) + ". It is good to see you.")
    print(str(player.name) + ": Really? You still alive?")
    time.sleep(5)
    clear()
    print("Hermit: Shut up.\n\nAlso, incidentally, I'm here to warn you. The world has noticed you... Your progress will become... Difficult.")
    time.sleep(4)
    clear()
    print("Hermit: Now, a choice awaits you. I have the power to offer you a gift!")
    time.sleep(2)
    clear()
    print("0: A better weapon.")
    print("1: Better armor.")
    print("2: A better enchantment.")
    print("3: A rank increase.")
    choice = input("Enter a number between 0 and 3: ")
    if choice == "0":
        player.weapon += 1
    elif choice == "1":
        player.armor += 1
    elif choice == "2":
        player.enchantment += 1
    elif choice == "3":
        player.level += 1
    else:
        pass
    clear()
    print("Hermit: Excellent!")
    print(kill_hermit())
    time.sleep(4)
    clear()
    return True
fbaff47f27c5ec474caa73d2f87d77029f4814a4
3,647,190
from typing import Dict, Union


def get_optional_info() -> Dict[str, Union[str, bool]]:
    """Get optional package info (tensorflow, pytorch, hdf5_bloscfilter, etc.)

    Returns
    -------
    Dict[str, Union[str, bool]]
        package name, package version (if installed, otherwise False)
    """
    res = {}
    # Imports live inside the try blocks so the ImportError handlers are reachable.
    try:
        import h5py
        bloscFilterAvail = h5py.h5z.filter_avail(32001)
    except ImportError:  # pragma: no cover
        bloscFilterAvail = False
    res['blosc-hdf5-plugin'] = bloscFilterAvail
    try:
        import torch
        torchVersion = torch.__version__
    except ImportError:  # pragma: no cover
        torchVersion = False
    res['pytorch'] = torchVersion
    try:
        import tensorflow
        tensorflowVersion = tensorflow.__version__
    except ImportError:  # pragma: no cover
        tensorflowVersion = False
    res['tensorflow'] = tensorflowVersion
    return res
07255d4e889b669497628cd9b9c6e102ceb22bbf
3,647,192
import timeit


def epsilon_experiment(dataset, n: int, eps_values: list):
    """
    Function for the experiment explained in part (g).
    eps_values is a list, such as: [0.0001, 0.001, 0.005, 0.01, 0.05, 0.1, 1.0]
    Returns the errors as a list (plus per-epsilon timings): [9786.5, 1234.5, ...]
    such that 9786.5 is the error when eps = 0.0001, 1234.5 is the error when
    eps = 0.001, and so forth.
    """
    timer_list = []
    total_errors = []
    non_private_histogram = get_histogram(dataset)
    for epsilon in eps_values:
        start = timeit.default_timer()
        error_list = []
        for _ in range(30):
            dp_histogram = get_dp_histogram(dataset, n, epsilon)
            av_error = calculate_average_error(non_private_histogram, dp_histogram)
            error_list.append(av_error)
        total_average_error = sum(error_list) / len(error_list)
        total_errors.append(total_average_error)
        stop = timeit.default_timer()
        timer_list.append(stop - start)
    return total_errors, timer_list
7aabe10dd97f594533a2da4a901b61790c8435f8
3,647,193
def infer_scaletype(scales):
    """Infer whether `scales` is linearly or exponentially distributed (if
    latter, also infers `nv`). Used internally on `scales` and `ssq_freqs`.

    Returns one of: 'linear', 'log', 'log-piecewise'
    """
    scales = asnumpy(scales).reshape(-1, 1)
    if not isinstance(scales, np.ndarray):
        raise TypeError("`scales` must be a numpy array (got %s)" % type(scales))
    elif scales.dtype not in (np.float32, np.float64):
        raise TypeError("`scales.dtype` must be np.float32 or np.float64 "
                        "(got %s)" % scales.dtype)

    th_log = 1e-15 if scales.dtype == np.float64 else 4e-7
    th_lin = th_log * 1e3  # less accurate for some reason

    if np.mean(np.abs(np.diff(scales, 2, axis=0))) < th_lin:
        scaletype = 'linear'
        nv = None
    elif np.mean(np.abs(np.diff(np.log(scales), 2, axis=0))) < th_log:
        scaletype = 'log'
        # round to avoid faulty float-int roundoffs
        nv = int(np.round(1 / np.diff(np.log2(scales), axis=0)[0]))
    elif logscale_transition_idx(scales) is None:
        raise ValueError("could not infer `scaletype` from `scales`; "
                         "`scales` array must be linear or exponential. "
                         "(got diff(scales)=%s..." % np.diff(scales, axis=0)[:4])
    else:
        scaletype = 'log-piecewise'
        nv = nv_from_scales(scales)
    return scaletype, nv
50e961118a3c97835d279832b399ef72946f4b4a
3,647,194
import google
from googleapiclient.http import build_http


def authorized_http(credentials):
    """Returns an http client that is authorized with the given credentials.

    Args:
        credentials (Union[
            google.auth.credentials.Credentials,
            oauth2client.client.Credentials]): The credentials to use.

    Returns:
        Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An
        authorized http client.
    """
    if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
        if google_auth_httplib2 is None:
            raise ValueError(
                "Credentials from google.auth specified, but "
                "google-api-python-client is unable to use these credentials "
                "unless google-auth-httplib2 is installed. Please install "
                "google-auth-httplib2."
            )
        return google_auth_httplib2.AuthorizedHttp(credentials, http=build_http())
    else:
        return credentials.authorize(build_http())
21d7a05e9d99f0a6e8414da5925f0d69224f846c
3,647,195
from typing import Callable, List, Optional, Tuple


def add_grating_couplers_with_loopback_fiber_array(
    component: Component,
    grating_coupler: ComponentSpec = grating_coupler_te,
    excluded_ports: Optional[List[str]] = None,
    grating_separation: float = 127.0,
    bend_radius_loopback: Optional[float] = None,
    gc_port_name: str = "o1",
    gc_rotation: int = -90,
    straight_separation: float = 5.0,
    bend: ComponentSpec = bend_euler,
    straight: ComponentSpec = straight_function,
    layer_label: Tuple[int, int] = (200, 0),
    layer_label_loopback: Optional[Tuple[int, int]] = None,
    component_name: Optional[str] = None,
    with_loopback: bool = True,
    nlabels_loopback: int = 2,
    get_input_labels_function: Callable = get_input_labels,
    cross_section: CrossSectionSpec = strip,
    select_ports: Callable = select_ports_optical,
    **kwargs,
) -> Component:
    """Returns a component with grating_couplers and loopback.

    Args:
        component: to add grating_couplers.
        grating_coupler: grating_coupler.
        excluded_ports: list of ports to exclude.
        grating_separation: in um.
        bend_radius_loopback: um.
        gc_port_name: optional grating coupler name.
        gc_rotation: grating coupler rotation in degrees.
        straight_separation: in um.
        bend: bend spec.
        straight: straight spec.
        layer_label: optional layer_label.
        layer_label_loopback: optional layer_label for the loopback.
        component_name: optional component name.
        with_loopback: If True, add compact loopback alignment ports.
        nlabels_loopback: number of ports to label (0: no labels, 1: first port, 2: both ports).
        get_input_labels_function: function to get input labels.
        cross_section: CrossSectionSpec.
        select_ports: function to select ports.
        kwargs: cross_section settings.
    """
    x = gf.get_cross_section(cross_section, **kwargs)
    bend_radius_loopback = bend_radius_loopback or x.radius
    excluded_ports = excluded_ports or []
    gc = gf.get_component(grating_coupler)
    direction = "S"
    component_name = component_name or component.metadata_child.get("name")

    c = Component()
    c.component = component
    c.info["polarization"] = gc.info["polarization"]
    c.info["wavelength"] = gc.info["wavelength"]
    c.add_ref(component)

    # Find grating port name if not specified
    if gc_port_name is None:
        gc_port_name = list(gc.ports.values())[0].name

    # List the optical ports to connect
    optical_ports = select_ports(component.ports)
    optical_ports = list(optical_ports.values())
    optical_ports = [p for p in optical_ports if p.name not in excluded_ports]
    optical_ports = direction_ports_from_list_ports(optical_ports)[direction]

    # Check if the ports are equally spaced
    grating_separation_extracted = check_ports_have_equal_spacing(optical_ports)
    if grating_separation_extracted != grating_separation:
        raise ValueError(
            f"Grating separation must be {grating_separation}. "
            f"Got {grating_separation_extracted}"
        )

    # Add grating references
    references = []
    for port in optical_ports:
        gc_ref = c.add_ref(gc)
        gc_ref.connect(gc.ports[gc_port_name].name, port)
        references += [gc_ref]

    labels = get_input_labels_function(
        io_gratings=references,
        ordered_ports=optical_ports,
        component_name=component_name,
        layer_label=layer_label,
        gc_port_name=gc_port_name,
    )
    c.add(labels)

    if with_loopback:
        y0 = references[0].ports[gc_port_name].y
        xs = [p.x for p in optical_ports]
        x0 = min(xs) - grating_separation
        x1 = max(xs) + grating_separation
        gca1, gca2 = [
            gc.ref(position=(x, y0), rotation=gc_rotation, port_id=gc_port_name)
            for x in [x0, x1]
        ]
        gsi = gc.size_info
        port0 = gca1.ports[gc_port_name]
        port1 = gca2.ports[gc_port_name]
        p0 = port0.position
        p1 = port1.position
        a = bend_radius_loopback + 0.5
        b = max(2 * a, grating_separation / 2)
        y_bot_align_route = -gsi.width - straight_separation
        points = np.array(
            [
                p0,
                p0 + (0, a),
                p0 + (b, a),
                p0 + (b, y_bot_align_route),
                p1 + (-b, y_bot_align_route),
                p1 + (-b, a),
                p1 + (0, a),
                p1,
            ]
        )
        bend90 = gf.get_component(
            bend, radius=bend_radius_loopback, cross_section=cross_section, **kwargs
        )
        loopback_route = round_corners(
            points=points,
            bend=bend90,
            straight=straight,
            cross_section=cross_section,
            **kwargs,
        )
        c.add([gca1, gca2])
        c.add(loopback_route.references)

        component_name_loopback = f"loopback_{component_name}"
        if nlabels_loopback == 1:
            io_gratings_loopback = [gca1]
            ordered_ports_loopback = [port0]
        if nlabels_loopback == 2:
            io_gratings_loopback = [gca1, gca2]
            ordered_ports_loopback = [port0, port1]
        if nlabels_loopback == 0:
            pass
        elif 0 < nlabels_loopback <= 2:
            c.add(
                get_input_labels_function(
                    io_gratings=io_gratings_loopback,
                    ordered_ports=ordered_ports_loopback,
                    component_name=component_name_loopback,
                    layer_label=layer_label_loopback or layer_label,
                    gc_port_name=gc_port_name,
                )
            )
        else:
            raise ValueError(
                f"Invalid nlabels_loopback = {nlabels_loopback}, "
                "valid (0: no labels, 1: first port, 2: both ports)"
            )

    c.copy_child_info(component)
    return c
4452851ecc05f46ebf46cd92d6edbea9062bae35
3,647,196
import datetime
import json

import psycopg2  # assumed import: the function calls psycopg2.connect
import pytz


def auto_update_function(cities):
    """Auto-update weather function.

    The function takes a list of the cities to update. If there is an error
    connecting to the sources, it returns an error with status 500 and JSON
    describing the cause of the error and the URL. If the connection is
    successful, it writes the data into the database and returns an empty
    response with code 200.
    """
    utc_timezone = pytz.timezone('UTC')
    try:
        # NOTE: credentials are hard-coded and the queries use string
        # formatting (SQL-injection prone) in the original; kept as-is.
        connect = psycopg2.connect(database='django_test', user='roman',
                                   host='localhost', password='admin')
        cursor = connect.cursor()
        cursor.execute('SELECT city_name FROM frontend_city;')
        # read current city list from database
        cities_list = []
        cities_cursor = cursor.fetchall()
        # list of tuples to flat list
        for i in range(len(cities_cursor)):
            cities_list.append(cities_cursor[i][0])
        for i in range(len(cities)):
            yandex_value = yandex(cities[i])
            open_weather_value = open_weather_map(cities[i])
            # error in yandex source
            if type(yandex_value[0]) == error.HTTPError:
                data = {
                    'Error': 'Error in auto update function.',
                    'Time': str(datetime.datetime.now(utc_timezone)),
                    'Reason': '{}. Please, check url: {}'.format(yandex_value[0], yandex_value[1])
                }
                json_data_error = json.dumps(data)
                return HttpResponse(json_data_error, status=500,
                                    content_type='application/json', charset='utf-8')
            # error in open weather source
            elif type(open_weather_value[0]) == error.HTTPError:
                data = {
                    'Error': 'Error in auto update function.',
                    # str() is required: a datetime object is not JSON-serializable
                    'Time': str(datetime.datetime.now(utc_timezone)),
                    'Reason': '{}. Please, check url: {}'.format(open_weather_value[0], open_weather_value[1])
                }
                json_data_error = json.dumps(data)
                return HttpResponse(json_data_error, status=500,
                                    content_type='application/json', charset='utf-8')
            # if the city has not been checked before
            elif cities[i] not in cities_list:
                cursor.execute("INSERT INTO frontend_city (city_name) values ('{}');".format(cities[i]))
                connect.commit()
            data = {
                'Yandex': str(yandex_value[0]),
                'Open weather': str(open_weather_value[0])
            }
            cursor.execute("SELECT id FROM frontend_city WHERE city_name = '{}';".format(cities[i]))
            city_id = cursor.fetchall()
            city_id = city_id[0][0]
            json_data = json.dumps(data)
            cursor.execute(
                "INSERT INTO frontend_history (city_id, temp_values, created) "
                "VALUES ({},'{}', '{}');".format(city_id, json_data,
                                                 datetime.datetime.now(utc_timezone)))
            connect.commit()
        connect.close()
        return HttpResponse(status=200, content_type='text/html', charset='utf-8')
    except Exception as connection_db_error:
        data = {
            'Error': 'Error in auto update function.',
            'Time': str(datetime.datetime.now(utc_timezone)),
            'Reason': '{}'.format(connection_db_error)
        }
        json_data_error = json.dumps(data)
        return HttpResponse(json_data_error, status=500,
                            content_type='application/json', charset='utf-8')
6c57685b1d4a4c62d6225df17dd1bbee6c1a3934
3,647,198
import numpy as np


def absolute_sum_of_changes(x):
    """
    Returns the sum over the absolute value of consecutive changes in the series x

    .. math::

        \\sum_{i=1, \\ldots, n-1} \\mid x_{i+1} - x_i \\mid

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature
    :return type: float
    """
    return np.sum(abs(np.diff(x)))
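A quick worked example (a plain list works too, since np.diff accepts array-likes):

x = [1, 2, 0, 4]
# |2-1| + |0-2| + |4-0| = 1 + 2 + 4
print(absolute_sum_of_changes(x))  # 7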
b9cc5109335b754d7d6c8014af5d84e75cd94723
3,647,199
def start_v_imp(model, lval: str, rval: str):
    """
    Calculate starting value for parameter in data given data in model.

    For Imputer -- just copies values from original data.

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    mx = model.mod.mx_v
    rows, cols = model.mod.names_v
    i, j = rows.index(lval), cols.index(rval)
    v = mx[i, j]
    return v
113266955e5115b0eb43d32feaa506a2b0c93e14
3,647,201
import numpy as np
from skimage.measure import regionprops  # assumed imports: regionprops and
from skimage.transform import resize     # resize come from scikit-image


def get_image_features(X, y, appearance_dim=32):
    """Return features for every object in the array.

    Args:
        X (np.array): a 3D numpy array of raw data of shape (x, y, c).
        y (np.array): a 3D numpy array of integer labels of shape (x, y, 1).
        appearance_dim (int): The resized shape of the appearance feature.

    Returns:
        dict: A dictionary of feature names to np.arrays of shape
            (n, c) or (n, x, y, c) where n is the number of objects.
    """
    appearance_dim = int(appearance_dim)

    # each feature will be ordered based on the label.
    # labels are also stored and can be fetched by index.
    num_labels = len(np.unique(y)) - 1
    labels = np.zeros((num_labels,), dtype='int32')
    centroids = np.zeros((num_labels, 2), dtype='float32')
    morphologies = np.zeros((num_labels, 3), dtype='float32')
    appearances = np.zeros((num_labels, appearance_dim,
                            appearance_dim, X.shape[-1]), dtype='float32')

    # iterate over all objects in y
    props = regionprops(y[..., 0], cache=False)
    for i, prop in enumerate(props):
        # Get label
        labels[i] = prop.label

        # Get centroid
        centroid = np.array(prop.centroid)
        centroids[i] = centroid

        # Get morphology
        morphology = np.array([
            prop.area,
            prop.perimeter,
            prop.eccentricity
        ])
        morphologies[i] = morphology

        # Get appearance
        minr, minc, maxr, maxc = prop.bbox
        appearance = np.copy(X[minr:maxr, minc:maxc, :])
        resize_shape = (appearance_dim, appearance_dim)
        appearance = resize(appearance, resize_shape)
        appearances[i] = appearance

    # Get adjacency matrix
    # distance = cdist(centroids, centroids, metric='euclidean') < distance_threshold
    # adj_matrix = distance.astype('float32')

    return {
        'appearances': appearances,
        'centroids': centroids,
        'labels': labels,
        'morphologies': morphologies,
        # 'adj_matrix': adj_matrix,
    }
fa5cb730227b20b54b8d25270550c9dae9fc1348
3,647,202
def deactivate_text(shell: dict, env_vars: dict) -> str:
    """Returns the formatted text to write to the deactivation script
    based on the passed dictionaries."""
    lines = [shell["shebang"]]
    for k in env_vars.keys():
        lines.append(shell["deactivate"].format(k))
    return "\n".join(lines)
0a75134a55bf9cd8eceb311c48a5547ad373593d
3,647,203
from typing import get_origin


def is_dict(etype) -> bool:
    """Determine whether etype is a Dict"""
    return get_origin(etype) is dict or etype is dict
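A few illustrative calls (Python 3.8+, where typing.get_origin is available):

from typing import Dict

print(is_dict(dict))            # True  (bare dict)
print(is_dict(Dict[str, int]))  # True  (get_origin(...) is dict)
print(is_dict(list))            # False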
a65af54bf6b24c94906765c895c899b18bf5c1eb
3,647,204
import scipy.stats


def t_plot_parameters(thickness_curve, section, loading, molar_mass, liquid_density):
    """Calculates the parameters from a linear section of the t-plot."""
    slope, intercept, corr_coef, p, stderr = scipy.stats.linregress(
        thickness_curve[section], loading[section])

    # Check if slope is good
    if slope * (max(thickness_curve) / max(loading)) < 3:
        adsorbed_volume = intercept * molar_mass / liquid_density
        area = slope * molar_mass / liquid_density * 1000

        result_dict = {
            'section': section,
            'slope': slope,
            'intercept': intercept,
            'corr_coef': corr_coef,
            'adsorbed_volume': adsorbed_volume,
            'area': area,
        }
        return result_dict
    return None
46d2f65cac5a424b2054359dc8b083d3a2138cc6
3,647,205
import requests


def get_data(stock, start_date):
    """Fetch a maximum of the 100 most recent records for a given stock
    starting at the start_date.

    Args:
        stock (string): Stock Ticker
        start_date (int): UNIX date time
    """
    # Build the query string
    request_url = (f"https://api.pushshift.io/reddit/search/comment/"
                   f"?q={stock}&sort=asc&size=100&after={start_date}")
    # get the query and convert to json
    result_json = requests.get(request_url).json()
    return result_json
aafdc913d80346e82a21767cdb7b5e40f2376857
3,647,206
def depart_people(state, goal):
    """Departs all passengers that can depart on this floor"""
    departures = []
    for departure in state.destin.items():
        passenger = departure[0]
        if passenger in goal.served and goal.served[passenger]:
            floor = departure[1]
            if state.lift_at == floor and state.boarded[passenger] and not state.served[passenger]:
                departures.append(('depart', passenger, state.lift_at))
    return departures
f3a18ad9a6f884a57d0be1d0e27b3dfeeb95d736
3,647,207
def get_topic_for_subscribe():
    """Return the topic string used to subscribe for receiving
    future responses from DPS."""
    return _get_topic_base() + "res/#"
346841c7a11f569a7309b087baf0d621a63b8ae9
3,647,208
def generate_AES_key(bytes=32):
    """Generates a new AES key

    Parameters
    ----------
    bytes : int
        number of bytes in key

    Returns
    -------
    key : bytes
    """
    try:
        # Import inside the try block so the ImportError fallback is reachable.
        from Crypto import Random
        return Random.get_random_bytes(bytes)
    except ImportError:
        print('PyCrypto not installed. Reading from /dev/random instead')
        # Open in binary mode so a bytes object is returned.
        with open('/dev/random', 'rb') as rand:
            return rand.read(bytes)
4435aeea860bb3bca847156de0626c2cacde93e0
3,647,209
def make_column_kernelizer(*transformers, **kwargs):
    """Construct a ColumnKernelizer from the given transformers.

    This is a shorthand for the ColumnKernelizer constructor; it does not
    require, and does not permit, naming the transformers. Instead, they will
    be given names automatically based on their types. It also does not allow
    weighting with ``transformer_weights``.

    Parameters
    ----------
    *transformers : tuples
        Tuples of the form (transformer, columns) specifying the transformer
        objects to be applied to subsets of the data.

        transformer : {'drop', 'passthrough'} or estimator
            Estimator must support ``fit`` and ``transform``. Special-cased
            strings 'drop' and 'passthrough' are accepted as well, to
            indicate to drop the columns or to pass them through
            untransformed, respectively. If the transformer does not return a
            kernel (as informed by the attribute kernelizer=True), a linear
            kernelizer is applied after the transformer.

        columns : str, array-like of str, int, array-like of int, slice,
                array-like of bool or callable
            Indexes the data on its second axis. Integers are interpreted as
            positional columns, while strings can reference DataFrame columns
            by name. A scalar string or int should be used where
            ``transformer`` expects X to be a 1d array-like (vector),
            otherwise a 2d array will be passed to the transformer. A
            callable is passed the input data `X` and can return any of the
            above. To select multiple columns by name or dtype, you can use
            :obj:`make_column_selector`.

    remainder : {'drop', 'passthrough'} or estimator, default='drop'
        By default, only the specified columns in `transformers` are
        transformed and combined in the output, and the non-specified columns
        are dropped. (default of ``'drop'``). By specifying
        ``remainder='passthrough'``, all remaining columns that were not
        specified in `transformers` will be automatically passed through.
        This subset of columns is concatenated with the output of the
        transformers. By setting ``remainder`` to be an estimator, the
        remaining non-specified columns will use the ``remainder`` estimator.
        The estimator must support ``fit`` and ``transform``.

    n_jobs : int, default=None
        Number of jobs to run in parallel. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. n_jobs does not work with GPU backends.

    verbose : bool, default=False
        If True, the time elapsed while fitting each transformer will be
        printed as it is completed.

    Returns
    -------
    column_kernelizer : ColumnKernelizer

    See also
    --------
    himalaya.kernel_ridge.ColumnKernelizer : Class that allows combining the
        outputs of multiple transformer objects used on column subsets of the
        data into a single feature space.

    Examples
    --------
    >>> import numpy as np
    >>> from himalaya.kernel_ridge import make_column_kernelizer
    >>> from himalaya.kernel_ridge import Kernelizer
    >>> ck = make_column_kernelizer(
    ...     (Kernelizer(kernel="linear"), [0, 1, 2]),
    ...     (Kernelizer(kernel="polynomial"), slice(3, 5)))
    >>> X = np.array([[0., 1., 2., 2., 3.],
    ...               [0., 2., 0., 0., 3.],
    ...               [0., 0., 1., 0., 3.],
    ...               [1., 1., 0., 1., 2.]])
    >>> # Kernelize separately the first three columns and the last two
    >>> # columns, creating two kernels of shape (n_samples, n_samples).
    >>> ck.fit_transform(X).shape
    (2, 4, 4)
    """
    # transformer_weights keyword is not passed through because the user
    # would need to know the automatically generated names of the transformers
    n_jobs = kwargs.pop('n_jobs', None)
    remainder = kwargs.pop('remainder', 'drop')
    verbose = kwargs.pop('verbose', False)
    if kwargs:
        raise TypeError('Unknown keyword arguments: "{}"'.format(
            list(kwargs.keys())[0]))
    transformer_list = _get_transformer_list(transformers)
    return ColumnKernelizer(transformer_list, n_jobs=n_jobs,
                            remainder=remainder, verbose=verbose)
cfddec675782a6e70d1921372961abbb7853fa09
3,647,213
def plugin_info():
    """Returns information about the plugin.

    Args:
    Returns:
        dict: plugin information
    Raises:
    """
    return {
        'name': 'PT100 Poll Plugin',
        'version': '1.9.2',
        'mode': 'poll',
        'type': 'south',
        'interface': '1.0',
        'config': _DEFAULT_CONFIG
    }
f6d54b5ff64013ae17364db604cf1cb6b5204aba
3,647,214
def get_engine(isolation_level=None):
    """Creates an engine with the given isolation level."""
    # creates a shallow copy with the given isolation level
    if not isolation_level:
        return _get_base_engine()
    else:
        return _get_base_engine().execution_options(isolation_level=isolation_level)
32e055b2a4a1d0e7ecbc591218bb61c721113a09
3,647,215
from selenium.webdriver import PhantomJS


def phantomjs_driver(capabilities, driver_path, port):
    """
    Overrides the default `phantomjs_driver` driver from pytest-selenium.

    The default implementation uses ephemeral ports just as our tests do, but
    it doesn't provide any way to configure them; for this reason we basically
    recreate the driver fixture using the port fixture.
    """
    kwargs = {}
    if capabilities:
        kwargs['desired_capabilities'] = capabilities
    if driver_path is not None:
        kwargs['executable_path'] = driver_path
    kwargs['port'] = port.get()
    return PhantomJS(**kwargs)
5c6453f4d753cd765fa7c9fff47b61c6c6efac04
3,647,216
from time import time  # the body calls time() directly


def parse(address, addr_spec_only=False, strict=False, metrics=False):
    """
    Given a string, returns a scalar object representing a single full
    mailbox (display name and addr-spec), addr-spec, or a url.

    If parsing the entire string fails and strict is not set to True, fall
    back to trying to parse the last word only and assume everything else is
    the display name.

    Returns an Address object and optionally metrics on processing time
    if requested.

    Examples:
        >>> address.parse('John Smith <john@smith.com')
        John Smith <john@smith.com>

        >>> print address.parse('John <john@smith.com>', addr_spec_only=True)
        None

        >>> print address.parse('john@smith.com', addr_spec_only=True)
        'john@smith.com'

        >>> address.parse('http://host.com/post?q')
        http://host.com/post?q

        >>> print address.parse('foo')
        None
    """
    mtimes = {'parsing': 0}
    if addr_spec_only:
        parser = addr_spec_parser
    else:
        parser = mailbox_or_url_parser

    # normalize inputs to bytestrings (this module targets Python 2)
    if isinstance(address, unicode):
        address = address.encode('utf-8')

    # sanity checks
    if not address:
        return None, mtimes
    if len(address) > MAX_ADDRESS_LENGTH:
        _log.warning('address exceeds maximum length of %s', MAX_ADDRESS_LENGTH)
        return None, mtimes

    bstart = time()
    try:
        parse_rs = parser.parse(address.strip(), lexer=lexer.clone())
        addr_obj = _lift_parse_result(parse_rs)
    except (LexError, YaccError, SyntaxError):
        addr_obj = None

    if addr_obj is None and not strict:
        addr_parts = address.split(' ')
        addr_spec = addr_parts[-1]
        if len(addr_spec) < len(address):
            try:
                parse_rs = parser.parse(addr_spec, lexer=lexer.clone())
                addr_obj = _lift_parse_result(parse_rs)
                if addr_obj:
                    addr_obj._display_name = ' '.join(addr_parts[:-1])
                    if isinstance(addr_obj._display_name, str):
                        addr_obj._display_name = addr_obj._display_name.decode('utf-8')
            except (LexError, YaccError, SyntaxError):
                addr_obj = None

    mtimes['parsing'] = time() - bstart
    return addr_obj, mtimes
b6516d530892a7db405b816987598ce53a0dc776
3,647,219
import requests


def unfreeze_file(user, data):
    """Unfreeze a file.

    :return: status code, response data
    """
    r = requests.post('%s/unfreeze' % URL, json=data, auth=(user, PASS), verify=False)
    return r.status_code, r.json()
4ee59dd44f42685a02907dec766dc8026f939da2
3,647,220
def prompt_url(q):
    """
    :param q: The prompt to display to the user
    :return: The user's normalized input. We ensure there is an URL scheme,
        a domain, a "/" path, and no trailing elements.
    :rtype: str
    """
    return prompt(q, _url_coerce_fn)
dfe810a4552c880d71efabffb2f9167bfce0ad8a
3,647,221
def eval_mnl_logsums(choosers, spec, locals_d, trace_label=None):
    """
    like eval_nl except return logsums instead of making choices

    Returns
    -------
    logsums : pandas.Series
        Index will be that of `choosers`, values will be
        logsum across spec column values
    """
    trace_label = tracing.extend_trace_label(trace_label, 'mnl')
    check_for_variability = tracing.check_for_variability()

    print("running eval_mnl_logsums")

    expression_values = eval_variables(spec.index, choosers, locals_d)

    if check_for_variability:
        _check_for_variability(expression_values, trace_label)

    # utility values
    utilities = compute_utilities(expression_values, spec)

    # logsum is log of exponentiated utilities summed across
    # columns of each chooser row
    utils_arr = utilities.values.astype('float')  # .as_matrix() was removed in pandas 1.0
    logsums = np.log(np.exp(utils_arr).sum(axis=1))
    logsums = pd.Series(logsums, index=choosers.index)

    if trace_label:
        # add logsum to utilities for tracing
        utilities['logsum'] = logsums
        tracing.trace_df(choosers, '%s.choosers' % trace_label)
        tracing.trace_df(utilities, '%s.utilities' % trace_label,
                         column_labels=['alternative', 'utility'])
        tracing.trace_df(logsums, '%s.logsums' % trace_label,
                         column_labels=['alternative', 'logsum'])
        tracing.trace_df(expression_values, '%s.expression_values' % trace_label,
                         column_labels=['expression', None])

    return logsums
d9b00c2f5f436a0825cbe3bdd60c6b2257c769b3
3,647,222
import numpy as np
import scipy.sparse as sp


def find_zeroed_indices(adjusted, original):
    """Find the indices of the values present in ``original`` but missing
    in ``adjusted``.

    Parameters
    ----------
    adjusted: np.array
    original: array_like

    Returns
    -------
    Tuple[np.ndarray]
        Indices of the values present in ``original`` but missing in ``adjusted``.
    """
    if sp.issparse(original):
        i, j, v = sp.find(original)
        # Use hash maps to figure out which indices have been lost in the original
        original_indices = set(zip(i, j))
        adjusted_indices = set(zip(*np.where(~adjusted.mask)))
        zeroed_indices = original_indices - adjusted_indices
        # Convert our hash map of coords into the standard numpy indices format
        indices = list(zip(*zeroed_indices))
        indices = tuple(map(np.array, indices))
        return indices
    else:
        original = np.ma.masked_array(original, mask=original <= 0)
        return np.where(adjusted.mask & ~original.mask)
c01b91ec8be0d1bc22aad9042328a451b7424996
3,647,223
def inventory_update(arr1, arr2):
    """Add the inventory from arr2 to arr1.

    If an item exists in both arr1 and arr2, then the quantity of the item is
    updated in arr1. If an item exists in only arr2, then the item is added
    to arr1. If an item only exists in arr1, then that item remains unaffected.

    Arguments:
        arr1: the destination inventory
        arr2: the inventory to add to the destination inventory

    Returns:
        a combined inventory
    """
    # Set longer to the longer of the two arrays
    longer = arr2
    if len(longer) > len(arr1):
        temp = arr1
        arr1 = longer
        longer = temp

    # Since longer is potentially modified, set it
    # to a copy of itself.
    longer = longer.copy()

    # Iterate over the shorter array, appending
    # items that don't exist in the longer array,
    # or updating the quantity of existing items.
    for tup in arr1:
        qty = tup[0]
        name = tup[1]
        # Funny way to get the index of an array
        # object based on the object's own indexed
        # elements.
        try:
            i = [x[1] for x in longer].index(name)
        except ValueError:
            i = -1
        if i < 0:
            longer.append(tup)
        else:
            longer[i][0] += qty

    # Man, why doesn't the index function accept a
    # key argument? Sort on the string description
    # of each inventory item.
    longer.sort(key=lambda x: x[1])
    return longer
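A usage sketch (items must be mutable lists, since quantities are updated in place):

cur_inv = [[21, 'Bowling Ball'], [2, 'Dirty Sock'], [1, 'Hair Pin'], [5, 'Microphone']]
new_inv = [[2, 'Hair Pin'], [3, 'Half-Eaten Apple'], [67, 'Bowling Ball'], [7, 'Toothpaste']]
print(inventory_update(cur_inv, new_inv))
# [[88, 'Bowling Ball'], [2, 'Dirty Sock'], [3, 'Hair Pin'],
#  [3, 'Half-Eaten Apple'], [5, 'Microphone'], [7, 'Toothpaste']]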
febba1d2dac6c79fabf4e8aaad8c0fd482478b50
3,647,224
import re

from bs4 import BeautifulSoup
from cfscrape import create_scraper  # assumed import, per slam-mirrorbot


def racaty(url: str) -> str:
    """Racaty direct link generator
    based on https://github.com/SlamDevs/slam-mirrorbot"""
    dl_url = ''
    try:
        link = re.findall(r'\bhttps?://.*racaty\.net\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("No Racaty links found\n")
    scraper = create_scraper()
    r = scraper.get(url)
    soup = BeautifulSoup(r.text, "lxml")
    op = soup.find("input", {"name": "op"})["value"]
    ids = soup.find("input", {"name": "id"})["value"]
    rpost = scraper.post(url, data={"op": op, "id": ids})
    rsoup = BeautifulSoup(rpost.text, "lxml")
    dl_url = rsoup.find("a", {"id": "uniqueExpirylink"})["href"].replace(" ", "%20")
    return dl_url
8c0df1dd9bf96fcb63be7f59db20ae6c9e4cef00
3,647,225
import numpy as np
from numba.typed import List  # assumed: typing.List cannot be instantiated,
                              # so the queue below needs numba's typed List


def build_tree(tree, parent, counts, ordered_ids):
    """
    Recursively splits the data, which is contained in the tree object itself
    and is indexed by ordered_ids.

    Parameters
    ----------
    tree: Tree object
    parent: TreeNode object
        The last node added to the tree, which will be the parent of the two
        nodes resulting from the split (if any) of this function call.
    counts: numpy array (int)
        The class counts of the samples reaching the parent node.
    ordered_ids: numpy array (int)
        The ids of the samples reaching the parent node.
    """
    root = TreeNode(0, counts, parent, ordered_ids, False)
    queue = List()
    queue.append(root)
    n_nodes = 1
    np.random.seed(tree.random_state)
    while len(queue) > 0:
        node = queue.pop(0)
        split = find_best_split(node, tree, np.random.randint(1e6))
        if split is not None:
            node.split = split
            left_child = TreeNode(n_nodes, split.left_counts, node, split.left_ids, False)
            node.left_child = left_child
            queue.append(left_child)
            n_nodes += 1
            right_child = TreeNode(n_nodes, split.right_counts, node, split.right_ids, False)
            node.right_child = right_child
            queue.append(right_child)
            n_nodes += 1
        else:
            node.isleaf = True
            tree.depth = max(tree.depth, node.depth)
    return root, n_nodes
8957ef481ef6b2ba02b6e60c97165a25231d89ae
3,647,228
import pandas as pd


def get_perf_measure_by_group(aif_metric, metric_name):
    """Get performance measures by group."""
    perf_measures = ['TPR', 'TNR', 'FPR', 'FNR', 'PPV', 'NPV', 'FDR', 'FOR', 'ACC']

    func_dict = {
        'selection_rate': lambda x: aif_metric.selection_rate(privileged=x),
        'precision': lambda x: aif_metric.precision(privileged=x),
        'recall': lambda x: aif_metric.recall(privileged=x),
        'sensitivity': lambda x: aif_metric.sensitivity(privileged=x),
        'specificity': lambda x: aif_metric.specificity(privileged=x),
        'power': lambda x: aif_metric.power(privileged=x),
        'error_rate': lambda x: aif_metric.error_rate(privileged=x),
    }

    if metric_name in perf_measures:
        metric_func = lambda x: aif_metric.performance_measures(privileged=x)[metric_name]
    elif metric_name in func_dict.keys():
        metric_func = func_dict[metric_name]
    else:
        raise NotImplementedError

    df = pd.DataFrame({
        'Group': ['all', 'privileged', 'unprivileged'],
        metric_name: [metric_func(group) for group in [None, True, False]],
    })
    return df
d4b861c882d6f5502798d211c2ab1322e19cf9b2
3,647,229
from datetime import datetime

from django.http import HttpResponse  # assumed import: this is a Django view


def hello_world(request):
    """Return a greeting."""
    return HttpResponse('Hello, world!{now}'.format(
        now=datetime.now().strftime('%b %dth, %Y : %M HttpResponses')
    ))
bcdf4c504d44883c7afc75c8a76ff052cd0b246d
3,647,230
import mimetypes
import zlib


def getfile(id, name):
    """Returns an attached file."""
    mime = mimetypes.guess_type(name)[0]
    if mime is None:
        mime = "application/octet-stream"
    c = get_cursor()
    c.execute(
        """
        select files.ticket_id as ticket_id,
               files.size as size,
               files.contents as contents,
               tickets.admin_only as admin_only
          from files
          join tickets on tickets.id = files.ticket_id
         where files.id = :id
        """,
        {"id": id},
    )
    row = c.fetchone()
    blob = zlib.decompress(row["contents"])
    if not user_admin(current_user()) and row["admin_only"] == 1:
        # "you do not have permission to access this resource!"
        return "você não tem permissão para acessar este recurso!"
    else:
        response.content_type = mime
        return blob
1ce8322301b33a0d6762aa545344d4c0fe38269c
3,647,231
def get_default_wavelet():
    """Returns the default wavelet to be used for scaleograms"""
    global DEFAULT_WAVELET
    return DEFAULT_WAVELET
0c403b5b7a21bedbd55c0cbd6faa6a3648c3a0cc
3,647,232
def check_output(file_path: str) -> bool:
    """
    This function checks an output file, either from geomeTRIC or from Psi4,
    for a successful completion keyword. Returns True if the calculation
    finished successfully, otherwise False.
    """
    with open(file_path, "r") as read_file:
        text = read_file.read()
    checks = ["Converged! =D", "Psi4 exiting successfully"]
    return any(check in text for check in checks)
2f0dea67216aff945b1b0db74e0131022acc3019
3,647,233
def dumps(value):
    """
    Dumps a data structure to TOML source code.

    The given value must be either a dict of dict values, a dict, or a TOML
    file constructed by this module.
    """
    if not isinstance(value, TOMLFile):
        raise RuntimeError(
            'Can only dump a TOMLFile instance loaded by load() or loads()'
        )
    return value.dumps()
f92b906b502bc2b0ba2b8bf3840083bafce14086
3,647,234
def calc_graph(dict_graph):
    """
    creates scatter of comfort and curves of constant relative humidity

    :param dict_graph: contains comfort conditions to plot, output of
        comfort_chart.calc_data()
    :type dict_graph: dict
    :return: traces of scatter plot of 4 comfort conditions
    :rtype: list of plotly.graph_objs.Scatter
    """
    traces = []

    # draw scatter of comfort conditions in building
    trace = go.Scatter(x=dict_graph['t_op_occupied_winter'],
                       y=dict_graph['x_int_occupied_winter'],
                       name='occupied hours winter', mode='markers',
                       marker=dict(color=COLORS_TO_RGB['red']))
    traces.append(trace)

    trace = go.Scatter(x=dict_graph['t_op_unoccupied_winter'],
                       y=dict_graph['x_int_unoccupied_winter'],
                       name='unoccupied hours winter', mode='markers',
                       marker=dict(color=COLORS_TO_RGB['blue']))
    traces.append(trace)

    trace = go.Scatter(x=dict_graph['t_op_occupied_summer'],
                       y=dict_graph['x_int_occupied_summer'],
                       name='occupied hours summer', mode='markers',
                       marker=dict(color=COLORS_TO_RGB['purple']))
    traces.append(trace)

    trace = go.Scatter(x=dict_graph['t_op_unoccupied_summer'],
                       y=dict_graph['x_int_unoccupied_summer'],
                       name='unoccupied hours summer', mode='markers',
                       marker=dict(color=COLORS_TO_RGB['orange']))
    traces.append(trace)

    return traces
19a277db0f59e2b871130099eab3b714bd5b94b9
3,647,235
import numpy as np
from tensorflow import keras  # assumed import: keras.preprocessing is used


def generate_arrays(df, resize=True, img_height=50, img_width=200):
    """Generates image array and labels array from a dataframe"""
    num_items = len(df)
    images = np.zeros((num_items, img_height, img_width), dtype=np.float32)
    labels = [0] * num_items

    for i in range(num_items):
        input_img = keras.preprocessing.image.load_img(df["img_path"][i],
                                                       color_mode='grayscale')
        img_array = keras.preprocessing.image.img_to_array(input_img)
        if resize:
            img_array = np.resize(img_array, (img_height, img_width))
        img_array = (img_array / 255.).astype(np.float32)
        label = df["label"][i]
        if is_valid_captcha(label):
            images[i, :, :] = img_array
            labels[i] = label

    return images, np.array(labels)
2a50fd84d5b8da2845205b65cd12f61868bd421d
3,647,237
import numpy as np


def compute_cosine_distance(Q, feats, names):
    """feats and Q: L2-normalized, n*d"""
    dists = np.dot(Q, feats.T)
    # note: assumes Q is a single query vector (1-D), so that argsort
    # ranks all gallery entries for that query
    idxs = np.argsort(dists)[::-1]
    rank_dists = dists[idxs]
    rank_names = [names[k] for k in idxs]
    return (idxs, rank_dists, rank_names)
e15007fb6fc73aab27db00d7cf283300077dd1c7
3,647,238
def phase(ifc, inc_pt, d_in, normal, z_dir, wvl, n_in, n_out):
    """apply phase shift to incoming direction, d_in, about normal"""
    try:
        d_out, dW = ifc.phase(inc_pt, d_in, normal, z_dir, wvl, n_in, n_out)
        return d_out, dW
    except ValueError:
        raise TraceEvanescentRayError(ifc, inc_pt, d_in, normal, n_in, n_out)
6289674f20718ed4e1e78b1a4da0fe5d4b89df75
3,647,239
from typing import Iterator

import numpy as np
import pandas as pd
from pyspark.sql.functions import pandas_udf  # assumed imports for the
from pyspark.sql.types import BinaryType      # Spark UDF machinery


def generate_udf(spec: "rikai.spark.sql.codegen.base.ModelSpec"):
    """Construct a UDF to run sklearn model.

    Parameters
    ----------
    spec : ModelSpec
        the model specifications object

    Returns
    -------
    A Spark Pandas UDF.
    """

    def predict(model, X):
        if hasattr(model, "predict"):
            return model.predict(X)
        elif hasattr(model, "transform"):
            return model.transform(X)
        else:
            raise RuntimeError("predict or transform is not available")

    def sklearn_inference_udf(
        iter: Iterator[pd.Series],
    ) -> Iterator[pd.Series]:
        model = spec.load_model()
        for series in list(iter):
            X = np.vstack(series.apply(_pickler.loads).to_numpy())
            y = [_pickler.dumps(pred.tolist()) for pred in predict(model, X)]
            yield pd.Series(y)

    return pandas_udf(sklearn_inference_udf, returnType=BinaryType())
ceab18240abc73c361108b859817723c08bdd0e3
3,647,240
import tensorflow as tf


def ssl_loss_mean_teacher(labels_x, logits_x, logits_teacher, logits_student):
    """
    Computes two losses based on the labeled and unlabeled data: x_loss is
    the labeled CE loss and loss_mt the unlabeled consistency (MSE) loss.

    Args:
        labels_x: tensor, contains labels corresponding to logits_x of shape
            [batch, num_classes]
        logits_x: tensor, contains the logits of a batch of images of shape
            [batch, num_classes]
        logits_teacher: tensor, logits of teacher model of shape [batch, num_classes]
        logits_student: tensor, logits of student model of shape [batch, num_classes]

    Returns:
        Two floating point numbers, the first representing the labeled CE
        loss and the second holding the MSE loss value.
    """
    x_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels_x, logits=logits_x)
    x_loss = tf.reduce_mean(x_loss)
    loss_mt = tf.reduce_mean(
        (tf.nn.softmax(logits_teacher) - tf.nn.softmax(logits_student)) ** 2, -1)
    loss_mt = tf.reduce_mean(loss_mt)
    return x_loss, loss_mt
016192ea6cf1002a0aa8735003e76a7c2af7526c
3,647,241
from sympy import Tuple, nsimplify, sympify  # assumed: typing.Tuple is not
                                             # callable; sympy's Tuple is used


def _sch_el(self, *wert, **kwargs):
    """Element of a family (Schar) of matrices; for one parameter"""
    if kwargs.get('h'):
        print("\nElement of a family of matrices\n")
        print("Call  matrix . sch_el( value )\n")
        print("   matrix   matrix")
        print("   value    value of the family parameter")
        print("\nOnly one family parameter is allowed\n")
        return
    schar = any([ve.is_schar for ve in self.vekt])
    if not schar or len(self.sch_par) > 1:
        print('agla: not a family with one parameter')
        return
    if not wert or len(wert) != 1:
        print('agla: provide a value for the family parameter')
        return
    p = Tuple(*self.sch_par)[0]
    wert = sympify(*wert)
    if not is_zahl(wert):
        print('agla: provide a number or a free parameter for the family parameter')
        return
    try:
        wert = nsimplify(wert)
    except RecursionError:
        pass
    vekt = []
    for ve in self.vekt:
        if p in ve.sch_par:
            vekt.append(ve.sch_el(wert))
        else:
            vekt.append(ve)
    return Matrix(*vekt)
8e88e04ee6e4f1b4be658c120a1bc66060aafc81
3,647,242
def scsilun_to_int(lun):
    """
    There are two styles of LUN number: one whose decimal value is < 256,
    and the other given as a full 16-hex-digit value. According to T10 SAM,
    the full 16-hex-digit form should be swapped and converted into decimal.

    For example, SC got the zLinux LUN number '40294018' from the DS8K API.
    It should be swapped to '40184029' and converted into decimal,
    1075331113. When the LUN number is '0c' and its decimal value is < 256,
    it should be converted directly into decimal, 12.

    https://github.com/kubernetes/kubernetes/issues/45024
    """
    pretreated_scsilun = int(lun, 16)
    if pretreated_scsilun < 256:
        return pretreated_scsilun
    return (pretreated_scsilun >> 16 & 0xFFFF) | \
        (pretreated_scsilun & 0xFFFF) << 16
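The two cases from the docstring, reproduced as a sanity check:

print(scsilun_to_int('0c'))        # 12 (decimal value < 256, converted directly)
print(scsilun_to_int('40294018'))  # 1075331113 (0x40294018 swapped to 0x40184029)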
2022938ccb5abbc89d5fb6f5f109d629e980c0ba
3,647,244
import numpy as np


def ordered_indices(src_sizes, tgt_sizes, common_seed, shuffle=True, buckets=None):
    """Return an ordered list of indices. Batches will be constructed based
    on this order."""
    if shuffle:
        indices = np.random.RandomState(common_seed).permutation(len(src_sizes)).astype(np.int64)
    else:
        indices = np.arange(len(src_sizes), dtype=np.int64)
    if buckets is None:
        # sort by target length, then source length
        if tgt_sizes is not None:
            # first sort by number of tgt tokens: a stable mergesort of the
            # shuffled indices by tgt size gives the sorted index order
            indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
        # then sort by number of src tokens
        return indices[np.argsort(src_sizes[indices], kind="mergesort")]
    else:
        # sort by the larger of the two lengths, i.e. by bucketed_num_tokens,
        # which is: max(padded_src_len, padded_tgt_len)
        bucketed_num_tokens = np.array(
            [max(src_size, tgt_size)
             for src_size, tgt_size in zip(src_sizes, tgt_sizes)])
        return indices[np.argsort(bucketed_num_tokens[indices], kind="mergesort")]
469d7f963134d7df9c72be07182e7ba4e2533472
3,647,245
import io


def get_predictions(single_stream, class_mapping_dict, ip, port, model_name):
    """Gets predictions for a single image using Tensorflow serving

    Arguments:
        single_stream (dict): A single prodigy stream
        class_mapping_dict (dict): with key as int and value as class name
        ip (str): tensorflow serving IP
        port (str): tensorflow serving port
        model_name (str): model name in tensorflow serving

    Returns:
        A tuple containing numpy arrays:
        (class_ids, class_names, scores, boxes)
    """
    image_byte_stream = b64_uri_to_bytes(single_stream["image"])
    encoded_image_io = io.BytesIO(image_byte_stream)
    image = Image.open(encoded_image_io)
    width, height = image.size
    filename = str(single_stream["meta"]["file"])
    file_extension = filename.split(".")[1].lower()
    if file_extension == "png":
        image_format = b'png'
    elif file_extension in ("jpg", "jpeg"):
        image_format = b'jpg'
    else:
        log(("Only 'png', 'jpeg' or 'jpg' files are supported by ODAPI. "
             "Got {}. Thus treating it as `jpg` file. "
             "Might cause errors".format(file_extension)))
        image_format = b'jpg'
    filename = filename.encode("utf-8")

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(image_byte_stream),
        'image/format': dataset_util.bytes_feature(image_format),
    }))
    boxes, class_ids, scores = tf_odapi_client(tf_example.SerializeToString(),
                                               ip, port, model_name,
                                               "serving_default",
                                               input_name="serialized_example",
                                               timeout=300)
    class_names = np.array([class_mapping_dict[class_id]
                            for class_id in class_ids])
    return (class_ids, class_names, scores, boxes)
631c21878df03c240d32556279d9b31ebc6d723f
3,647,246
import itertools

import networkx as nx  # assumed import: the function builds an nx.Graph


def interaction_graph(matrix):
    """Create a networkx graph object from a (square) matrix.

    Parameters
    ----------
    matrix : numpy.ndarray
        Matrix of mutual information; the information for the edges is taken
        from the upper matrix

    Returns
    -------
    graph : networkx.Graph()
        The graph with MI as weighted edges and positions as nodes

    Raises
    ------
    AssertionError
        If the matrix is not square
    """
    # Assert that the matrix is a square matrix.
    assert matrix.shape[0] == matrix.shape[1], "The matrix is not square"
    graph = nx.Graph()
    positions = len(matrix)
    for pos1, pos2 in itertools.combinations(range(positions), 2):
        graph.add_edge(pos1, pos2, weight=matrix[pos1, pos2])
    return graph
d1da8b6f0e269c1118f56840173e7895d5efb587
3,647,247
import torch
import torch.nn.functional as F  # assumed import: F.softmax/F.relu are used


def weight_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, alpha, n, cuda):
    """
    Weight inter-relation aggregator
    Reference: https://arxiv.org/abs/2002.12307

    :param num_relations: number of relations in the graph
    :param self_feats: batch nodes features or embeddings
    :param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
    :param embed_dim: the dimension of output embedding
    :param weight: parameter used to transform node embeddings before inter-relation aggregation
    :param alpha: weight parameter for each relation used by Rio-Weight
    :param n: number of nodes in a batch
    :param cuda: whether use GPU
    :return: inter-relation aggregated node embeddings
    """
    # transform batch node embedding and neighbor embedding in each relation
    # with the weight parameter
    center_h = weight.mm(self_feats.t())
    neigh_h = weight.mm(neigh_feats.t())

    # compute relation weights using softmax
    w = F.softmax(alpha, dim=1)

    # initialize the final neighbor embedding
    if cuda:
        aggregated = torch.zeros(size=(embed_dim, n)).cuda()
    else:
        aggregated = torch.zeros(size=(embed_dim, n))

    # add weighted neighbor embeddings in each relation together
    for r in range(num_relations):
        aggregated += torch.mul(w[:, r].unsqueeze(1).repeat(1, n),
                                neigh_h[:, r * n:(r + 1) * n])

    # sum aggregated neighbor embedding and batch node embedding,
    # then feed them to the activation function
    combined = F.relu(center_h + aggregated)
    return combined
c664bd88fbd8abf30b050ca93c264a3e5ead147b
3,647,250
def ho2ro(ho):
    """Homochoric vector to Rodrigues-Frank vector (via axis angle pair)."""
    return Rotation.ax2ro(Rotation.ho2ax(ho))
be3ce1dd6ac9e0815a4cb50ff922f0816320fcae
3,647,251
def get_ratio(old, new):
    # type: (unicode, unicode) -> float
    """Return a "similarity ratio" (in percent) representing the similarity
    between the two strings, where 0 means the strings are equal and larger
    values mean less similar.
    """
    if not all([old, new]):
        return VERSIONING_RATIO
    if IS_SPEEDUP:
        return Levenshtein.distance(old, new) / (len(old) / 100.0)
    else:
        return levenshtein_distance(old, new) / (len(old) / 100.0)
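A worked example, under the assumption that the Levenshtein backend returns the plain edit distance:

# distance('kitten', 'sitting') == 3 and len('kitten') == 6,
# so the ratio is 3 / (6 / 100.0) == 50.0
print(get_ratio('kitten', 'sitting'))  # 50.0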
28648934701445c9066e88b787465ccc21aa6ba5
3,647,252
import numpy as np


def sample2D(F, X, Y, mask=None, undef_value=0.0, outside_value=None):
    """Bilinear sample of a 2D field

    *F* : 2D array

    *X*, *Y* : position in grid coordinates, scalars or compatible arrays

    *mask* : if present must be a 2D matrix with 1 at valid and zero at
             non-valid points

    *undef_value* : value to put at undefined points

    *outside_value* : value to return outside the grid;
                      defaults to None, raising ValueError if any points
                      are outside

    Note reversed axes, for integers i and j we have
    ``sample2D(F, i, j) = F[j,i]``

    If jmax, imax = F.shape then inside values requires
    0 <= x < imax-1, 0 <= y < jmax-1

    Using bilinear interpolation
    """
    # --- Argument checking ---
    # X and Y should be broadcastable to the same shape
    Z = np.add(X, Y)
    # scalar is True if both X and Y are scalars
    scalar = np.isscalar(Z)
    if np.ndim(F) != 2:
        raise ValueError("F must be 2D")
    if mask is not None:
        if mask.shape != F.shape:
            raise ValueError("Must have mask.shape == F.shape")

    jmax, imax = F.shape

    # Broadcast X and Y
    X0 = X + np.zeros_like(Z)
    Y0 = Y + np.zeros_like(Z)

    # Find integer I, J such that
    #   0 <= I <= X < I+1 <= imax-1, 0 <= J <= Y < J+1 <= jmax-1
    # and local increments P and Q
    I = X0.astype("int")
    J = Y0.astype("int")
    P = X0 - I
    Q = Y0 - J

    outside = (X0 < 0) | (X0 >= imax - 1) | (Y0 < 0) | (Y0 >= jmax - 1)
    if np.any(outside):
        if outside_value is None:
            raise ValueError("point outside grid")
        I = np.where(outside, 0, I)
        J = np.where(outside, 0, J)

    # Weights for bilinear interpolation
    W00 = (1 - P) * (1 - Q)
    W01 = (1 - P) * Q
    W10 = P * (1 - Q)
    W11 = P * Q
    SW = 1.0  # Sum of weights

    if mask is not None:
        W00 = mask[J, I] * W00
        W01 = mask[J + 1, I] * W01
        W10 = mask[J, I + 1] * W10
        W11 = mask[J + 1, I + 1] * W11
        SW = W00 + W01 + W10 + W11
        SW = np.where(SW == 0, -1.0, SW)  # Avoid division by zero below

    S = np.where(
        SW <= 0,
        undef_value,
        (W00 * F[J, I] + W01 * F[J + 1, I]
         + W10 * F[J, I + 1] + W11 * F[J + 1, I + 1]) / SW,
    )

    # Set in outside_values; compare against None so that a falsy
    # outside_value such as 0.0 is still applied
    if outside_value is not None:
        S = np.where(outside, outside_value, S)

    # Scalar input gives scalar output
    if scalar:
        S = float(S)

    return S
746782b7712ff28f76db280e9c55977e81a370a5
3,647,253
import numpy as np


def rotations_to_radians(rotations):
    """converts rotations to radians"""
    return np.pi * 2 * rotations
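For example:

print(rotations_to_radians(0.5))  # 3.141592653589793 (half a turn is pi)
print(rotations_to_radians(1.0))  # 6.283185307179586 (a full turn is 2*pi)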
15beacbccbbe6d22ac4f659aa5cf22a4e63b503e
3,647,255
import jax.numpy as jnp


def _expect_ket(oper, state):
    """Private function to calculate the expectation value of an operator
    with respect to a ket."""
    oper, ket = jnp.asarray(oper), jnp.asarray(state)
    return jnp.vdot(jnp.transpose(ket), jnp.dot(oper, ket))
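A small sanity check, assuming jax is installed: the Pauli-Z expectation of |0> is +1.

sigma_z = jnp.array([[1.0, 0.0], [0.0, -1.0]])
ket0 = jnp.array([1.0, 0.0])
print(_expect_ket(sigma_z, ket0))  # 1.0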
c7b261852f0e77bda7dcb3cae53939f637e1dca7
3,647,256
def resnet152(pretrained=False, last_stride=1, model_path=''):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        model_path (str): path to the pre-trained weights
    """
    return _resnet(pretrained=pretrained, last_stride=last_stride,
                   block=Bottleneck, layers=[3, 8, 36, 3],
                   model_path=model_path, model_name='resnet152')
ad2837271ef98861dc8f2c3eae9687fc71d435b6
3,647,257
def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):
    """
    Check that the wheel moves by approximately 35 degrees during the
    closed-loop period on trials where a feedback (error sound or valve)
    is delivered.

    Metric: M = abs(w_resp - w_t0) - threshold_displacement, where
        w_resp = position at response time, w_t0 = position at go cue time,
        threshold_displacement = displacement required to move 35 visual degrees
    Criterion: displacement < 3 visual degrees
    Units: degrees angle of wheel turn

    :param data: dict of trial data with keys ('wheel_timestamps',
        'wheel_position', 'choice', 'intervals', 'goCueTrigger_times',
        'response_times', 'feedback_times', 'position')
    :param wheel_gain: the 'STIM_GAIN' task setting
    """
    # Get the Bpod extracted wheel data
    timestamps = data['wheel_timestamps']
    position = data['wheel_position']

    return _wheel_move_during_closed_loop(timestamps, position, data,
                                          wheel_gain, tol=3)
6b696158a086cf899cc23d207b0c6142f1f50a65
3,647,258
from bisect import bisect_left


def next_fast_len(target: int) -> int:
    """
    Find the next fast size of input data to `fft`, for zero-padding, etc.

    SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
    returns the next composite of the prime factors 2, 3, and 5 which is
    greater than or equal to `target`. (These are also known as 5-smooth
    numbers, regular numbers, or Hamming numbers.)

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.

    Returns
    -------
    out : int
        The first 5-smooth number greater than or equal to `target`.

    Notes
    -----
    .. versionadded:: 0.18.0

    Examples
    --------
    On a particular machine, an FFT of prime length takes 133 ms:

    >>> from scipy import fftpack
    >>> min_len = 10007  # prime length is worst case for speed
    >>> a = np.random.randn(min_len)
    >>> b = fftpack.fft(a)

    Zero-padding to the next 5-smooth length reduces computation time to
    211 us, a speedup of 630 times:

    >>> fftpack.helper.next_fast_len(min_len)
    10125
    >>> b = fftpack.fft(a, 10125)

    Rounding up to the next power of 2 is not optimal, taking 367 us to
    compute, 1.7 times as long as the 5-smooth size:

    >>> b = fftpack.fft(a, 16384)
    """
    hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48,
            50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128,
            135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250,
            256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450,
            480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729,
            750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125,
            1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536,
            1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160,
            2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916,
            3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840,
            3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000,
            5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400,
            6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100,
            8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000)

    target = int(target)

    if target <= 6:
        return target

    # Quickly check if it's already a power of 2
    if not (target & (target - 1)):
        return target

    # Get result quickly for small sizes, since FFT itself is similarly fast.
    if target <= hams[-1]:
        return hams[bisect_left(hams, target)]

    match = float('inf')  # Anything found will be smaller
    p5 = 1
    while p5 < target:
        p35 = p5
        while p35 < target:
            # Ceiling integer division, avoiding conversion to float
            # (quotient = ceil(target / p35))
            quotient = -(-target // p35)

            # Quickly find next power of 2 >= quotient
            p2 = 2**((quotient - 1).bit_length())

            N = p2 * p35
            if N == target:
                return N
            elif N < match:
                match = N
            p35 *= 3
            if p35 == target:
                return p35
        if p35 < match:
            match = p35
        p5 *= 5
        if p5 == target:
            return p5
    if p5 < match:
        match = p5
    return match
e00aa69ffd425489cceef5f50b77c977e18b4a1f
3,647,259
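A quick worked check of the search above (a usage sketch, assuming only the function as fixed here): 1021 is prime, so the nearest 5-smooth length at or above it is the power of two 1024.

assert next_fast_len(1000) == 1000    # 1000 = 2**3 * 5**3, already 5-smooth
assert next_fast_len(1021) == 1024    # prime input rounds up to the next composite of {2, 3, 5}
assert next_fast_len(10007) == 10125  # matches the docstring example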
import numpy as np


def human_format(val: int, fmt: str = '.1f') -> str:
    """ convert e.g. 1230 -> '1.2 K' """
    units = ['', 'K', 'M', 'G']
    # Cap the exponent so very large values fall back to the largest unit
    # instead of indexing past the end of `units`.
    base = min(int(np.floor(np.log10(val))) // 3, len(units) - 1)
    if base == 0:
        return str(val)
    val = val / 10 ** (3 * base)
    res = ('{{:{}}} {}'.format(fmt, units[base])).format(val)
    return res
73dfffa4f9afaa2c294ebceeabda4947d3a6cbe1
3,647,260
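A usage sketch of the fixed formatter above; note the output carries an uppercase unit with a leading space, so 1230 renders as '1.2 K'.

assert human_format(123) == '123'      # base == 0: value returned unchanged
assert human_format(1230) == '1.2 K'   # 1230 / 10**3 formatted with '.1f'
assert human_format(2_500_000, fmt='.2f') == '2.50 M'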
from random import randint


def random_choice(number: int) -> bool:
    """
    Generate a random int and compare with the argument passed

    :param int number: number passed
    :return: whether the argument is greater than or equal to a random number in [1, 100]
    :rtype: bool
    """
    return number >= randint(1, 100)
d88e7e23bcff89b0f33a43e34b2ba640589fb0e3
3,647,262
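One way to read the helper: random_choice(n) succeeds with probability n/100. A small hypothetical simulation (not part of the original module) makes that visible.

trials = 10_000
hits = sum(random_choice(30) for _ in range(trials))
print(f"observed rate: {hits / trials:.2f}  (expected ~0.30)")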
import xml.etree.ElementTree as ET
from typing import Any


def request_l3_attachments(session, apic) -> Any:
    """Request current policy information for encap on L3Outs"""
    root = None
    uri = f"https://{apic}/api/class/l3extRsPathL3OutAtt.xml"
    response = session.get(uri, verify=False)

    try:
        root = ET.fromstring(response.text)
    except ET.ParseError:
        print("Something went wrong. Please try again")

    # If the response has a totalCount of 0, notify the user that no encap was found
    if response.text.rfind("totalCount=\"0\"") != -1 or response.text.rfind("error code") != -1:
        print("\n######## No External Policy Assigned ##########")

    return root
0623ffde29a67e0f96ec5284b0a27109bff5b1aa
3,647,263
import matplotlib.pyplot as plt


def bet_plot(
    pressure,
    bet_points,
    minimum,
    maximum,
    slope,
    intercept,
    p_monolayer,
    bet_monolayer,
    ax=None
):
    """
    Draw a BET plot.

    Parameters
    ----------
    pressure : array
        Pressure points which will make up the x axis.
    bet_points : array
        BET-transformed points which will make up the y axis.
    minimum : int
        Lower bound of the selected points.
    maximum : int
        Upper bound of the selected points.
    slope : float
        Slope of the chosen linear region.
    intercept : float
        Intercept of the chosen linear region.
    p_monolayer : float
        Pressure at which statistical monolayer is achieved.
    bet_monolayer : float
        BET transform of the point at which statistical monolayer is achieved.
    ax : matplotlib axes object, default None
        The axes object where to plot the graph if a new figure is
        not desired.

    Returns
    -------
    matplotlib.axes
        Matplotlib axes of the graph generated. The user can then apply their
        own styling if desired.
    """
    # Generate the figure if needed
    if ax is None:
        _, ax = plt.subplots(figsize=(6, 4))

    ax.plot(
        pressure,
        bet_points,
        label='all points',
        **POINTS_ALL_STYLE,
    )
    ax.plot(
        pressure[minimum:maximum],
        bet_points[minimum:maximum],
        label='chosen points',
        **POINTS_SEL_STYLE,
    )
    x_lim = [0, pressure[maximum]]
    y_lim = [slope * x_lim[0] + intercept, slope * x_lim[1] + intercept]
    ax.plot(
        x_lim, y_lim,
        linestyle='--',
        color='black',
        label='model fit',
    )
    ax.plot(
        p_monolayer, bet_monolayer,
        marker='X',
        markersize=10,
        linestyle='',
        color='k',
        label='monolayer point'
    )
    ax.set_ylim(bottom=0, top=bet_points[maximum] * 1.2)
    ax.set_xlim(left=0, right=pressure[maximum] * 1.2)
    ax.set_title("BET plot")
    ax.set_xlabel('p/p°', fontsize=15)
    ax.set_ylabel('$\\frac{p/p°}{n ( 1- p/p°)}$', fontsize=15)
    ax.legend(loc='best')

    return ax
751abe12683ceff72066b3b2cd6938d6e9a67507
3,647,264
from typing import Optional

from django.db.models import QuerySet
from rest_framework import response, status, viewsets


def paged_response(
    *,
    view: viewsets.GenericViewSet,
    queryset: Optional[QuerySet] = None,
    status_code: Optional[int] = None,
):
    """
    paged_response can be used when there is a need to paginate a custom API endpoint.

    Usage:

    class UsersView(ModelViewSet):
        ...

        @action(
            ['get'],
            detail=True,
            serializer_class=PostSerializer,
            filterset_class=PostsFilterSet,
        )
        def posts(self, request: Request, pk: Optional[str] = None):
            queryset = Post.objects.filter(user=self.get_object())
            return paged_response(view=self, queryset=queryset)

    :param view: any instance that satisfies the GenericViewSet interface
    :param queryset: Optional django.db.models.QuerySet. Default: get_queryset output
    :param status_code: Optional int
    :return: rest_framework.response.Response
    """
    status_code = status_code or status.HTTP_200_OK
    queryset = queryset or view.get_queryset()
    queryset = view.filter_queryset(queryset)
    page = view.paginate_queryset(queryset)
    if page is not None:
        serializer = view.get_serializer(page, many=True)
        return view.get_paginated_response(serializer.data)

    serializer = view.get_serializer(queryset, many=True)
    return response.Response(serializer.data, status=status_code)
73c38abbedf8f22a57bb6bda1b42d6013520885a
3,647,265
import numpy as np
from scipy import ndimage


def getObjectPositions(mapData, threshold, findCenterOfMass = True):
    """Creates a segmentation map and finds objects above the given threshold.

    Args:
        mapData (:obj:`numpy.ndarray`): The 2d map to segment.
        threshold (float): The threshold above which objects will be selected.
        findCenterOfMass: If True, return the object center weighted according to the values in
            mapData. If False, return the pixel that holds the maximum value.

    Returns:
        objIDs (:obj:`numpy.ndarray`): Array of object ID numbers.
        objPositions (list): List of corresponding (y, x) positions.
        objNumPix (:obj:`numpy.ndarray`): Array listing number of pixels per object.
        segmentationMap (:obj:`numpy.ndarray`): The segmentation map (2d array).

    """
    if threshold < 0:
        raise Exception("Detection threshold (thresholdSigma in the config file) cannot be negative unless in forced photometry mode.")

    sigPix = np.array(np.greater(mapData, threshold), dtype=int)
    sigPixMask = np.equal(sigPix, 1)
    segmentationMap, numObjects = ndimage.label(sigPix)
    objIDs = np.unique(segmentationMap)
    if findCenterOfMass:
        objPositions = ndimage.center_of_mass(mapData, labels = segmentationMap, index = objIDs)
    else:
        objPositions = ndimage.maximum_position(mapData, labels = segmentationMap, index = objIDs)
    objNumPix = ndimage.sum(sigPixMask, labels = segmentationMap, index = objIDs)

    return objIDs, objPositions, objNumPix, segmentationMap
d070f70270837ec1b1ff6f29eedc21deb2b4846c
3,647,266
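A self-contained sketch of the segmentation helper, assuming only numpy and the function above: two bright blobs on a zero background yield two labelled objects, plus the background label 0 that ndimage.label reserves.

import numpy as np

mapData = np.zeros((10, 10))
mapData[2, 2] = 5.0       # first object: a single bright pixel
mapData[7, 6:8] = 4.0     # second object: two adjacent pixels
objIDs, objPositions, objNumPix, segMap = getObjectPositions(mapData, threshold=1.0)
print(objIDs)        # [0 1 2] -- includes the background label
print(objPositions)  # centroids per label (findCenterOfMass defaults to True)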
from numpy import exp, ma, maximum


def specific_humidity(p, RH, t, A=17.625, B=-30.11, C=610.94, masked=False):
    """
    From Mark G. Lawrence, BAMS Feb 2005, eq. (6)

    q = specific_humidity(p,RH,t,A,B,C)

    inputs:
    p = pressure (Pa)
    RH = relative humidity (0-1)
    t = temperature (K)

    keywords:
    A, B and C are optional fitting parameters
    from Alduchov and Eskridge (1996).
    masked = False (if True, perform the operation on masked arrays)

    output:
    q, specific humidity (kg/kg)

    p, RH and t can be arrays.
    """
    if not masked:
        es = C * exp(A*(t-273.15)/(B+t))
        q = 0.62198*(RH*es)/(maximum(p,es)-(1-0.62198)*es)
    else:
        es = C * ma.exp(A*(t-273.15)/(B+t))
        q = 0.62198*(RH*es)/(maximum(p,es)-(1-0.62198)*es)
    return q
2cfd4cad24a0f412d8021fdfdbc9874823093dcc
3,647,267
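A worked call under typical surface conditions (a sketch assuming the imports added above): at p = 101325 Pa, RH = 0.5 and t = 293.15 K the result lands near 7.2 g/kg, consistent with standard humidity tables.

q = specific_humidity(101325.0, 0.5, 293.15)
print(f"q = {q * 1000:.2f} g/kg")  # ~7.2 g/kg at 20 degrees C and 50% RH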
from pyparsing import (
    CaselessLiteral, Combine, Group, LineEnd, LineStart, Literal,
    OneOrMore, Optional, ParserElement, QuotedString, Suppress,
    Word, alphanums, alphas, nums, restOfLine
)


def build_parser():
    """
    Build a pyparsing parser for our custom topology description language.

    :return: A pyparsing parser.
    :rtype: pyparsing.ParserElement
    """
    ParserElement.setDefaultWhitespaceChars(' \t')

    nl = Suppress(LineEnd())
    inumber = Word(nums).setParseAction(lambda l, s, t: int(t[0]))
    # Group the optional exponent explicitly; without the parentheses the
    # alternation would bind as "'E' | ('e' + ...)" and drop the exponent digits.
    fnumber = (
        Combine(
            Optional('-') + Word(nums) + '.' + Word(nums) +
            Optional(CaselessLiteral('E') + Optional('-') + Word(nums))
        )
    ).setParseAction(lambda toks: float(toks[0]))
    boolean = (
        CaselessLiteral('true') | CaselessLiteral('false')
    ).setParseAction(lambda l, s, t: t[0].casefold() == 'true')
    comment = Literal('#') + restOfLine + nl
    text = QuotedString('"')
    identifier = Word(alphas, alphanums + '_')
    empty_line = LineStart() + LineEnd()
    item_list = (
        (text | fnumber | inumber | boolean) +
        Optional(Suppress(',')) + Optional(nl)
    )
    custom_list = (
        Suppress('(') + Optional(nl) + Group(OneOrMore(item_list)) +
        Optional(nl) + Suppress(')')
    ).setParseAction(lambda tok: tok.asList())

    attribute = Group(
        identifier('key') +
        Suppress(Literal('=')) +
        (custom_list | text | fnumber | inumber | boolean | identifier)('value') +
        Optional(nl)
    )
    attributes = (
        Suppress(Literal('[')) + Optional(nl) +
        OneOrMore(attribute) +
        Suppress(Literal(']'))
    )

    node = identifier('node')
    port = Group(
        node +
        Suppress(Literal(':')) +
        (identifier | inumber)('port')
    )
    link = Group(
        port('endpoint_a') +
        Suppress(Literal('--')) +
        port('endpoint_b')
    )

    environment_spec = (
        attributes + nl
    ).setResultsName('env_spec', listAllMatches=True)
    nodes_spec = (
        Group(
            Optional(attributes)('attributes') +
            Group(OneOrMore(node))('nodes')
        ) + nl
    ).setResultsName('node_spec', listAllMatches=True)
    ports_spec = (
        Group(
            Optional(attributes)('attributes') +
            Group(OneOrMore(port))('ports')
        ) + nl
    ).setResultsName('port_spec', listAllMatches=True)
    link_spec = (
        Group(
            Optional(attributes)('attributes') +
            link('links')
        ) + nl
    ).setResultsName('link_spec', listAllMatches=True)

    statements = OneOrMore(
        comment |
        link_spec |
        ports_spec |
        nodes_spec |
        environment_spec |
        empty_line
    )
    return statements
1eccb042b18c3c53a69a41e711a4347a6edf55b9
3,647,269
import math


def decode_owner(owner_id: str) -> str:
    """Decode an owner name from an 18-character hexadecimal string"""
    if len(owner_id) != 18:
        raise ValueError('Invalid owner id.')
    hex_splits = split_by(owner_id, num=2)
    bits = ''
    for h in hex_splits:
        bits += hex_to_bin(h)
    test_owner = ''
    for seq in split_by(bits, 6):
        num = bin_to_dec(seq)
        test_owner += get_ascii_val_from_bit_value(num)
    if test_owner[0] != '?':
        return test_owner[:math.ceil(MAX_OWNER_LENGTH/2)] + '..' + test_owner[-math.floor(MAX_OWNER_LENGTH/2):]
    while test_owner[0] == '?':
        test_owner = test_owner[1:]
    return test_owner
1460ebe3dfd2f36aa2f5e42b28b2d7651d0d2cee
3,647,270
def _get_back_up_generator(frame_function, *args, **kwargs): """Create a generator for the provided animation function that backs up the cursor after a frame. Assumes that the animation function provides a generator that yields strings of constant width and height. Args: frame_function: A function that returns a FrameGenerator. args: Arguments for frame_function. kwargs: Keyword arguments for frame_function. Returns: a generator that generates backspace/backline characters for the animation func generator. """ lines = next(frame_function(*args, **kwargs)).split('\n') width = len(lines[0]) height = len(lines) if height == 1: return util.BACKSPACE_GEN(width) return util.BACKLINE_GEN(height)
a395e91864115f69dc0a7d8d8a3bb2eb90d957e9
3,647,271
from typing import Any from typing import Optional def from_aiohttp( schema_path: str, app: Any, *, base_url: Optional[str] = None, method: Optional[Filter] = None, endpoint: Optional[Filter] = None, tag: Optional[Filter] = None, operation_id: Optional[Filter] = None, skip_deprecated_operations: bool = False, validate_schema: bool = True, force_schema_version: Optional[str] = None, data_generation_methods: DataGenerationMethodInput = DEFAULT_DATA_GENERATION_METHODS, code_sample_style: str = CodeSampleStyle.default().name, **kwargs: Any, ) -> BaseOpenAPISchema: """Load Open API schema from an AioHTTP app. :param str schema_path: An in-app relative URL to the schema. :param app: An AioHTTP app instance. """ from ...extra._aiohttp import run_server # pylint: disable=import-outside-toplevel port = run_server(app) app_url = f"http://127.0.0.1:{port}/" url = urljoin(app_url, schema_path) return from_uri( url, base_url=base_url, method=method, endpoint=endpoint, tag=tag, operation_id=operation_id, skip_deprecated_operations=skip_deprecated_operations, validate_schema=validate_schema, force_schema_version=force_schema_version, data_generation_methods=data_generation_methods, code_sample_style=code_sample_style, **kwargs, )
11c7d2cf9e19e8876ef45118f3842b51fbc734b9
3,647,272
def compute_recall(true_positives, false_negatives):
    """Compute recall

    >>> compute_recall(0, 10)
    0.0
    >>> round(compute_recall(446579, 48621), 6)
    0.901815
    """
    return true_positives / (true_positives + false_negatives)
876bee73150d811e6b7c1a5de8d8e4349105c59b
3,647,274
def get_highest_seat_id(): """ Returns the highest seat ID from all of the boarding passes. """ return max(get_seat_ids())
0e8f95c9455869d283acfb9d6230c8a6f2ca10eb
3,647,275
import numpy as np


def error_function_index(gpu_series, result_series):
    """
    Utility function to compare a GPU array against a CPU array.

    Parameters
    ------
    gpu_series: cudf.Series
        GPU computation result series
    result_series: pandas.Series
        Pandas computation result series

    Returns
    -----
    double
        maximum error of the two arrays
    int
        maximum index value diff
    """
    err = error_function(gpu_series, result_series)
    error_index = np.abs(gpu_series.index.to_array() - result_series.index.values).max()
    return err, error_index
1886df532808be8e54ffc2448c74fcb415b4424a
3,647,276
def get_tipo_aqnext(tipo) -> int:
    """Resolve a field type name to the integer type code used by Django."""

    tipo_ = 3
    # subtipo_ = None

    if tipo in ["int", "uint", "serial"]:
        tipo_ = 16
    elif tipo in ["string", "stringlist", "pixmap", "counter"]:
        tipo_ = 3
    elif tipo in ["double"]:
        tipo_ = 19
    elif tipo in ["bool", "unlock"]:
        tipo_ = 18
    elif tipo in ["date"]:
        tipo_ = 26
    elif tipo in ["time"]:
        tipo_ = 27

    return tipo_
d5a066b98aa56785c4953a7ec8d7052e572e5630
3,647,277
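Illustrative calls, read directly off the branches above, including the fall-through default.

assert get_tipo_aqnext("serial") == 16   # integer-like fields
assert get_tipo_aqnext("pixmap") == 3    # string-like fields
assert get_tipo_aqnext("double") == 19
assert get_tipo_aqnext("unlock") == 18   # boolean-like fields
assert get_tipo_aqnext("date") == 26
assert get_tipo_aqnext("time") == 27
assert get_tipo_aqnext("unknown") == 3   # anything unmatched keeps the default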
from typing import List
from typing import Dict


def fetch_indicators_command(client: Client) -> List[Dict]:
    """Wrapper for fetching indicators from the feed to the Indicators tab.

    Args:
        client: Client object used to request indicators from the feed.

    Returns:
        Indicators.
    """
    indicators = fetch_indicators(client)
    return indicators
eb59b68362e0b30fdc5643259a1ddf757b7afce1
3,647,278
import pandas as pd
from scipy import stats


def hr_lr_ttest(hr, lr):
    """
    Returns the t-test (T statistic and p value), comparing the features for
    high- and low-risk entities.
    """
    res = stats.ttest_ind(hr.to_numpy(), lr.to_numpy(), axis=0, nan_policy="omit", equal_var=False)
    r0 = pd.Series(res[0], index=hr.columns)
    r1 = pd.Series(res[1], index=hr.columns)
    return pd.DataFrame({"ttest_T": r0, "ttest_p": r1})
86ccbbf3119ce7fc809ec68d50b57d514efb29b2
3,647,279
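A minimal sketch exercising the t-test helper, assuming only numpy and pandas on top of the imports added above: a feature whose mean differs between the two frames should show a small p-value, while an identical feature should not.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
cols = ["feat_a", "feat_b"]
hr = pd.DataFrame(rng.normal(1.0, 1.0, size=(50, 2)), columns=cols)
lr = pd.DataFrame(rng.normal(0.0, 1.0, size=(50, 2)), columns=cols)
hr["feat_b"] = lr["feat_b"].to_numpy()  # make the second feature identical

print(hr_lr_ttest(hr, lr))  # feat_a: large |T|, tiny p; feat_b: T == 0, p == 1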
def _is_empty(str_: str) -> bool:
    """Check whether a string is empty.

    Args:
        str_ (str): The string to check.

    Returns:
        bool: True if the string is empty, False otherwise.
    """
    if str_:
        return False
    return True
f0eff540767028a80a3042e2d5bc6951ad28fe24
3,647,280
import random


def energy_generate_random_range_dim2(filepath, dim_1_low, dim_1_high, dim_2_low, dim_2_high, num=500):
    """Generate `num` random 2-D range queries: a random interval on the first
    dimension and a single random point on the second. The query pool is
    written to `filepath` and returned."""
    queryPool = []
    query = []
    for _ in range(num):
        left1 = random.randint(dim_1_low, dim_1_high)
        right1 = random.randint(left1, dim_1_high)
        query.append((left1, right1))

        left2 = random.randint(dim_2_low, dim_2_high)
        # right2 = random.randint(left2, dim_2_high)
        query.append((left2, left2))

        queryPool.append(query[:])
        query.clear()
    with open(filepath, "w+") as f:
        f.write(str(queryPool))
    return queryPool
cdcafba427dbbab9b9e318f58f54a3a3c834bbd3
3,647,281
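A throwaway usage sketch; the output path is hypothetical and each query unpacks as an interval on the first dimension plus a point query on the second.

pool = energy_generate_random_range_dim2("/tmp/queries_dim2.txt", 0, 100, 0, 50, num=3)
for (lo1, hi1), (p2, _) in pool:
    assert 0 <= lo1 <= hi1 <= 100 and 0 <= p2 <= 50
print(pool)  # e.g. [[(12, 87), (33, 33)], ...]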
from typing import Optional from typing import Sequence def get_waas_policies(compartment_id: Optional[str] = None, display_names: Optional[Sequence[str]] = None, filters: Optional[Sequence[pulumi.InputType['GetWaasPoliciesFilterArgs']]] = None, ids: Optional[Sequence[str]] = None, states: Optional[Sequence[str]] = None, time_created_greater_than_or_equal_to: Optional[str] = None, time_created_less_than: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWaasPoliciesResult: """ This data source provides the list of Waas Policies in Oracle Cloud Infrastructure Web Application Acceleration and Security service. Gets a list of WAAS policies. ## Example Usage ```python import pulumi import pulumi_oci as oci test_waas_policies = oci.waas.get_waas_policies(compartment_id=var["compartment_id"], display_names=var["waas_policy_display_names"], ids=var["waas_policy_ids"], states=var["waas_policy_states"], time_created_greater_than_or_equal_to=var["waas_policy_time_created_greater_than_or_equal_to"], time_created_less_than=var["waas_policy_time_created_less_than"]) ``` :param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. This number is generated when the compartment is created. :param Sequence[str] display_names: Filter policies using a list of display names. :param Sequence[str] ids: Filter policies using a list of policy OCIDs. :param Sequence[str] states: Filter policies using a list of lifecycle states. :param str time_created_greater_than_or_equal_to: A filter that matches policies created on or after the specified date and time. :param str time_created_less_than: A filter that matches policies created before the specified date-time. """ __args__ = dict() __args__['compartmentId'] = compartment_id __args__['displayNames'] = display_names __args__['filters'] = filters __args__['ids'] = ids __args__['states'] = states __args__['timeCreatedGreaterThanOrEqualTo'] = time_created_greater_than_or_equal_to __args__['timeCreatedLessThan'] = time_created_less_than if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:waas/getWaasPolicies:getWaasPolicies', __args__, opts=opts, typ=GetWaasPoliciesResult).value return AwaitableGetWaasPoliciesResult( compartment_id=__ret__.compartment_id, display_names=__ret__.display_names, filters=__ret__.filters, id=__ret__.id, ids=__ret__.ids, states=__ret__.states, time_created_greater_than_or_equal_to=__ret__.time_created_greater_than_or_equal_to, time_created_less_than=__ret__.time_created_less_than, waas_policies=__ret__.waas_policies)
4ab181b9776226a96b93757feb124c10b68eacc8
3,647,283
def _get_output_type(output): """Choose appropriate output data types for HTML and LaTeX.""" if output.output_type == 'stream': html_datatype = latex_datatype = 'ansi' text = output.text output.data = {'ansi': text[:-1] if text.endswith('\n') else text} elif output.output_type == 'error': html_datatype = latex_datatype = 'ansi' output.data = {'ansi': '\n'.join(output.traceback)} else: for datatype in DISPLAY_DATA_PRIORITY_HTML: if datatype in output.data: html_datatype = datatype break else: html_datatype = ', '.join(output.data.keys()) for datatype in DISPLAY_DATA_PRIORITY_LATEX: if datatype in output.data: latex_datatype = datatype break else: latex_datatype = ', '.join(output.data.keys()) return html_datatype, latex_datatype
4940f931f7ac3b87b68a5e84a5038feea331dac1
3,647,284
import gc def cat_train_validate_on_cv( logger, run_id, train_X, train_Y, test_X, metric, kf, features, params={}, num_class=None, cat_features=None, log_target=False, ): """Train a CatBoost model, validate using cross validation. If `test_X` has a valid value, creates a new model with number of best iteration found during holdout phase using training as well as validation data. Note: For CatBoost, categorical features need to be in String or Category data type. """ if num_class: # This should be true for multiclass classification problems y_oof = np.zeros(shape=(len(train_X), num_class)) y_predicted = np.zeros(shape=(len(test_X), num_class)) else: y_oof = np.zeros(shape=(len(train_X))) y_predicted = np.zeros(shape=(len(test_X))) cv_scores = [] result_dict = {} feature_importance = pd.DataFrame() best_iterations = [] fold = 0 n_folds = kf.get_n_splits() for train_index, validation_index in kf.split(train_X[features], train_Y): fold += 1 logger.info(f"fold {fold} of {n_folds}") X_train, X_validation, y_train, y_validation = _get_X_Y_from_CV( train_X, train_Y, train_index, validation_index ) if log_target: # feature_names accepts only list cat_train = Pool( data=X_train, label=np.log1p(y_train), feature_names=features, cat_features=cat_features, ) cat_eval = Pool( data=X_validation, label=np.log1p(y_validation), feature_names=features, cat_features=cat_features, ) else: # feature_names accepts only list cat_train = Pool( data=X_train, label=y_train, feature_names=features, cat_features=cat_features, ) cat_eval = Pool( data=X_validation, label=y_validation, feature_names=features, cat_features=cat_features, ) model = CatBoost(params=params) # List of categorical features have already been passed as a part of Pool # above. No need to pass via the argument of fit() model.fit(cat_train, eval_set=cat_eval, use_best_model=True) del train_index, X_train, y_train, cat_train gc.collect() if log_target: y_oof[validation_index] = np.expm1(model.predict(cat_eval)) else: y_oof[validation_index] = model.predict(cat_eval) if test_X is not None: cat_test = Pool( data=test_X, feature_names=features, cat_features=cat_features ) if log_target: y_predicted += np.expm1(model.predict(cat_test)) else: y_predicted += model.predict(cat_test) del cat_eval, cat_test best_iteration = model.best_iteration_ best_iterations.append(best_iteration) logger.info(f"Best number of iterations for fold {fold} is: {best_iteration}") cv_oof_score = _calculate_perf_metric( metric, y_validation, y_oof[validation_index] ) cv_scores.append(cv_oof_score) logger.info(f"CV OOF Score for fold {fold} is {cv_oof_score}") del validation_index, X_validation, y_validation gc.collect() feature_importance_on_fold = model.get_feature_importance() feature_importance = _capture_feature_importance_on_fold( feature_importance, features, feature_importance_on_fold, fold ) # util.update_tracking( # run_id, # "metric_fold_{}".format(fold), # cv_oof_score, # is_integer=False, # no_of_digits=5, # ) result_dict = _evaluate_and_log( logger, run_id, train_Y, y_oof, y_predicted, metric, n_folds, result_dict, cv_scores, best_iterations, ) del y_oof gc.collect() result_dict = _capture_feature_importance( feature_importance, n_important_features=10, result_dict=result_dict ) logger.info("Training/Prediction completed!") return result_dict
d4a1248463d7fa1f9f8f192cc9fa02f8fcdcf020
3,647,285
def find_left(char_locs, pt): """Finds the 'left' coord of a word that a character belongs to. Similar to find_top() """ if pt not in char_locs: return [] l = list(pt) while (l[0]-1, l[1]) in char_locs: l = [l[0]-1, l[1]] return l
8e924f301203bcad2936d4cf4d82c6e21cbebb16
3,647,286
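A tiny check, assuming char_locs is a set of (x, y) pixel coordinates as the membership tests above imply.

char_locs = {(0, 0), (1, 0), (2, 0), (4, 0)}
assert find_left(char_locs, (2, 0)) == [0, 0]  # walks left across contiguous pixels
assert find_left(char_locs, (4, 0)) == [4, 0]  # (3, 0) is a gap, so it stays put
assert find_left(char_locs, (9, 9)) == []      # point not in the set at all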
import posixpath
import urllib.parse


def make_file_url(file_id, base_url):
    """Create URL to access a file record by its persistent ID."""
    url_parts = list(urllib.parse.urlparse(base_url))
    url_parts[2] = posixpath.join(
        DATAVERSE_API_PATH, DATAVERSE_FILE_API
    )
    args_dict = {'persistentId': file_id}
    url_parts[4] = urllib.parse.urlencode(args_dict)
    return urllib.parse.urlunparse(url_parts)
e4b60f2cfd31a9617ee775d7d8ca0caaa9c692fd
3,647,287
import numpy as np


def std_func(bins, mass_arr, vel_arr):
    """
    Calculate std from mean = 0

    Parameters
    ----------
    bins: array
        Array of bins
    mass_arr: array
        Array of masses to be binned
    vel_arr: array
        Array of velocities

    Returns
    ---------
    std_arr: array
        Standard deviation from 0 of velocity difference values in each mass bin
    """
    last_index = len(bins) - 1
    std_arr = []
    for index1, bin_edge in enumerate(bins):
        cen_deltav_arr = []
        for index2, stellar_mass in enumerate(mass_arr):
            if stellar_mass >= bin_edge and index1 == last_index:
                cen_deltav_arr.append(vel_arr[index2])
            elif stellar_mass >= bin_edge and stellar_mass < bins[index1+1]:
                cen_deltav_arr.append(vel_arr[index2])
        mean = 0
        # mean = np.mean(cen_deltav_arr)
        diff_sqrd_arr = []
        for value in cen_deltav_arr:
            diff = value - mean
            diff_sqrd = diff**2
            diff_sqrd_arr.append(diff_sqrd)
        mean_diff_sqrd = np.mean(diff_sqrd_arr)
        std = np.sqrt(mean_diff_sqrd)
        std_arr.append(std)
    return std_arr
13e53952af3106fb7891859f81c146d4bc92703b
3,647,288
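A worked call, assuming the numpy import added above: because the deviation is measured from zero rather than the bin mean, each bin's value is simply the RMS of its velocities.

import numpy as np

bins = np.array([0.0, 10.0, 20.0])
mass_arr = np.array([5.0, 6.0, 15.0, 25.0])
vel_arr = np.array([3.0, -3.0, 4.0, -5.0])
print(std_func(bins, mass_arr, vel_arr))
# bin [0, 10):  values {3, -3} -> RMS 3.0
# bin [10, 20): value {4}      -> 4.0
# bin [20, +):  value {-5}     -> 5.0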
from numpy import log2
from qutip import partial_transpose


def log_neg(rho, mask=[1, 0]):
    """
    Calculate the logarithmic negativity for a density matrix

    Parameters:
    -----------
    rho : qobj/array-like
        Input density matrix

    Returns:
    --------
    logneg: Logarithmic Negativity

    """
    if rho.type != 'oper':
        raise TypeError("Input must be a density matrix")

    rhopt = partial_transpose(rho, mask)

    logneg = log2(rhopt.norm())

    return logneg
b8ed0cd54dd879985ef6265085b789e91beceba7
3,647,289
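A quick sanity check, assuming QuTiP's state helpers are available: a maximally entangled two-qubit Bell state has logarithmic negativity 1, while a product state gives 0.

from qutip import basis, bell_state, ket2dm, tensor

rho_bell = ket2dm(bell_state('00'))
print(log_neg(rho_bell))   # ~1.0 for a maximally entangled pair

rho_prod = ket2dm(tensor(basis(2, 0), basis(2, 0)))
print(log_neg(rho_prod))   # ~0.0 for a separable state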
import geopandas as gpd
import rasterio as rio
from shapely.geometry import Polygon


def create_polygon(pixels_selected: set, raster_path: str) -> gpd.GeoDataFrame:
    """
    Transform each pixel index of the connected component into map
    coordinates and build the answer polygon from them.

    Parameters
    --------------
    pixels_selected: set
        Set with the pixels selected for the connected component
    raster_path: str
        Path to the source raster

    Return
    --------------
    polygon: geopandas.GeoDataFrame
        Polygon generated from the points
    """
    with rio.open(raster_path) as raster:
        pixels_cords = []
        for x, y in pixels_selected:
            cord = raster.xy(x, y)
            pixels_cords.append(cord)
        new_polygon_geometry = Polygon(pixels_cords)
        polygon_raw = gpd.GeoDataFrame(
            index=[0], crs=raster.meta["crs"], geometry=[new_polygon_geometry]
        ).unary_union.convex_hull
        new_polygon = gpd.GeoDataFrame(
            index=[0], crs=raster.meta["crs"], geometry=[polygon_raw]
        )
    return new_polygon
f2484afcfb73a3adbdaaeacf25287c1c2ce1584a
3,647,290
import copy

import pandas as pd


def read_output(path_elec, path_gas):
    """
    Used to read the building simulation I/O file

    Args:
        path_elec: file path where data is to be read from in minio. This is a mandatory
            parameter; in the case where only one simulation I/O file is provided, the path
            to this file should be indicated here.
        path_gas: the path to the gas output file. This is optional: if there is no gas output
            file to be loaded, a value of path_gas = '' should be used.

    Returns:
        btap_df: Dataframe containing the cleaned building parameters.
        floor_sq: the conditioned floor area of the building, in square metres.
    """
    # Load the data from blob storage.
    s3 = acm.establish_s3_connection(settings.MINIO_URL, settings.MINIO_ACCESS_KEY, settings.MINIO_SECRET_KEY)
    logger.info("read_output s3 connection %s", s3)
    btap_df_elec = pd.read_excel(s3.open(settings.NAMESPACE.joinpath(path_elec).as_posix()))
    if path_gas:
        btap_df_gas = pd.read_excel(s3.open(settings.NAMESPACE.joinpath(path_gas).as_posix()))
        btap_df = pd.concat([btap_df_elec, btap_df_gas], ignore_index=True)
    else:
        btap_df = copy.deepcopy(btap_df_elec)
    floor_sq = btap_df['bldg_conditioned_floor_area_m_sq'].unique()

    # Drop output features present in the output file and columns with one unique value.
    output_drop_list = ['Unnamed: 0', ':erv_package', ':template']
    for col in btap_df.columns:
        if ((':' not in col) and (col not in ['energy_eui_additional_fuel_gj_per_m_sq', 'energy_eui_electricity_gj_per_m_sq', 'energy_eui_natural_gas_gj_per_m_sq', 'net_site_eui_gj_per_m_sq'])):
            output_drop_list.append(col)
    btap_df = btap_df.drop(output_drop_list, axis=1)
    btap_df = copy.deepcopy(clean_data(btap_df))
    btap_df['Total Energy'] = copy.deepcopy(btap_df[['net_site_eui_gj_per_m_sq']].sum(axis=1))
    drop_list = ['energy_eui_additional_fuel_gj_per_m_sq', 'energy_eui_electricity_gj_per_m_sq', 'energy_eui_natural_gas_gj_per_m_sq', 'net_site_eui_gj_per_m_sq']
    btap_df = btap_df.drop(drop_list, axis=1)

    return btap_df, floor_sq
7ec4ce2d9776946e310fd843e722d0189c4ebcb2
3,647,291
def parse_lmap(filename, goal, values): """Parses an LMAP file into a map of literal weights, a LiteralDict object, the literal that corresponds to the goal variable-value pair, and the largest literal found in the file.""" weights = {} max_literal = 0 literal_dict = LiteralDict() for line in open(filename): if (line.startswith('cc$I') or line.startswith('cc$C') or line.startswith('cc$P')): components = line.split('$') literal = int(components[2]) weights[literal] = components[3] max_literal = max(max_literal, abs(literal)) if line.startswith('cc$I'): variable = components[5] value = int(components[6].rstrip()) literal_dict.add(variable, values[variable][value], literal=literal) if variable == goal.variable and value == goal.value_index: goal_literal = literal return weights, literal_dict, goal_literal, max_literal
db6a0e5f56817e7dd0ef47b5e72b2ea30256b2a3
3,647,292
import imageio


def read_image(path: str):
    """
    Read an image file

    :param path: str. Path to image
    :return: The image
    """
    return imageio.imread(path)
8f3342f2454a3d3e821962d7040eebdbaee502cf
3,647,293
import numpy as np


def electrolyte_conductivity_Capiglia1999(c_e, T, T_inf, E_k_e, R_g):
    """
    Conductivity of LiPF6 in EC:DMC as a function of ion concentration. The original
    data is from [1]. The fit is from Dualfoil [2].

    References
    ----------
    .. [1] C Capiglia et al. 7Li and 19F diffusion coefficients and thermal
    properties of non-aqueous electrolyte solutions for rechargeable lithium
    batteries. Journal of power sources 81 (1999): 859-862.
    .. [2] http://www.cchem.berkeley.edu/jsngrp/fortran.html

    Parameters
    ----------
    c_e: :class: `numpy.Array`
        Dimensional electrolyte concentration
    T: :class: `numpy.Array`
        Dimensional temperature
    T_inf: double
        Reference temperature
    E_k_e: double
        Electrolyte conductivity activation energy
    R_g: double
        The ideal gas constant

    Returns
    -------
    :`numpy.Array`
        Electrolyte conductivity
    """

    sigma_e = (
        0.0911
        + 1.9101 * (c_e / 1000)
        - 1.052 * (c_e / 1000) ** 2
        + 0.1554 * (c_e / 1000) ** 3
    )

    arrhenius = np.exp(E_k_e / R_g * (1 / T_inf - 1 / T))

    return sigma_e * arrhenius
ea487399aba6cd1e70d1b5c84dd6f9294f8754b9
3,647,294
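A worked evaluation at the reference temperature, where the Arrhenius factor collapses to 1: at c_e = 1000 mol/m^3 the cubic gives 0.0911 + 1.9101 - 1.052 + 0.1554 ≈ 1.105 S/m. The activation energy below is illustrative only, since it cancels when T == T_inf.

import numpy as np

T_ref = 298.15
sigma = electrolyte_conductivity_Capiglia1999(
    c_e=np.array([1000.0]), T=np.array([T_ref]),
    T_inf=T_ref, E_k_e=34700.0, R_g=8.314,
)
print(sigma)  # ~[1.1046]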
import random def random_bdays(n): """Returns a list of integers between 1 and 365, with length n. n: int returns: list of int """ t = [] for i in range(n): bday = random.randint(1, 365) t.append(bday) return t
7871548db569d435a5975bfa118ad6c262406333
3,647,295
def int_to_charset(val, charset): """ Turn a non-negative integer into a string. """ if not val >= 0: raise ValueError('"val" must be a non-negative integer.') if val == 0: return charset[0] output = "" while val > 0: val, digit = divmod(val, len(charset)) output += charset[digit] # reverse the characters in the output and return return output[::-1]
ec30e014aaf42b6cc3904f13776b4226b0b75a5b
3,647,296
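The loop is a plain base-|charset| conversion; with a hexadecimal charset it reproduces hex(), and longer charsets give compact base-62 style encodings.

HEX = "0123456789abcdef"
B62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

assert int_to_charset(0, HEX) == "0"
assert int_to_charset(255, HEX) == "ff"  # matches hex(255)[2:]
assert int_to_charset(61, B62) == "z"    # last symbol of the base-62 charset
assert int_to_charset(62, B62) == "10"   # first rollover in base 62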
def search(tabela, *, parms='*', clause=None):
    """
    Receives the name of the table to be queried as a required parameter and,
    as keyword parameters, the columns to select and an optional filter
    clause; returns all rows found.
    """
    banco = Banco()
    banco.connect()
    # `clause` may be None; avoid interpolating the literal string "None".
    banco.execute(f"SELECT {parms} FROM {tabela} {clause or ''}")
    rows = banco.fetchall()
    banco.disconnect()
    return rows
0cb0dad5fe0661ee7027ab8b43c28b0351d42a48
3,647,297
def hash(data): """run the default hashing algorithm""" return _blacke2b_digest(data)
e12433388a0d392f16a8e11ba812629ed4573ace
3,647,299
def _insert_text_func(s, readline): """Creates a function to insert text via readline.""" def inserter(): readline.insert_text(s) readline.redisplay() return inserter
06532be051cb69b92fa79ef339edb733b8f31c15
3,647,300
def dumps(ndb_model, **kwargs): """Custom json dumps using the custom encoder above.""" return NdbEncoder(**kwargs).encode(ndb_model)
0f0a74cdaedd81a95874f745ad1bc24881d1fb73
3,647,301
def create_critic_train_op(hparams, critic_loss, global_step): """Create Discriminator train op.""" with tf.name_scope('train_critic'): critic_optimizer = tf.train.AdamOptimizer(hparams.critic_learning_rate) output_vars = [ v for v in tf.trainable_variables() if v.op.name.startswith('critic') ] if FLAGS.critic_update_dis_vars: if FLAGS.discriminator_model == 'bidirectional_vd': critic_vars = [ v for v in tf.trainable_variables() if v.op.name.startswith('dis/rnn') ] elif FLAGS.discriminator_model == 'seq2seq_vd': critic_vars = [ v for v in tf.trainable_variables() if v.op.name.startswith('dis/decoder/rnn/multi_rnn_cell') ] critic_vars.extend(output_vars) else: critic_vars = output_vars print('\nOptimizing Critic vars:') for v in critic_vars: print(v) critic_grads = tf.gradients(critic_loss, critic_vars) critic_grads_clipped, _ = tf.clip_by_global_norm(critic_grads, FLAGS.grad_clipping) critic_train_op = critic_optimizer.apply_gradients( zip(critic_grads_clipped, critic_vars), global_step=global_step) return critic_train_op, critic_grads_clipped, critic_vars
4c33ba79dacebff375971c123e5bbafd34f5ab91
3,647,305
import numpy as np
import pandas as pd


def interpolate(data, tstep):
    """Interpolate limit order data.

    Uses left-hand interpolation, and assumes that the data is indexed by timestamp.
    """
    T, N = data.shape
    timestamps = data.index
    t0 = timestamps[0] - (timestamps[0] % tstep)  # 34200
    tN = timestamps[-1] - (timestamps[-1] % tstep) + tstep  # 57600
    timestamps_new = np.arange(t0 + tstep, tN + tstep, tstep)  # [34200, ..., 57600]
    X = np.zeros((len(timestamps_new), N))  # np.array
    X[-1, :] = data.values[-1, :]
    t = timestamps_new[0]  # keeps track of time in NEW sampling frequency
    for i in np.arange(0, T):  # observations in data...
        if timestamps[i] > t:
            s = timestamps[i] - (timestamps[i] % tstep)
            tidx = int((t - t0) / tstep - 1)
            sidx = int((s - t0) / tstep)  # plus one for python indexing (below)
            X[tidx:sidx, :] = data.values[i - 1, :]
            t = s + tstep
        else:
            pass
    return pd.DataFrame(X, index=timestamps_new, columns=data.columns)
ae1fde01529a4d11ea864b0f5757c9cde096a142
3,647,306
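A small sketch of the resampler, assuming the numpy and pandas imports added above; the index is a float timestamp in seconds and tstep the target spacing. Left-hand interpolation means each new grid point carries the last observation at or before it.

import pandas as pd

ts = [34200.3, 34201.1, 34203.7]  # irregular observation times (seconds)
data = pd.DataFrame({'bid': [10.0, 10.5, 10.2],
                     'ask': [10.1, 10.6, 10.3]}, index=ts)
print(interpolate(data, tstep=1.0))  # regular 1 s grid; values held flat between observations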