def dice(labels, predictions, axis, weights=1.0, scope=None,
         loss_collection=tf.GraphKeys.LOSSES,
         reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
    r"""Dice loss for binary segmentation.

    The Dice loss is one minus the Dice coefficient, so this loss
    converges towards zero. The Dice loss between predictions `p` and
    labels `g` is

    .. math::
        1 - \frac{2 \sum_i^N p_i g_i + \epsilon}
                 {\sum_i^N p_i + \sum_i^N g_i + \epsilon}

    where :math:`\epsilon` is a small value added for numerical stability.
    (The denominator matches the implementation below, which sums raw
    values rather than their squares.)

    Parameters
    ----------
    labels : float `Tensor`
    predictions : float `Tensor`

    References
    ----------
    https://arxiv.org/pdf/1606.04797.pdf
    """
    if labels is None:
        raise ValueError("labels must not be None.")
    if predictions is None:
        raise ValueError("predictions must not be None.")
    with tf.name_scope(scope, "dice", (predictions, labels, weights)) as scope:
        predictions = tf.to_float(predictions)
        labels = tf.to_float(labels)
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())

        intersection = tf.reduce_sum(tf.abs(predictions * labels), axis=axis)
        union = (tf.reduce_sum(predictions, axis=axis)
                 + tf.reduce_sum(labels, axis=axis))
        losses = 1. - ((2 * intersection + _EPSILON) / (union + _EPSILON))

        return compute_weighted_loss(
            losses=losses, weights=weights, scope=scope,
            loss_collection=loss_collection, reduction=reduction)
70e0e44e7d9b07350497a2c048f2bdd1c8ea952e
3,637,988
def air_density(temp, patm, pw=0):
    """
    Calculates the density of (possibly moist) air by means of the universal
    gas law as a function of air temperature and atmospheric pressure.

        m / V = [Pw / (Rw * T)] + [Pd / (Rd * T)]

    where:
        Pd: partial pressure of dry air, Patm - Pw [Pa]
        Rw: specific gas constant for water vapour [Rw = 461.495 J/(kg K)]
        Rd: specific gas constant for dry air [Rd = 287.058 J/(kg K)]
        T: air temperature [K]
        m/V: density of air [kg/m³]

    Parameters
    ----------
    temp : float
        Air temperature [K].
    patm : float
        Atmospheric pressure [Pa].
    pw : float
        Vapour pressure [Pa]. Defaults to 0 Pa (dry air).

    Returns
    -------
    float
        Air density [kg/m³].
    """
    # specific gas constants for dry air and water vapour [J / (kg K)]
    rd, rw = 287.058, 461.495
    pd = patm - pw
    return (pd / (rd * temp)) + (pw / (rw * temp))
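# A minimal usage sketch (not part of the original module): dry air at 20 °C
# and standard sea-level pressure, plus a moist-air comparison.
if __name__ == "__main__":
    rho_dry = air_density(temp=293.15, patm=101325)            # ~1.204 kg/m³
    rho_moist = air_density(temp=293.15, patm=101325, pw=2300)
    assert rho_moist < rho_dry  # water vapour lowers air density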
1af7afbf562fec105566a2c934f83c73f0be1173
3,637,989
def index(dataset: Dataset, min_df=5, inplace=False, **kwargs):
    """
    Indexes the tokens of a textual :class:`quapy.data.base.Dataset` of string documents.
    To index a document means to replace each different token by a unique numerical index.
    Rare words (i.e., words occurring less than `min_df` times) are replaced by a special token `UNK`.

    :param dataset: a :class:`quapy.data.base.Dataset` object where the instances of training and test
        documents are lists of str
    :param min_df: minimum number of occurrences below which the term is replaced by a `UNK` index
    :param inplace: whether to apply the transformation inplace (True), or to a new copy (False, default)
    :param kwargs: the rest of parameters of the transformation (as for sklearn's
        `CountVectorizer <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html>`_)
    :return: a new :class:`quapy.data.base.Dataset` (if inplace=False) or a reference to the current
        :class:`quapy.data.base.Dataset` (inplace=True) consisting of lists of integer values representing indices.
    """
    __check_type(dataset.training.instances, np.ndarray, str)
    __check_type(dataset.test.instances, np.ndarray, str)

    indexer = IndexTransformer(min_df=min_df, **kwargs)
    training_index = indexer.fit_transform(dataset.training.instances)
    test_index = indexer.transform(dataset.test.instances)

    if inplace:
        dataset.training = LabelledCollection(training_index, dataset.training.labels, dataset.classes_)
        dataset.test = LabelledCollection(test_index, dataset.test.labels, dataset.classes_)
        dataset.vocabulary = indexer.vocabulary_
        return dataset
    else:
        training = LabelledCollection(training_index, dataset.training.labels.copy(), dataset.classes_)
        test = LabelledCollection(test_index, dataset.test.labels.copy(), dataset.classes_)
        return Dataset(training, test, indexer.vocabulary_)
8cde5ed740e4879d62f4a73bf06d1da7b78bc22a
3,637,990
def PerpendicularFrameAt(thisCurve, t, multiple=False):
    """Return a 3d frame at a parameter. This is slightly different than FrameAt in
    that the frame is computed in a way so there is minimal rotation from one frame
    to the next.

    Args:
        t (double): Evaluation parameter.

    Returns:
        bool: True on success, False on failure.
        plane (Plane): The frame is returned here.
    """
    url = "rhino/geometry/curve/perpendicularframeat-curve_double_plane"
    if multiple:
        url += "?multiple=true"
    args = [thisCurve, t]
    if multiple:
        args = list(zip(thisCurve, t))
    response = Util.ComputeFetch(url, args)
    return response
14216aa0091f82cb47dafe0970d685463529cfbb
3,637,991
from typing import Collection


def _attributes_cosmo2dict(cosmo):
    """
    Converts CoSMoMVPA-like attributes to a dictionary form.

    Parameters
    ----------
    cosmo: dict
        Dictionary that may contain fields 'sa', 'fa', 'a'. For any of these
        fields the contents can be a dict, np.ndarray (object array as
        returned by loadmat) or ArrayCollectable (from a PyMVPA Dataset's
        .a, .fa or .sa)

    Returns
    -------
    pymvpa_attributes: dict
        Data represented in cosmo with fields 'sa', 'fa' and 'a'. Each
        element in pymvpa_attributes[key] is a dict itself mapping an
        attribute name to a value.
    """
    # space for output
    pymvpa_attributes = dict()

    # go over 'sa', 'fa' and 'a'
    for fieldname, do_transpose in _attr_fieldname2do_transpose.items():
        attrs = dict()
        if fieldname in cosmo:
            v = cosmo[fieldname]
            if type(v) is dict:
                # copy the data over
                attrs.update(v)
            elif isinstance(v, np.ndarray):
                # extract singleton element
                fsa_mat = _from_singleton(v)
                if fsa_mat is not None:
                    # assume an object array
                    fsa_keys = fsa_mat.dtype.names
                    for fsa_key in fsa_keys:
                        dim = fsa_mat[fsa_key]
                        if do_transpose:
                            # feature attribute case, to match dimensionality
                            # in second dimension
                            dim = dim.T
                        # transform row-vectors in matrix form (shape=(1,P))
                        # to vectors (shape=(P,))
                        if len(dim.shape) == 2 and dim.shape[1] == 1:
                            dim = dim.ravel()
                        attrs[fsa_key] = dim
            elif isinstance(v, Collection):
                # from PyMVPA Dataset, extract keys and values
                attrs.update((k, v[k].value) for k in v)
            elif v is None:
                pass
            else:
                raise TypeError("Unsupported input %s" % v)
        pymvpa_attributes[fieldname] = attrs

    return pymvpa_attributes
3d3369ce0a1f65cd1bc1b8629ca028a4561224ca
3,637,992
def is_instrument_port(port_name):
    """Test if a string can be a COM or GPIB port."""
    answer = False
    if isinstance(port_name, str):
        ports = ["COM", "com", "GPIB0::", "gpib0::"]
        for port in ports:
            if port in port_name:
                answer = not (port == port_name)
    return answer
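# Hypothetical usage sketch illustrating the matching rules above: a bare
# prefix is rejected, a prefixed name is accepted, non-strings are rejected.
assert is_instrument_port("COM3") is True
assert is_instrument_port("COM") is False          # exact prefix alone fails
assert is_instrument_port("GPIB0::12::INSTR") is True
assert is_instrument_port(42) is False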
f45f47d35a9172264d0474502b0df883685071a0
3,637,993
import types


def share_data(value):
    """
    Take a value and use the same value from the store,
    if the value isn't in the store this one becomes the shared version.

    NOTE: this is Python 2 code (`unicode`, `types.TupleType`).
    """
    # We don't want to change the types of strings, between str <=> unicode
    # and hash('a') == hash(u'a') ... so use different stores.
    # In theory eventually we'll have all of one type, but don't hold breath.
    store = _share_data_store
    if isinstance(value, unicode):
        store = _share_data_store_u
    # hahahah, of course the above means that:
    #   hash(('a', 'b')) == hash((u'a', u'b'))
    # ...which we have in deptuples, so just screw sharing those atm.
    if type(value) == types.TupleType:
        return value
    return store.setdefault(value, value)
70edaad0ef52e6f6866049bcd199ef109ebb825d
3,637,995
import math


def gaussian_dropout(incoming, keep_prob, mc, scale_during_training=True, name=None):
    """Gaussian Dropout.

    Outputs the input element multiplied by a random variable sampled from a
    Gaussian distribution with mean 1 and either variance keep_prob*(1-keep_prob)
    (scale_during_training False) or (1-keep_prob)/keep_prob
    (scale_during_training True).

    Arguments:
        incoming : A `Tensor`. The incoming tensor.
        keep_prob : A float representing the probability that each element is
            kept by Bernoulli dropout, which is used to set the variance of
            the Gaussian distribution.
        scale_during_training : A boolean determining whether to match the
            variance of the Gaussian distribution to Bernoulli dropout with
            scaling during testing (False) or training (True).
        mc : A boolean Tensor corresponding to whether or not Monte-Carlo
            sampling will be used to calculate the network's output.
        name : A name for this layer (optional).

    References:
        Dropout: A Simple Way to Prevent Neural Networks from Overfitting.
        N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever & R. Salakhutdinov,
        (2014), Journal of Machine Learning Research, 15(1), 1929-1958.

    Links:
        [https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf]
        (https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf)
    """
    with tf.name_scope(name) as scope:
        inference = incoming
        if scale_during_training:
            stddev = math.sqrt((1 - keep_prob) / keep_prob)
        else:
            stddev = math.sqrt((1 - keep_prob) * keep_prob)

        def apply_gaussian_dropout():
            return tf.multiply(inference,
                               tf.random_normal(tf.shape(inference),
                                                mean=1, stddev=stddev))

        inference = tf.cond(mc, apply_gaussian_dropout, lambda: inference)
    return inference
225c88e0f45c2b319cfabfcb6c65162a5a21f778
3,637,996
def bottom_up_low_space(N, K, ts):
    """
    Bottom-up (dynamic programming) subset-sum algorithm using O(K) space.

    args:
        N :: int
            length of ts
        K :: int
        ts :: list of ints
    returns:
        res :: bool
            True  :: if a subset of ts sums to K
            False :: otherwise
        subset :: list of tuples
            index and value in ts of the subset that sums to K.
    """
    U = np.zeros(K + 1, dtype=int)
    U[0] = 1
    for t in ts:
        j = K
        while j >= t:
            if U[j - t] != 0:
                U[j] = t
            j -= 1
    res = U[K] != 0

    # Walk back through U to recover the subset that produced the sum
    subset = []
    k = K
    while U[k] != 0 and k > 0:
        t = U[k]
        i = [i for i, x in enumerate(U[k] == ts) if x]
        for l in i:
            if not (l, t) in subset:
                subset.append((l, t))
        k -= U[k]
    return res, sorted(subset)
31e810871088309fded97b2ab844c3f910c84ffb
3,637,998
import json


def getInfo(ID):
    """
    Get info from file.

    :param ID: meter ID
    :return: info = {
        "distance": 10,
        "horizontal": 10,
        "vertical": 20,
        "name": "1_1",
        "type": SF6,
        "template": "template.jpg",
        "ROI": {"x": 200, "y": 200, "w": 1520, "h": 680},
        "startPoint": {"x": -1, "y": -1},
        "endPoint": {"x": -1, "y": -1},
        "centerPoint": {"x": -1, "y": -1},
        "startValue": 0,
        "totalValue": 2
    }
    """
    file = open("config/" + ID + ".json")
    info = json.load(file)
    # string to pointer
    if info["type"] == "SF6":
        info["type"] = SF6
    elif info["type"] == "youwen":
        info["type"] = youwen
    info["template"] = cv2.imread("template/" + ID + ".jpg")
    return info
d0e415b547450a84d15c96b3800276f6f566e503
3,638,001
def cast_tensor_by_spec(_input, spec):
    """Transform dtype & shape following spec."""
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact"
        )

    if not _isinstance_wrapper(spec, "TensorSpec"):
        return _input

    if _isinstance_wrapper(_input, ["Tensor", "EagerTensor"]):
        # TensorFlow issue #43038
        # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
        return tf.cast(_input, dtype=spec.dtype, name=spec.name)
    else:
        return tf.constant(_input, dtype=spec.dtype, name=spec.name)
3cea0851c5cc9d457a05b58b0d2cdde50b3ed1ba
3,638,004
def StopRequestHook(ref, args, request):
    """Declarative request hook for TPU Stop command."""
    del ref
    del args
    stop_request = GetMessagesModule().StopNodeRequest()
    request.stopNodeRequest = stop_request
    return request
8eba140a8f9bf59fec293d8f33d4c10bb03c1a99
3,638,005
def get_bucket(
    storage_bucket_name: str,
    **kwargs,
) -> Bucket:
    """Get a storage bucket."""
    client = get_client()
    return client.get_bucket(storage_bucket_name, **kwargs)
ad33ae43f9d8ff2fb087519733931efd3952df18
3,638,006
def furl_for(endpoint: str, filename: str = None, **kwargs: dict) -> str:
    """Replacement for url_for."""
    return URL() + (url_for(endpoint, filename=filename)
                    if filename is not None
                    else ("/" if endpoint == "" else url_for(endpoint, **kwargs)))
2e518ce13bd01771a5daffc111c6adc5d60e40cd
3,638,007
def image_max_value(img, region=None, scale=None):
    """Retrieves the maximum value of an image.

    Args:
        img (object): The image to calculate the maximum value.
        region (object, optional): The region over which to reduce data.
            Defaults to the footprint of the image's first band.
        scale (float, optional): A nominal scale in meters of the projection
            to work in. Defaults to None.

    Returns:
        object: ee.Number
    """
    if region is None:
        region = img.geometry()
    if scale is None:
        scale = image_scale(img)
    max_value = img.reduceRegion(**{
        'reducer': ee.Reducer.max(),
        'geometry': region,
        'scale': scale,
        'maxPixels': 1e12
    })
    return max_value
b7e9c4ece9639fbdad75146021fd16438567b9c7
3,638,008
def getblock(lst, limit):
    """Remove and return up to `limit` entries from the end of list `lst`."""
    r = lst[-limit:]
    del lst[-limit:]
    return r
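# Usage sketch (illustrative only): batches are drawn from the tail, so the
# list acts as a stack of pending work.
queue = [1, 2, 3, 4, 5]
batch = getblock(queue, 2)
assert batch == [4, 5] and queue == [1, 2, 3]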
8d230dec59fe00375d92b6c6a8b51f3e6e2d9126
3,638,009
def electrode_neighborhoods(mea='hidens', neighborhood_radius=HIDENS_NEIGHBORHOOD_RADIUS, x=None, y=None):
    """
    Calculate neighbor matrix from distances between electrodes.

    :param mea: (optional) type of the micro electrode array, default: 'hidens'
    :param neighborhood_radius: (optional) depends on mea type
    :param x, y: (optional) electrode coordinates
    :return: neighbors: square matrix
    """
    distances = electrode_distances(mea, x, y)
    neighbors = distances < neighborhood_radius
    return neighbors
7595b8cbd43bd12db2cb8e8a14d1968152a34fc0
3,638,010
def lat_from_meta(meta):
    """
    Obtains a latitude coordinates array from rasterio metadata.

    :param meta: dict rasterio metadata.
    :return: numpy array
    """
    t, h = meta["transform"], meta["height"]
    lat = np.arange(t[5], t[5] + (t[4] * h), t[4])
    # in rare cases coords may be too short or too long (e.g. due to
    # rounding), so alternate shortening and enlarging several times
    # to be sure
    for _ in range(3):
        lat = shorten_coords_array(lat, t[5], t[4], h)
        lat = enlarge_coords_array(lat, t[5], t[4], h)
    return lat
dd4624521071788e497dadabdbb3e7716c6132cc
3,638,011
def get_test_class(dbcase):
    """Return the implementation class of a TestCase, or None if not found."""
    if dbcase.automated and dbcase.valid:
        impl = dbcase.testimplementation
        if impl:
            obj = module.get_object(impl)
            if type(obj) is type and issubclass(obj, core.Test):
                return obj
            else:
                raise InvalidTestError("%r is not a Test class object." % (obj,))
        else:
            return None
    else:
        return None
0ddf0127f87308695fef83001424ce7fa94ca463
3,638,012
def calc_dp(t_c, rh):
    """Calculate the dew point in Celsius.

    Arguments:
    t_c - the temperature in °C.
    rh - the relative humidity as a percent (0-100).

    Returns: the dew point in °C.
    """
    sat_vp = vapor_pressure_liquid_water(t_c)
    vp = sat_vp * rh / 100.0
    a = log(vp / 6.1037) / 17.641
    return a * 243.27 / (1.0 - a)
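# Illustrative companion (an assumption, not part of the original module):
# a Magnus-type saturation vapour pressure consistent with the constants
# 6.1037, 17.641 and 243.27 used in calc_dp above.
from math import exp, log

def vapor_pressure_liquid_water(t_c):
    # hypothetical definition; calc_dp inverts exactly this form
    return 6.1037 * exp(17.641 * t_c / (t_c + 243.27))

# With this definition, 25 °C at 60% relative humidity gives a dew point
# of roughly 16.7 °C:
print(round(calc_dp(25.0, 60.0), 1))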
a183a11e6a61e536376802d18b939caa7c02dd28
3,638,013
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """conv_block is the block that has a conv layer at shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.

    Note that from stage 3, the first conv layer at main path is with
    strides=(2,2), and the shortcut should have strides=(2,2) as well.
    """
    filters1, filters2, filters3 = filters

    if IMAGE_ORDERING == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING,
               name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
f54486729405eb1ae41b554842c5942f8ad9483a
3,638,015
from typing import List
from typing import Optional
import tokenize


def _fake_before_lines(first_line: str) -> List[str]:
    """Construct the fake lines that should go before the text."""
    fake_lines = []
    indent_levels = _indent_levels(first_line)

    # Handle regular indent
    for i in range(indent_levels):
        prefix = SINGLE_INDENT * i
        fake_lines.append(f"{prefix}if True:\n")

    # Handle else/elif/except/finally
    try:
        first_token: Optional[tokenize.TokenInfo] = next(
            tokenize.generate_tokens(iter([first_line.lstrip()]).__next__)
        )
    except tokenize.TokenError:
        first_token = None

    if first_token and first_token.type == tokenize.NAME:
        name = first_token.string
        prefix = SINGLE_INDENT * indent_levels
        if name in {"else", "elif"}:
            fake_lines.append(f"{prefix}if True:\n")
            fake_lines.append(f"{prefix}{SINGLE_INDENT}pass\n")
        elif name in {"except", "finally"}:
            fake_lines.append(f"{prefix}try:\n")
            fake_lines.append(f"{prefix}{SINGLE_INDENT}pass\n")

    return fake_lines
9d3a450dce45690e1650ad2f1c86eff5df5d8e36
3,638,016
def normal_logpdf(x, mu, cov):
    """
    Multivariate normal logpdf, numpy native implementation.

    :param x:
    :param mu:
    :param cov:
    :return:
    """
    part1 = 1 / (((2 * np.pi) ** (len(mu) / 2)) * (np.linalg.det(cov) ** (1 / 2)))
    part2 = (-1 / 2) * ((x - mu).T.dot(np.linalg.inv(cov))).dot((x - mu))
    return float(np.log(part1) + part2)
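# Quick sanity-check sketch: for a 2-D standard normal the value should match
# scipy.stats.multivariate_normal(mu, cov).logpdf(x) (~ -1.983 here).
import numpy as np

x = np.array([0.5, -0.2])
mu = np.zeros(2)
cov = np.eye(2)
print(normal_logpdf(x, mu, cov))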
47672093235cdf23562a83eac28ad428c5d4db24
3,638,017
def criteriarr(criteria):
    """Validate that the iterable only contains MIN (or any alias) and MAX
    (or any alias) values, and always return an ndarray representation of
    the iterable.

    Parameters
    ----------
    criteria : Array-like
        Iterable containing all the values to be validated by the function.

    Returns
    -------
    numpy.ndarray :
        Criteria array.

    Raises
    ------
    DataValidationError :
        If some value of the criteria array is not MIN (-1) or MAX (1).
    """
    pcriteria = np.array([ALIASES.get(c) for c in criteria])
    if None in pcriteria:
        msg = (
            "Criteria array only accepts minimize or maximize values. Found {}")
        raise DataValidationError(msg.format(criteria))
    return pcriteria
3f00db0e4a41ab09650b7779819e243c25b6ca35
3,638,018
import random


def integer_or_rational(entropy, signed, min_abs=0):
    """Returns a rational, with 50% probability of it being an integer."""
    if random.choice([False, True]):
        return integer(entropy, signed, min_abs=min_abs)
    else:
        return non_integer_rational(entropy, signed)
03e11aa082dfdb613f0f1861ff8346b1087b4880
3,638,019
import functools
import warnings


def ignore_python_warnings(function):
    """
    Decorator for ignoring *Python* warnings.

    Parameters
    ----------
    function : object
        Function to decorate.

    Returns
    -------
    object

    Examples
    --------
    >>> @ignore_python_warnings
    ... def f():
    ...     warnings.warn('This is an ignored warning!')
    >>> f()
    """

    @functools.wraps(function)
    def wrapped(*args, **kwargs):
        """Wrapped function."""
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return function(*args, **kwargs)

    return wrapped
438e54fe927f787783175faacf4eb9608fd27cf0
3,638,020
def runMetrics(
    initWorkingSetName,
    stepName,
    requestInfo,
    jobId,
    outputFolder,
    referenceFolder,
    referencePrefix,
    dtmFile,
    dsmFile,
    clsFile,
    mtlFile,
):
    """
    Run a Girder Worker job to compute metrics on output files.

    Requirements:
    - Danesfield Docker image is available on host

    :param initWorkingSetName: The name of the top-level working set.
    :type initWorkingSetName: str
    :param stepName: The name of the step.
    :type stepName: str (DanesfieldStep)
    :param requestInfo: HTTP request and authorization info.
    :type requestInfo: RequestInfo
    :param jobId: Job ID.
    :type jobId: str
    :param outputFolder: Output folder document.
    :type outputFolder: dict
    :param referenceFolder: Reference directory.
    :type referenceFolder: dict
    :param referencePrefix: Reference file prefix.
    :type referencePrefix: str
    :param dtmFile: DTM file document.
    :type dtmFile: dict
    :param dsmFile: DSM file document.
    :type dsmFile: dict
    :param clsFile: CLS file document.
    :type clsFile: dict
    :param mtlFile: MTL file document.
    :type mtlFile: dict
    :returns: Job document.
    """
    gc = createGirderClient(requestInfo)

    if referencePrefix == "STANDARD":
        # We know that there's no reference data with this selection
        containerArgs = ["echo", "No ground truth selected for scoring"]
        asyncResult = docker_run.delay(
            **createDockerRunArguments(
                image=DockerImage.DANESFIELD,
                containerArgs=containerArgs,
                jobTitle="[%s] Run metrics" % initWorkingSetName,
                jobType=stepName,
                user=requestInfo.user,
            )
        )
    else:
        # Otherwise we assume the reference data exists, and try to
        # run the metrics
        outputVolumePath = VolumePath("__output__")

        # Docker container arguments
        containerArgs = [
            "danesfield/tools/run_metrics.py",
            "--output-dir", outputVolumePath,
            "--ref-dir", GirderFolderIdToVolume(referenceFolder["_id"], gc=gc),
            "--ref-prefix", referencePrefix,
            "--dsm", GirderFileIdToVolume(dsmFile["_id"], gc=gc),
            "--cls", GirderFileIdToVolume(clsFile["_id"], gc=gc),
            "--mtl", GirderFileIdToVolume(mtlFile["_id"], gc=gc),
            "--dtm", GirderFileIdToVolume(dtmFile["_id"], gc=gc),
        ]

        # Result hooks
        # - Upload output files to output folder
        # - Provide upload metadata
        upload_kwargs = createUploadMetadata(jobId, stepName)
        resultHooks = [
            GirderUploadVolumePathToFolder(
                outputVolumePath,
                outputFolder["_id"],
                upload_kwargs=upload_kwargs,
                gc=gc,
            )
        ]

        asyncResult = docker_run.delay(
            **createDockerRunArguments(
                image=DockerImage.DANESFIELD,
                containerArgs=containerArgs,
                jobTitle="[%s] Run metrics" % initWorkingSetName,
                jobType=stepName,
                user=requestInfo.user,
                resultHooks=resultHooks,
            )
        )

    # Add info for job event listeners
    job = asyncResult.job
    job = addJobInfo(job, jobId=jobId, stepName=stepName)

    return job
e908191b274e4d55dc77a5686dc462a2f8a43798
3,638,021
def matplotlib_kwarg_dealiaser(args, kind):
    """De-alias the kwargs passed to plots."""
    if args is None:
        return {}
    matplotlib_kwarg_dealiaser_dict = {
        "scatter": mpl.collections.PathCollection,
        "plot": mpl.lines.Line2D,
        "hist": mpl.patches.Patch,
        "bar": mpl.patches.Rectangle,
        "hexbin": mpl.collections.PolyCollection,
        "fill_between": mpl.collections.PolyCollection,
        "hlines": mpl.collections.LineCollection,
        "text": mpl.text.Text,
        "contour": mpl.contour.ContourSet,
        "pcolormesh": mpl.collections.QuadMesh,
    }
    return normalize_kwargs(
        args, getattr(matplotlib_kwarg_dealiaser_dict[kind], "_alias_map", {})
    )
9c267531bde5445d7024bee4860b882044359212
3,638,023
def volume():
    """
    Get volume number.

    :return:
    """
    return Scheduler.ret_volume
0a39ff47800d04055971e7687b240f0c1e1a2396
3,638,024
def grid_grad(input, grid, interpolation='linear', bound='zero', extrapolate=False):
    """Sample spatial gradients of an image with respect to a deformation field.

    Notes
    -----
    {interpolation}

    {bound}

    Parameters
    ----------
    input : ([batch], [channel], *inshape) tensor
        Input image.
    grid : ([batch], *inshape, dim) tensor
        Transformation field.
    interpolation : int or sequence[int], default=1
        Interpolation order.
    bound : BoundType, or sequence[BoundType], default='zero'
        Boundary conditions.
    extrapolate : bool or int, default=True
        Extrapolate out-of-bound data.

    Returns
    -------
    output : ([batch], [channel], *inshape, dim) tensor
        Sampled gradients.
    """
    # Broadcast
    dim = grid.shape[-1]
    input_no_batch = input.dim() == dim + 1
    input_no_channel = input.dim() == dim
    grid_no_batch = grid.dim() == dim + 1
    if input_no_channel:
        input = input[None, None]
    elif input_no_batch:
        input = input[None]
    if grid_no_batch:
        grid = grid[None]
    batch = max(input.shape[0], grid.shape[0])
    input = expand(input, [batch, *input.shape[1:]])
    grid = expand(grid, [batch, *grid.shape[1:]])

    out = GridGrad.apply(input, grid, interpolation, bound, extrapolate)

    if input_no_channel:
        out = out[:, 0]
    if input_no_batch and grid_no_batch:
        out = out[0]
    return out
4df566e4612252169f2fab721b62cbd9be8c85ac
3,638,025
def delete_cart_item(quote_id, item_code):
    """Delete given item_codes from Quote; if all items are deleted then delete the Quote."""
    try:
        response = frappe._dict()
        # `item_code` is a comma-separated string of item codes
        item_list = [i.strip() for i in item_code.split(",")]
        if not frappe.db.exists("Quotation", quote_id):
            response["message"] = "Quotation not found"
            frappe.local.response['http_status_code'] = 404
        else:
            quote = frappe.get_doc("Quotation", quote_id)
            new_items = []
            for idx, row in enumerate(quote.get("items")):
                if not row.item_code in item_list:
                    new_items.append(row)
            quote.items = new_items
            quote.flags.ignore_mandatory = True
            quote.save()
            if not len(quote.get("items", [])):
                frappe.delete_doc("Quotation", quote_id)
                response["message"] = "Deleted all items"
                frappe.local.response["http_status_code"] = 200
            else:
                response = get_cart_details(quote_id)
            frappe.db.commit()
    except Exception as e:
        http_status_code = getattr(e, "http_status_code", 500)
        frappe.local.response['http_status_code'] = http_status_code
        response["message"] = "Unable to Delete Quote Item"
        frappe.log_error(message=frappe.get_traceback(),
                         title="Website API: delete_cart_item")
    finally:
        return response
f7a17cf74764322136bd3e3940a37399ec1ac53b
3,638,026
def list_methods(f):
    """Return a list of the multimethods currently registered to `f`.

    The multimethods are returned in the order they would be tested by the
    dispatcher when the generic function is called.

    The return value is a list, where each item is `(callable, type_signature)`.
    Each type signature is in the format returned by `typing.get_type_hints`.

    `f`: a callable that has been declared `@generic` or `@typed`.

    **Interaction with OOP**:

    Bound methods are resolved to the underlying function automatically.
    The `self`/`cls` argument is extracted from the `__self__` attribute of
    the bound method, enabling linked dispatcher lookups in the MRO.

    **CAUTION**: Recall that in Python, instance methods when accessed through
    the *class* are just raw functions; the method becomes bound, and thus
    `self` is set, when accessed through *an instance* of that class.

    Let `Cat` be a class with an OOP instance method `meow`, and `cat` an
    instance of that class. If you call `list_methods(cat.meow)`, you get the
    MRO lookup for linked dispatchers, as expected. But if you call
    `list_methods(Cat.meow)` instead, it won't see the MRO, because the value
    of the `self` argument isn't set for an unbound method (which is really
    just a raw function).

    If `Cat` has a `@classmethod` `iscute`, calling `list_methods(Cat.iscute)`
    performs the MRO lookup for linked dispatchers. This is because a class
    method is already bound (to the class, so the `cls` argument already has
    a value) when it is accessed through the class.

    Finally, note that while that is how `list_methods` works, it is not the
    mechanism actually used to determine `self`/`cls` when *calling* the
    generic function. There, the value of `self`/`cls` is extracted from the
    first positional argument of the call. This is because the dispatcher is
    actually installed on the underlying raw function, so it has no access to
    the metadata of the bound method (which, as seen from the dispatcher, is
    on the outside).
    """
    function, _ = getfunc(f)
    if not isgeneric(function):
        raise TypeError(f"{_function_fullname(function)} is not a generic function, "
                        "it does not have multimethods.")
    # In case of a bound method (either `Foo.classmeth` or `foo.instmeth`),
    # we can get the value for the `self`/`cls` argument from its `__self__`
    # attribute.
    #
    # Otherwise we have a regular function, an unbound method, or a
    # `@staticmethod`; in those cases, there's no `self`/`cls`. (Technically,
    # an unbound method has a parameter to receive it, but no value has been
    # set yet.)
    self_or_cls = f.__self__ if hasattr(f, "__self__") else None
    return _list_multimethods(function, self_or_cls)
97ac92d58ee1edd7c38a2b9c1bcd05f0b7468a24
3,638,027
def get_requirements(extra=None):
    """
    Load the requirements for the given extra from the appropriate
    requirements-extra.txt, or the main requirements.txt if no extra is
    specified.
    """
    filename = f"requirements-{extra}.txt" if extra else "requirements.txt"
    with open(filename) as fp:
        # Parse out one requirement per line, skipping blanks
        return [l.strip() for l in fp.readlines() if l.strip()]
7ce9e348357925b7ff165ebd8f13300d849ea0ee
3,638,029
def _format_mojang_uuid(uuid):
    """
    Formats a non-hyphenated UUID into a whitelist-compatible UUID.

    :param str uuid: uuid to format
    :return str: formatted uuid

    Example:
        >>> _format_mojang_uuid('1449a8a244d940ebacf551b88ae95dee')
        '1449a8a2-44d9-40eb-acf5-51b88ae95dee'

    Must have 32 characters:
        >>> _format_mojang_uuid('1')
        Traceback (most recent call last):
            ...
        ValueError: Expected UUID to have 32 characters
    """
    if len(uuid) != 32:
        raise ValueError('Expected UUID to have 32 characters')
    return uuid[:8] + '-' + uuid[8:12] + '-' + uuid[12:16] + '-' + uuid[16:20] + '-' + uuid[20:]
517071b28f1e747091e2a539cd5d0b8765bebeba
3,638,031
def generate_uri(graph_base, username):
    """
    Build a graph URI by appending the username to the base.

    Args:
        graph_base (str): base URI of the graph.
        username (str): username to append.

    Returns:
        str: the concatenated URI.
    """
    return "{}{}".format(graph_base, username)
5e3557d300ed1a706e7b5257719135063d8c44e6
3,638,032
from typing import Type
from typing import Optional
from typing import Dict
from typing import Any


def _combine_model_kwargs_and_state(
    generator_run: GeneratorRun,
    model_class: Type[Model],
    model_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Produces a combined dict of model kwargs and model state after gen,
    extracted from generator run. If model kwargs are not specified, model
    kwargs from the generator run will be used.
    """
    model_kwargs = model_kwargs or generator_run._model_kwargs or {}
    if generator_run._model_state_after_gen is None:
        return model_kwargs
    serialized_model_state = not_none(generator_run._model_state_after_gen)
    # We don't want to update `model_kwargs` on the `GenerationStep`,
    # just to add to them for the purpose of this function.
    return {**model_kwargs, **model_class.deserialize_state(serialized_model_state)}
b646b1cd12cdba02e0495e0592790933adadd3c9
3,638,033
def quick_boxcar(s, M=4, centered=True):
    """Returns a boxcar-filtered version of the input signal.

    Keyword arguments:
    M -- number of averaged samples (default 4)
    centered -- recenter the filtered signal to reduce lag (default True)
    """
    # Sanity check on signal and filter window
    length = s.shape[0]
    if length <= 2 * M:
        raise ValueError('signal too short for specified filter window')

    # Set up staggered arrays for vectorized average
    z = np.empty((M, length + M - 1), 'd')
    for i in range(M):
        z[i] = np.r_[np.zeros(i) + s[0], s, np.zeros(M - i - 1) + s[-1]]

    # Center the average if specified
    start_ix = 0
    end_ix = length
    if centered:
        start_ix += int(M / 2)
        end_ix += int(M / 2)

    return z.mean(axis=0)[start_ix:end_ix]
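# Usage sketch (assumes numpy as np): smooth a noisy sine; the output has the
# same length as the input because of the padded, staggered averaging.
import numpy as np

s = np.sin(np.linspace(0, 2 * np.pi, 100)) + 0.1 * np.random.randn(100)
smoothed = quick_boxcar(s, M=4, centered=True)
assert smoothed.shape == s.shape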
64e0a847b05972f674394984fe8799738465b96b
3,638,034
def read_file(file, assume_complete=False):
    """read_file(filename, assume_complete=False) -> Contest

    Read in a text file describing a contest, and construct a Contest object.
    This adds the ballots (by calling addballots()), but it doesn't do any
    further computation.

    If assume_complete is True, any entries missing from a ballot are assumed
    to be tied for last.
    """
    contents = None
    ballots = []

    while True:
        ln = file.readline()
        if (not ln):
            break
        ln = ln.strip()
        if (not ln):
            continue
        if (ln.startswith('#')):
            continue
        if (ln.startswith('*')):
            if (contents):
                raise Exception('More than one line in the input file begins with *.')
            contents = ln
        else:
            ballots.append(ln)

    if (not contents):
        raise Exception('No line in the input file begins with *.')

    entries = contents[1:].split()
    if (not entries):
        raise Exception('The * line has no contents.')

    dic = {}
    for val in entries:
        dic[val] = True
    if (len(dic) != len(entries)):
        raise Exception('Duplicate entry in * line.')

    contest = Contest(entries)

    for ln in ballots:
        ls = ln.split()
        ls = [val.split('/') for val in ls]
        dic = {}
        for subls in ls:
            for val in subls:
                if (not contest.iskey(val)):
                    raise Exception('Unknown key in ballot: ' + val)
                if (val in dic):
                    raise Exception('Repeated key in ballot: ' + val)
                dic[val] = True
        if (assume_complete):
            final = []
            for val in contest.entries:
                if (val not in dic):
                    final.append(val)
            if (final):
                ls.append(final)
        contest.addballot(ls)

    return contest
2e57353c66b82e73fbc4caace2f2e2e54d486dac
3,638,036
from copy import deepcopy

import six


def merge_dict(a, b):
    """
    Recursively merges and returns dict a with dict b. Any list values
    will be combined and returned sorted.

    :param a: dictionary object
    :param b: dictionary object
    :return: merged dictionary object
    """
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    for key, val in six.iteritems(b):
        if key in result and isinstance(result[key], dict):
            result[key] = merge_dict(result[key], val)
        elif key in result and isinstance(result[key], list):
            result[key] = sorted(list(set(val) | set(result[key])))
        else:
            result[key] = deepcopy(val)
    return result
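# Usage sketch: nested dicts merge recursively, lists are unioned and sorted.
a = {"x": 1, "tags": ["a", "b"], "nested": {"k": 1}}
b = {"tags": ["b", "c"], "nested": {"j": 2}}
assert merge_dict(a, b) == {"x": 1, "tags": ["a", "b", "c"],
                            "nested": {"k": 1, "j": 2}}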
5adddd784ff4facdefebecea3ce2ab5069058885
3,638,037
def send_message(oc_user, params):
    """Leave a message for another user."""
    to_uid = params.get("to_uid")
    content = params.get("content", '')
    if not to_uid:
        return 1, {"msg": "please choose user"}
    if not content:
        return 2, {"msg": "please input content"}
    if len(content) > 40:
        return 3, {"msg": "content too long"}

    # record the message in both directions of the conversation
    compete_message_obj = UserCompeteMessage.hget(oc_user.uid, to_uid)
    compete_message_obj.set_message(oc_user.uid, to_uid, content)

    compete_message_obj = UserCompeteMessage.hget(to_uid, oc_user.uid)
    compete_message_obj.set_message(oc_user.uid, to_uid, content)
    return 0, {}
6e61bfd8bfd4b94584e093c2bd91e647c39f7f90
3,638,038
def BDD100K(path: str) -> Dataset:
    """`BDD100K <https://bdd-data.berkeley.edu>`_ dataset.

    The file structure should be like::

        <path>
            bdd100k_images_100k/
                images/
                    100k/
                        test/
                        train/
                        val/
                labels/
                    det_20/
                        det_train.json
                        det_val.json
                    lane/
                        polygons/
                            lane_train.json
                            lane_val.json
                    drivable/
                        polygons/
                            drivable_train.json
                            drivable_val.json

    Arguments:
        path: The root directory of the dataset.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
    """
    return _BDD100K_loader(path, "100k")
c154f57e09fac6b01004015bcbebdbd3929a9ac3
3,638,039
def is_utf8(string):
    """Check if argument encodes to UTF-8 without error.

    Args:
        string (str): string of bytes

    Returns:
        True if string can be successfully encoded
    """
    try:
        string.encode('utf-8')
    except UnicodeEncodeError:
        return False
    except UnicodeDecodeError:
        return False
    except AttributeError:
        return False
    return True
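# Usage sketch: anything lacking .encode (or failing to encode) is rejected.
assert is_utf8("héllo") is True
assert is_utf8(42) is False   # AttributeError path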
4ea0f8f9b93976017a8add574098d89f86f1345d
3,638,040
def fixedwidth_bins(delta, xmin, xmax):
    """Return bins of width `delta` that cover `xmin`, `xmax` (or a larger range).

    The bin parameters are computed such that the bin size `delta` is
    guaranteed. In order to achieve this, the range `[xmin, xmax]` can be
    increased.

    Bins can be calculated for 1D data (then all parameters are simple floats)
    or nD data (then parameters are supplied as arrays, with each entry
    corresponding to one dimension).

    Parameters
    ----------
    delta : float or array_like
        desired spacing of the bins
    xmin : float or array_like
        lower bound (left boundary of first bin)
    xmax : float or array_like
        upper bound (right boundary of last bin)

    Returns
    -------
    dict
        The dict contains 'Nbins', 'delta', 'min', and 'max'; these are
        either floats or arrays, depending on the input.

    Example
    -------
    Use with :func:`numpy.histogram`::

        B = fixedwidth_bins(delta, xmin, xmax)
        h, e = np.histogram(data, bins=B['Nbins'], range=(B['min'], B['max']))
    """
    if not np.all(xmin < xmax):
        raise ValueError('Boundaries are not sane: should be xmin < xmax.')
    _delta = np.asarray(delta, dtype=np.float_)
    _xmin = np.asarray(xmin, dtype=np.float_)
    _xmax = np.asarray(xmax, dtype=np.float_)
    _length = _xmax - _xmin
    N = np.ceil(_length / _delta).astype(np.int_)   # number of bins
    dx = 0.5 * (N * _delta - _length)               # add half of the excess to each end
    return {'Nbins': N, 'delta': _delta, 'min': _xmin - dx, 'max': _xmax + dx}
5f9e8bc31f44d66323689a1a79d6a597d422fa57
3,638,041
def get_word(path):
    """Extract the word name from a json path (everything before the first '.')."""
    return path.split('.')[0]
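# Usage sketch: only the extension is stripped; leading directories are kept.
assert get_word("hello.json") == "hello"
assert get_word("words/hello.json") == "words/hello"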
e749bcdaaf65de0299d35cdf2a2264568ad5051b
3,638,042
def produce_edge_image(thresh, img):
    """Threshold the image and return the edges."""
    (thresh, alpha_img) = cv.threshold(img, thresh, 255, cv.THRESH_BINARY_INV)
    blur_img = cv.medianBlur(alpha_img, 9)
    blur_img = cv.morphologyEx(blur_img, cv.MORPH_OPEN, (5, 5))
    # find the edges
    return cv.Canny(blur_img, 30, 200), alpha_img
e269f15f515e8c88da3761306430d4d49c445d3b
3,638,043
import typing as t

import numpy as np


def aggregate_dicts(dicts: t.Sequence[dict], agg: str = "mean") -> dict:
    """
    Aggregates a list of dictionaries into a single dictionary.

    All dictionaries in ``dicts`` should have the same keys. All values for a
    given key are aggregated into a single value using ``agg``. Returns a
    single dictionary with the aggregated values.

    Parameters
    ----------
    dicts : sequence of dicts
        The dictionaries to aggregate.
    agg : {'mean', 'stdev', 'sum', 'median', 'min', 'max'}
        Name of the method to use to aggregate the values of `dicts` with.
    """
    aggs: t.Dict[str, t.Callable] = {
        "mean": np.mean,
        "stdev": np.std,
        "sum": np.sum,
        "median": np.median,
        "min": np.min,
        "max": np.max,
    }
    assert len(dicts) > 0
    keys = dicts[0].keys()
    result = {}
    for key in keys:
        values = [d[key] for d in dicts]
        if isinstance(values[0], dict):
            # Recurse
            result[key] = aggregate_dicts(values, agg)
        else:
            result[key] = aggs[agg](values, axis=0)
    return result
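# Usage sketch: aggregate per-run result dicts, recursing into nested dicts.
runs = [
    {"loss": 0.9, "metrics": {"acc": 0.70}},
    {"loss": 0.7, "metrics": {"acc": 0.80}},
]
agg = aggregate_dicts(runs, agg="mean")
# agg == {"loss": 0.8, "metrics": {"acc": 0.75}}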
c8a557e26c885fcef381eb21edd27a0fcb91a12e
3,638,045
def aggregation_most_frequent(logits):
    """This aggregation mechanism takes the softmax/logit output of several
    models resulting from inference on identical inputs and computes the most
    frequent label. It is deterministic (no noise injection like noisy_max()
    above).

    :param logits: logits or probabilities for each sample
    :return:
    """
    # Compute labels from logits/probs and reshape array properly
    labels = labels_from_probs(logits)
    labels_shape = np.shape(labels)
    labels = labels.reshape((labels_shape[0], labels_shape[1]))

    # Initialize array to hold final labels
    result = np.zeros(int(labels_shape[1]))

    # Parse each sample (`xrange` in the Python 2 original)
    for i in range(int(labels_shape[1])):
        # Count number of votes assigned to each class
        label_counts = np.bincount(labels[:, i], minlength=10)
        label_counts = np.asarray(label_counts, dtype=np.int32)

        # Result is the most frequent label
        result[i] = np.argmax(label_counts)

    return np.asarray(result, dtype=np.int32)
28440569b918a2a58ca0ee1589884948809994c7
3,638,046
from collections import defaultdict
from typing import Dict

import numpy as np


def evaluate_submission_with_proto(
    submission: Submission,
    ground_truth: Submission,
) -> Dict[str, float]:
    """Calculates various motion prediction metrics given the submission and
    ground truth protobuf messages.

    Args:
        submission (Submission): Proto message with predicted trajectories.
        ground_truth (Submission): Proto message with ground truth trajectories.

    Raises:
        ValueError: Number of objects in submission is not equal to number of
            objects in ground truth.
        ValueError: Objects order in submission violates objects order in
            ground truth.

    Returns:
        Dict[str, float]: Mapping from metric name to its aggregated value.
    """
    _check_submission_and_ground_truth(submission, ground_truth)
    metrics = defaultdict(list)
    gt_map = {
        (prediction.scene_id, prediction.track_id): prediction
        for prediction in ground_truth.predictions
    }
    for i in range(len(submission.predictions)):
        pred = submission.predictions[i]
        gt = gt_map[(pred.scene_id, pred.track_id)]
        if pred.scene_id != gt.scene_id:
            raise ValueError(f'Check scenes order: {pred.scene_id} != {gt.scene_id}')
        if pred.track_id != gt.track_id:
            raise ValueError(f'Check objects order: {pred.track_id} != {gt.track_id}')

        pred_trajectories, weights = get_trajectories_weights_arrays(pred.weighted_trajectories)
        pred_trajectories = pred_trajectories[np.argsort(weights)][-MAX_NUM_MODES:]
        weights = weights[np.argsort(weights)][-MAX_NUM_MODES:]

        gt_trajectory, _ = get_trajectories_weights_arrays(gt.weighted_trajectories)
        gt_trajectory = gt_trajectory[0]  # Reduce modes dim

        metrics['avg_ade'].append(avg_ade(gt_trajectory, pred_trajectories))
        metrics['avg_fde'].append(avg_fde(gt_trajectory, pred_trajectories))
        metrics['min_ade'].append(min_ade(gt_trajectory, pred_trajectories))
        metrics['min_fde'].append(min_fde(gt_trajectory, pred_trajectories))
        metrics['top1_ade'].append(top1_ade(gt_trajectory, pred_trajectories, weights))
        metrics['top1_fde'].append(top1_fde(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_ade'].append(weighted_ade(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_fde'].append(weighted_fde(gt_trajectory, pred_trajectories, weights))
        metrics['log_likelihood'].append(log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['corrected_nll'].append(
            corrected_negative_log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['is_ood'].append(gt.is_ood)
    return metrics
c777359e6385164a463318f089ceb330e7bad814
3,638,049
def build_optimizer(args, model):
    """Build an optimizer based on the arguments given."""
    if args['optim'].lower() == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args['learning_rate'],
                              momentum=0.9, weight_decay=args['weight_decay'])
    elif args['optim'].lower() == 'adadelta':
        optimizer = optim.Adadelta(model.parameters(), lr=args['learning_rate'],
                                   weight_decay=args['weight_decay'])
    elif args['optim'].lower() == 'adamw':
        optimizer = optim.AdamW(model.parameters(), lr=args['learning_rate'],
                                weight_decay=args['weight_decay'])
    else:
        # `args` is a dict, so index it rather than using attribute access
        raise ValueError("Unknown optimizer: %s" % args['optim'])
    return optimizer
09677cbd5dc13f0dbab227f64d7724ff653eb9ec
3,638,050
import re


def decorator_matcher(func_names, keyword, fcreate=None):
    """Search pattern @[namespace]<func_name>("<skey>")

    Parameters
    ----------
    func_names : list
        List of macro names to match.

    fcreate : Function
        (skey, path, range, func_name) -> result.
    """
    decorator = r"@?(?P<decorator>([a-zA-Z_]?[a-zA-Z_0-9.]*.)?("
    decorator += "|".join(re.escape(x) for x in func_names)
    decorator += r"))((\(\"(?P<skey>[^\"]+)\")|(\s*\Z))"
    nextline = keyword + r"\s+(?P<skey>[a-zA-Z_0-9]+)\("

    decorator = re.compile(decorator)
    nextline = re.compile(nextline)

    def _matcher(path, source, begin_line=0, end_line=None):
        source = source.split("\n") if isinstance(source, str) else source
        results = []
        end_line = min(end_line, len(source)) if end_line else len(source)

        for line in range(begin_line, end_line):
            content = source[line]
            match = decorator.match(content)
            if match:
                skey = match.group("skey")
                if skey:
                    start, end = match.span("skey")
                    lineno = line
                if not skey and line + 1 < len(source):
                    # the key may sit on the following line, e.g. a function
                    # definition right after the decorator
                    match_name = nextline.match(source[line + 1])
                    if match_name:
                        skey = match_name.group("skey")
                        start, end = match_name.span("skey")
                        lineno = line + 1
                if skey:
                    start_pos = Position(lineno, start)
                    end_pos = Position(lineno, end)
                    item = fcreate(skey, path, Range(start_pos, end_pos),
                                   match.group("decorator"))
                    if item:
                        results.append(item)
        return results

    return _matcher
62db3ae8bfaab36266cbe07f886531d60f47905e
3,638,051
def log_binomial(n, k, tol=0.):
    """
    Computes log binomial coefficient. When ``tol >= 0.02`` this uses a
    shifted Stirling's approximation to the log Beta function via
    :func:`log_beta`.

    :param torch.Tensor n: A nonnegative integer tensor.
    :param torch.Tensor k: An integer tensor ranging in ``[0, n]``.
    :rtype: torch.Tensor
    """
    assert isinstance(tol, (float, int)) and tol >= 0
    n_plus_1 = n + 1
    if tol < 0.02:
        # At small tolerance it is cheaper to defer to torch.lgamma().
        return n_plus_1.lgamma() - (k + 1).lgamma() - (n_plus_1 - k).lgamma()
    return -n_plus_1.log() - log_beta(k + 1, n_plus_1 - k, tol=tol)
6e61ac09020c65202a2ab32134cb0c2ff6ff7f80
3,638,052
def error_500(request, *args, **kwargs):
    """
    Throws a JSON response for INTERNAL errors.

    :param request: the request
    :return: response
    """
    message = "An internal server error occurred"
    response = JsonResponse(data={"message": message, "status_code": 500})
    response.status_code = 500
    return response
59f31de52a0d77ca71b3176929cf3142858637cf
3,638,053
from datetime import datetime
import warnings

import pandas as pd


def datetimes_to_durations(start_times, end_times, fill_date=datetime.today(),
                           freq="D", dayfirst=False, na_values=None):
    """
    This is a very flexible function for transforming arrays of start_times
    and end_times to the proper format for lifelines: duration and event
    observation arrays.

    Parameters
    ----------
    start_times: an array, Series or DataFrame
        iterable representing start times. These can be strings, or datetime objects.
    end_times: an array, Series or DataFrame
        iterable representing end times. These can be strings, or datetimes.
        These values can be None, or an empty string, which corresponds to censorship.
    fill_date: datetime, optional (default=datetime.today())
        the date to use if end_times is a None or empty string. This corresponds
        to the last date of observation. Anything after this date is also censored.
    freq: string, optional (default='D')
        the units of time to use. See Pandas 'freq'. Default 'D' for days.
    dayfirst: bool, optional (default=False)
        convert assuming European-style dates, i.e. day/month/year.
    na_values : list, optional
        list of values to recognize as NA/NaN. Ex: ['', 'NaT']

    Returns
    -------
    T: numpy array
        array of floats representing the durations with time units given by freq.
    C: numpy array
        boolean array of event observations: 1 if death observed, 0 else.

    Examples
    --------
    .. code:: python

        from lifelines.utils import datetimes_to_durations

        start_dates = ['2015-01-01', '2015-04-01', '2014-04-05']
        end_dates = ['2016-02-02', None, '2014-05-06']

        T, E = datetimes_to_durations(start_dates, end_dates, freq="D")
        T  # array([ 397., 1414., 31.])
        E  # array([ True, False, True])
    """
    fill_date = pd.to_datetime(fill_date)
    freq_string = "timedelta64[%s]" % freq
    start_times = pd.Series(start_times).copy()
    end_times = pd.Series(end_times).copy()

    C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""]))
    end_times[~C] = fill_date
    start_times_ = pd.to_datetime(start_times, dayfirst=dayfirst)
    end_times_ = pd.to_datetime(end_times, dayfirst=dayfirst, errors="coerce")

    deaths_after_cutoff = end_times_ > fill_date
    C[deaths_after_cutoff] = False

    T = (end_times_ - start_times_).values.astype(freq_string).astype(float)
    if (T < 0).sum():
        warnings.warn("Warning: some values of start_times are after end_times.\n", UserWarning)
    return T, C.values
f30f64ac90b92a8614e60119d93f14954e24e2ef
3,638,054
import calendar
import datetime


def offset_from_date(v, offset, gran='D', exact=False):
    """
    Given a date string and some numeric offset, as well as a unit, compute
    the offset from that value by `offset` grans. Gran defaults to D. If
    exact is set to True, then the exact date is figured out, otherwise the
    level of granularity given by gran is used. Returns a date string.
    """
    gran = string_conversions.units_to_gran(gran)
    # check for valid refdate
    if len(v) > 0:
        # Extract date components into a datetime object for manipulation
        y = int(v[:4])
        m = int(v[4:6])
        if len(v) >= 8:
            d = int(v[6:8])
            really_d = True
        else:
            really_d = False
            d = 1
        if len(v) >= 11:
            h = int(v[9:11])
        else:
            h = None
        dt = datetime.datetime(y, m, d)
        if len(v) >= 13:
            min = int(v[11:13])
        else:
            min = None
        if h is not None:
            dt = datetime.datetime(y, m, d, h)
        if len(v) >= 15:
            s = int(v[13:15])
            dt = datetime.datetime(y, m, d, h, min, s)
        else:
            s = None
            if min is not None:
                dt = datetime.datetime(y, m, d, h, min)
    elif offset >= 1:
        return 'FUTURE_REF'
    elif offset <= -1:
        return 'PAST_REF'
    else:
        return v

    # Do manipulations
    if gran == 'TM':
        # minutes
        dt += datetime.timedelta(minutes=offset)
        return dt.strftime('%Y%m%dT%H%M')
    elif gran == 'TH':
        # hours
        dt += datetime.timedelta(hours=offset)
        if exact:
            return dt.strftime('%Y%m%dT%H%M')
        else:
            return dt.strftime('%Y%m%dT%H')
    elif gran == 'D':
        # days
        dt += datetime.timedelta(days=offset)
        if exact and min is not None:
            return dt.strftime('%Y%m%dT%H%M')
        elif exact and h is not None:
            return dt.strftime('%Y%m%dT%H')
        else:
            return dt.strftime('%Y%m%d')
    elif gran == 'W' or gran == 'F':
        # weeks/fortnights
        if gran == 'F':
            offset *= 2
        dt += datetime.timedelta(weeks=offset)
        if exact:
            return dt.strftime('%Y%m%d')
        else:
            return dt.strftime('%YW%W')
    elif gran == 'M':
        # months - timedelta rather annoyingly doesn't support months, so we
        # need to do a bit more work here
        m += offset
        if m > 12:
            y += int(m / 12)
            m %= 12
        elif m < 0:
            y += int(m / 12)
            m %= 12
        if m == 0:
            m = 12
            y -= 1
        # avoid bad days
        dt = None
        while dt is None and d > 0:
            try:
                dt = datetime.datetime(y, m, d)
            except ValueError:
                d -= 1
        if exact:
            return dt.strftime('%Y%m%d')
        else:
            return dt.strftime('%Y%m')
    elif gran == 'Y' or gran == 'E' or gran == 'C':
        # years/decades/centuries - again, need to do a bit more work
        if gran == 'C':
            offset *= 100
        if gran == 'E':
            offset *= 10
        y += offset
        # Python doesn't allow datetime objects to be created representing
        # years before 1970, so do this the old fashioned way
        if not exact:
            if gran == 'C':
                return ("{0:04d}".format(y))[:2]
            elif gran == 'E':
                return ("{0:04d}".format(y))[:3]
            else:
                return "%04d" % y
        else:
            if d == 29 and m == 2 and not calendar.isleap(y):
                # eugh, mucking about with a date that's not going to be in
                # the target year - fall back
                d = 28
            if really_d:
                return "%04d%02d%02d" % (y, m, d)
            else:
                return "%04d%02d" % (y, m)
    elif offset >= 1:
        return 'FUTURE_REF'
    elif offset <= -1:
        return 'PAST_REF'
    else:
        return v
6b45b553df7926ee0e68ee0e2678d4da5304e9b9
3,638,055
def _strip(x):
    """Remove tensor-hood from the input structure."""
    if isinstance(x, Tensor):
        x = x.item()
    elif isinstance(x, dict):
        x = {k: _strip(v) for k, v in x.items()}
    return x
74023594b2da6f58dea425411d72ca493fb80f61
3,638,056
def rank_array(a, descending=True):
    """Rank array counting from 1."""
    temp = np.argsort(a)
    if descending:
        temp = temp[::-1]
    ranks = np.empty_like(temp)
    ranks[temp] = np.arange(1, len(a) + 1)
    return ranks
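# Usage sketch: rank 1 goes to the largest value by default.
import numpy as np

assert list(rank_array(np.array([10.0, 30.0, 20.0]))) == [3, 1, 2]
assert list(rank_array(np.array([10.0, 30.0, 20.0]), descending=False)) == [1, 3, 2]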
8f15ab7123aa7652fe5208ef96808791454b876c
3,638,057
def normalize(params, axis=0):
    """
    Normalize the parameter vector `params` along the given axis.

    :param params: array of parameters of shape [axis0, axis1, ..., axisp]
        (p can be variable)
    :return: array of same shape, normalized so entries along `axis` sum to 1
    """
    return params / np.sum(params, axis=axis, keepdims=True)
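# Usage sketch: with axis=0 each column is scaled to sum to 1.
import numpy as np

p = normalize(np.array([[1.0, 3.0], [1.0, 1.0]]), axis=0)
# p == [[0.5, 0.75], [0.5, 0.25]]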
c11ee3ccab492769e8c60cf2057d010190c7362f
3,638,059
from typing import Sequence
from typing import Dict
from typing import List


def estimate_cv_regression(
    results: pd.DataFrame, critical_values: Sequence[float]
) -> Dict[float, List[float]]:
    """
    Parameters
    ----------
    results : DataFrame
        A dataframe with rows containing the quantiles and columns containing
        the number of observations
    critical_values : Sequence[float]
        The critical values to use
    """
    # For percentiles 1, 5 and 10, regress on a constant, and powers of 1/T
    out = {}
    quantiles = np.asarray(results.index)
    tau = np.array(results.columns).reshape((1, -1)).T
    rhs = (1.0 / tau) ** np.arange(4)
    for cv in critical_values:
        loc = np.argmin(np.abs(100 * quantiles - cv))
        lhs = np.squeeze(np.asarray(results.iloc[loc]))
        res = OLS(lhs, rhs).fit()
        params = res.params.copy()
        params[res.pvalues > 0.05] = 0.0
        out[cv] = [round(val, 5) for val in params]
    return out
22e163da18cb7a9707ebba16690788467a4132c9
3,638,061
def resize(image, size):
    """Resize a multiband image to an image of size (h, w)."""
    n_channels = image.shape[2]
    if n_channels >= 4:
        return skimage.transform.resize(
            image, size, mode="constant", preserve_range=True
        )
    else:
        return cv2.resize(image, size, interpolation=cv2.INTER_AREA)
6a67524552397f1b1c9cd315b2cbaccd624027fb
3,638,063
def version():
    """Return the version of this cli tool."""
    return __version__
790059de16a48ea7dd5dbcc4470f2be851562146
3,638,065
def ho2cu(ho):
    """
    Homochoric vector to cubochoric vector.

    References
    ----------
    D. Roşca et al., Modelling and Simulation in Materials Science and
    Engineering 22:075013, 2014
    https://doi.org/10.1088/0965-0393/22/7/075013
    """
    rs = np.linalg.norm(ho, axis=-1, keepdims=True)

    xyz3 = np.take_along_axis(ho, Rotation._get_pyramid_order(ho, 'forward'), -1)

    with np.errstate(invalid='ignore', divide='ignore'):
        # inverse M_3
        xyz2 = xyz3[..., 0:2] * np.sqrt(2.0 * rs / (rs + np.abs(xyz3[..., 2:3])))
        qxy = np.sum(xyz2**2, axis=-1, keepdims=True)

        q2 = qxy + np.max(np.abs(xyz2), axis=-1, keepdims=True)**2
        sq2 = np.sqrt(q2)
        q = (beta / np.sqrt(2.0) / R1) \
            * np.sqrt(q2 * qxy / (q2 - np.max(np.abs(xyz2), axis=-1, keepdims=True) * sq2))
        tt = np.clip((np.min(np.abs(xyz2), axis=-1, keepdims=True)**2
                      + np.max(np.abs(xyz2), axis=-1, keepdims=True) * sq2) / np.sqrt(2.0) / qxy,
                     -1.0, 1.0)
        T_inv = np.where(np.abs(xyz2[..., 1:2]) <= np.abs(xyz2[..., 0:1]),
                         np.block([np.ones_like(tt), np.arccos(tt) / np.pi * 12.0]),
                         np.block([np.arccos(tt) / np.pi * 12.0, np.ones_like(tt)])) * q
        T_inv[xyz2 < 0.0] *= -1.0
        T_inv[np.broadcast_to(np.isclose(qxy, 0.0, rtol=0.0, atol=1.0e-12), T_inv.shape)] = 0.0
        cu = np.block([T_inv,
                       np.where(xyz3[..., 2:3] < 0.0,
                                -np.ones_like(xyz3[..., 2:3]),
                                np.ones_like(xyz3[..., 2:3]))
                       * rs / np.sqrt(6.0 / np.pi),
                       ]) / sc

    cu[np.isclose(np.sum(np.abs(ho), axis=-1), 0.0, rtol=0.0, atol=1.0e-16)] = 0.0
    cu = np.take_along_axis(cu, Rotation._get_pyramid_order(ho, 'backward'), -1)

    return cu
be55f70c27be789a51c9aee0ba94b36879755915
3,638,067
def apparent_resistivity(
    dc_survey, survey_type='dipole-dipole', space_type='half-space',
    dobs=None, eps=1e-10
):
    """
    Calculate apparent resistivity, assuming that data are normalized
    voltages - Vmn/I (potential difference [V] divided by injection
    current [A]). For forward modelled data an injection current of 1A is
    assumed in SimPEG.

    Input:
    :param SimPEG.EM.Static.DC.SurveyDC.Survey dc_survey: DC survey object
    :param numpy.ndarray dobs: normalized voltage measurements [V/A]
    :param str survey_type: Either 'dipole-dipole' | 'pole-dipole' |
        'dipole-pole' | 'pole-pole'
    :param float eps: Regularizer in case of a null geometric factor

    Output:
    :return rhoApp: apparent resistivity
    """
    # Use dobs in survey if dobs is None
    if dobs is None:
        if dc_survey.dobs is None:
            raise Exception("No dobs provided and dc_survey.dobs is None")
        else:
            dobs = dc_survey.dobs

    # Calculate Geometric Factor
    G = geometric_factor(
        dc_survey, survey_type=survey_type, space_type=space_type
    )

    # Calculate apparent resistivity;
    # absolute value is required because of the regularizer
    rhoApp = np.abs(dobs * (1. / (G + eps)))

    return rhoApp
ec9cc774b2ae9484916da46083730156c64559d1
3,638,068
import json


def readJsonFile(filePath):
    """Read data from a json file.

    Args:
        filePath (str): location of the json file

    Returns:
        variable: data read from the json file
    """
    result = None
    with open(filePath, 'r') as myfile:
        result = json.load(myfile)
    return result
cf15e358c52edcfb00d0ca5257cf2b5456c6e951
3,638,069
def _sort_torch(tensor):
    """Update handling of sort to return only values, not indices."""
    sorted_tensor = _i("torch").sort(tensor)
    return sorted_tensor.values
0480d397726f9cd97c9fa9b7e566db1d9266a75a
3,638,070
def lambda_handler(event, context):
    """Route the incoming request based on type (LaunchRequest, IntentRequest,
    etc). The JSON body of the request is provided in the event parameter.
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])

    # Uncomment this if statement and populate with your skill's application
    # ID to prevent someone else from configuring a skill that sends requests
    # to this function.
    #
    # if (event['session']['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")

    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])

    if event['request']['type'] == "LaunchRequest":
        return on_launch(event['request'], event['session'])
    elif event['request']['type'] == "IntentRequest":
        return on_intent(event['request'], event['session'])
    elif event['request']['type'] == "SessionEndedRequest":
        return on_session_ended(event['request'], event['session'])
cda1cecdde08ae0cb7c925a5f8b598efd1c47e16
3,638,071
def get_authorized_client(config):
    """Get an OAuth-authorized client.

    Following http://requests-oauthlib.readthedocs.org/en/latest/examples/google.html
    """
    client = requests_oauthlib.OAuth2Session(
        client_id=config['client']['id'],
        scope=SCOPE,
        redirect_uri=config['client']['redirect_uri'])

    # redirect user for authorization
    authorization_url, state = client.authorization_url(
        url=AUTHORIZATION_BASE_URL,
        access_type='offline',       # offline for refresh token
        approval_prompt='force')     # force to always make user click authorize
    print('Please go here and authorize,', authorization_url)

    # get the authorization verifier code from the callback url
    redirect_response = input('Paste the full redirect URL here: ')

    # fetch the access token
    client.fetch_token(
        token_url=TOKEN_URL,
        client_secret=config['client']['secret'],
        authorization_response=redirect_response)

    return client
00012be81fed9e29cfabcf81799d887259fa395c
3,638,072
from datetime import datetime

import numpy as np
import pandas as pd
from sqlalchemy import between


def query_obs_4h(session, station_name: str, start: datetime, end: datetime) -> pd.DataFrame:
    """
    Read and parse observation data from SQLite.
    """
    time_format = "%Y-%m-%d %H:00:00"
    resp = session.query(
        ObsDataQcLinear.time,
        ObsDataQcLinear.watertemp,
        ObsDataQcLinear.pH,
        ObsDataQcLinear.DO,
        ObsDataQcLinear.conductivity,
        ObsDataQcLinear.turbidity,
        ObsDataQcLinear.codmn,
        ObsDataQcLinear.nh3n,
        ObsDataQcLinear.tp,
        ObsDataQcLinear.tn) \
        .filter_by(name=station_name) \
        .filter(between(ObsDataQcLinear.time,
                        start.strftime(time_format),
                        end.strftime(time_format))) \
        .all()
    data = pd.DataFrame(resp)
    return data.replace([-999.0, 999.0], [np.nan, np.nan])
35730e5de1b675dc14ef5771459e7cb9629d34e5
3,638,073
from fractions import Fraction


def get_valid_user_input(*, prompt='', strict=False):
    """Return a valid user input as Fraction."""
    frac_converter = parse_fraction_strict if strict else Fraction
    while True:
        user_input = input(prompt)
        try:
            user_input_fraction = frac_converter(user_input)
        except (ValueError, ZeroDivisionError):
            print('Format error, please try again')
        else:
            return user_input_fraction
7726df5b1a57ab9c9122a60e847be95161829a09
3,638,074
def binary_irrev(t, kf, prod, major, minor, backend=None):
    """Analytic product transient of an irreversible 2-to-1 reaction.

    Product concentration vs time from second-order irreversible kinetics.

    Parameters
    ----------
    t : float, Symbol or array_like
        Time.
    kf : number or Symbol
        Forward (bimolecular) rate constant.
    prod : number or Symbol
        Initial concentration of the complex.
    major : number or Symbol
        Initial concentration of the more abundant reactant.
    minor : number or Symbol
        Initial concentration of the less abundant reactant.
    backend : module or str
        Default is 'numpy', can also be e.g. ``sympy``.

    """
    be = get_backend(backend)
    return prod + major * (1 - be.exp(-kf * (major - minor) * t)) / (
        major / minor - be.exp(-kf * t * (major - minor))
    )
c399cb13bbe32a066b07e0aaa93fc40ddfd8c6ec
3,638,076
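A small numeric check of the closed form above, assuming `get_backend('numpy')` simply hands back the numpy module:

import numpy as np

kf, prod0, major, minor = 2.0, 0.0, 1.0, 0.4
t = np.array([0.0, 1.0, 10.0])

p = prod0 + major * (1 - np.exp(-kf * (major - minor) * t)) / (
    major / minor - np.exp(-kf * t * (major - minor))
)
print(p)  # [0.0, ~0.318, ~0.4]: starts at prod0 and tends to prod0 + minor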
def adapted_rand_error(seg, gt, all_stats=False):
    r"""Compute Adapted Rand error as defined by the SNEMI3D contest [1]

    Formula is given as 1 - the maximal F-score of the Rand index
    (excluding the zero component of the original labels). Adapted
    from the SNEMI3D MATLAB script, hence the strange style.

    Parameters
    ----------
    seg : np.ndarray
        the segmentation to score, where each value is the label at that point
    gt : np.ndarray, same shape as seg
        the groundtruth to score against, where each value is a label
    all_stats : boolean, optional
        whether to also return precision and recall as a 3-tuple with rand_error

    Returns
    -------
    are : float
        The adapted Rand error; equal to $1 - \frac{2pr}{p + r}$,
        where $p$ and $r$ are the precision and recall described below.
    prec : float, optional
        The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
    rec : float, optional
        The adapted Rand recall.  (Only returned when `all_stats` is ``True``.)

    References
    ----------
    [1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
    """
    # segA is query, segB is truth
    segA = seg
    segB = gt

    n = segA.size

    # This is the contingency table obtained from segA and segB; we obtain
    # the marginal probabilities from the table.
    p_ij = contingency_table(segA, segB, norm=False)

    # Sum of the squared entries of the joint distribution
    sum_p_ij = p_ij.data @ p_ij.data

    # These are the axis-wise sums, i.e. the marginals (np.sum along each axis)
    a_i = p_ij.sum(axis=0).A.ravel()
    b_i = p_ij.sum(axis=1).A.ravel()

    # Sum of squares of the marginals for segmentation 'A'
    sum_a = a_i @ a_i
    # Sum of squares of the marginals for segmentation 'B'
    sum_b = b_i @ b_i

    # Here 'n' is subtracted from both the numerator and the denominator.
    precision = (sum_p_ij - n) / (sum_a - n)
    recall = (sum_p_ij - n) / (sum_b - n)

    fscore = 2. * precision * recall / (precision + recall)
    are = 1. - fscore

    if all_stats:
        return (are, precision, recall)
    else:
        return are
0530fad7982e83aaf33e3f190b435bc7e216af00
3,638,077
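A hedged usage sketch, assuming `contingency_table` (not shown here) builds the sparse label co-occurrence matrix as in scikit-image; identical label images should give an error of exactly zero:

import numpy as np

gt = np.array([[1, 1, 2],
               [1, 2, 2]])
seg = gt.copy()

print(adapted_rand_error(seg, gt))                   # 0.0 for a perfect match
are, prec, rec = adapted_rand_error(seg, gt, all_stats=True)
print(prec, rec)                                     # both 1.0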
from typing import List def get_knp_span(type_: str, span: Span) -> List[Span]: """Get knp tag or bunsetsu list""" assert type_ != MORPH knp_list = span.sent._.get(getattr(KNP_USER_KEYS, type_).list_) if not knp_list: return [] res = [] i = span.start_char doc = span.doc for b in knp_list: j = i + len(b.midasi) bspan = doc.char_span(i, j) bspan._.set(getattr(KNP_USER_KEYS, type_).element, b) res.append(bspan) i = j return res
90c0f753a6dd4acdbdfebc550fa66b1e8c5f21eb
3,638,078
def get_namespace_leaf(namespace):
    """
    From a provided namespace, return its leaf.

    >>> get_namespace_leaf('foo.bar')
    'bar'
    >>> get_namespace_leaf('foo')
    'foo'

    :param namespace: namespace string, e.g. 'foo.bar'
    :return: the last component of the namespace
    """
    return namespace.rsplit(".", 1)[-1]
0cb21247f9d1ce5fa4dd8d313142c4b09a92fd7a
3,638,079
from typing import Tuple

import numpy as np


def fft_real_dB(sig: np.ndarray, sample_interval_s: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    FFT, real frequencies only, magnitude in dB
    :param sig: array with input signal
    :param sample_interval_s: sample interval in seconds
    :return: four numpy ndarrays with fft_frequency_pos, fft_sig_pos, fft_spectral_power_pos_dB, fft_spectral_phase_radians
    """
    fft_points = len(sig)
    fft_sig_pos = np.fft.rfft(sig)  # one-sided spectrum
    fft_sig_pos /= fft_points  # normalize so amplitudes are independent of record length
    fft_frequency_pos = np.fft.rfftfreq(fft_points, d=sample_interval_s)
    # The factor of 2 restores the power carried by the discarded negative
    # frequencies, so a full-scale sine maps to its correct RMS power level.
    fft_spectral_power_pos_dB = 10.*np.log10(2.*(np.abs(fft_sig_pos))**2. + EPSILON)
    fft_spectral_phase_radians = np.angle(fft_sig_pos)
    return fft_frequency_pos, fft_sig_pos, fft_spectral_power_pos_dB, fft_spectral_phase_radians
fc2aec1bad283aa7e1817e6d91c833e3d8c83f8f
3,638,080
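A quick self-contained check (assuming `EPSILON` is a small module-level constant such as 1e-10): a unit 50 Hz sine sampled at 1 kHz should put the spectral peak near 50 Hz:

import numpy as np

fs = 1000.0                        # sample rate [Hz]
t = np.arange(1024) / fs
sig = np.sin(2.0 * np.pi * 50.0 * t)

freqs, _, power_db, _ = fft_real_dB(sig, sample_interval_s=1.0 / fs)
print(freqs[np.argmax(power_db)])  # ~50 Hz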
from typing import Union from typing import List def get_ipv4_gateway_mac_address_over_ssh(connected_ssh_client: SSHClient, target_os: str = 'MacOS', gateway_ipv4_address: str = '192.168.0.254') -> Union[None, str]: """ Get MAC address of IPv4 gateway in target host over SSH :param connected_ssh_client: Already connected SSH client :param target_os: MacOS, Linux or Windows (Installation of OpenSSH For Windows: https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse) :param gateway_ipv4_address: IPv4 address of gateway :return: None if error or MAC address string """ gateway_mac_address: Union[None, str] = None try: if target_os == 'Windows': arp_table_command: str = 'arp -a ' + gateway_ipv4_address + ' | findstr ' + gateway_ipv4_address else: arp_table_command: str = 'arp -an ' + gateway_ipv4_address stdin, stdout, stderr = connected_ssh_client.exec_command(arp_table_command) arp_table: bytes = stdout.read() arp_table: str = arp_table.decode('utf-8') assert 'No route to host' not in arp_table, \ 'No route to host' + base.error_text(args.target_ip) assert arp_table != '', \ 'Not found host: ' + base.error_text(gateway_ipv4_address) + \ ' in ARP table in host: ' + base.error_text(args.target_ip) if target_os == 'Windows': assert base.windows_mac_address_regex.search(arp_table), \ 'Not found host: ' + base.error_text(gateway_ipv4_address) + \ ' in ARP table in host: ' + base.error_text(args.target_ip) mac_address = base.windows_mac_address_regex.search(arp_table) return mac_address.group(1).replace('-', ':').lower() else: target_arp_table: List[str] = arp_table.split(' ') if target_os == 'Linux': assert base.mac_address_validation(target_arp_table[3]), \ 'Invalid MAC address: ' + base.error_text(target_arp_table[3]) return target_arp_table[3] except AssertionError as Error: base.print_error(Error.args[0]) return gateway_mac_address except IndexError: return gateway_mac_address
5701aaec179d0d36f38df929d7838fc9e31ebe4f
3,638,081
def sort(list_):
    """
    This function is a selection sort algorithm. It will put a list in
    numerical order.

    :param list_: a list
    :return: the same list, sorted in ascending numerical order.
    """
    for minimum in range(0, len(list_)):
        for c in range(minimum + 1, len(list_)):
            if list_[c] < list_[minimum]:
                # Swap the two elements in place.
                list_[minimum], list_[c] = list_[c], list_[minimum]
    return list_
99007e4b72a616ae73a20358afc94c76e0011d3e
3,638,082
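A quick sanity check of the in-place sort against Python's built-in:

data = [5, 2, 9, 1, 5, 6]
assert sort(data[:]) == sorted(data)
print(sort(data))  # [1, 2, 5, 5, 6, 9]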
import pandas as pd
import requests
from tqdm import tqdm


def stock_em_xgsglb(market: str = "沪市A股") -> pd.DataFrame:
    """
    IPO subscription and allotment lookup
    http://data.eastmoney.com/xg/xg/default_2.html
    :param market: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板"}
    :type market: str
    :return: IPO subscription and allotment data
    :rtype: pandas.DataFrame
    """
    market_map = {
        "全部股票": """(APPLY_DATE>'2010-01-01')""",
        "沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
        "科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
        "深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
        "创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
    }
    url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
    params = {
        'sortColumns': 'APPLY_DATE,SECURITY_CODE',
        'sortTypes': '-1,-1',
        'pageSize': '5000',
        'pageNumber': '1',
        'reportName': 'RPTA_APP_IPOAPPLY',
        'columns': 'SECURITY_CODE,SECURITY_NAME,TRADE_MARKET_CODE,APPLY_CODE,TRADE_MARKET,MARKET_TYPE,ORG_TYPE,ISSUE_NUM,ONLINE_ISSUE_NUM,OFFLINE_PLACING_NUM,TOP_APPLY_MARKETCAP,PREDICT_ONFUND_UPPER,ONLINE_APPLY_UPPER,PREDICT_ONAPPLY_UPPER,ISSUE_PRICE,LATELY_PRICE,CLOSE_PRICE,APPLY_DATE,BALLOT_NUM_DATE,BALLOT_PAY_DATE,LISTING_DATE,AFTER_ISSUE_PE,ONLINE_ISSUE_LWR,INITIAL_MULTIPLE,INDUSTRY_PE_NEW,OFFLINE_EP_OBJECT,CONTINUOUS_1WORD_NUM,TOTAL_CHANGE,PROFIT,LIMIT_UP_PRICE,INFO_CODE,OPEN_PRICE,LD_OPEN_PREMIUM,LD_CLOSE_CHANGE,TURNOVERRATE,LD_HIGH_CHANG,LD_AVERAGE_PRICE,OPEN_DATE,OPEN_AVERAGE_PRICE,PREDICT_PE,PREDICT_ISSUE_PRICE2,PREDICT_ISSUE_PRICE,PREDICT_ISSUE_PRICE1,PREDICT_ISSUE_PE,PREDICT_PE_THREE,ONLINE_APPLY_PRICE,MAIN_BUSINESS',
        'filter': market_map[market],
        'source': 'WEB',
        'client': 'WEB',
    }
    r = requests.get(url, params=params)
    data_json = r.json()
    total_page = data_json['result']['pages']
    big_df = pd.DataFrame()
    for page in tqdm(range(1, total_page+1), leave=False):
        params.update({"pageNumber": page})
        r = requests.get(url, params=params)
        data_json = r.json()
        temp_df = pd.DataFrame(data_json['result']['data'])
        big_df = big_df.append(temp_df, ignore_index=True)

    big_df.columns = [
        "股票代码",
        "股票简称",
        "_",
        "申购代码",
        "_",
        "_",
        "_",
        "发行总数",
        "网上发行",
        "_",
        "顶格申购需配市值",
        "_",
        "申购上限",
        "_",
        "发行价格",
        "最新价",
        "首日收盘价",
        "申购日期",
        "中签号公布日",
        "中签缴款日期",
        "上市日期",
        "发行市盈率",
        "中签率",
        "询价累计报价倍数",
        "_",
        "配售对象报价家数",
        "连续一字板数量",
        "涨幅",
        "每中一签获利",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "行业市盈率",
        "_",
        "_",
        "_",
    ]
    big_df = big_df[
        [
            "股票代码",
            "股票简称",
            "申购代码",
            "发行总数",
            "网上发行",
            "顶格申购需配市值",
            "申购上限",
            "发行价格",
            "最新价",
            "首日收盘价",
            "申购日期",
            "中签号公布日",
            "中签缴款日期",
            "上市日期",
            "发行市盈率",
            "行业市盈率",
            "中签率",
            "询价累计报价倍数",
            "配售对象报价家数",
            "连续一字板数量",
            "涨幅",
            "每中一签获利",
        ]
    ]
    big_df['申购日期'] = pd.to_datetime(big_df['申购日期']).dt.date
    big_df['中签号公布日'] = pd.to_datetime(big_df['中签号公布日']).dt.date
    big_df['中签缴款日期'] = pd.to_datetime(big_df['中签缴款日期']).dt.date
    big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
    big_df['网上发行'] = pd.to_numeric(big_df['网上发行'])
    big_df['顶格申购需配市值'] = pd.to_numeric(big_df['顶格申购需配市值'])
    big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
    big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
    big_df['最新价'] = pd.to_numeric(big_df['最新价'])
    big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
    big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
    big_df['行业市盈率'] = pd.to_numeric(big_df['行业市盈率'])
    big_df['中签率'] = pd.to_numeric(big_df['中签率'])
    big_df['询价累计报价倍数'] = pd.to_numeric(big_df['询价累计报价倍数'])
big_df['配售对象报价家数'] = pd.to_numeric(big_df['配售对象报价家数']) big_df['涨幅'] = pd.to_numeric(big_df['涨幅']) big_df['每中一签获利'] = pd.to_numeric(big_df['每中一签获利']) return big_df
2ea409c176e5a2b266866f5af7bbced291dbadca
3,638,083
def geomfill_Mults(*args): """ :param TypeConv: :type TypeConv: Convert_ParameterisationType :param TMults: :type TMults: TColStd_Array1OfInteger & :rtype: void """ return _GeomFill.geomfill_Mults(*args)
d79a857308bb796e08ce3e84f75963d473f37b76
3,638,084
import ctypes
import ctypes.wintypes


def PCO_GetRecordingStruct(handle):
    """
    Get the complete set of the recording function settings.
    Please fill in all wSize parameters, even in embedded structures.
    """
    strRecording = PCO_Recording()
    f = pixelfly_dll.PCO_GetRecordingStruct
    f.argtypes = (ctypes.wintypes.HANDLE, ctypes.POINTER(PCO_Recording))
    f.restype = ctypes.c_int
    ret_code = f(handle, ctypes.byref(strRecording))
    PCO_manage_error(ret_code)
    return strRecording
2d50968e9445f3937726583c00a61c8839b488b2
3,638,086
import random


def auxiliar2(Letra, tabuleiro):
    """
    Helper function for the computer's move. This function is part of the
    computer's strategy and is responsible for carrying out one of its
    moves. It receives the computer's symbol as a parameter and returns
    the move to be made.
    """
    if Letra == "X":
        Letra2 = "O"
    else:
        Letra2 = "X"
    if tabuleiro[1] == Letra2 and tabuleiro[5] == Letra:
        if tabuleiro[3] == Letra:
            return 7
        elif tabuleiro[2] == Letra:
            return 8
        elif tabuleiro[4] == Letra:
            return 6
        elif tabuleiro[6] == Letra:
            return 4
        elif tabuleiro[7] == Letra:
            return 3
        elif tabuleiro[8] == Letra:
            return 2
        elif tabuleiro[9] == Letra:
            jogada = random.choice([3, 7])
            return jogada
    elif tabuleiro[3] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[1] == Letra2:
            return 9
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[9] == Letra2:
            return 1
        elif tabuleiro[7] == Letra2:
            jogada = random.choice([9, 1])
            return jogada
    elif tabuleiro[7] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[1] == Letra2:
            return 9
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[9] == Letra2:
            return 1
        elif tabuleiro[3] == Letra2:
            jogada = random.choice([9, 1])
            return jogada
    elif tabuleiro[9] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[3] == Letra2:
            return 7
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[7] == Letra2:
            return 3
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[1] == Letra2:
            jogada = random.choice([3, 7])
            return jogada
1db9ca4a9a9e97a3b689efc4295b59ad553cfd7a
3,638,087
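A hedged usage sketch: the board is assumed to be a list indexed 1-9 (index 0 unused) with ' ' marking empty cells — an assumption, since the board representation is defined elsewhere in the game:

board = [' '] * 10
board[3] = 'X'   # computer holds a corner
board[5] = 'O'   # opponent holds the centre
board[1] = 'O'   # opponent holds the opposite corner

print(auxiliar2('X', board))  # 9, per the second branch above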
def create_task():
    """
    Handle task creation: start a data crawl based on the given crawl
    parameters and the name of the crawl-node service.
    :return:
    """
    payload = request.get_json()

    # Look up the crawl-node info
    node = db.nodes.find_one({"name": payload["node"]})

    # Return 404 if the crawl node is not found
    if node is None:
        return abort(404)

    # Save the task info to the database
    payload["task"] = "%s@%s" % (payload["node"], strftime("%Y%m%d%H%M%S", localtime()))
    payload["status"] = 0
    payload["done"] = 0
    db.tasks.insert(payload)

    # Call the node's crawl-task endpoint to start the crawl task
    try:
        resp = post("http://%s:%d/tasks" % (node["addr"], node["port"]), json={
            "task": str(payload["_id"]),
            "type": payload["type"],
            "keyword": payload["keyword"],
            "start": payload["start"],
            "end": payload["end"]
        })

        # If the node request succeeds, return its result directly;
        # otherwise propagate the node's HTTP status.
        if resp.status_code == codes.ok:
            payload["status"] = 1
            db.tasks.save(payload)
            return jsonify(resp.json())
        else:
            abort(resp.status_code)
    except ConnectionError:
        # If the node service cannot be reached due to network problems,
        # delete the task record created above.
        db.tasks.delete_one({"_id": payload["_id"]})
        return jsonify({"success": False, "code": -1})
4963b66174f57687453a4a59b321d564cbec9225
3,638,088
import numpy as np


def eco_hist_calcs(mass, bins, dlogM):
    """
    Returns dictionaries with the counts for the upper and lower density
    portions; calculates the three different percentile cuts for each mass
    array given

    Parameters
    ----------
    mass: array-like
        A 1D array with log stellar mass values, assumed to be in an order
        which corresponds to the ascending densities; (necessary, as the
        index cuts are based on this)
    bins: array-like
        A 1D array with the values which will be used as the bin edges
    dlogM: float-like
        The log difference between bin edges

    Returns
    -------
    hist_dict_low: dictionary-like
        A dictionary with three keys (the frac vals), with arrays as values.
        The values for the lower density cut
    hist_dict_high: dictionary-like
        A dictionary with three keys (the frac vals), with arrays as values.
        The values for the higher density cut
    bin_cens_low: dictionary-like
        Bin centers of the non-empty bins for the lower density cut
    bin_cens_high: dictionary-like
        Bin centers of the non-empty bins for the higher density cut
    """
    hist_dict_low = {}
    hist_dict_high = {}
    bin_cens_low = {}
    bin_cens_high = {}
    frac_val = np.array([2, 4, 10])

    edges = bins
    bin_centers = 0.5 * (edges[:-1] + edges[1:])

    for ii in frac_val:
        frac_data = int(len(mass) / ii)

        frac_mass = mass[0:frac_data]
        counts, edges = np.histogram(frac_mass, bins)
        low_counts = (counts / float(len(frac_mass)) / dlogM)

        non_zero = (low_counts != 0)
        low_counts_1 = low_counts[non_zero]
        hist_dict_low[ii] = low_counts_1
        bin_cens_low[ii] = bin_centers[non_zero]

        # Note: it is unclear whether errors are actually needed on the
        # mocks, but they are computed here in case they prove useful;
        # ECO, at least, needs them.
        low_err = np.sqrt(counts) / len(frac_mass) / dlogM
        low_err_1 = low_err[non_zero]
        err_key = 'err_{0}'.format(ii)
        hist_dict_low[err_key] = low_err_1

        frac_mass_2 = mass[-frac_data:]
        counts_2, edges_2 = np.histogram(frac_mass_2, bins)
        high_counts = (counts_2 / float(len(frac_mass_2)) / dlogM)

        non_zero = (high_counts != 0)
        high_counts_1 = high_counts[non_zero]
        hist_dict_high[ii] = high_counts_1
        bin_cens_high[ii] = bin_centers[non_zero]

        high_err = np.sqrt(counts_2) / len(frac_mass_2) / dlogM
        high_err_1 = high_err[non_zero]
        hist_dict_high[err_key] = high_err_1

    return hist_dict_low, hist_dict_high, bin_cens_low, bin_cens_high
bf4e9ffeec76b2cf7a3e85db73dd1a7fc996ecd9
3,638,089
def avgSentenceLength(text):
    """Return the average sentence length of the text, in tokens."""
    tokens = langtools.tokenize(text)
    return len(tokens) / sentenceCount(text)
4988c4fe945e2eada9abf01aebe0d454620cf565
3,638,090
def retrieve_context_topology_link_available_capacity_total_size_total_size(uuid, link_uuid): # noqa: E501 """Retrieve total-size Retrieve operation of resource: total-size # noqa: E501 :param uuid: ID of uuid :type uuid: str :param link_uuid: ID of link_uuid :type link_uuid: str :rtype: CapacityValue """ return 'do some magic!'
6c0ee9cbf2784b17a6d624530bdf94875b4e751f
3,638,091
import re


def harmonize_geonames_id(uri):
    """If the URI is a GeoNames URL, normalize it to the canonical
    sws.geonames.org RDF form; otherwise return it unchanged."""
    if 'geonames' in uri:
        geo_id = "".join(re.findall(r'\d', uri))
        return "http://sws.geonames.org/{}/".format(geo_id)
    else:
        return uri
acfb8cb4277363c6bee4844a0a95ed2ea464e741
3,638,092
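Two quick examples of the normalization; note the regex keeps every digit in the URI, so a GeoNames URL whose path contained other numbers would be mangled:

print(harmonize_geonames_id('https://www.geonames.org/2761369/vienna.html'))
# http://sws.geonames.org/2761369/
print(harmonize_geonames_id('http://example.org/place/42'))
# http://example.org/place/42  (unchanged: no 'geonames' in the URI)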
def get_apikey(api): """Return the API key.""" if api == "greynoise": return config.greynoise_key if api == "hybrid-analysis": return config.hybrid_analysis_apikey if api == "malshare": return config.malshare_apikey if api == "pulsedive": return config.pulsedive_apikey if api == "twitter": return { "access_token": config.twitter_access_token, "access_token_secret": config.twitter_access_token_secret, "consumer_key": config.twitter_consumer_key, "consumer_secret": config.twitter_consumer_secret }
bf52c5424c657dc9d2173c0fa7434040daf967f3
3,638,094
def create_admin_account(): """ Creates a new admin account """ try: original_api_key = generate_key() secret_key = generate_key() hashed_api_key = generate_password_hash(original_api_key) Interactions.insert(DEFAULT_ACCOUNTS_TABLE, **{'username': 'admin', 'endpoint': '', 'is_admin': True, 'api_key': hashed_api_key, 'secret_key': secret_key}) return {'api_key': original_api_key, 'secret_key': secret_key} except (RqlRuntimeError, RqlDriverError) as err: raise err
3d9d1c7fdfe0492080930855490eb8f050056c54
3,638,095
from functools import wraps


def _arg_wrap(func):
    """ Meta-decorator that lets a decorator be used both with and
    without arguments. """
    @wraps(func)
    def new_decorator(*args, **kwargs):
        # Bare usage (@deco): the sole positional argument is the function.
        if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
            return func(args[0])
        # Argument usage (@deco(...)): defer until the real function arrives.
        else:
            return lambda realf: func(realf, *args, **kwargs)
    return new_decorator
a74f7cf363aab33d770c5f7a080b8796d27e91ec
3,638,096
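A sketch of the pattern in use: a hypothetical `shout` decorator built on `_arg_wrap`, usable both bare and with a keyword argument:

from functools import wraps

@_arg_wrap
def shout(func, suffix="!"):
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper() + suffix
    return wrapper

@shout                  # bare form: _arg_wrap passes the function straight through
def hi():
    return "hi"

@shout(suffix="!!!")    # argument form: _arg_wrap defers until the function arrives
def hey():
    return "hey"

print(hi(), hey())      # HI! HEY!!!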
def _point_as_tuple(input_string: str) -> _Tuple[float, ...]:
    """ Attempts to parse a string as a tuple of floats.

    Checks that the number of elements corresponds to the specified
    dimensions. The purpose of this function more than anything else is to
    validate correct syntax of a CLI argument that is supposed to be a point
    in space.
    """
    out = tuple(float(coordinate) for coordinate in input_string.split(','))
    if len(out) == DIMENSIONS:
        return out
    raise TypeError(
        f"expected {DIMENSIONS} comma-separated coordinates, got {len(out)}"
    )
f828c5ed9cbcf9b1820abaff07c0b646027e6ab9
3,638,097
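A usage sketch, assuming the surrounding module defines DIMENSIONS (set to 3 here for the demo):

DIMENSIONS = 3  # assumed module-level constant

print(_point_as_tuple("1.0,2.5,3"))  # (1.0, 2.5, 3.0)
_point_as_tuple("1.0,2.5")           # raises TypeError: wrong number of coordinates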
import random


def generate_id():
    """Generate a 32-character hexadecimal id."""
    return "%032x" % random.randrange(16 ** 32)
2f9a9eb7cc1808515fb7d71607899bb43d2ac682
3,638,098
def __charge_to_sdf(charge):
    """Translate RDKit charge to the SDF language.

    Args:
        charge (int): Numerical atom charge.

    Returns:
        str: Str representation of a charge in the sdf language
    """
    # V2000 atom-block charge codes: 0 = uncharged, 1 = +3, 2 = +2,
    # 3 = +1, 5 = -1, 6 = -2, 7 = -3 (4 denotes a doublet radical).
    codes = {-3: "7", -2: "6", -1: "5", 0: "0", 1: "3", 2: "2", 3: "1"}
    return codes.get(charge, "0")
1bfda86ee023e8c11991eaae2969b87a349b7f7e
3,638,099
def save_str(self=str(''), filename=str('output.txt'), permissions=str('w')):
    """Save a given string to disk using a given file name.

    Args:
        self(str): String to save to disk. (default str(''))
        filename(str): File name to use when saving to disk. (default str('output.txt'))
        permissions(str): Permissions to use when opening file. (default str('w'))

    Returns:
        dict: Input parameters specified."""
    # Convert variables, falling back to safe defaults when None is passed.
    string = str(self) if self is not None else ''
    filename = str(filename) if filename is not None else 'output.txt'
    permissions = str(permissions) if permissions is not None else 'w'
    # Write to the file on disk; the context manager closes it automatically.
    with open(filename, permissions) as file_on_disk:
        file_on_disk.write(string)
    # Create a dict of the parameters actually used.
    params = dict({'string': [string],
                   'filename': [filename],
                   'permissions': [permissions]})
    # Return the dict of specified parameters.
    return params
87df6f654834409397d63e0313d26c70d1944a11
3,638,100
def balanced(banked_chemicals): """return true if all non-ore chemicals have non-negative amounts.""" def _enough(chemical): return chemical == "ORE" or banked_chemicals[chemical] >= 0 return all(map(_enough, banked_chemicals))
c42d492bfc67664040095260c24bbff155e98d5e
3,638,101
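A tiny usage sketch, assuming the bank is a dict mapping chemical -> remaining amount (only ORE may go negative):

print(balanced({'ORE': -37, 'FUEL': 1, 'A': 0}))   # True
print(balanced({'ORE': -37, 'FUEL': 1, 'A': -2}))  # False: 'A' is overdrawn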
import bz2
import json


def dict2json(thedict, json_it=False, compress_it=False):
    """If compress_it, return the bzip2-compressed JSON encoding of thedict;
    else if json_it, return thedict as JSON; otherwise return thedict
    unchanged."""
    if compress_it:
        return bz2.compress(json.dumps(thedict).encode())
    elif json_it:
        return json.dumps(thedict)
    else:
        return thedict
b6158427c653a00cc6953ce9f0b0a0fb4881bd7a
3,638,102
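A usage sketch covering the three modes, with a bzip2 round-trip to confirm the compressed path:

import bz2
import json

d = {'a': 1, 'b': [2, 3]}
print(dict2json(d))                      # the dict itself, untouched
print(dict2json(d, json_it=True))        # '{"a": 1, "b": [2, 3]}'

blob = dict2json(d, compress_it=True)    # bz2-compressed JSON bytes
print(json.loads(bz2.decompress(blob)))  # back to {'a': 1, 'b': [2, 3]}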
def compoundedInterest(fv, p):
    """Compounded interest: the total interest earned, i.e. future value
    minus principal.

    Returns:
        Interest value

    Input values:
        fv : Future value
        p  : Principal
    """
    i = fv - p
    return i
00f7fd1f141293afe595393eca23c308d3fdd7d0
3,638,105
def get_volumetric_scene(self, data_key="total", isolvl=0.5, step_size=3, **kwargs):
    """Get the Scene object which contains a structure and an isosurface component

    Args:
        data_key (str, optional): Use the volumetric data from self.data[data_key]. Defaults to 'total'.
        isolvl (float, optional): The cutoff for the isosurface, given in the same units as VESTA
            (e/bohr) so that it stays independent of the grid size. Defaults to 0.5.
        step_size (int, optional): step_size parameter for marching_cubes_lewiner. Defaults to 3.
        **kwargs: kwargs for the Structure.get_scene function

    Returns:
        Scene: the scene containing the structure and isosurface components
    """

    struct_scene = self.structure.get_scene(**kwargs)
    iso_scene = self.get_isosurface_scene(
        data_key=data_key,
        isolvl=isolvl,
        step_size=step_size,
        origin=struct_scene.origin,
    )
    struct_scene.contents.append(iso_scene)
    return struct_scene
836fb5f3158ed5fe55a2975ce05eb21636584a95
3,638,106
from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OrdinalEncoder def encode_labels(x, features): """ Maps strings to integers """ encoder = ColumnTransformer([("", OrdinalEncoder(), features)], n_jobs=-1) x[:, features] = encoder.fit_transform(x) return x
8f318020f663a88733dea9389627cb62d483892c
3,638,107
def get_jogframe(
    conx: Connection, idx: int, group: int = 1, include_comment: bool = False
) -> t.Tuple[Position_t, t.Optional[str]]:
    """Return the jog frame at index 'idx'.

    :param idx: Numeric ID of the jog frame.
    :type idx: int
    :param group: Numeric ID of the motion group the jog frame is associated
     with.
    :type group: int
    :returns: A tuple containing the jog frame and associated comment (if
     requested)
    :rtype: tuple(Position_t, str)
    """
    if group < 1 or group > 8:
        raise ValueError(
            f"Requested group id invalid (must be between 1 and 8, got: {group})"
        )
    if idx < 1 or idx > 5:
        raise ValueError(
            f"Requested jog frame idx invalid (must be between 1 and 5, got: {idx})"
        )

    varname = f'[TPFDEF]JOGFRAMES[{group},{idx}]'
    frame = _get_frame_var(conx, varname)

    cmt = None
    if include_comment:
        JOGFRAME = 2
        cmt = _get_frame_comment(conx, frame_type=JOGFRAME, group=group, idx=idx)

    return (frame, cmt)
9a1a215311d111c262ab9810c5933a07c39d4def
3,638,108
def rotate(arr, bins): """ Return an array rotated by 'bins' places to the left :param list arr: Input data :param int bins: Number of bins to rotate by """ bins = bins % len(arr) if bins == 0: return arr else: return np.concatenate((arr[bins:], arr[:bins]))
0ac038377ed173130d83ef5231bd9678058de28a
3,638,109
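Quick examples, including the modular wrap-around (note the rotated results come back as numpy arrays):

print(rotate([1, 2, 3, 4, 5], 2))  # [3 4 5 1 2]
print(rotate([1, 2, 3, 4, 5], 7))  # same result, since 7 % 5 == 2
print(rotate([1, 2, 3], 0))        # [1, 2, 3], returned unchanged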
from typing import Iterable
from typing import Optional
from typing import List
from typing import OrderedDict


def clear_list(items: Iterable[Optional[Typed]]) -> List[Typed]:
    """ return unique items in order of first occurrence """
    return list(OrderedDict.fromkeys(i for i in items if i is not None))
511bbfb6b567d494a143fb1e1ee06c39f54c47e8
3,638,110
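A small demonstration: duplicates and None entries are dropped while first-seen order is kept:

print(clear_list([3, None, 1, 3, 2, 1]))  # [3, 1, 2]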
def chords(labels): """ Transform a list of chord labels into an array of internal numeric representations. Parameters ---------- labels : list List of chord labels (str). Returns ------- chords : numpy.array Structured array with columns 'root', 'bass', and 'intervals', containing a numeric representation of chords (`CHORD_DTYPE`). """ crds = np.zeros(len(labels), dtype=CHORD_DTYPE) cache = {} for i, lbl in enumerate(labels): cv = cache.get(lbl, None) if cv is None: cv = chord(lbl) cache[lbl] = cv crds[i] = cv return crds
53d14ab6318dfaeeb77d0dd5f815c8c2f3359918
3,638,111