content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def get_nums(image): """get the words from an image using pytesseract. the extracted words are cleaned and all spaces, newlines and non uppercase characters are removed. :param image: inpout image :type image: cv2 image :return: extracted words :rtype: list """ # pytesseract config config = ('--psm 6 --oem 3 -c tessedit_char_whitelist=0123456789/') # extract text and preprocess text = pytesseract.image_to_string(image, config=config) text = ''.join([c for c in text if c.isdigit() or c in ['\n', ' ', '.']]) # return as a lis return text.split()
0ff23d8363a14a46c7f6ffa2be130c6eb61409c8
3,642,373
def create_bag_of_vocabulary_words():
    """Build the list of words which can be conceived during the game.

    The words are read from ``hangman/vocabulary.txt`` (one or more
    whitespace-separated words per line).

    :return: list of vocabulary words
    :rtype: list of str
    """
    words_array = []
    # 'with' guarantees the file handle is closed even if reading raises,
    # unlike the previous open()/close() pair.
    with open("./hangman/vocabulary.txt") as file_object:
        for line in file_object:
            words_array.extend(line.split())
    return words_array
e3aadad2575e28b19b83158eb2127437c8aada89
3,642,374
import math


def kato_ranking_candidates(identifier: Identifier, params=None):
    """Rank definition candidates using the method of Kato, S. and Kano, M.

    Candidates are the noun phrases from the sentence in which the
    identifier first appeared.

    Args:
        identifier (Identifier)
        params (dict): model hyper-parameters; defaults are used when None.

    Returns:
        List[Definition]: candidates sorted by descending score (a single
        empty Definition when there are no candidates).
    """
    if params is None:
        params = {
            'sigma_d': math.sqrt(12 / math.log(2)),
            'sigma_s': 2 / math.sqrt(math.log(2)),
            'alpha': 1,
            'beta': 1,
            'gamma': 0.1,
            'eta': 1,
        }

    first_sentence_id = identifier.sentences[0].id
    weight_total = (params['alpha'] + params['beta']
                    + params['gamma'] + params['eta'])

    definitions = []
    for cand in identifier.candidates:
        sentence_offset = cand.included_sentence.id - first_sentence_id
        word_distance = cand.word_count_btwn_var_cand + 1  # minimum is 1.
        term_freq = (cand.candidate_count_in_sentence
                     / len(cand.included_sentence.replaced.strip()))
        initial_char_score = cand.score_match_character
        # Gaussian-like decay over word distance and sentence offset.
        decay_distance = math.exp(
            -1 / 2 * (word_distance ** 2 - 1) / params['sigma_d'] ** 2)
        decay_sentence = math.exp(
            -1 / 2 * (sentence_offset ** 2 - 1) / params['sigma_s'] ** 2)
        # Weighted average of the four evidence terms.
        weighted = (params['alpha'] * decay_distance
                    + params['beta'] * decay_sentence
                    + params['gamma'] * term_freq
                    + params['eta'] * initial_char_score) / weight_total
        definitions.append(
            Definition(definition=cand.text, score=weighted, params=params))

    definitions.sort(key=lambda d: d.score, reverse=True)
    return definitions if definitions else [Definition(definition='')]
c8a413118b599eb3cb9c9db877d7d489871d65a2
3,642,375
def _get_bag_of_pos_with_dependency(words, index):
    """Return pos list surrounding index

    Collects dependency-tree neighbours of words[index] (governor, ancestor,
    siblings, sibling-children, children, grandchildren), each tagged with
    its relation name.

    Args:
        words (list): stanfordnlp word list object having pos attributes.
        index (int): target index

    Return:
        pos_list (List[str]): xpos format string list
    """
    pos_list = []

    def _get_governor(_index, name):
        # Resolve the governor (head) of words[_index]; returns its absolute
        # index and a one-element feature list tagged with `name`.
        governor_list = []
        if int(words[_index].governor) == 0:
            # case _index word has no governer
            return -1, governor_list
        # Governor is expressed sentence-relatively; convert to an absolute
        # index into `words` via the word's own sentence-relative index.
        governor_index = _index + (int(words[_index].governor) - int(words[_index].index))
        if governor_index < len(words):
            governor = words[governor_index]
            governor_list.append(_get_word_feature(governor) + '_' + name)
        else:
            # Governor falls outside the visible word window.
            governor_list.append(NONE_DEPENDENCY + '_' + name)
        return governor_index, governor_list

    def _get_children(_index, name):
        # Find all words in the same sentence whose governor is words[_index];
        # returns their absolute indices and tagged feature strings.
        children = []
        child_list = []
        # Sentence starts are the words whose sentence-relative index is 1.
        roots = [(i, w) for i, w in enumerate(words) if int(w.index) == 1]
        start_index = 0
        end_index = len(words) - 1
        # Narrow [start_index, end_index] to the sentence containing _index.
        for i, w in roots:
            if i <= _index:
                start_index = i
            else:
                end_index = i - 1
                break
        for i, w in enumerate(words[start_index:end_index + 1]):
            if int(w.governor) == int(words[_index].index):
                children.append(start_index + i)
                child_list.append(_get_word_feature(w) + '_' + name)
        return children, child_list

    # add governor
    governor_index, governor_list = _get_governor(index, 'governor')
    if 0 <= governor_index < len(words):
        # case index word has a governer
        pos_list.extend(governor_list)
        if int(words[governor_index].governor) != 0:
            # case _index word has a governer
            # add ancestor
            _, ancestor_list = _get_governor(governor_index, 'ancestor')
            pos_list.extend(ancestor_list)
        # add sibling (other children of the governor, excluding index itself)
        siblings, sibling_list = _get_children(governor_index, 'sibling')
        i_index = siblings.index(index)
        del sibling_list[i_index]
        del siblings[i_index]
        pos_list.extend(sibling_list)
        # add sibling list
        for i in siblings:
            sibling_children, sibling_child_list = _get_children(i, 'sibling_child')
            pos_list.extend(sibling_child_list)
    # add child
    children, child_list = _get_children(index, 'child')
    pos_list.extend(child_list)
    for i in children:
        grandchildren, grandchild_list = _get_children(i, 'grandchild')
        pos_list.extend(grandchild_list)
    return pos_list
02fc508583d79464161927080c1c55d308926274
3,642,376
def fix_time_individual(df):
    """Combine the ``date`` and ``time`` columns into np.datetime64 values.

    1. pandas.apply a jit function to zero-pad the time to 8 digits
    2. concat date + time
    3. change to np.datetime64
    """
    @jit
    def _pad_and_format(value):
        # Zero-pad to 8 digits (HHMMSSff) and insert separators.
        digits = "0" * (8 - len(str(value))) + str(value)
        return f"{digits[:2]}:{digits[2:4]}:{digits[4:6]}.{digits[6:]}"

    combined = df["date"] + " " + df["time"].apply(_pad_and_format)
    return combined.astype(np.datetime64)
8d0c99d3f485d852130f9f4fe7ab05bbcdd99557
3,642,377
def convolve_fft(data, kernel, kernel_fft=False, return_fft=False):
    """
    Convolve data with a kernel.

    This is inspired by astropy.convolution.convolve_fft, but stripped down
    to what's needed for the expected application. That has the benefit of
    cutting down on the execution time, but limits its use.

    Beware:
        - ``data`` and ``kernel`` must have the same shape.
        - For the sum of all pixels in the convolved image to be the same
          as the input data, the kernel must sum to unity.
        - Padding is never added by default.

    Args:
        data (`numpy.ndarray`_):
            Data to convolve.
        kernel (`numpy.ndarray`_):
            The convolution kernel, which must have the same shape as
            ``data``. If ``kernel_fft`` is True, this is the FFT of the
            kernel image; otherwise, this is the direct kernel image with
            the center of the kernel at the center of the array.
        kernel_fft (:obj:`bool`, optional):
            Flag that the provided ``kernel`` array is actually the FFT of
            the kernel, not its direct image.
        return_fft (:obj:`bool`, optional):
            Flag to return the FFT of the convolved image, instead of the
            direct image.

    Returns:
        `numpy.ndarray`_: The convolved image, or its FFT, with the same
        shape as the provided ``data`` array.

    Raises:
        ValueError:
            Raised if ``data`` and ``kernel`` do not have the same shape or
            if any of their values are not finite.
    """
    if data.shape != kernel.shape:
        raise ValueError('Data and kernel must have the same shape.')
    # Count non-finite values once and report them in the exception message
    # instead of printing a debug banner to stdout (removed leftover prints).
    n_bad_data = (~np.isfinite(data)).sum()
    n_bad_kernel = (~np.isfinite(kernel)).sum()
    if n_bad_data or n_bad_kernel:
        raise ValueError('Data and kernel must both have valid values; '
                         f'found {n_bad_data} non-finite values in data and '
                         f'{n_bad_kernel} in kernel.')
    datafft = np.fft.fftn(data)
    # ifftshift moves the kernel center to the array origin, which is where
    # the FFT-based convolution expects it.
    kernfft = kernel if kernel_fft else np.fft.fftn(np.fft.ifftshift(kernel))
    fftmult = datafft * kernfft
    return fftmult if return_fft else np.fft.ifftn(fftmult).real
64fc4c02f72c419f6c315f524597a32391ea7b8c
3,642,378
def friable_sand(Ks, Gs, phi, phic, P_eff, n=-1, f=1.0):
    """
    Friable sand rock physics model.

    Reference: Avseth et al., Quantitative Seismic Interpretation, p.54

    Inputs:
        Ks    = Bulk modulus of mineral matrix
        Gs    = Shear modulus of mineral matrix
        phi   = porosity
        phic  = critical porosity
        P_eff = effective pressure
        n     = coordination number
        f     = shear reduction factor

    Outputs:
        K_dry = dry rock bulk modulus of friable rock
        G_dry = dry rock shear modulus of friable rock
    """
    # End member at critical porosity from Hertz-Mindlin contact theory.
    K_hm, G_hm = hertz_mindlin(Ks, Gs, phic, P_eff, n, f)

    # Modified lower Hashin-Shtrikman bound between the Hertz-Mindlin
    # end member (phi = phic) and the solid mineral (phi = 0).
    porosity_ratio = phi / phic
    zeta = G_hm / 6 * (9 * K_hm + 8 * G_hm) / (K_hm + 2 * G_hm)

    bulk_mix = (porosity_ratio / (K_hm + 4 / 3 * G_hm)
                + (1 - porosity_ratio) / (Ks + 4.0 / 3.0 * G_hm))
    K_dry = bulk_mix ** -1 - 4.0 / 3.0 * G_hm

    shear_mix = (porosity_ratio / (G_hm + zeta)
                 + (1.0 - porosity_ratio) / (Gs + zeta))
    G_dry = shear_mix ** -1 - zeta

    return K_dry, G_dry
ace533ee727cd4749ad210b13eec5193b74416b8
3,642,380
def get_available_currencies():
    """
    Retrieve a listing of all the available currencies that have indexed
    currency crosses, as listed in Investing.com.

    Each returned currency appears as either the base or the second value of
    at least one currency cross, so it can be used to search currency
    crosses and retrieve their historical data.

    Returns:
        :obj:`list` - available_currencies:
            The resulting :obj:`list` contains every available currency,
            e.g.::

                available_currencies = ['AED', 'AFN', 'ALL', 'AMD', 'ANG', ...]

    Raises:
        FileNotFoundError: raised if currency crosses file was not found.
        IOError: raised if currency crosses retrieval failed, both for
            missing file or empty file.
    """
    # Thin convenience wrapper around the module-level helper.
    return available_currencies_as_list()
139f775943bc251149444c702cb4290d78a58a03
3,642,381
def mktemp(suffix="", prefix=template, dir=None): """User-callable function to return a unique temporary file name. The file is not created. Arguments are as for mkstemp, except that the 'text' argument is not accepted. This function is unsafe and should not be used. The file name refers to a file that did not exist at some point, but by the time you get around to creating it, someone else may have beaten you to the punch. """ ## from warnings import warn as _warn ## _warn("mktemp is a potential security risk to your program", ## RuntimeWarning, stacklevel=2) if dir is None: dir = gettempdir() names = _get_candidate_names() for seq in xrange(TMP_MAX): name = names.next() file = _os.path.join(dir, prefix + name + suffix) if not _exists(file): return file raise IOError, (_errno.EEXIST, "No usable temporary filename found")
0785609c3284b0052fa31767d0df11476b28c786
3,642,382
def getTaskIdentifier( task_id ) :
    """Get tuple of Type and Instance identifiers."""
    # Single ORM lookup; the type identifier is resolved via the relation.
    instance = Instance.objects.get(id=task_id)
    return (instance.type.identifier, instance.identifier)
fb18be814330bd02205d355b3ebfb68f777ee9c2
3,642,383
def hessian_vector_product(loss, weights, v):
    """Compute the tensor H.v, where H is the loss Hessian w.r.t. weights.

    ``v`` is a rank-1 tensor with the same size as the flattened loss
    gradient; its element ordering matches flatten_tensor_list() applied to
    the gradient. ``v`` is wrapped in stop_gradient so dv/dweights vanishes.
    """
    flat_grad = flatten_tensor_list(tf.gradients(loss, weights))
    # Scalar g.v; differentiating it w.r.t. the weights yields H.v.
    inner_product = tf.reduce_sum(flat_grad * tf.stop_gradient(v))
    return flatten_tensor_list(tf.gradients(inner_product, weights))
35ef7772367f56fcded2e4173fe194cb28da3bc7
3,642,384
def clean_cells(nb_node):
    """Strip outputs and execution counts from every code cell.

    Mutates ``nb_node`` in place and returns it for convenience; non-code
    cells are left untouched.
    """
    code_cells = (c for c in nb_node['cells'] if c['cell_type'] == 'code')
    for cell in code_cells:
        if 'outputs' in cell:
            cell['outputs'] = []
        if 'execution_count' in cell:
            cell['execution_count'] = None
    return nb_node
67dce7ecc3590143730f943d3eb07ae7df9d8145
3,642,385
def getProjectProperties():
    """Return the project properties from the metadata loader.

    :return:
    @rtype: list of ProjectProperty
    """
    loader = getMetDataLoader()
    return loader.projectProperties
7f517a20d83002c41867bbc7911f775d64b21b88
3,642,387
def svn_client_cleanup(*args):
    """svn_client_cleanup(char dir, svn_client_ctx_t ctx, apr_pool_t scratch_pool) -> svn_error_t"""
    # SWIG-generated wrapper: delegates directly to the C extension module.
    return _client.svn_client_cleanup(*args)
2a9921e8521e927e124633bb932b158a1f9abdf3
3,642,388
def model_chromatic(psrs, psd='powerlaw', noisedict=None, components=30,
                    gamma_common=None, upper_limit=False, bayesephem=False,
                    wideband=False, idx=4, chromatic_psd='powerlaw',
                    c_psrs=None):
    """
    Reads in list of enterprise Pulsar instances and returns a PTA
    instantiated with model 2A from the analysis paper + additional
    chromatic noise for given pulsars.

    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
        6. Chromatic noise for given pulsar list

    global:
        1. Common red noise modeled with user defined PSD with 30 sampling
           frequencies. Available PSDs are ['powerlaw', 'turnover', 'spectrum']
        2. Optional physical ephemeris modeling.

    :param psd: PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is default.
    :param noisedict: Dictionary of pulsar noise properties. Can provide
        manually, or the code will attempt to find it.
    :param gamma_common: Fixed common red process spectral index value. By
        default we vary the spectral index over the range [0, 7].
    :param upper_limit: Perform upper limit on common red noise amplitude.
        By default this is set to False. Note that when performing upper
        limits it is recommended that the spectral index also be fixed.
    :param bayesephem: Include BayesEphem model. Set to False by default.
    :param wideband: Use wideband par and tim files. Ignore ECORR. Set to
        False by default.
    :param idx: Index of chromatic process (i.e. DM is 2, scattering would
        be 4). If set to `vary` then will vary from 0 - 6 (VERY slow!).
    :param chromatic_psd: PSD to use for chromatic noise. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is default.
    :param c_psrs: List of pulsars to use chromatic noise. 'all' will use
        all pulsars. Defaults to ['J1713+0747'].
    :return: instantiated enterprise signal_base.PTA object
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # Avoid a mutable default argument; None stands in for the documented
    # single-pulsar default.
    if c_psrs is None:
        c_psrs = ['J1713+0747']

    # find the maximum time span to set GW frequency sampling
    Tspan = model_utils.get_tspan(psrs)

    # white noise
    s = white_noise_block(vary=False, wideband=wideband)

    # red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)

    # common red noise block
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components,
                                gamma_val=gamma_common, name='gw')

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)

    # timing model
    s += gp_signals.TimingModel()

    # chromatic noise: either add it to every pulsar's model or only to the
    # pulsars named in c_psrs.
    sc = chromatic_noise_block(psd=chromatic_psd, idx=idx)
    if c_psrs == 'all':
        s += sc
        models = [s(psr) for psr in psrs]
    elif len(c_psrs) > 0:
        models = []
        for psr in psrs:
            if psr.name in c_psrs:
                print('Adding chromatic model to PSR {}'.format(psr.name))
                snew = s + sc
                models.append(snew(psr))
            else:
                models.append(s(psr))

    # set up PTA
    pta = signal_base.PTA(models)

    # set white noise parameters (removed the redundant self-assignment
    # `noisedict = noisedict` that was here).
    if noisedict is None:
        print('No noise dictionary provided!...')
    else:
        pta.set_default_params(noisedict)

    return pta
568f4951930fe6f8175417785c4503895f76bc88
3,642,389
def test_f32(heavydb): """If UDF name ends with an underscore, expect strange behaviour. For instance, defining @heavydb('f32(f32)', 'f32(f64)') def f32_(x): return x+4.5 the query `select f32_(0.0E0))` fails but not when defining @heavydb('f32(f64)', 'f32(f32)') def f32_(x): return x+4.5 (notice the order of signatures in heavydb decorator argument). """ @heavydb('f32(f32)', 'f32(f64)') # noqa: F811 def f_32(x): return x+4.5 descr, result = heavydb.sql_execute( 'select f_32(0.0E0) from {heavydb.table_name} limit 1' .format(**locals())) assert list(result)[0] == (4.5,)
157560cc90e3f869d84198eeb26896a76157eb39
3,642,391
from typing import Union
from pathlib import Path


def get_message_bytes(
        file_path: Union[str, Path],
        count: int,
) -> bytes:
    """Read the ``count``-th field from a GRIB2 file, crop it to the
    north-east region, and return the re-encoded message bytes.

    Parameters
    ----------
    file_path
        Path to the GRIB2 file.
    count
        1-based field index (ecCodes GRIB key ``count``).

    Returns
    -------
    bytes
        The re-encoded GRIB2 message bytes.
    """
    message = load_message_from_file(file_path, count=count)
    # Crop to longitudes 0..180, latitudes 89.875 down to 0.125
    # (the north-east region referred to above).
    message = extract_region(
        message,
        0, 180, 89.875, 0.125
    )
    message_bytes = eccodes.codes_get_message(message)
    # Release the ecCodes handle to avoid leaking native resources.
    eccodes.codes_release(message)
    return message_bytes
6a2ad3a20e02283c2bffe31eb78cacf84d92ff6f
3,642,392
from typing import Union
from typing import List
import json


def discover_climate_observations(
    time_resolution: Union[
        None, str, TimeResolution, List[Union[str, TimeResolution]]
    ] = None,
    parameter: Union[None, str, Parameter, List[Union[str, Parameter]]] = None,
    period_type: Union[None, str, PeriodType, List[Union[str, PeriodType]]] = None,
) -> str:
    """Discover available time_resolution/parameter/period_type combinations.

    :param parameter: Observation measure
    :param time_resolution: Frequency/granularity of measurement interval
    :param period_type: Recent or historical files
    :return: Result of available combinations in JSON.
    """
    # A missing filter means "everything".
    time_resolution = parse_enumeration(
        TimeResolution, time_resolution or [*TimeResolution])
    parameter = parse_enumeration(Parameter, parameter or [*Parameter])
    period_type = parse_enumeration(PeriodType, period_type or [*PeriodType])

    # First pass: keep only the requested resolutions/parameters/periods.
    filtered = {}
    for resolution, params_to_periods in TIME_RESOLUTION_PARAMETER_MAPPING.items():
        if resolution not in time_resolution:
            continue
        filtered[resolution] = {
            param: [period for period in periods if period in period_type]
            for param, periods in params_to_periods.items()
            if param in parameter
        }

    # Second pass: stringify keys/values and drop empty entries.
    combinations = {}
    for resolution, params_to_periods in filtered.items():
        if not params_to_periods:
            continue
        combinations[str(resolution)] = {
            str(param): [str(period) for period in periods]
            for param, periods in params_to_periods.items()
            if periods
        }

    return json.dumps(combinations, indent=4)
b96fd2a0a9bcb9a7b50018a1b7e3ae7add3e3c63
3,642,393
def set_template(template_name, file_name, p_name):
    """ Insert template into the E-mail. """
    # Render the HTML body from the named template.
    body = template(template_name, file_name, p_name)
    message = MIMEMultipart()
    message['from'] = p_name
    message['subject'] = f'{file_name}'
    message.attach(MIMEText(body, 'html'))
    return message
8745d9729ddbe159e0bca90dee198ce4e3efb489
3,642,394
import gettext


def lazy_gettext(string):
    """A lazy version of `gettext`."""
    # Already-wrapped values pass through unchanged, so double wrapping
    # cannot occur.
    if isinstance(string, _TranslationProxy):
        return string
    # NOTE(review): the gettext *module* object is passed here, not a
    # translation function -- confirm _TranslationProxy expects that.
    return _TranslationProxy(gettext, string)
9229c987d6b2f300f7225ea4b58f964c70e882fc
3,642,395
def toggleautowithdrawalstatus(status, fid, alternate_token=False):
    """
    Sets auto-withdrawal status of the account associated with the
    current OAuth token under the specified funding ID.

    :param status: Boolean for toggle (True to enable, False to disable).
    :param fid: String with funding ID for target account
    :param alternate_token: Optional OAuth token to use instead of the
        configured access token.
    :return: String (Either "Enabled" or "Disabled")
    """
    # `status` is a boolean toggle, so only a *missing* value is an error.
    # The previous `if not status` check also raised for status=False,
    # which made it impossible to disable auto-withdrawal.
    if status is None:
        raise Exception('toggleautowithdrawlstatus() requires status parameter')
    if not fid:
        raise Exception('toggleautowithdrawlstatus() requires fid parameter')

    return r._post('/accounts/features/auto_withdrawl',
                   {
                       'oauth_token': alternate_token if alternate_token else c.access_token,
                       'enabled': status,
                       'fundingId': fid
                   })
4df2be7801a23978c58b7ce8aec7e5fd30fb1e76
3,642,396
def load_avenger_models():
    """
    Load each instance of data from the repository into its associated model
    at this point in the schema lifecycle

    :return: list of populated Avenger models (each with its Death records)
    """
    avengers = []
    for item in fetch_avenger_data():
        # Explicitly assign each attribute of the model, so various attributes can be ignored
        avenger = Avenger(url=item.url,
                          name=item.name,
                          appearances=item.appearances,
                          current=item.current == "YES",
                          gender=item.gender,
                          probationary=parse_date(item.probationary),
                          full_reserve=parse_date(item.full_reserve, item.year),
                          year=item.year,
                          honorary=item.honorary,
                          notes=item.notes)
        # Iterate over the known indices of deaths (max in data range is 5)
        for occurrence in range(1, 6):
            # If the death attribute exists and has a value, create a new Death instance and load
            # the associated instance data before adding it to the list of deaths on the current
            # avenger
            if getattr(item, f"death{occurrence}", None):
                avenger.deaths.append(
                    Death(death=getattr(item, f"death{occurrence}") == "YES",  # Convert string to boolean
                          returned=getattr(item, f"return{occurrence}") == "YES",  # Convert string to boolean
                          sequence=occurrence)  # Add the sequence of this death, order is important!
                )
            else:
                break  # If this is the last death, there is no reason to check subsequent iterations
        avengers.append(avenger)  # Add this avenger to the list of avengers
    return avengers
70740495be63a198cf5ec1308608955f52be46f0
3,642,397
import json
import _json
import _datetime


def aggregate_points(point_layer,
                     bin_type=None,
                     bin_size=None,
                     bin_size_unit=None,
                     polygon_layer=None,
                     time_step_interval=None,
                     time_step_interval_unit=None,
                     time_step_repeat_interval=None,
                     time_step_repeat_interval_unit=None,
                     time_step_reference=None,
                     summary_fields=None,
                     output_name=None,
                     gis=None,
                     future=False):
    """
    Aggregate a point layer into polygons or auto-generated square/hexagonal
    bins, attaching the point count and optional field statistics to each
    area (GeoAnalytics ``AggregatePoints`` tool).

    Either ``polygon_layer`` or ``bin_size`` + ``bin_size_unit`` must be
    supplied. The time-slicing parameters apply only when the input points
    are time-enabled and of time type instant.

    :param point_layer: Required point feature layer to aggregate.
    :param bin_type: 'Square' (default) or 'Hexagon'. Required when no
        ``polygon_layer`` is given. For Square the size sets the edge
        length; for Hexagon it sets the distance between parallel sides.
    :param bin_size: Bin size (float); required when ``bin_type`` is used.
    :param bin_size_unit: One of 'Feet', 'Yards', 'Miles', 'Meters',
        'Kilometers', 'NauticalMiles'; required when ``bin_size`` is used.
    :param polygon_layer: Polygon layer into which points are aggregated
        (alternative to bins).
    :param time_step_interval: Duration of the time-step interval (int).
    :param time_step_interval_unit: Unit for the time step ('Years' ...
        'Milliseconds').
    :param time_step_repeat_interval: How often the time step repeats (int).
    :param time_step_repeat_interval_unit: Unit for the repeat interval.
    :param time_step_reference: Datetime the time slices are aligned to
        (default: Unix epoch).
    :param summary_fields: List of dicts such as
        ``[{"statisticType": "Sum", "onStatisticField": "TOTAL_SALES"}]``.
        Numeric statistic types: Count, Sum, Mean, Min, Max, Range, Stddev,
        Var; string types: Count, Any. The point count per area is always
        returned.
    :param output_name: Name for the output feature service; auto-generated
        when omitted.
    :param gis: GIS on which the tool runs; the active GIS when omitted.
    :param future: When True, return a GPJob instead of waiting for results.
    :return: Output features as a feature layer item.

    .. code-block:: python

        # Aggregate 911 calls into 1 km bins with a yearly time step.
        agg_result = aggregate_points(
            calls, bin_size=1, bin_size_unit='Kilometers',
            time_step_interval=1, time_step_interval_unit="Years",
            summary_fields=[{"statisticType": "Count",
                             "onStatisticField": "Day"}],
            output_name='testaggregatepoints01')
    """
    # Must be the first statement: snapshots all arguments for param building.
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    # Only forward arguments that were actually provided.
    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Aggregate Points Analysis_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    # Pre-create the output service so the tool has somewhere to write.
    output_service = _create_output_service(
        gis, output_name, output_service_name, 'Aggregate Points')

    params['output_name'] = _json.dumps({
        "serviceProperties": {"name": output_name,
                              "serviceUrl": output_service.url},
        "itemProperties": {"itemId": output_service.itemid}})

    if isinstance(summary_fields, list):
        summary_fields = json.dumps(summary_fields)

    # Injects extent/processSR/outSR/dataStore settings into params.
    _set_context(params)

    param_db = {
        "point_layer": (_FeatureSet, "pointLayer"),
        "bin_type": (str, "binType"),
        "bin_size": (float, "binSize"),
        "bin_size_unit": (str, "binSizeUnit"),
        "polygon_layer": (_FeatureSet, "polygonLayer"),
        "time_step_interval": (int, "timeStepInterval"),
        "time_step_interval_unit": (str, "timeStepIntervalUnit"),
        "time_step_repeat_interval": (int, "timeStepRepeatInterval"),
        "time_step_repeat_interval_unit": (str, "timeStepRepeatIntervalUnit"),
        "time_step_reference": (_datetime, "timeStepReference"),
        "summary_fields": (str, "summaryFields"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "Output Features"),
    }
    return_values = [
        {"name": "output", "display_name": "Output Features",
         "type": _FeatureSet},
    ]

    try:
        _execute_gp_tool(gis, "AggregatePoints", params, param_db,
                         return_values, _use_async, url, True, future=future)
        return output_service
    except BaseException:
        # Was a bare `except:`; made explicit. Clean up the pre-created
        # output service on any failure, then re-raise.
        output_service.delete()
        raise
fe946d4273ed1ce4e4cd3e46d9f9a3e0ff5c6725
3,642,398
def scattered_embedding_lookup(params, values, dimension, name=None, hash_key=None):
    """Look up hash-based ("feature hashing") embeddings for entries of `values`.

    Component i of the embedding for a value v is the parameter selected by a
    fingerprint of the pair (v, i), so no pre-determined vocabulary is needed
    and each token's embedding is almost surely unique (unlike shared
    out-of-vocabulary hash buckets).  See http://arxiv.org/pdf/1504.04788.pdf
    for the model-compression background.

    If `params` is a list it is treated as a partition of the embedding
    parameters: every tensor must have the same length, except the leading
    ones which may hold one extra element (e.g. 10 parameters may be split
    into tensors of length [3, 3, 2, 2]).

    Args:
      params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`, each
        of rank 1 with fully-defined shape.
      values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
      dimension: Embedding dimension.
      name: An optional name for this op.
      hash_key: Specify the hash_key that will be used by the
        `FingerprintCat64` function to combine the crosses fingerprints on
        SparseFeatureCrossOp (optional).

    Returns:
      A `Tensor` with shape `[d0, ..., dn, dimension]`.

    Raises:
      ValueError: if dimension is not positive or the partition size is
        invalid.
    """
    if dimension is None:
        raise ValueError("You must specify dimension.")
    # Delegate to the sampled variant with sampling disabled (all components).
    return _sampled_scattered_embedding_lookup(
        params,
        values,
        dimension=dimension,
        sampled_candidates=None,
        hash_key=hash_key,
        name=name)
a317d7d494bd9b9918f6f2354d854c2fbffc1c6c
3,642,399
from typing import Iterable
from typing import Callable
def get_features_and_labels(instances: Iterable[NewsHeadlineInstance],
                            feature_generator: Callable[[NewsHeadlineInstance], dict[str]]) -> tuple[list[dict[str]], list[int]]:
    """
    Collect the feature dict and gold label for every instance in the dataset.

    Iterates ``instances`` exactly once (safe for generators), applying
    ``feature_generator`` to each instance and reading its ``label``.
    """
    feature_dicts = []
    gold_labels = []
    for inst in instances:
        feature_dicts.append(feature_generator(inst))
        gold_labels.append(inst.label)
    return feature_dicts, gold_labels
56d2f1a0a18eb1d1f8ecf9547184ae873d0b60e3
3,642,401
def countBarcodeStats(bcseqs,chopseqs='none',bcs = ["0","1"],use_specific_beginner=None):
    """Count per-barcode symbol statistics over decoded barcode sequences.

    For every key of ``bcseqs`` (except the special ``"conditions"`` entry),
    each raw sequence is first oriented so it reads from its "B" (begin)
    marker towards its "E" (end) marker: -1 entries and "B" markers are
    dropped, and everything from the first "E" marker onwards is discarded.
    Statistics are then gathered on the (optionally chopped) strings.

    :param bcseqs: mapping of barcode name -> iterable of raw sequences.
    :param chopseqs: 'left', 'right', 'both' or 'none' -- which end(s) to
        strip one element from before counting.
    :param bcs: the two symbols counted individually and as adjacent pairs.
    :param use_specific_beginner: when given, sequences that do not contain
        this element are skipped entirely.
    :return: tuple ``(all_lists, run_lists, switch_lists, first_last)``,
        each a dict keyed by barcode name:
        - all_lists: per-sequence ``[count(bcs[0]), count(bcs[1])]``
        - run_lists: per-sequence longest run of the pair ``bcs[0]+bcs[1]``
          (computed on the *unchopped* string -- see note below)
        - switch_lists: per-sequence counts of ``bcs[0]+bcs[1]`` and
          ``bcs[1]+bcs[0]`` adjacencies
        - first_last: per-barcode ``(n_first, n_last, n_total)`` of
          sequences (length > 1 after chopping) starting / ending with
          ``bcs[1]``
    """
    # NOTE(review): x, o1list, o2list are never used, and pcount/jcount/
    # pjcount/jpcount are accumulated but not returned -- presumably
    # leftovers from an earlier version; confirm before removing.
    x=[]
    o1list = []
    o2list = []
    pcount = []
    jcount = []
    pjcount = []
    jpcount = []
    all_lists = {}
    switch_lists = {}
    run_lists = {}
    first_last = {}
    for bc in bcseqs:
        if(bc=="conditions"):
            continue
        seqs = []
        for seq in bcseqs[bc]:
            #for every sequence we want to eliminate where it turns to -1
            curseq = ""
            if(len(seq)==0):
                continue
            elif((use_specific_beginner is not None) and (use_specific_beginner not in seq)):
                continue
            elif("B" in str(seq[0]) or "E" in str(seq[-1])):
                #this sequence is already forwards
                for element in seq:
                    if("B" in str(element)):
                        continue
                    elif(element == -1):
                        continue
                    elif('E' in str(element)):
                        break
                    else:
                        curseq+=str(element)
                seqs += [curseq]
            elif("E" in str(seq[0]) or "B" in str(seq[-1])):
                #turn the seq forwards
                for element in seq[::-1]:
                    if("B" in str(element)):
                        continue
                    elif(element == -1):
                        continue
                    elif('E' in str(element)):
                        break
                    else:
                        curseq+=str(element)
                seqs += [curseq]
        # Per-barcode accumulators for this bc's sequences.
        seqschop = []
        curpcount = 0
        curjcount = 0
        curjpcount = 0
        curpjcount = 0
        curbclist = []
        curswlist = []
        currunslist = []
        curfirstlast = [0,0,0]
        for a in seqs:
            # Chop one symbol from the requested end(s) before counting.
            anew = a
            if(chopseqs=='right'):
                anew = a[:-1]
            elif(chopseqs == 'left'):
                anew = a[1:]
            elif(chopseqs == 'both'):
                anew = a[1:-1]
            #if(len(anew)>0):
            seqschop+=[anew]
            # Individual symbol counts.
            pct = anew.count(bcs[0])
            jct = anew.count(bcs[1])
            curbclist+=[[pct,jct]]
            curpcount+=pct
            curjcount+=jct
            # Adjacent-pair ("switch") counts in both orders.
            pjct = anew.count("".join(bcs))
            jpct = anew.count("".join(bcs[::-1]))
            curswlist += [[pjct,jpct]]
            curpjcount+=pjct
            curjpcount+=jpct
            # NOTE(review): longestRun is fed the *unchopped* sequence `a`,
            # unlike every other statistic -- confirm this is intentional.
            currunslist += [longestRun(a,"".join(bcs))]
            if(len(anew)>1):
                if(anew[0]==bcs[1]):
                    curfirstlast[0]+=1 #J in the first position
                if(anew[-1]==bcs[1]):
                    curfirstlast[1]+=1 #J in the last position
                curfirstlast[2]+=1 #this one counts all seqs
        first_last.update({bc:tuple(curfirstlast)})
        run_lists.update({bc:currunslist})
        all_lists.update({bc:curbclist})
        switch_lists.update({bc:curswlist})
        pcount+=[curpcount]
        jcount+=[curjcount]
        jpcount +=[curjpcount]
        pjcount +=[curpjcount]
    return all_lists,run_lists,switch_lists,first_last
af19f5a77f241362d50245885ab15dabd5197dcd
3,642,402
def is_underflow(bin_nd, hist):
    """Return True when global bin number ``bin_nd`` is an underflow bin.

    Underflow corresponds to flattened 1-D bin index 0; this works for
    histograms of any dimensionality.
    """
    return get_flat1d_bin(bin_nd, hist, False) == 0
377c5a339f404ef4e55832f163952575f7b8d6a4
3,642,403
def deprecated_func_docstring(foo=None):
    """DEPRECATED. Deprecated function."""
    # Identity pass-through.  The docstring marker above appears to be the
    # point of this fixture (presumably consumed by deprecation tooling or
    # tests) -- do not reword it without checking callers.
    return foo
f9c996c4f3735ed2767f0bbb139b1494e2a0fa39
3,642,404
def get_all_nodes(starting_node : 'NodeDHT') -> 'list[NodeDHT]':
    """Return all nodes in the DHT ring, starting from ``starting_node``.

    Follows the ``succ`` pointers until the walk arrives back at the
    starting node, visiting every node in the ring exactly once.
    """
    # Bug fix: the previous version set ``node = starting_node`` and then
    # tested ``node != starting_node``, so the loop body never ran and only
    # the starting node was ever returned.  Advance to the successor first,
    # then walk until we come back around (identity test, so a ring of
    # equal-but-distinct nodes still terminates correctly).
    nodes = [starting_node]
    node = starting_node.succ
    while node is not starting_node:
        nodes.append(node)
        node = node.succ
    return nodes
91b2968b000abac3d6f9f51bad5889ccf0fe8388
3,642,405
import re
def by_regex(regex_tuples, default=True):
    """Only call function if regex_tuples is a list of (regex, filter?)
    where if the regex matches the requested URI, then the flow is applied
    or not based on if filter? is True or False. For example:

        from aspen.flows.filter import by_regex

        @by_regex( ( ("/secret/agenda", True), ( "/secret.*", False ) ) )
        def use_public_formatting(request):
            ...

    would call the 'use_public_formatting' flow step only on /secret/agenda
    and any other URLs not starting with /secret.
    """
    # Fixes relative to the original:
    #  * the pairs were iterated with dict.iteritems() although the
    #    documented input is a tuple of tuples (and iteritems() does not
    #    exist on Python 3) -- iterate the pairs directly;
    #  * compiled patterns have no .matches() method -- use .match();
    #  * the request was dropped when forwarding to the wrapped flow step,
    #    although flow steps take it as their first argument;
    #  * per the docstring, the *first* matching regex decides, so a match
    #    with a False disposition now skips the step instead of falling
    #    through to the default.
    regex_res = [(re.compile(regex), disposition)
                 for regex, disposition in regex_tuples]
    def filter_function(function):
        def function_filter(request, *args):
            for regex, disposition in regex_res:
                if regex.match(request.line.uri):
                    if disposition:
                        return function(request, *args)
                    return None
            if default:
                return function(request, *args)
        algorithm._transfer_func_name(function_filter, function)
        return function_filter
    return filter_function
a3d47690120a8091596047d73792b0d1f637132b
3,642,407
def deserialize(name):
    """Resolve an activation name to its backward implementation.

    :param name: name of the method (case-insensitive), among the
        implemented Keras activation functions.
    :return: the matching ``backward_*`` function.
    :raises ValueError: for unknown identifiers.
    """
    lookup = {
        SOFTMAX: backward_softmax,
        ELU: backward_elu,
        SELU: backward_selu,
        SOFTPLUS: backward_softplus,
        SOFTSIGN: backward_softsign,
        SIGMOID: backward_sigmoid,
        TANH: backward_tanh,
        RELU: backward_relu,
        RELU_: backward_relu,
        EXPONENTIAL: backward_exponential,
        LINEAR: backward_linear,
    }
    key = name.lower()
    if key in lookup:
        return lookup[key]
    raise ValueError("Could not interpret "
                     "activation function identifier:", name)
133f01edaa678d60f85bf720590c0df3d1c552f3
3,642,408
def delete_item_image(itemid, imageid):
    """Remove a single image from an item via the HTTP API.

    Args:
        itemid (int) - the owning item's id
        imageid (int) - the image's id

    Status Codes:
        204 No Content - when the image is deleted successfully
    """
    endpoint = '/items/{}/images/{}'.format(itemid, imageid)
    return delete(endpoint, auth=True, accepted_status_codes=[204])
28d3c7bea85cd7132de6010def1c2ec41a9cfc82
3,642,409
def bytes_(s, encoding='utf-8', errors='strict'):  # pragma: no cover
    """Coerce *s* to bytes.

    Text (``text_type``) is encoded with *encoding*/*errors*; any other
    value is returned unchanged.
    """
    if not isinstance(s, text_type):
        return s
    return s.encode(encoding, errors)
269d315c1204be941766558fc3cbbc07c8e63657
3,642,410
import numpy


def normal_transform(matrix):
    """Compute the 3x3 matrix which transforms normals for an affine transform.

    Normals are covariant: if points transform by the upper-left 3x3 part M
    of the affine matrix, normals must transform by inverse(M)^T to stay
    perpendicular to transformed surfaces.  Computed here as inv(M^T), which
    is the same matrix.

    Bug fix: the original imported ``inv`` from :mod:`operator`, which is
    bitwise inversion (~), not a matrix inverse; use :func:`numpy.linalg.inv`.

    :param matrix: affine transform, array-like of shape at least (3, 3).
    :return: 3x3 ``numpy.ndarray`` equal to ``inv(transpose(M))``.
    """
    return numpy.linalg.inv(numpy.transpose(matrix[:3, :3]))
b7f7256b9057b9a77b074080e698ff859ccbefb2
3,642,412
async def async_unload_entry(hass, config_entry):
    """Unload an OMV config entry.

    Returns True only when all platforms unloaded successfully; on failure
    the controller is left in place so Home Assistant can retry.  (The
    original returned True unconditionally, reporting success even when the
    platform unload had failed.)
    """
    unload_ok = await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    )
    if unload_ok:
        controller = hass.data[DOMAIN][config_entry.entry_id]
        await controller.async_reset()
        hass.data[DOMAIN].pop(config_entry.entry_id)
    return unload_ok
60955e2aac51d211a296de0736f784c2332f855b
3,642,413
import typing
import csv
def create_prediction_data(validation_file: typing.IO) -> dict:
    """Parse a validation CSV into per-race prediction entries.

    Rows are grouped by their ``EntryID`` column.  Horses whose ``Placement``
    is below 1 (did not run) are skipped, although their race still appears
    (possibly empty) in the result.  Columns from index 4 onwards are treated
    as numeric features, with blanks coerced to 0.
    """
    races = {}
    for row in csv.DictReader(validation_file):
        race_id = row["EntryID"]
        races.setdefault(race_id, [])
        finish_pos = float(row["Placement"])
        if finish_pos < 1:
            # Horse did not run -- keep the race but drop this entry.
            continue
        raw_feats = list(row.values())[4:]
        features = np.array(
            [float(v) if len(str(v)) > 0 else 0.0 for v in raw_feats]
        ).reshape(1, -1)
        races[race_id].append(
            {"data": features, "prediction": None, "finish_pos": finish_pos}
        )
    return races
6ec67b277460feb5d80bf7a35e7bc40f3014e6ce
3,642,414
def username(request):
    """Pytest helper: return the ESA FTP username supplied through the
    ``--username`` command line option."""
    option_value = request.config.getoption("--username")
    return option_value
2393884c2c9f65055cd7a14c1b732fccf70a6e28
3,642,415
def complete_data(df):
    """Add some temporal columns to the dataset

    - day of the week
    - hour of the day
    - minute

    Parameters
    ----------
    df : pandas.DataFrame
        Input data ; must contain a `ts` column whose elements support
        ``.weekday()``, ``.hour`` and ``.minute`` (i.e. datetime-like)

    Returns
    -------
    pandas.DataFrame
        Copy of the input with additional columns `day`, `hour` and `minute`
    """
    logger.info("Complete some data")
    # Work on a copy so the caller's frame is left untouched.
    df = df.copy()
    # NOTE(review): if `ts` is a datetime64 column, the vectorised
    # `df['ts'].dt.weekday/.hour/.minute` accessors would be much faster
    # than element-wise apply -- confirm the column dtype before switching
    # (.dt fails on object-dtype columns of plain datetimes).
    df['day'] = df['ts'].apply(lambda x: x.weekday())
    df['hour'] = df['ts'].apply(lambda x: x.hour)
    df['minute'] = df['ts'].apply(lambda x: x.minute)
    return df
be342df461c04fc4b7f5b757f8287973c8826bd8
3,642,416
import re
def is_valid_mac_address_normalized(mac):
    """Check that *mac* is in the project's normalized MAC format.

    The normalized format is the generic hex-only form: exactly twelve
    lowercase hexadecimal characters, no separators.
    """
    return re.match(r'^[a-f0-9]{12}$', mac) is not None
7c4ea0a3353a3753907de21bbf114b2a228bb3c0
3,642,417
def get_Y(data):
    """Build the sklearn target matrix from a pandas table.

    Arguments
    ---------
    data: pandas data table with columns "H" and "sigma"

    Result
    ------
    Y[:,:]: float
        array of shape (n_samples, 2) holding (H, sigma) per row
    """
    return np.column_stack((data["H"], data["sigma"]))
d5e9d5b116fe8e82165d019c23394b6f1dfc4d9c
3,642,418
def get_bbox(mask, show=False):
    """ Get the bbox for a binary mask

    Args:
        mask: a binary mask
        show: debugging aid -- draw the box on the mask, display it and exit

    Returns:
        bbox: (row_min, row_max, col_min, col_max)
    """
    nonzero = np.where(mask != 0)
    bbox = nonzero[0].min(), nonzero[0].max(), nonzero[1].min(), nonzero[1].max()
    if show:
        cv2.rectangle(mask, (bbox[2], bbox[0]), (bbox[3], bbox[1]), (255, 255, 255), 1)
        mmcv.imshow(mask, "test", 10)
        exit()
    return bbox
2e074d305d50334809eb0fe3e15def6fd4d21644
3,642,419
from pineboolib.core import settings
def check_mobile_mode() -> bool:
    """
    Report whether pineboo is running in mobile mode.

    Android/iOS platforms (per QtCore.QSysInfo().productType()) are always
    mobile; elsewhere the local setting ``ebcomportamiento/mobileMode``
    decides, defaulting to False.

    @return True or False.
    """
    if QtCore.QSysInfo().productType() in ("android", "ios"):
        return True
    return settings.CONFIG.value(u"ebcomportamiento/mobileMode", False)
99327efbc3d329218d027e4451aae1979a9ebccc
3,642,420
def check_for_overflow_candidate(node):
    """
    Checks if the node contains an expression which can potentially produce
    an overflow meaning an expression which is not wrapped by any cast,
    which involves the operator +, ++, *, **.

    Note, the expression can have several sub-expression. It is the case of
    the expression (a + 3 > 0 && a * 3 > 5). In this case, the control is
    not just done for the first expression (which is the &&), but should be
    applied recursively to all the subexpression, until it founds the
    expression with one of the whitelisted operator.

    :param node: Node could be an Expression or AstNode (Tuple or Literal)
        in both cases, they have a dictionary called 'dic'.
    :return: List of tuples [(AstNode, {exp_id: expression}], where the
        AstNode is a node which of type Identifier and it is refereeing to a
        newly created variable called exp_id. The seconds object of the
        tuple is the map between the name of the variable added and its
        expression.
    """
    # Check if in all the expression (also in depth) there is some operations
    expression_candidates = []
    # Arithmetic operators that can overflow (also includes -/-- despite the
    # docstring only naming the additive/multiplicative ones).
    whitelist_operators = ['+', '++', '*', '**', '-', '--']
    # Operators that merely combine sub-expressions; recurse into these.
    logic_operators = ['||', '&&', '>', '>=', '<', '<=', '==', '!=']
    # to let find_parent works
    if not node:
        return None
    if node.parent:
        node.parent = None
    first_expression = asthelper.find_node(node.dic, {'nodeType': r'.*Operation'})
    if not first_expression:
        # no expression it is or an identifier or a literal
        return None
    if asthelper.find_parent(first_expression, {'kind': 'typeConversion'}) is not None:
        # The expression is wrapped by a cast, if wrapped, can't be a candidate
        return None
    if first_expression['operator'] in whitelist_operators:
        exp_map = {}
        if 'name' not in first_expression.dic:
            # if not name, it is not a variable declaration
            # so expression is identifier
            exp_name = 'exp_{}'.format(first_expression.dic['id'])
            exp_map[exp_name] = expressionhelper.Expression(first_expression.dic)
            # override: rewrite the AST node in place so it now refers to the
            # freshly introduced variable.
            first_expression.dic['name'] = exp_name
            first_expression.dic['nodeType'] = 'Identifier'
        return [(first_expression, exp_map)]
    # recursive case: descend into both operands of a logical/comparison
    # operator and merge whatever candidates each side yields.
    if first_expression['operator'] in logic_operators:
        left_candidates = check_for_overflow_candidate(expressionhelper.Expression(first_expression['leftExpression']))
        right_candidates = check_for_overflow_candidate(expressionhelper.Expression(first_expression['rightExpression']))
        if left_candidates is not None:
            expression_candidates += left_candidates
        if right_candidates is not None:
            expression_candidates += right_candidates
        return expression_candidates
    return None
77232f5d94a6cba6fef79bd51886145e2dfec4bf
3,642,421
import struct
def parse_monitor_message(msg):
    """decode zmq_monitor event messages.

    Parameters
    ----------
    msg : list(bytes)
        zmq multipart message that has arrived on a monitor PAIR socket.

        First frame is::

            16 bit event id
            32 bit event value
            no padding

        Second frame is the endpoint as a bytestring

    Returns
    -------
    event : dict
        event description as dict with the keys `event`, `value`, and
        `endpoint`.

    Raises
    ------
    RuntimeError
        if the message does not consist of exactly two frames with a
        6-byte first frame.
    """
    if len(msg) != 2 or len(msg[0]) != 6:
        raise RuntimeError("Invalid event message format: %s" % msg)
    # Unpack the (int16, int32) pair once instead of twice as before.
    event_id, value = struct.unpack("=hi", msg[0])
    return {
        'event': event_id,
        'value': value,
        'endpoint': msg[1],
    }
df71541d34bc04b1ac25c6435b1b298394e27362
3,642,422
import toml
import json
def load_config(fpath):
    """
    Read configuration from *fpath* and wrap it in an AttrDict.

    :param fpath: configuration file path, either TOML or JSON file
    :return: configuration object
    :raises Exception: for any other file extension
    """
    if fpath.endswith(".json"):
        with open(fpath, "rt", encoding="utf-8") as infp:
            raw = json.load(infp)
    elif fpath.endswith(".toml"):
        raw = toml.load(fpath)
    else:
        raise Exception(f"Cannot load config file {fpath}, must be .toml or json file")
    return AttrDict(raw)
27c68c944a431b4d8b12c6b64609f33043363b03
3,642,423
def softmax_layer(inputs, n_hidden, random_base, drop_rate, l2_reg, n_class, scope_name='1'):
    """
    Method adapted from Trusca et al. (2020).  Projects the sentence
    representation onto the sentiment classes and applies a softmax.

    :param inputs: sentence representation, shape [batch, n_hidden]
    :param n_hidden: input feature size
    :param random_base: half-width of the uniform initializer
    :param drop_rate: dropout rate applied before the projection
    :param l2_reg: L2 regularization weight applied to w and b
    :param n_class: number of output classes
    :param scope_name: suffix keeping variable names unique across layers
    :return: (class probabilities, projection weight matrix)
    """
    uniform_init = tf.random_uniform_initializer(-random_base, random_base)
    l2 = tf.keras.regularizers.L2(l2_reg)
    w = tf.get_variable(
        name='softmax_w' + scope_name,
        shape=[n_hidden, n_class],
        initializer=uniform_init,
        regularizer=l2
    )
    b = tf.get_variable(
        name='softmax_b' + scope_name,
        shape=[n_class],
        initializer=uniform_init,
        regularizer=l2
    )
    with tf.name_scope('softmax'):
        dropped = tf.nn.dropout(inputs, rate=drop_rate)
        logits = tf.matmul(dropped, w) + b
        predict = tf.nn.softmax(logits)
    return predict, w
1f77d99d12c927c0d77e136098fe8f9c2bc458b8
3,642,424
def node2freqt(docgraph, node_id, child_str='', include_pos=False,
               escape_func=FREQT_ESCAPE_FUNC):
    """convert a docgraph node into a FREQT string."""
    node_attrs = docgraph.node[node_id]
    if not istoken(docgraph, node_id):
        # non-terminal: render the node label, falling back to its ID
        label_str = escape_func(node_attrs.get('label', node_id))
        return u"({label}{child})".format(label=label_str, child=child_str)
    token_str = escape_func(node_attrs[docgraph.ns+':token'])
    if include_pos:
        pos_str = escape_func(node_attrs.get(docgraph.ns+':pos', ''))
        return u"({pos}({token}){child})".format(
            pos=pos_str, token=token_str, child=child_str)
    return u"({token}{child})".format(token=token_str, child=child_str)
8c6690e5fec41f98501060f5bf24ed823a2c31b6
3,642,425
def search(news_name):
    """Render search results for the given query string.

    Spaces in the query are replaced by '+' before being handed to the
    news-search helper.  Fix: the page title was computed but never passed
    to the template; it is now forwarded as ``title``.
    """
    query = "+".join(news_name.split(" "))
    searched_results = search_news(query)
    sources = get_source_news()
    title = f'search results for {news_name}'
    return render_template('search.html', title=title,
                           results=searched_results, my_sources=sources)
7521221b66a872b00310693a3ccc6c81013098a2
3,642,429
def encrypt_document(document):
    """
    Useful method to encrypt a document using a random cipher
    """
    cipher = generate_random_cipher()
    # NOTE(review): this delegates to decrypt_document() with a freshly
    # generated cipher.  That is only correct if decrypting under a random
    # cipher is itself a valid encryption (e.g. a substitution cipher where
    # decryption under one key equals encryption under the inverse key) --
    # confirm, or call the encryption primitive directly.
    return decrypt_document(document, cipher)
9a7e4bd79a83df261c4f946f62ff9bf40bfbf068
3,642,430
def bootstrap_alert(visitor, items):
    """
    Render Bootstrap alert boxes.

    Format: [[alert(class=error)]]: message

    Each item's ``class`` kwarg becomes an ``alert-<class>`` CSS class, a
    ``close`` kwarg adds a dismiss button, and the body is rendered through
    ``visitor.parse_text`` in 'article' mode.
    """
    pieces = []
    for item in items:
        css = item['kwargs'].get('class', '')
        if css:
            css = 'alert-%s' % css
        pieces.append('<div class="alert %s">' % css)
        if 'close' in item['kwargs']:
            pieces.append('<button class="close" data-dismiss="alert">&times;</button>')
        pieces.append(visitor.parse_text(item['body'], 'article'))
        pieces.append('</div>')
    return '\n'.join(pieces)
c2803176b2e1ed9b3d4aecd622eedcac673d4c42
3,642,431
def masked_mean(x, *, mask, axis, paxis_name, keepdims):
    """Mean of 'x' over 'axis', counting only entries where 'mask' is True.

    Args:
      x: Tensor to take the mean of.
      mask: Boolean array of same shape as 'x'. True elements are included
        in the mean, false elements are excluded.
      axis: Axis of 'x' to compute the mean over.
      paxis_name: Optional. If not None, will take a distributed mean of 'x'
        across devices using the specified parallel axis.
      keepdims: Same meaning as the corresponding parameter in `numpy.mean`.

    Returns:
      Tensor resulting from reducing 'x' over axes in 'axis'.
    """
    assert x.shape == mask.shape
    total = masked_sum(x, mask=mask, axis=axis, paxis_name=paxis_name,
                       keepdims=keepdims)
    # Summing the boolean mask itself yields the per-reduction element count.
    count = masked_sum(x=mask, mask=None, axis=axis, paxis_name=paxis_name,
                       keepdims=keepdims)
    return total / count
3242e86f571af61909efa63bd60158aa0f8eba88
3,642,432
def aspectRatioFix(preserve, anchor, x, y, width, height, imWidth, imHeight):
    """Position an image inside a box, optionally preserving aspect ratio.

    The box is given by (x, y, width, height) with (x, y) its lower-left
    corner; inputs are first normalised: a None width/height defaults to the
    image size, and negative extents flip sign while shifting x/y so the
    corner stays lower-left.  With ``preserve`` set, the image is uniformly
    scaled to fit the box and aligned inside it according to ``anchor`` (a
    compass point 'n', 'ne', ..., or 'c' for centre).

    Returns (x, y, width, height, scale) as used by canvas.drawImage /
    drawInlineImage.
    """
    scale = 1.0
    # Normalise missing extents to the image size.
    if width is None:
        width = imWidth
    if height is None:
        height = imHeight
    # Negative extents grow leftwards/downwards: flip the sign and move the
    # corner so (x, y) is again the lower-left of the box.
    if width < 0:
        width = -width
        x -= width
    if height < 0:
        height = -height
        y -= height
    if preserve:
        imWidth = abs(imWidth)
        imHeight = abs(imHeight)
        # Largest uniform scale that still fits the image inside the box.
        scale = min(width / float(imWidth), height / float(imHeight))
        boxWidth, boxHeight = width, height
        # Tiny epsilon keeps the scaled image strictly inside the box.
        width = scale * imWidth - 1e-8
        height = scale * imHeight - 1e-8
        # Horizontal alignment: west-anchored stays put, centre splits the
        # slack, east-anchored absorbs all of it.
        if anchor not in ('nw', 'w', 'sw'):
            slack_x = boxWidth - width
            x += slack_x / 2. if anchor in ('n', 'c', 's') else slack_x
        # Vertical alignment: south-anchored stays put, middle splits the
        # slack, north-anchored absorbs all of it.
        if anchor not in ('sw', 's', 'se'):
            slack_y = boxHeight - height
            y += slack_y / 2. if anchor in ('w', 'c', 'e') else slack_y
    return x, y, width, height, scale
73a686f122ad31ee6693641e1ef386f13b67b4d8
3,642,433
from math import sqrt
from random import random


def circle_area(radius: int) -> float:
    """
    Estimate the area of a circle with the Monte Carlo method.

    Bug fix: the original imported the ``random`` *module* but called it as
    a function, and never imported ``sqrt``; both names are now imported
    explicitly.

    Points are drawn uniformly from the square [-radius, 0]^2; the fraction
    landing inside the circle estimates pi/4 (by symmetry, the same as for
    the full square), so multiplying by (2*radius)^2 yields an estimate of
    pi*radius^2.  The estimate is random, with a standard error on the
    order of 1/sqrt(n).

    :param radius: the radius of the circle
    :return: the estimated area of the circle
    """
    hits = 0
    n = 1000
    left_bottom = -1 * radius
    right_top = radius
    for _ in range(n):
        # random() * right_top lies in [0, radius], so samples cover the
        # lower-left quadrant square; the hit fraction matches the full
        # square by symmetry.
        x = left_bottom + (random() * right_top)
        y = left_bottom + (random() * right_top)
        # count points that fall inside the circle
        if sqrt((x ** 2) + (y ** 2)) < radius:
            hits += 1
    return (hits / n) * ((2 * radius) ** 2)
2c85759ffbf798749263fca368cdfd159d67028b
3,642,435
def Quantized_MLP(pre_model, args):
    """
    Prepare an MLP for ternary-weight training.

    The model's parameters are split into three optimizer groups -- the
    full-precision layer weights, the ternary layer parameters, and the
    final full-precision bias -- and the model is paired with a
    cross-entropy loss and an SGD optimizer.

    :param pre_model: model exposing ``fp_layer*`` and ``*ternary*`` modules
    :param args: namespace providing the learning rate ``lr``
    :return: (model, loss function, optimizer)
    """
    named = list(pre_model.named_parameters())
    # Full-precision first/last layer weights stay unquantized.
    fp_weights = [p for n, p in named if 'fp_layer' in n and 'weight' in n]
    # Parameters belonging to the layers that will be ternarized.
    ternary_params = [p for n, p in named if 'ternary' in n]
    fp_biases = [pre_model.fp_layer2.bias]
    optimizer = optim.SGD(
        [
            {'params': fp_weights},
            {'params': ternary_params},
            {'params': fp_biases},
        ],
        lr=args.lr,
    )
    return pre_model, nn.CrossEntropyLoss(), optimizer
cd5b36c1b10567fee5a8b1f10679e6868f42f98f
3,642,436
def _super_tofrom_choi(q_oper):
    """
    We exploit that the basis transformation between Choi and supermatrix
    representations squares to the identity, so that if we munge Qobj.type,
    we can use the same function for both directions.

    Since this function doesn't respect :attr:`Qobj.type`, we mark it as
    private; only those functions which wrap this in a way so as to
    preserve type should be called externally.
    """
    data = q_oper.data.toarray()
    dims = q_oper.dims
    # Swap the roles of the input/output spaces; because the transformation
    # is an involution, the same reshuffle maps Choi -> super and back.
    new_dims = [[dims[1][1], dims[0][1]], [dims[1][0], dims[0][0]]]
    d0 = np.prod(np.ravel(new_dims[0]))
    d1 = np.prod(np.ravel(new_dims[1]))
    s0 = np.prod(dims[0][0])
    s1 = np.prod(dims[1][1])
    # Reshape into a 4-index tensor, permute the indices (the basis
    # shuffle), then flatten back to a matrix with the swapped dimensions.
    return Qobj(dims=new_dims,
                inpt=data.reshape([s0, s1, s0, s1]).
                transpose(3, 1, 2, 0).reshape((d0, d1)))
da91aff35d891000773100b998b80dc5d998414f
3,642,437
def get_attention_weights(data):
    """Compute the attention weight of every token of the given function.

    Builds a DataFrame from the recorded token interactions and derives one
    attention value per token via ``get_attention``.

    Note: the original also computed the indices of clicked tokens from
    ``finalclickedtokens`` but never used them; that dead code has been
    removed.
    """
    df_token_interaction = pd.DataFrame(data['tokeninteraction'])
    return [
        get_attention(index_token=token['id'],
                      df_interaction=df_token_interaction)
        for token in data['tokens']
    ]
e3189bd67f3da6ee8c1173348eec249d9c8cfa9a
3,642,438
def save_ecg_example(gen_data: np.array, image_name, image_title='12-lead ECG'):
    """
    Plot a 12-lead ECG signal and save it as ``out/<image_name>.png``.

    :param gen_data: signal array of shape (samples, n_leads)
    :param image_name: basename of the output file (without extension)
    :param image_title: overall figure title
    :return: the (already closed) matplotlib figure
    """
    fig = plt.figure(figsize=(12, 14))
    for lead in range(gen_data.shape[1]):
        plt.subplot(4, 3, lead + 1)
        plt.plot(gen_data[:, lead], label=f'lead_{lead + 1}')
        plt.title(f'lead_{lead + 1}')
    fig.suptitle(image_title)
    plt.savefig(f'out/{image_name}.png', bbox_inches='tight')
    plt.close(fig)
    return fig
456fa204b20eee53645a900614877a6fb6a53e9c
3,642,439
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry by delegating to the domain's entity component."""
    component: EntityComponent = hass.data[DOMAIN]
    unloaded = await component.async_unload_entry(entry)
    return unloaded
b4ae648493b63a27f5127139876cf0bca2a2dcbb
3,642,440
def run_random_climate(gdir, nyears=1000, y0=None, halfsize=15, bias=None, seed=None, temperature_bias=None, climate_filename='climate_monthly', climate_input_filesuffix='', output_filesuffix='', init_area_m2=None, unique_samples=False): """Runs the random mass balance model for a given number of years. This initializes a :py:class:`oggm.core.vascaling.RandomVASMassBalance`, and runs and stores a :py:class:`oggm.core.vascaling.VAScalingModel` with the given mass balance model. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` the glacier directory to process nyears : int, optional length of the simulation, default = 1000 y0 : int, optional central year of the random climate period. The default is to be centred on t*. Default = None halfsize : int, optional the half-size of the time window (window size = 2 * halfsize + 1), default = 15 bias : float, optional bias of the mb model. Default is to use the calibrated one, which is often a better idea. For t* experiments it can be useful to set it to zero. Default = None seed : int seed for the random generator. If you ignore this, the runs will be different each time. Setting it to a fixed seed accross glaciers can be usefull if you want to have the same climate years for all of them temperature_bias : float, optional add a bias to the temperature timeseries, default = None climate_filename : str, optional name of the climate file, e.g. 
'climate_monthly' (default) or 'gcm_data' climate_input_filesuffix: str, optional filesuffix for the input climate file output_filesuffix : str, optional this add a suffix to the output file (useful to avoid overwriting previous experiments) init_area_m2: float, optional glacier area with which the model is initialized, default is RGI value unique_samples: bool, optional if true, chosen random mass-balance years will only be available once per random climate period-length if false, every model year will be chosen from the random climate period with the same probability (default) Returns ------- :py:class:`oggm.core.vascaling.VAScalingModel` """ # instance mass balance model mb_mod = RandomVASMassBalance(gdir, y0=y0, halfsize=halfsize, bias=bias, seed=seed, filename=climate_filename, input_filesuffix=climate_input_filesuffix, unique_samples=unique_samples) if temperature_bias is not None: # add given temperature bias to mass balance model mb_mod.temp_bias = temperature_bias # where to store the model output diag_path = gdir.get_filepath('model_diagnostics', filesuffix='vas', delete=True) # instance the model min_hgt, max_hgt = get_min_max_elevation(gdir) if init_area_m2 is None: init_area_m2 = gdir.rgi_area_m2 model = VAScalingModel(year_0=0, area_m2_0=init_area_m2, min_hgt=min_hgt, max_hgt=max_hgt, mb_model=mb_mod) # specify path where to store model diagnostics diag_path = gdir.get_filepath('model_diagnostics', filesuffix=output_filesuffix, delete=True) # run model model.run_until_and_store(year_end=nyears, diag_path=diag_path) return model
2887c1e62d3357e028c7be0539225bfb879323d9
3,642,442
from typing import Optional
def sync_get_ami_arch_from_instance_type(instance_type: str, region_name: Optional[str]=None) -> str:
    """Return the AMI architecture associated with an EC2 instance type.

    Args:
        instance_type (str): An EC2 instance type; e.g., "t2.micro"
        region_name (Optional[str], optional): AWS region to use for the
            query, or None to use the default region. Defaults to None.

    Returns:
        str: The AMI architecture associated with instance_type
    """
    arches = sync_get_processor_arches_from_instance_type(
        instance_type, region_name=region_name)
    return sync_get_ami_arch_from_processor_arches(arches)
2289deea91c9a9dafa0492fac9230292b546e9b7
3,642,443
import math
def atan2(y, x):
    """Angle in radians, in (-pi, pi], of the point (x, y) in the XY plane."""
    return math.atan2(y, x)
ede5a647c175bebf2800c22d92e396deff6077e2
3,642,444
def index_objects(
    *, ids, indexer_class, index=None, transforms=None, manager_name=None
):
    """
    Index specified `ids` in ES using `indexer_class`. This is done in a
    single bulk action.

    Pass `index` to index on the specific index instead of the default index
    alias from the `indexed_class`.

    Pass `transforms` or `manager_name` to change the queryset used to fetch
    the objects to index.

    Unless an `index` is specified, if a reindexing is taking place for the
    default index then this function will index on both the old and new
    indices to allow indexing to still work while reindexing isn't complete
    yet.
    """
    if index is None:
        index = indexer_class.get_index_alias()
        # If we didn't have an index passed as argument, then we should index
        # on both old and new indexes during a reindex.
        indices = Reindexing.objects.get_indices(index)
    else:
        # If we did have an index passed then the caller wanted us to only
        # consider the index they specified, so we only consider that one.
        indices = [index]

    if manager_name is None:
        manager_name = 'objects'
    manager = getattr(indexer_class.get_model(), manager_name)

    if transforms is None:
        transforms = []

    # Fetch the objects through the chosen manager, applying any queryset
    # transforms the caller requested.
    qs = manager.filter(id__in=ids)
    for transform in transforms:
        qs = qs.transform(transform)

    bulk = []
    es = amo_search.get_es()
    major_version = get_major_version(es)
    # Deterministic ordering; each object may be emitted once per target
    # index (old and new during a reindex).
    for obj in qs.order_by('pk'):
        data = indexer_class.extract_document(obj)
        for index in indices:
            item = {
                '_source': data,
                '_id': obj.id,
                '_index': index,
            }
            if major_version < 7:
                # While on 6.x, we use the `addons` type when creating indices
                # and when bulk-indexing. We completely ignore it on searches.
                # When on 7.x, we don't pass type at all at creation or
                # indexing, and continue to ignore it on searches.
                # That should ensure we're compatible with both transparently.
                item['_type'] = 'addons'
            bulk.append(item)

    return helpers.bulk(es, bulk)
c93ea99946bb1516a58bb39aa5d43b1644f4f4da
3,642,445
def get_attrs_titles_with_transl() -> dict:
    """Map each listable attribute title to its translation.

    Only attributes with ``show_in_list`` set are included, ordered by
    ``weight``.
    """
    titles = (attr.name for attr in
              Attribute.objects.filter(show_in_list=True).order_by('weight'))
    return {title: _(title) for title in titles}
167955e669ddb3f6d5bbbd48cc01d26155a9e4ba
3,642,446
def kde_KL_divergence_2d(x, y, h_x, h_y, nb_bins=100, fft=True):
    """Uses Kernel Density Estimator with Gaussian kernel on two
    dimensional samples x and y and returns estimated Kullback-
    Leibler divergence.

    @param x, y: samples, given as a (n, 2) shaped numpy array,
    @param h_x, h_y: widths of the Gaussian kernel along each axis,
    @param nb_bins: number of grid points to use,
    @param fft: whether to use FFT to compute convolution.
    """
    # Shared evaluation bounds so both densities live on the same grid.
    min_ = np.min(np.vstack([np.min(x, axis=0), np.min(y, axis=0)]), axis=0)
    max_ = np.max(np.vstack([np.max(x, axis=0), np.max(y, axis=0)]), axis=0)
    bounds_ = np.vstack((min_, max_))
    (x_grid, y_grid, kde_x) = gaussian_kde_2d(x, h_x, h_y,
                                              nb_bins=nb_bins,
                                              fft=fft,
                                              bounds=bounds_
                                              )
    # The second grid equals the first because the bounds are identical,
    # so x_grid2/y_grid2 are unused below.
    (x_grid2, y_grid2, kde_y) = gaussian_kde_2d(y, h_x, h_y,
                                                nb_bins=nb_bins,
                                                fft=fft,
                                                bounds=bounds_
                                                )
    delta_x = x_grid[1] - x_grid[0]
    delta_y = y_grid[1] - y_grid[0]
    # Pointwise integrand; EPSILON guards against log(0) and division by 0.
    # NOTE(review): this integrates -p*log(p/q), i.e. the *negative* of the
    # usual KL(x || y) -- confirm the intended sign with callers.
    plogp = - kde_x * np.log((kde_x + EPSILON) / (kde_y + EPSILON))
    # Integrate over the grid with the trapezoidal rule.
    div = trapz(trapz(plogp, dx=delta_x, axis=1), dx=delta_y, axis=0)
    return div
ce7ef19846dfd729fe5703aceaec69392f455ca6
3,642,447
def gml_init(code):
    """
    Initializes a Group Membership List (GML) for schemes of the given type.

    Parameters:
        code: The code of the scheme.
    Returns:
        A native object representing the GML.
    Throws an Exception on error.
    """
    handle = lib.gml_init(code)
    if handle != ffi.NULL:
        return handle
    raise Exception('Error initializing GML.')
5558f2db6a1c2269796cd52f675d5579ce357949
3,642,448
def before_run(func, force=False):
    """
    Adds a function *func* to the list of callbacks that are invoked right
    before luigi starts running scheduled tasks. Unless *force* is *True*, a
    function that is already registered is not added again and *False* is
    returned. Otherwise, *True* is returned.
    """
    already_registered = func in _before_run_funcs
    if already_registered and not force:
        return False
    _before_run_funcs.append(func)
    return True
378604f6c574345682d8bd3d155ef8e4344aac27
3,642,449
def calc_z_scores(baseline, seizure):
    """ Scale a seizure power spectrum to z-scores using baseline statistics.

    This function is meant to generate the figures shown in the Brainstorm
    demo used to select the 120-200 Hz frequency band. It should also be
    similar to panel 2 in figure 1 in David et al 2011. Each value of the
    seizure power spectrum is converted to a z-score using the per-frequency
    mean and standard deviation of the baseline power spectrum (computed
    along axis 1).

    Parameters
    ----------
    baseline : ndarray
        power spectrum of baseline EEG
    seizure : ndarray
        power spectrum of seizure EEG

    Returns
    -------
    ndarray
        seizure power spectrum scaled to a z-score by baseline power
        spectrum mean and SD
    """
    baseline_mean = np.mean(baseline, axis=1)
    baseline_sd = np.std(baseline, axis=1)
    return (seizure - baseline_mean) / baseline_sd
db3f6fbc42450658700ca2d120bf6faa31fccdfd
3,642,450
def get_column(data, column_index):
    """
    Gets a column of data from the given data.

    :param data: The data from the CSV file (an iterable of rows).
    :param column_index: The column to copy.
    :return: The column of data (as a list).
    """
    column = []
    for row in data:
        column.append(row[column_index])
    return column
3fd5c8c76ccfed145aba0e685aa57ad01b3695a5
3,642,451
def analytic_solution(num_dims, t_val, x_val=None, domain_bounds=(0.0, 1.0), x_0=(0.5, 0.5), d=1.0, k_decay=0.0,
                      k_influx=0.0, trunc_order=100, num_points=None):
    """This function returns the analytic solution to the heat equation with decay
     i.e. du/dt = nabla^2 u + k_1 - k_2 u
     k_1 is the production rate (`k_influx`), k_2 is the decay rate (`k_decay`).

     The solution is a truncated cosine (Neumann / no-flux) eigenfunction
     series for an initial point source at `x_0`, truncated at `trunc_order`
     terms per dimension.

     Returns x-axis values, followed by an array of the solutions at
     different time points (in 2-D, the y-axis values are returned as well)."""
    # --- Normalize scalar arguments into the array forms used below. ---
    if isinstance(t_val, (int, float)):
        t_val = np.array([t_val])
    if isinstance(num_points, (int, float)):
        num_points = [num_points, num_points]
    if isinstance(x_0, (int, float)):
        x_0 = np.array([x_0, x_0])
    if len(domain_bounds) < 4:
        # Duplicate 1-D bounds into (xmin, xmax, ymin, ymax) form.
        domain_bounds = (domain_bounds[0], domain_bounds[1], domain_bounds[0], domain_bounds[1])
    assert isinstance(t_val, (list, tuple, np.ndarray))
    assert isinstance(x_val, (tuple, list, np.ndarray)) or x_val is None
    assert isinstance(domain_bounds, (list, tuple, np.ndarray))
    assert isinstance(x_0, (tuple, list, np.ndarray))
    assert isinstance(d, (int, float))
    assert isinstance(k_decay, (int, float))
    assert isinstance(k_influx, (int, float))
    assert isinstance(trunc_order, int)
    # NOTE(review): only bounds[0]/bounds[1] feed `length`, so the domain is
    # assumed square in 2-D.
    length = float(domain_bounds[1] - domain_bounds[0])
    t = np.array(t_val)
    if x_val is None:
        # Build evaluation grids when explicit coordinates were not given.
        assert num_points is not None
        x_val = [np.linspace(domain_bounds[0], domain_bounds[1], num_points[0]),
                 np.linspace(domain_bounds[0], domain_bounds[1], num_points[1])]
    if num_dims == 1:
        if isinstance(x_val[0], (tuple, list, np.ndarray)):
            x = np.array(x_val[0])
            y = np.array(x_val[0])
        else:
            x = np.array(x_val)
            y = np.array(x_val)
        assert t.ndim == 1
        # Shape (n_times, 1) so the series term broadcasts over x.
        t = t.reshape([t.shape[0], 1])
        # Constant (n=0) mode of the cosine series.
        u = 1.0 / length
        for n in range(1, trunc_order):
            u += (2/length)*np.cos((n*np.pi/length)*x_0[0])*np.cos((n*np.pi/length)*x)*np.exp(-d*(n*np.pi/length)**2*t)
    else:
        assert isinstance(x_val[0], (tuple, list, np.ndarray))
        assert isinstance(x_val[1], (tuple, list, np.ndarray))
        x = np.array(x_val[0])
        y = np.array(x_val[1])
        xx, yy = np.meshgrid(x, y)
        assert t.ndim == 1
        # Shape (n_times, 1, 1) so the series term broadcasts over (y, x).
        t = t.reshape([t.shape[0], 1, 1])
        # Constant (j=k=0) mode.
        u = 1.0 / length ** 2
        # Modes varying only in y (j=0, k>0).
        for k in range(1, trunc_order):
            u += (2.0 / length ** 2) * np.cos(k * np.pi * x_0[1] / length) * np.cos(k * np.pi * yy / length) * np.exp(
                -d * t * (k * np.pi / length) ** 2)
        # Modes varying only in x (j>0, k=0).
        for j in range(1, trunc_order):
            u += (2.0 / length ** 2) * np.cos(j * np.pi * x_0[0] / length) * np.cos(j * np.pi * xx / length) * np.exp(
                -d * t * (j * np.pi / length) ** 2)
        # Fully mixed modes (j>0, k>0).
        for j in range(1, trunc_order):
            for k in range(1, trunc_order):
                u += (4.0 / length ** 2) * np.cos(j * np.pi * x_0[0] / length) * np.cos(k * np.pi * x_0[1] / length) * \
                     np.cos(j * np.pi * xx / length) * np.cos(k * np.pi * yy / length) * \
                     np.exp(-d * t * ((j * np.pi / length) ** 2 + (k * np.pi / length) ** 2))
    # --- Apply reaction terms on top of the pure-diffusion solution. ---
    if k_decay > 0.0 and k_influx == 0.0:
        u *= np.exp(- k_decay * t)
    elif k_decay == 0.0 and k_influx > 0.0:
        u += k_influx * t
    elif k_decay > 0.0 and k_influx > 0.0:
        # NOTE(review): this branch adds the influx/decay steady-state
        # relaxation but does not multiply the diffusion part by
        # exp(-k_decay*t) — confirm this matches the intended model.
        u += k_influx * (1.0 - np.exp(-k_decay * t)) / k_decay
    if num_dims == 1:
        return u, x
    else:
        return u, x, y
0a920ec22fbe1ae3ff510ddd4389c1cf4ae0912d
3,642,452
def safe_gas_limit(*estimates: int) -> int:
    """Calculates a safe gas limit for a number of gas estimates
    including a security margin
    """
    assert None not in estimates, "if estimateGas returned None it should not reach here"
    highest = max(estimates)
    padded = highest * constants.GAS_FACTOR
    return int(padded)
439eca363dc1fe1f53972c69191513913feef39b
3,642,453
import typing
def integer_years(dates: typing.Any) -> typing.List[int]:
    """Maps a list of 'normalized_date' strings to a sorted list of integer years.

    Args:
      dates: A list of strings containing dates in the 'normalized_date' format.

    Returns:
      A sorted list of unique years extracted from "dates".
    """
    if not isinstance(dates, typing.Iterable):
        return []
    collected: typing.Set[int] = set()
    for entry in dates:
        if not isinstance(entry, str):
            continue
        range_match = RANGE.search(entry)
        if range_match is None:
            # Single date: pull out the one year, if any.
            single = get_year(entry)
            if single:
                collected.add(single)
            continue
        # Date range: expand to every year between start and end inclusive.
        start_str, end_str = range_match.groups()
        start = get_year(start_str)
        end = get_year(end_str)
        if start and end:
            collected.update(range(start, end + 1))
    return sorted(collected)
cdf14f0a2fee197177f12ead43346dfd4eabb5ef
3,642,454
def add_wmts_gibs_basemap(ax, date='2016-02-05'):
    """Add a NASA GIBS WMTS basemap layer to a cartopy axes.

    http://gibs.earthdata.nasa.gov/

    :param ax: cartopy GeoAxes to draw onto.
    :param date: date string passed to the WMTS 'time' dimension.
    :return: the WebMapTileService instance (so callers can inspect layers).
    """
    URL = 'http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/wmts.cgi'
    wmts = WebMapTileService(URL)

    # Layers for MODIS true color and snow RGB
    # NOTE: what other tiles available?: TONS!
    #https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products#expand-ReferenceLayers9Layers
    #layer = 'MODIS_Terra_SurfaceReflectance_Bands143'
    #layer = 'MODIS_Terra_CorrectedReflectance_Bands367'
    #layer = 'ASTER_GDEM_Greyscale_Shaded_Relief' #better zoomed in
    layer = 'SRTM_Color_Index'
    #layer = 'BlueMarble_ShadedRelief' #static
    #layer = 'BlueMarble_NextGeneration'
    #layer = 'BlueMarble_ShadedRelief_Bathymetry'
    #layer = 'Reference_Labels'
    #layer = 'Reference_Features'

    ax.add_wmts(wmts, layer, wmts_kwargs={'time': date})  # alpha=0.5
    #NOTE: can access attributes:
    #wmts[layer].title

    return wmts
434ff85e1a721937ba83d0438bb7384d1a1f0600
3,642,455
import torch
def encode_position(
    batch_size: int,
    axis: list,
    max_frequency: float,
    num_frequency_bands: int,
    sine_only: bool = False,
) -> torch.Tensor:
    """
    Encode the Fourier Features and return them

    Args:
        batch_size: Batch size
        axis: List containing the size of each axis
        max_frequency: Max frequency
        num_frequency_bands: Number of frequency bands to use
        sine_only: (bool) Whether to only use Sine features or both Sine and
            Cosine, defaults to both

    Returns:
        Torch tensor containing the Fourier Features of shape [Batch, *axis]
    """
    # One linspace per axis, each normalized to [-1, 1].
    coords = [torch.linspace(-1.0, 1.0, steps=size) for size in axis]
    # Grid of normalized positions, last dim = coordinate per axis.
    grid = torch.stack(torch.meshgrid(*coords), dim=-1)
    features = fourier_encode(
        grid,
        max_frequency,
        num_frequency_bands,
        sine_only=sine_only,
    )
    # Flatten the (axis, band) feature dims and prepend the batch dim.
    features = einops.rearrange(features, "... n d -> ... (n d)")
    return einops.repeat(features, "... -> b ...", b=batch_size)
06a81219b85006226069b288cce8602fc62e7119
3,642,456
def expr_erode(src, size = 5):
    """
    Same result as core.morpho.Erode(), faster and workable in 32 bit.
    """
    # Build the min-kernel expression once, then hand it to akarin.Expr.
    erode_expr = _morpho_matrix(size, mm = 'min')
    return core.akarin.Expr(src, erode_expr)
06f76f889cadcec538639ca1a920168c6a9ec467
3,642,457
def response_modification(response):
    """
    Modify API response format.

    Error responses (4xx/5xx) other than 400 pass through untouched;
    every other response body is wrapped in a
    ``{"code", "status", "data"}`` envelope.
    """
    code = response.status_code
    is_error = status.is_client_error(code) or status.is_server_error(code)
    if is_error and code != status.HTTP_400_BAD_REQUEST:
        return response

    # Modify the response data
    response.data = {
        "code": code,
        "status": get_status(code),
        "data": response.data,
    }
    return response
f8a3120f3a1671d71f32158b742212b896074bdc
3,642,458
import trace
def process_source_lineage(grid_sdf, data_sdf, value_field=None):
    """Summarize, per grid polygon, the dominant values of `value_field`
    among intersecting features of `data_sdf`.

    For each polygon in `grid_sdf`, counts intersecting features grouped by
    `value_field` (mapped through subtype names when the field is an Integer
    subtype field) and records the top two themes with counts/percentages.

    :param grid_sdf: polygon feature class (grid cells).
    :param data_sdf: source feature class whose lineage is summarized.
    :param value_field: field holding the theme/subtype code.
    :return: structured numpy array with one row per grid polygon:
        (_ID, THEME_LIST, PRI_THEME, PRI_THEME_CNT, PRI_THEME_PER,
         SEC_THEME, SEC_THEME_CNT, SEC_THEME_PER).
    :raises FunctionError: wrapping any underlying error with traceback info.
    """
    try:
        # Map subtype codes -> display names.
        subtypes = arcpy.da.ListSubtypes(data_sdf)
        st_dict = {}
        for stcode, stdict in list(subtypes.items()):
            st_dict[stcode] = subtypes[stcode]['Name']
        # An Integer value field is treated as a subtype-coded field.
        fields = arcpy.ListFields(data_sdf)
        use_subtypes = False
        for field in fields:
            if field.name == value_field and field.type == 'Integer':
                arcpy.AddMessage("Field has subtypes")
                use_subtypes = True
        # Clip the source data to the grid when the grid lies inside it,
        # to keep the spatial dataframe small.
        poly_desc = arcpy.Describe(grid_sdf)
        fc_desc = arcpy.Describe(data_sdf)
        if poly_desc.extent.within(fc_desc.extent):
            temp_fc = 'in_memory/clip'
            arcpy.AddMessage('Clipping features to polygon')
            arcpy.Clip_analysis(data_sdf, grid_sdf, temp_fc)
            arcpy.AddMessage('Created in_memory fc')
            data_sdf = geomotion.SpatialDataFrame.from_featureclass(temp_fc, fields=[value_field])
            arcpy.AddMessage('features read into spatial dataframe after clipping')
        else:
            data_sdf = geomotion.SpatialDataFrame.from_featureclass(data_sdf, fields=[value_field])
            arcpy.AddMessage('features read into spatial dataframe without clipping')
        grid_sdf = geomotion.SpatialDataFrame.from_featureclass(grid_sdf)
        index = data_sdf.sindex
        results = []
        for idx, row in enumerate(grid_sdf.iterrows()):
            geom = row[1].SHAPE
            # Coarse spatial-index filter by bounding box ...
            ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y,
                   geom.extent.upperRight.X, geom.extent.upperRight.Y]
            row_oids = list(index.intersect(ext))
            df_current = data_sdf.loc[data_sdf.index.isin(row_oids)]
            # ... then exact filter: disjoint == False means intersection
            # with the grid polygon.
            df_sub = df_current.loc[df_current.disjoint(geom) == False].copy()
            df_sub = df_sub.replace({np.nan: "NULL"})
            grp = df_sub.groupby(by=value_field).size()  # Get the counts.
            # Sort the values to get the biggest on the top.
            grp.sort_values(axis=0, ascending=False, inplace=True,
                            kind='quicksort', na_position='last')
            if use_subtypes:
                if len(grp) > 1:
                    grp = grp.head(2)
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join([st_dict[i] for i in df_sub[value_field].unique().tolist()]),
                            st_dict[grp.index[0]],
                            int(grp[grp.index[0]]),
                            round(float(grp[grp.index[0]]) * 100.0 / float(len(df_sub)), 1),
                            st_dict[grp.index[1]],
                            int(grp[grp.index[1]]),
                            round(float(grp[grp.index[1]]) * 100.0 / float(len(df_sub)), 1),
                        )
                    )
                elif len(grp) == 0:
                    results.append(
                        (int(row[1].OBJECTID), 'None', 'None', 0, float(0), 'None', 0, float(0))
                    )
                elif len(grp) == 1:
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join([st_dict[i] for i in df_sub[value_field].unique().tolist()]),
                            st_dict[grp.index[0]],
                            int(grp[grp.index[0]]),
                            round(float(grp[grp.index[0]]) * 100.0 / float(len(df_sub)), 1),
                            'None',
                            0,
                            float(0)
                        )
                    )
            else:
                if len(grp) > 1:
                    grp = grp.head(2)
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join(df_sub[value_field].unique().tolist()),
                            grp.index[0],
                            int(grp[0]),
                            round(float(grp[0]) * 100.0 / float(len(df_sub)), 1),
                            grp.index[1],
                            int(grp[1]),
                            round(float(grp[1]) * 100.0 / float(len(df_sub)), 1),
                        )
                    )
                elif len(grp) == 0:
                    results.append(
                        (int(row[1].OBJECTID), 'None', 'None', 0, float(0), 'None', 0, float(0))
                    )
                elif len(grp) == 1:
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join(df_sub[value_field].unique().tolist()),
                            grp.index[0],
                            int(grp[0]),
                            round(float(grp[0]) * 100.0 / float(len(df_sub)), 1),
                            'None',
                            0,
                            float(0)
                        )
                    )
            del grp
            del df_sub
            del row_oids
            del df_current
        del grid_sdf
        del data_sdf
        dtypes = np.dtype(
            [
                # FIX: `np.int` was a deprecated alias for the builtin `int`
                # and was removed in NumPy 1.24; use `int` directly.
                ('_ID', int),
                ('THEME_LIST', '|S1024'),
                ('PRI_THEME', '|S256'),
                ('PRI_THEME_CNT', np.int32),
                ('PRI_THEME_PER', np.float64),
                ('SEC_THEME', '|S256'),
                ('SEC_THEME_CNT', np.int32),
                ('SEC_THEME_PER', np.float64)
            ]
        )
        array = np.array(results, dtypes)
        del results
        return array
    except:
        # Bare except is the established pattern here: wrap everything in a
        # FunctionError carrying traceback details from the project's
        # trace() helper (shadows the stdlib `trace` module import above).
        line, filename, synerror = trace()
        raise FunctionError(
            {
                "function": "process_source_lineage",
                "line": line,
                "filename": filename,
                "synerror": synerror,
                "arc": str(arcpy.GetMessages(2))
            }
        )
298e615474debbb01addc583ae19fc1c5191084b
3,642,460
def class_to_mask(classes: np.ndarray, class_colors: np.ndarray) -> np.ndarray:
    """Convert an array of class IDs into an RGB mask image.

    Args:
        classes: Array of class IDs, shape (H, W).
        class_colors: Color table, shape (num_classes, 3).

    Returns:
        ndarray of shape (H, W, 3).
    """
    palette = np.asarray(class_colors)
    # Fancy-indexing the palette with the ID array broadcasts the colors.
    return palette[classes]
c574594b18d312e9ce432b68c8c2ff4d73771e6f
3,642,461
from typing import List
import logging
def get_vocab(iob2_files: List[str]) -> List[str]:
    """Retrieve the vocabulary of the iob2 annotated files

    Arguments:
        iob2_files {List[str]} -- List of paths to the iob2 annotated files

    Returns:
        List[str] -- Returns the unique list of vocabulary found in the files
    """
    vocab = set()
    for iob2_file in iob2_files:
        logging.info("Loading file %s for creating corpus embeddings", iob2_file)
        # FIX: the original `open()` without `with` leaked a file handle per
        # file; the context manager guarantees each handle is closed.
        with open(iob2_file) as handle:
            for line in handle:
                # The token is the first tab-separated column of each line.
                token = line.split("\t")[0]
                vocab.add(token)
    return list(vocab)
0dc2a1f969ed6f92b36b1b31875c855d5efda2d9
3,642,462
import numpy
def taylor_green_vortex(x, y, t, nu):
    """Return the solution of the Taylor-Green vortex at given time.

    Parameters
    ----------
    x : numpy.ndarray
        Gridline locations in the x direction as a 1D array of floats.
    y : numpy.ndarray
        Gridline locations in the y direction as a 1D array of floats.
    t : float
        Time value.
    nu : float
        Coefficient of viscosity.

    Returns
    -------
    numpy.ndarray
        x-component of the velocity field as a 2D array of floats.
    numpy.ndarray
        y-component of the velocity field as a 2D array of floats.
    numpy.ndarray
        pressure field as a 2D array of floats.
    """
    X, Y = numpy.meshgrid(x, y)
    wavenumber = 2 * numpy.pi
    # Velocity decays as exp(-2 k^2 nu t); pressure as its square.
    decay = numpy.exp(-2 * wavenumber**2 * nu * t)
    u = -numpy.cos(wavenumber * X) * numpy.sin(wavenumber * Y) * decay
    v = numpy.sin(wavenumber * X) * numpy.cos(wavenumber * Y) * decay
    p = (-0.25 * (numpy.cos(2 * wavenumber * X) + numpy.cos(2 * wavenumber * Y))
         * decay**2)
    return u, v, p
f47f4cdf11b81fe8b8c38ae50d708ec4361f7098
3,642,463
def static_initial_state(batch_size, h_size):
    """
    Build the complex-valued zero initial state for a single GRU.

    If ``batch_size`` is None the state is unbatched; otherwise a batch
    dimension is prepended via ``add_batch``.
    """
    zeros = jnp.zeros([h_size], dtype=jnp.complex64)
    if batch_size is None:
        return zeros
    return add_batch(zeros, batch_size)
a803da5b0af0ce17fc7d1f303f6141416da6d120
3,642,464
def get_desklamp(request, index):
    """
    A pytest fixture to initialize and return the DeskLamp object with the
    given index. Skips the test if the lamp connection cannot be opened,
    and registers a finalizer that unsubscribes, switches the lamp off and
    closes the connection after the test.
    """
    desklamp = DeskLamp(index)
    try:
        desklamp.open()
    except RuntimeError:
        # Hardware not reachable: skip rather than fail the test.
        pytest.skip("Could not open desklamp connection")

    def fin():
        # Teardown: release subscription, power off, close connection.
        desklamp.unsubscribe()
        desklamp.off()
        desklamp.close()

    request.addfinalizer(fin)
    return desklamp
8f00296f5625c8a80bb094d1e470936a0733b83e
3,642,465
import torch
def conj(x):
    """
    Calculate the complex conjugate of x.

    ``x`` is a two-channel complex torch tensor: the last dimension holds
    (real, imaginary). The conjugate keeps the real channel and negates
    the imaginary one.
    """
    assert x.shape[-1] == 2
    real = x[..., 0]
    imag = x[..., 1]
    return torch.stack((real, imag.neg()), dim=-1)
b22cfd3f12759f9b237099ca0527f0cbe9b99348
3,642,466
def label_clusters(img, min_cluster_size=50, min_thresh=1e-6, max_thresh=1, fully_connected=False):
    """
    Label Clusters.

    Thresholds `img` to [min_thresh, max_thresh], then uniquely labels
    connected clusters of at least `min_cluster_size` voxels via the ANTs
    native LabelClustersUniquely call.

    :param img: ANTs image.
    :param min_cluster_size: smallest cluster (in voxels) to keep.
    :param min_thresh: lower threshold for inclusion.
    :param max_thresh: upper threshold for inclusion.
    :param fully_connected: whether diagonal neighbors count as connected.
    :return: image of uniquely labeled clusters.
    """
    dim = img.dimension
    clust = threshold_image(img, min_thresh, max_thresh)
    temp = int(fully_connected)  # native call expects an int flag
    args = [dim, clust, clust, min_cluster_size, temp]
    processed_args = _int_antsProcessArguments(args)
    # NOTE(review): `clust` is both input and output here, so the native
    # call appears to relabel it in place — confirm against ANTs docs.
    lib.LabelClustersUniquely(processed_args)
    return clust
efe63ea0e71d3a5bf3b2f0a03f3c0f1c295c063b
3,642,467
def update_schema(schema_old, schema_new):
    """
    Given an old BigQuery schema, update it with a new one.

    Where a field name is the same, the new will replace the old. Any
    new fields not present in the old schema will be added.

    Arguments:
        schema_old: the old schema to update
        schema_new: the new schema which will overwrite/extend the old
    """
    merged = list(schema_old["fields"])
    # Map each existing field name to its slot in the merged list.
    slot_of = {field["name"]: i for i, field in enumerate(merged)}

    for field in schema_new["fields"]:
        slot = slot_of.get(field["name"])
        if slot is None:
            # Brand-new field: append at the end.
            merged.append(field)
        else:
            # Known field: new definition wins.
            merged[slot] = field

    return {"fields": merged}
e97827ac0d8ee943b88fc54506af3f6fc8285d71
3,642,468
def get_estimators(positions_all, positions_relevant):
    """
    Extracts density estimators from a judged sample of paragraph positions.

    Parameters
    ----------
    positions_all : dict of (Path, float)
        A sample of paragraph positions from various datasets in the
        NTCIR-11 Math-2, and NTCIR-12 MathIR format.
    positions_relevant : dict of (Path, float)
        A subsample of relevant paragraph positions.

    Returns
    -------
    (float, KernelDensity, KernelDensity)
        An estimate of P(relevant), and estimators of p(position), and
        p(position | relevant).
    """
    # KernelDensity expects 2-D input, so each position becomes a 1-tuple.
    samples_all = [
        (position,)
        for _, positions in positions_all.items()
        for position in positions]
    samples_relevant = [
        (position,)
        for _, positions in positions_relevant.items()
        for position in positions]
    estimators = dict()
    # Prior probability of relevance = fraction of judged-relevant samples.
    estimators["P(relevant)"] = len(samples_relevant) / len(samples_all)
    LOGGER.info("Fitting prior p(position) density estimator")
    estimators["p(position)"] = KernelDensity(**KERNEL).fit(samples_all)
    LOGGER.info("Fitting conditional p(position | relevant) density estimator")
    estimators["p(position|relevant)"] = KernelDensity(**KERNEL).fit(samples_relevant)
    return (
        estimators["P(relevant)"],
        estimators["p(position)"],
        estimators["p(position|relevant)"])
b5f95247ff683e6e7e86d425ec64c988daacab60
3,642,469
def openbabel_force_field(label, mol, num_confs=None, xyz=None, force_field='GAFF', return_xyz_strings=True,
                          method='diverse'):
    """
    Optimize conformers using a force field (GAFF, MMFF94s, MMFF94, UFF, Ghemical)

    Args:
        label (str): The species' label.
        mol (Molecule, optional): The RMG molecule object with connectivity and bond order information.
        num_confs (int, optional): The number of random 3D conformations to generate.
        xyz (list, optional): The 3D coordinates in an array format.
        force_field (str, optional): The type of force field to use.
        return_xyz_strings (bool, optional): Whether to return xyz in string or array format. True for string.
        method (str, optional): The conformer searching method to use in open babel.
                                For method description, see http://openbabel.org/dev-api/group__conformer.shtml

    Returns:
        list: Entries are optimized xyz's in a list format.
    Returns:
        list: Entries are float numbers representing the energies in kJ/mol.
    """
    xyzs, energies = list(), list()
    ff = ob.OBForceField.FindForceField(force_field)

    if xyz is not None:
        # --- Branch 1: optimize a single given geometry. ---
        # NOTE(review): `unicode` only exists on Python 2 — this line is a
        # NameError on Python 3; confirm the project's target version.
        if isinstance(xyz, (str, unicode)):
            xyz = converter.get_xyz_matrix(xyz)[0]
        # generate an open babel molecule
        obmol = ob.OBMol()
        atoms = mol.vertices
        ob_atom_ids = dict()  # dictionary of OB atom IDs
        for i, atom in enumerate(atoms):
            a = obmol.NewAtom()
            a.SetAtomicNum(atom.number)
            a.SetVector(xyz[i][0], xyz[i][1], xyz[i][2])  # assume xyz is ordered like mol; line not in in toOBMol
            if atom.element.isotope != -1:
                a.SetIsotope(atom.element.isotope)
            a.SetFormalCharge(atom.charge)
            ob_atom_ids[atom] = a.GetId()
        # RMG bond order -> OpenBabel bond order (5 = aromatic).
        orders = {1: 1, 2: 2, 3: 3, 4: 4, 1.5: 5}
        for atom1 in mol.vertices:
            for atom2, bond in atom1.edges.items():
                if bond.isHydrogenBond():
                    continue
                index1 = atoms.index(atom1)
                index2 = atoms.index(atom2)
                if index1 < index2:
                    # Only add each bond once (OB atom indices are 1-based).
                    obmol.AddBond(index1 + 1, index2 + 1, orders[bond.order])

        # optimize
        ff.Setup(obmol)
        ff.SetLogLevel(0)
        ff.SetVDWCutOff(6.0)  # The VDW cut-off distance (default=6.0)
        ff.SetElectrostaticCutOff(10.0)  # The Electrostatic cut-off distance (default=10.0)
        ff.SetUpdateFrequency(10)  # The frequency to update the non-bonded pairs (default=10)
        ff.EnableCutOff(False)  # Use cut-off (default=don't use cut-off)
        # ff.SetLineSearchType('Newton2Num')
        ff.SteepestDescentInitialize()  # ConjugateGradientsInitialize
        v = 1
        # Take single steepest-descent steps until convergence (returns 0).
        while v:
            v = ff.SteepestDescentTakeNSteps(1)  # ConjugateGradientsTakeNSteps
            if ff.DetectExplosion():
                raise ConformerError('Force field {0} exploded with method {1} for {2}'.format(
                    force_field, 'SteepestDescent', label))
        ff.GetCoordinates(obmol)
    elif num_confs is not None:
        # --- Branch 2: generate and search conformers from connectivity. ---
        obmol, ob_atom_ids = toOBMol(mol, returnMapping=True)
        pybmol = pyb.Molecule(obmol)
        pybmol.make3D()
        ff.Setup(obmol)

        if method.lower() == 'weighted':
            ff.WeightedRotorSearch(num_confs, 2000)
        elif method.lower() == 'random':
            ff.RandomRotorSearch(num_confs, 2000)
        elif method.lower() == 'diverse':
            rmsd_cutoff = 0.5
            energy_cutoff = 50.
            confab_verbose = False
            ff.DiverseConfGen(rmsd_cutoff, num_confs, energy_cutoff, confab_verbose)
        elif method.lower() == 'systematic':
            ff.SystematicRotorSearch(num_confs)
        else:
            raise ConformerError('Could not identify method {0} for {1}'.format(method, label))
    else:
        raise ConformerError('Either num_confs or xyz should be given for {0}'.format(label))

    ff.GetConformers(obmol)
    obconversion = ob.OBConversion()
    obconversion.SetOutFormat('xyz')

    # Collect geometry and energy for every conformer found.
    for i in range(obmol.NumConformers()):
        obmol.SetConformer(i)
        ff.Setup(obmol)
        # Drop the 2-line xyz header (atom count + comment).
        xyz = '\n'.join(obconversion.WriteString(obmol).splitlines()[2:])
        if not return_xyz_strings:
            xyz = converter.get_xyz_matrix(xyz)[0]
            xyz = [xyz[ob_atom_ids[mol.atoms[j]]] for j, _ in enumerate(xyz)]  # reorder
        xyzs.append(xyz)
        energies.append(ff.Energy())
    return xyzs, energies
9964d94d2601e5cd7871886e396778457bb6e2cd
3,642,470
def parse_flarelabels(label_file):
    """
    Parses a flare-label file and generates a dictionary mapping residue
    identifiers (e.g. A:ARG:123) to a user-specified label, trees that can
    be parsed by flareplots, and a color indicator for vertices.

    Parameters
    ----------
    label_file : file
        A flare-label file where each line contains 2-3 tab-separated
        columns: CHAIN:RESN:RESI, [[TOPLEVEL.]MIDLEVEL.]LABEL, and COLOR.

    Returns
    -------
    dict of str : (dict of str : str)
        Keys are all residue identifiers and values are dicts holding the
        LABEL ("label"), the full tree-path ("treepath") and a
        CSS-compatible color string ("color").

    Raises
    ------
    AssertionError
        if a residue identifier or a LABEL appears twice in the file.
    """
    if label_file is None:
        return None

    ret = {}
    seen_labels = set()  # Only used to check for duplicates
    for raw_line in label_file:
        stripped = raw_line.strip()
        if not stripped:  # Ignore empty lines
            continue

        columns = stripped.split("\t")
        residentifier = columns[0]
        flaretreepath = columns[1] if len(columns) > 1 else columns[0]
        flarelabel = flaretreepath.split(".")[-1]
        flarecolor = columns[2] if len(columns) > 2 else "white"

        if residentifier in ret:
            raise AssertionError("Residue identifier '"+residentifier+"' appears twice in "+label_file.name)
        if flarelabel in seen_labels:
            raise AssertionError("Flare label '"+flarelabel+"' used twice in "+label_file.name)

        ret[residentifier] = {"label": flarelabel, "treepath": flaretreepath, "color": flarecolor}
        seen_labels.add(flarelabel)

    return ret
23df49af14af720311b320f65894e995983365bf
3,642,471
def remove_background(data, dim="t2", deg=0, regions=None):
    """Remove polynomial background from data

    Args:
        data (DNPData): Data object
        dim (str): Dimension to perform background fit
        deg (int): Polynomial degree
        regions (None, list): Background regions, by default entire region
            is background corrected. Regions can be specified as a list of
            tuples [(min, max), ...]

    Returns:
        DNPData: Background corrected data
    """
    proc_parameters = {
        "dim": dim,
        "deg": deg,
        "regions": regions,
    }
    # Fit the polynomial background, then subtract it from the data.
    fit = background(data, dim=dim, deg=deg, regions=regions)
    data = data - fit

    # NOTE(review): "remove_backround" looks like a typo for
    # "remove_background" — left unchanged because downstream code may
    # look up this exact processing-attribute key.
    proc_attr_name = "remove_backround"
    data.add_proc_attrs(proc_attr_name, proc_parameters)

    return data
54141b6f28b7a21ebdf1b0b920af3bfea4303b07
3,642,472
def get_hmm_datatype(query_file):
    """Takes an HMM file (HMMer3 software package) and determines what data
    type it has (i.e., generated from an amino acid or nucleic acid
    alignment).

    Returns either "prot" or "nucl".

    Raises an AssertionError if no ALPH line with a recognized alphabet is
    found.
    """
    datatype = None
    with open(query_file) as infh:
        for i in infh:
            if i.startswith('ALPH'):
                # FIX: split on any whitespace run — HMMer pads the ALPH
                # line with a variable number of spaces, so split(' ')
                # yielded empty strings and picked the wrong token.
                dname = i.strip().split()[1]
                if dname == 'amino':
                    datatype = 'prot'
                elif dname == 'DNA':
                    datatype = 'nucl'
                # Only the first ALPH line matters.
                break

    # Check that it worked.
    assert datatype is not None, """Error: Data type could not be determined
    for input file: %s""" % query_file

    # Return the data type.
    return datatype
27653784b8a9fbae92226f8ea7d7b6e2b647765e
3,642,473
def detect_min_threshold_outliers(series, threshold):
    """Detects the values that are lower than the threshold passed.

    series : series, mandatory
        The series where to detect the outliers.
    threshold : integer, float, mandatory
        The threshold of the minimum value; values strictly below it are
        flagged as outliers.

    Returns a boolean series/array marking the outliers.
    """
    # Elementwise comparison: True where the value undershoots the threshold.
    is_outlier = series < threshold
    return is_outlier
6032693341073d101c0aad598a105f6cbc0ec578
3,642,474
from datetime import datetime
def new_datetime(d):
    """
    Generate a safe datetime from a datetime.date or datetime.datetime object.
    """
    parts = [d.year, d.month, d.day]
    # Only a real datetime carries time-of-day and timezone fields.
    if isinstance(d, real_datetime):
        parts += [d.hour, d.minute, d.second, d.microsecond, d.tzinfo]
    return datetime(*parts)
58479d70918dd287bfd29b1a15b6cd4dc1bfd695
3,642,475
def _to_str(x):
    """Converts a bool tensor to a string tensor with True/False values;
    non-bool tensors are returned unchanged."""
    x = tf.convert_to_tensor(x)
    if x.dtype != tf.bool:
        return x
    return tf.where(x, 'True', 'False')
7919139e0f2cb19cd0856110e962acb616193ada
3,642,476
def inpaintn(x,m=100, x0=None, alpha=2):
    """
    This function interpolates the input (2-dimensional) image 'x' with
    missing values (can be NaN or Inf). It is based on a recursive process
    where at each step the discrete cosine transform (dct) is performed of
    the residue, multiplied by some weights, and then the inverse dct is
    taken. The initial guess 'x0' for the interpolation can be provided by
    the user, otherwise it starts with a nearest neighbor filling.

    Args
    INPUTS:
    x (numpy array) - is the image with missing elements (either np.nan or
        np.inf) from which you want to perform interpolation
    m (int) - is the number of iterations; default=100
    x0 (numpy array) - can be your initial guess; default=None
    alpha (float) - some input number used as a power scaling; default=2
    OUT:
    y (numpy array) - is the interpolated image wrt proposed method
    """
    sh = x.shape
    # Mask of known (finite) pixels.
    ids0 = np.isfinite(x)
    if ids0.all():
        #Nothing to interpolate...
        return x
    # Smoothness paramaters: geometric schedule from 1e3 down to 1e-6.
    s0 = 3
    s1 = -6
    s = np.logspace(s0,s1,num=m)
    # Relaxation factor:
    rf = 2
    # Weight matrix, here we add some basis vectors to Lambda depending on
    # original size of 'x' (DCT-domain Laplacian eigenvalues):
    Lambda = np.zeros(sh, float)
    u0 = np.cos(np.pi*np.arange(0,sh[0]).reshape((sh[0],1))/sh[0])
    u1 = np.cos(np.pi*np.arange(0,sh[1]).reshape((1,sh[1]))/sh[1])
    Lambda = np.add(np.add(Lambda,u0),u1)
    Lambda = 2*(2-Lambda)
    Lambda = Lambda**alpha
    # Starting interpolation:
    if x0 is None:
        # Nearest-neighbor fill supplied by a sibling helper.
        y = initial_nn(x)
    else:
        y = np.copy(x0)
    # NOTE(review): `dct`/`idct` are assumed to be imported at module level
    # (presumably scipy's) — confirm.
    for mu in range(m):
        # Per-iteration DCT-domain damping filter.
        Gamma = 1/(1+s[mu]*Lambda)
        a = np.copy(y)
        # NOTE(review): (x-y)[ids0]+y[ids0] simplifies to x[ids0] — i.e.
        # known pixels are restored before filtering.
        a[ids0] = (x-y)[ids0]+y[ids0]
        # Over-relaxed update: filter in the DCT domain, blend with previous.
        y = rf*idct(Gamma*dct(a, norm='ortho'), norm='ortho')+(1-rf)*y
        # Re-impose the known pixel values after each step.
        y[ids0] = x[ids0]
    return y
2fddabc6e512f9fc1ae7e8298f8d44582eaf7c46
3,642,477
def obtain_bboxs(path) -> list:
    """Obtain bbox annotations from the file.

    Lines starting with '%' and blank lines are skipped; each remaining
    line is expected to be ``label x1 y1 x2 y2`` separated by single
    spaces.

    :param path: path to the annotation file
    :return: list of ``[label, x1, y1, x2, y2]`` entries with float coords
    """
    # FIX: the original `open()` was never closed; `with` guarantees the
    # handle is released.
    with open(path, "r") as file:
        lines = file.read().split("\n")
    lines = [x for x in lines if x and not x.startswith("%")]
    lines = [x.rstrip().lstrip() for x in lines]  # get rid of fringe whitespaces

    bboxs = []
    for line in lines:
        items = line.split(" ")
        bboxs.append([items[0], float(items[1]), float(items[2]), float(items[3]), float(items[4])])
    return bboxs
75ceaac4bd8500320007d2ffb4cf4c490bd29473
3,642,478
def Timeline_Integral_with_cross_before(Tm,):
    """
    Cumulative run-length of a golden-cross/death-cross signal in the time
    domain: a death cross (1 -> 0) does NOT reset the counter, while a
    golden cross (value == 1) resets it to zero.

    (Translated from the original Chinese docstring; the author noted this
    is written as a plain for-loop rather than a lambda/apply form.)
    """
    counts = [Tm[0]]
    for idx in range(1, len(Tm)):
        if Tm[idx] != 1:
            counts.append(counts[idx - 1] + 1)
        else:
            counts.append(0)
    return np.array(counts)
fdbd68e84e2a79a96c2078f92a7b69ab0138874e
3,642,479
from typing import Generator
def list_image_paths() -> Generator[str, None, None]:
    """List each image path in the input directory.

    Delegates to ``list_input_directory`` with the configured
    ``image_dir``.

    NOTE(review): the annotation promises a generator, but the actual
    return type is whatever ``list_input_directory`` yields — confirm.
    """
    return list_input_directory(INPUT_DIRECTORIES["image_dir"])
bce70f2af3c42905a27a30bf97de0a993161130f
3,642,480
def a_star(graph: Graph, start: Node, goal: Node, heuristic): """ Standard A* search algorithm. :param graph: Graph A graph with all nodes and connections :param start: Node Start node, where the search starts :param goal: Node End node, the goal for the search :return: shortest_path: list|False Either a list of node ids or false """ # Indexed priority queue queue = pqdict() # All visited connections visited_stack = {} # Add start node visited_stack[start] = True # The costs from start to a node cost_to_node = {} # Full costs from a node to goal full_costs = {} # All paths that have been taken shortest_path = [] # Create a dummy for the start node dummy_connection = Connection(start, start) # Assign it to the queue so we can start queue[dummy_connection] = 0 while queue: # Get next connection from top queue # and remove it (its a get + pop) connection = queue.pop() # Add the node to the shortest path # cause otherwise we would not be here shortest_path.append(connection) cost_to_node[connection.to_node] = connection.cost # We have found the target if connection.to_node.id == goal.id: # Remove all unneded paths and return # a sorted list return clean_route_list(shortest_path, goal.id) # Get all connected nodes next_connections = graph.get_connections(connection.to_node) # Iterate through all connected nodes # and calculate the costs and stuff for c in next_connections: # Calculate total costs from start to the goal node to_goal_cost = heuristic(goal.position, c.to_node.position) # Calculate costs from start to this node current_cost = cost_to_node[connection.to_node] + c.cost # Update lists and costs queue[c] = current_cost cost_to_node[c.to_node] = current_cost full_costs[c.to_node] = current_cost + to_goal_cost visited_stack[c.to_node] = True # Never found the target, so sad ... return False
ca25a15733d041cfca2560164ea8b047e55991b8
3,642,481
def buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, validationLabels, testingLabels, MODEL_NAME, isPrintModel=True): """Take the model and model parameters, build and train the model""" # Build and compile model # To use other optimizers, refer to: https://keras.io/optimizers/ # Please do not change the loss function optimizer = tf.keras.optimizers.Adam(lr=learningRate) model.compile(optimizer=optimizer, loss=tf.keras.losses.MeanSquaredError()) if isPrintModel: print(model.summary()) for epoch in range(0, epochs): model.fit(trainingData, trainingLabels, epochs=1, verbose=0, batch_size=batchSize, shuffle=False) # Evaluate model valLoss = model.evaluate(validationData, validationLabels, verbose=False) ## get metrics predictions = model.predict(testingData) MSE, MAE, MAPE, RMSE, PR = getMetrics(testingLabels,predictions) MeanSquaredError.append(MSE) RootMeanSquaredError.append(RMSE) MeanAbsoluteError.append(MAE) MeanAbsolutePercentageError.append(MAPE) PearsonR.append(PR) ValMSE.append(valLoss) Epoch.append(epoch) if valLoss <= min(ValMSE): max_predictions = predictions return MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epoch, max_predictions
af00f383311588525e66cff317908a99fa39859f
3,642,482
def gaussian_temporal_filter(tsincr: np.ndarray, cutoff: float, span: np.ndarray, thr: int) -> np.ndarray:
    """
    Function to apply a Gaussian temporal low-pass filter to a 1D
    time-series vector for one pixel with irregular temporal sampling.

    :param tsincr: 1D time-series vector to be filtered.
    :param cutoff: filter cutoff in years.
    :param span: 1D vector of cumulative time spans, in years.
    :param thr: threshold for non-NaN values in tsincr.

    :return: ts_lp: Low-pass filtered time series vector (NaN where the
        input was NaN or where fewer than `thr` valid samples exist).
    """
    nanmat = ~isnan(tsincr)
    sel = np.nonzero(nanmat)[0]  # don't select if nan
    # Start from an all-NaN output; only valid positions get filled below.
    ts_lp = np.empty(tsincr.shape, dtype=np.float32) * np.nan
    m = len(sel)
    if m >= thr:
        for k in range(m):
            # Time offsets (years) of every valid sample relative to sample k.
            yr = span[sel] - span[sel[k]]
            # apply Gaussian smoothing kernel
            wgt = _kernel(yr, cutoff)
            # Normalize weights so they sum to 1.
            wgt /= np.sum(wgt)
            ts_lp[sel[k]] = np.sum(tsincr[sel] * wgt)
    return ts_lp
54060dbfc84ce1738698fda893afb556b48396e4
3,642,483
import requests
import json
def get_mactable(auth):
    """
    Function to get list of mac-addresses from Aruba OS switch

    :param auth: AOSSAuth class object returned by pyarubaoss.auth
    :return list of mac-addresses
    :rtype list

    NOTE(review): on a request failure this returns an error *string*
    instead of raising, so callers must type-check the result.
    """
    # REST endpoint for the switch MAC table.
    url_mactable = "http://" + auth.ipaddr + "/rest/" + auth.version + "/mac-table"
    try:
        r = requests.get(url_mactable, headers=auth.cookie)
        # Only the entry list from the JSON payload is returned.
        mactable = json.loads(r.text)['mac_table_entry_element']
        return mactable
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_mactable: An Error has occurred"
8f81a03640d7a4ed0d6d70bcaf268b647dee987e
3,642,484