content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def polinomsuzIntegralHesapla(veriler, baslangic=5):
    """Numerically integrate sampled data with the trapezoidal rule.

    (Translated from Turkish: "computes the integral using the given data".)

    :param veriler: sequence of sampled values to integrate (list-like),
        assumed to be taken on a unit-spaced grid.
    :param baslangic: index at which integration starts.  Defaults to 5,
        preserving the original hard-coded behaviour; pass 0 to integrate
        over the whole sequence.
    :return: trapezoidal approximation of the integral from index
        ``baslangic`` to the last sample.
    """
    a, b = baslangic, len(veriler)
    deltax = 1
    integral = 0
    n = int((b - a) / deltax)
    # n samples span n-1 unit-width trapezoids.
    for _ in range(n - 1):
        integral += deltax * (veriler[a] + veriler[a + deltax]) / 2
        a += deltax
    return integral
468e02da8ff077b04456f71f0af6d77bf5a47d68
3,646,013
def _keep_extensions(files, extension): """ Filters by file extension, this can be more than the extension! E.g. .png is the extension, gray.png is a possible extension""" if isinstance(extension, str): extension = [extension] def one_equal_extension(some_string, extension_list): return any([some_string.endswith(one_extension) for one_extension in extension_list]) return list(filter(lambda x: one_equal_extension(x, extension), files))
009233e381e2015ff4d919338225057d94d40a82
3,646,014
from typing import Dict


def make_all_rules(
    schema: "BaseOpenAPISchema", bundles: Dict[str, CaseInsensitiveDict], connections: EndpointConnections
) -> Dict[str, Rule]:
    """Create rules for all endpoints, based on the provided connections."""
    rules: Dict[str, Rule] = {}
    for endpoint in schema.get_all_endpoints():
        bundle = bundles[endpoint.path][endpoint.method.upper()]
        rules[f"rule {endpoint.verbose_name}"] = make_rule(endpoint, bundle, connections)
    return rules
3b92fdea984b5bfbe2b869640b978398106f098b
3,646,015
def audio_to_magnitude_db_and_phase(n_fft, hop_length_fft, audio):
    """Convert an audio signal to its spectrogram representation.

    Returns the STFT magnitude in dB (referenced to the maximum) together
    with the phase component.
    """
    stft = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length_fft)
    magnitude, phase = librosa.magphase(stft)
    magnitude_db = librosa.amplitude_to_db(magnitude, ref=np.max)
    return magnitude_db, phase
b9927f11bc353610fe7edbee9b710bd78fc13899
3,646,016
def has_rc_object(rc_file, name):
    """Check whether a section exists in a qutiprc settings file.

    Parameters
    ----------
    rc_file : str
        String specifying file location.
    name : str
        Tag (section name) of the saved data to look for.

    Returns
    -------
    bool
        True if the file parses and contains a section called ``name``.
    """
    config = ConfigParser()
    try:
        config.read(_full_path(rc_file))
    except (MissingSectionHeaderError, ParsingError):
        return False
    # BUG FIX: the original returned ``section in config`` where ``section``
    # is undefined (NameError) -- the parameter is called ``name``.
    return name in config
e7edd4ba8545257d7d489a7ac8f6e9595b4f087d
3,646,018
import torch


def apply_transform_test(batch_size, image_data_dir, tensor_data_dir, limited_num = None, shuffle_seed = 123, dataset = None):
    """Build a non-shuffled DataLoader over image/tensor pairs for testing.

    Uses an identity normalization (mean 0, std 1).  The ``dataset`` and
    ``shuffle_seed`` arguments are currently unused -- kept for interface
    compatibility with callers.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),
    ])
    test_set = ImageTensorFolder(img_path=image_data_dir,
                                 tensor_path=tensor_data_dir,
                                 label_path=tensor_data_dir,
                                 img_fmt="jpg", tns_fmt="pt", lbl_fmt="label",
                                 transform=transform,
                                 limited_num = limited_num)
    return torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                       shuffle=False, num_workers=4)
664e9fadeb4897aee7ef26abeec9c128ec7cef56
3,646,019
def take_t(n):
    """
    Transformation for Sequence.take

    :param n: number to take
    :return: transformation
    """
    def _take(sequence):
        return islice(sequence, 0, n)

    return Transformation("take({0})".format(n), _take, None)
1e485cc59160dbec8d2fa3f358f51055115eafdd
3,646,021
def metric_fn(loss):
    """Evaluation metric Fn which runs on CPU."""
    mean_loss = tf.reduce_mean(loss)
    perplexity = tf.exp(mean_loss)
    metrics = {}
    metrics["eval/loss"] = tf.metrics.mean(loss)
    metrics["eval/perplexity"] = tf.metrics.mean(perplexity)
    return metrics
3614ed0ccc3e390aeaf4036805dfeff351d4d150
3,646,022
def gensig_choi(distsmat, minlength=1, maxlength=None, rank=0):
    """Build the two-dimensional sigma scoring function for c99 splitting.

    Returns a closure ``sigma(a, b)`` scoring the segment ``[a, b)`` as
    ``(-beta, alpha)`` where ``beta`` is the inner distance mass and
    ``alpha`` the squared length; segments shorter than ``minlength`` or
    longer than ``maxlength`` are penalized to -infinity.
    """
    if rank:
        distsmat = rankify(distsmat, rank)

    def sigma(a, b):
        span = b - a
        inside = distsmat[a:b, a:b].sum()
        area = span ** 2
        # Push the score to -inf for out-of-bounds segment lengths.
        if minlength and span < minlength:
            inside += np.inf
        if maxlength and span > maxlength:
            inside += np.inf
        return (-inside, area)

    return sigma
75568f854eff9c3bbc13b4a46be8b1a2b9651b9b
3,646,023
from typing import Optional


def check_ie_v3(base, add: Optional[str] = None) -> str:
    """Check country specific VAT-Id"""
    # Letters 'A' - 'I' map onto 1 - 9 ('@' sits right before 'A').
    acc = 9 * (ord(add) - ord('@'))
    # Weighted sum of the first seven digits, weights 8 down to 2.
    for weight, digit in zip(range(8, 1, -1), base):
        acc += weight * int(digit)
    return _IE_CC_MAP[acc % 23]
19fd495db8301ed7193881cc637e3ce1b75e368c
3,646,024
def filter_experiment_model(faultgroup, faultmodel, interestlist=None):
    """
    Filter for a specific fault model.
    If interestlist is given only experiments in this list will be analysed.
    0 set 0
    1 set 1
    2 Toggle
    """
    if not isinstance(faultmodel, int):
        # Map the textual names onto the numeric encoding documented above;
        # insertion order preserves the original precedence (set0, set1, toggle).
        encodings = {"set0": 0, "set1": 1, "toggle": 2}
        for label, code in encodings.items():
            if label in faultmodel:
                faultmodel = code
                break
        else:
            raise ValueError("Faultmodel not understood")
    return generic_filter_faults(faultgroup, 'fault_model', faultmodel, None,
                                 interestlist)
7594f7f5a410c3bf9231996989259d1267ed250b
3,646,025
import inspect def _default_command(cmds, argv): """Evaluate the default command, handling ``**kwargs`` case. `argparse` and `argh` do not understand ``**kwargs``, i.e. pass through command. There's a case (`pykern.pkcli.pytest`) that requires pass through so we wrap the command and clear `argv` in the case of ``default_command(*args, **kwargs)``. Args: cmds (list): List of commands argv (list): arguments (may be edited) Returns: function: default command or None """ if len(cmds) != 1 or cmds[0].__name__ != DEFAULT_COMMAND: return None dc = cmds[0] spec = inspect.getargspec(dc) if not (spec.varargs and spec.keywords): return dc save_argv = argv[:] def _wrap_default_command(): return dc(*save_argv) del argv[:] return _wrap_default_command
aecaaba610ec473b41f1cd546cb5c551541d9fab
3,646,027
def nameable_op(node_factory_function):
    # type: (Callable) -> Callable
    """Set the name to the ngraph operator returned by the wrapped function."""
    @wraps(node_factory_function)
    def wrapper(*args, **kwargs):
        # type: (*Any, **Any) -> Node
        created_node = node_factory_function(*args, **kwargs)
        return _set_node_name(created_node, **kwargs)

    return wrapper
0230c96e40b91772dc06a0b2c9cf358d1e0b08c7
3,646,029
import re


def by_pattern(finding: finding.Entry, ignore: ignore_list.Entry) -> bool:
    """Process a regex ignore list entry."""
    # An ignore entry without a pattern can never match.
    if not ignore.pattern:
        return False

    # No match on the path means this entry does not apply at all.
    if not re.search(ignore.pattern, finding.path):
        return False

    # The ignore must target the same module as the finding.
    if ignore.module != finding.source.module:
        return False

    # Scoped to particular references?
    if ignore.references:
        return finding.source.reference in ignore.references

    # Scoped to a particular offset?
    if ignore.offset is not None:
        return finding.location.offset == ignore.offset

    # Otherwise this is a fairly permissive ignore: pattern + module match.
    return True
bbeb7d8ab740273bd21c120ca7bc42dc205e4a2b
3,646,031
def hex2int(s: str):
    """Parse a string of hex octets and return its integer value."""
    return int(s, base=16)
ecdb3152f8c661c944edd2811d016fce225c3d51
3,646,032
from operator import index


def into_two(lhs, ctx):
    """Element I
    (num) -> push n spaces
    (str) -> equivalent to `qp`
    (lst) -> split a list into two halves
    """
    type_tag = vy_type(lhs, simple=True)
    handlers = {
        NUMBER_TYPE: lambda: " " * int(lhs),
        str: lambda: quotify(lhs, ctx) + lhs,
        list: lambda: [
            index(lhs, [None, int(len(lhs) / 2)], ctx),
            index(lhs, [int(len(lhs) / 2), None], ctx),
        ],
    }
    return handlers.get(type_tag)()
6fae5eb7c5ae58a0e7faef6e46334201ccc6df10
3,646,033
def find_renter_choice(par,sol,t,i_beta,i_ht_lag,i_p,a_lag, inv_v,inv_mu,v,mu,p,valid,do_mu=True):
    """ find renter choice - used in both solution and simulation

    Interpolates the pre-computed renter ("rt") and buyer ("bt") value
    functions in cash-on-hand x, aggregates the two discrete choices with a
    logit-smoothed logsum, and scales the choice probabilities in ``p``.

    NOTE(review): ``par``/``sol`` are the project's parameter/solution
    namespaces; ``inv_v``/``inv_mu``/``v``/``mu``/``p`` appear to be
    pre-allocated work arrays filled by ``update`` -- confirm against caller.
    Returns (Ev, Emu): expected value and (if do_mu) expected marginal utility.
    """

    v_agg = np.zeros(2)
    p_agg = np.zeros(2)

    # a. x
    # A renter enters the period without housing or mortgage state, hence
    # the sentinel values (-1, -1, nan) passed to the cash-on-hand function.
    iota_lag = -1
    i_h_lag = -1
    LTV_lag = np.nan
    _m,x,_LTV = misc.mx_func(t,iota_lag,i_h_lag,i_p,LTV_lag,a_lag,par)
    # Locate x on the grid and compute the linear-interpolation weight.
    i_x = linear_interp.binary_search(0,par.Nx,par.grid_x,x)
    wx = (x-par.grid_x[i_x])/(par.grid_x[i_x+1]-par.grid_x[i_x])

    # b. choices
    # 1. renter: slots [0, par.Nrt) of the choice arrays.
    i = 0
    j = i + par.Nrt
    # Values at the two bracketing x grid points, for interpolation by wx.
    inv_v0 = sol.rt_inv_v[t,i_beta,i_ht_lag,i_p,i_x,:].ravel()
    inv_v1 = sol.rt_inv_v[t,i_beta,i_ht_lag,i_p,i_x+1,:].ravel()
    inv_mu0 = sol.rt_inv_mu[t,i_beta,i_ht_lag,i_p,i_x,:]
    inv_mu1 = sol.rt_inv_mu[t,i_beta,i_ht_lag,i_p,i_x+1,:]
    v_agg[0] = update(par,i,j,inv_v0,inv_v1,inv_mu0,inv_mu1,inv_v,inv_mu,wx,valid,v,p,mu,do_mu)
    i_rt = i
    j_rt = j

    # 2. buyer: the next par.Nbt slots.
    i = j
    j = i + par.Nbt # = par.Ncr
    inv_v0 = sol.bt_inv_v[t,i_beta,i_p,i_x,:,:,:].ravel()
    inv_v1 = sol.bt_inv_v[t,i_beta,i_p,i_x+1,:,:,:].ravel()
    inv_mu0 = sol.bt_inv_mu[t,i_beta,i_p,i_x,:,:,:].ravel()
    inv_mu1 = sol.bt_inv_mu[t,i_beta,i_p,i_x+1,:,:,:].ravel()
    v_agg[1] = update(par,i,j,inv_v0,inv_v1,inv_mu0,inv_mu1,inv_v,inv_mu,wx,valid,v,p,mu,do_mu)
    i_bt = i
    j_bt = j

    # c. aggregate
    # Only aggregate if at least one discrete choice is feasible
    # (infeasible choices carry an infinite value).
    if np.any(~np.isinf(v_agg)):
        # Smoothed discrete choice: fills p_agg with choice probabilities.
        _logsum = logsum_and_choice_probabilities(v_agg,par.sigma_agg,p_agg)
        # Scale the within-choice probabilities by the choice probability.
        p[i_rt:j_rt] *= p_agg[0]
        p[i_bt:j_bt] *= p_agg[1]
        Ev = np.nansum(p*v)
        if do_mu:
            Emu = np.nansum(p*mu)
        else:
            Emu = np.nan
    else:
        # No feasible choice at all: mark everything as undefined.
        p[:] = np.nan
        Ev = np.nan
        Emu = np.nan

    return Ev,Emu
f6cffb4ce6ed3ddaa98edefefdb6962536fbffb8
3,646,034
def met_zhengkl_gh(p, rx, cond_source, n, r):
    """
    Zheng 2000 test implemented with Gauss Hermite quadrature.
    """
    X, Y = sample_xy(rx, cond_source, n, r)
    rate = (cond_source.dx() + cond_source.dy()) * 4.0 / 5
    # Time only the test itself, not the sampling above.
    with util.ContextTimer() as timer:
        zheng_gh = cgof.ZhengKLTestGaussHerm(p, alpha, rate=rate)
        result = zheng_gh.perform_test(X, Y)
    return {'test_result': result, 'time_secs': timer.secs}
41bef090ccc515be895bd08eda451864c330327e
3,646,035
from typing import Optional
from typing import Sequence


def get_domains(admin_managed: Optional[bool] = None,
                include_unverified: Optional[bool] = None,
                only_default: Optional[bool] = None,
                only_initial: Optional[bool] = None,
                only_root: Optional[bool] = None,
                supports_services: Optional[Sequence[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainsResult:
    """Use this data source to access information about existing Domains within
    Azure Active Directory.

    ## API Permissions

    When authenticated with a service principal, this data source requires one
    of the application roles `Domain.Read.All` or `Directory.Read.All`.  When
    authenticated with a user principal, no additional roles are required.

    :param bool admin_managed: Set to `true` to only return domains whose DNS is managed by Microsoft 365. Defaults to `false`.
    :param bool include_unverified: Set to `true` if unverified Azure AD domains should be included. Defaults to `false`.
    :param bool only_default: Set to `true` to only return the default domain.
    :param bool only_initial: Set to `true` to only return the initial domain, which is your primary Azure Active Directory tenant domain. Defaults to `false`.
    :param bool only_root: Set to `true` to only return verified root domains. Excludes subdomains and unverified domains.
    :param Sequence[str] supports_services: A list of supported services that must be supported by a domain. Possible values include `Email`, `Sharepoint`, `EmailInternalRelayOnly`, `OfficeCommunicationsOnline`, `SharePointDefaultDomain`, `FullRedelegation`, `SharePointPublic`, `OrgIdAuthentication`, `Yammer` and `Intune`.
    """
    # Invoke arguments use the provider's camelCase wire names.
    __args__ = {
        'adminManaged': admin_managed,
        'includeUnverified': include_unverified,
        'onlyDefault': only_default,
        'onlyInitial': only_initial,
        'onlyRoot': only_root,
        'supportsServices': supports_services,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azuread:index/getDomains:getDomains',
                                    __args__, opts=opts,
                                    typ=GetDomainsResult).value
    return AwaitableGetDomainsResult(
        admin_managed=__ret__.admin_managed,
        domains=__ret__.domains,
        id=__ret__.id,
        include_unverified=__ret__.include_unverified,
        only_default=__ret__.only_default,
        only_initial=__ret__.only_initial,
        only_root=__ret__.only_root,
        supports_services=__ret__.supports_services)
e7cb3a42ec7be45153c67d860b4802669f8043e5
3,646,036
import six


def get_writer(Writer=None, fast_writer=True, **kwargs):
    """Initialize a table writer allowing for common customizations.

    Most of the default behavior for various parameters is determined by the
    Writer class.

    Parameters
    ----------
    Writer : ``Writer``
        Writer class (DEPRECATED). Defaults to :class:`Basic`.
    delimiter : str
        Column delimiter string
    comment : str
        String defining a comment line in table
    quotechar : str
        One-character string to quote fields containing special characters
    formats : dict
        Dictionary of format specifiers or formatting functions
    strip_whitespace : bool
        Strip surrounding whitespace from column values.
    names : list
        List of names corresponding to each data column
    include_names : list
        List of names to include in output.
    exclude_names : list
        List of names to exclude from output (applied after ``include_names``)
    fast_writer : bool
        Whether to use the fast Cython writer.

    Returns
    -------
    writer : `~astropy.io.ascii.BaseReader` subclass ASCII format writer instance
    """
    writer_cls = basic.Basic if Writer is None else Writer
    kwargs.setdefault('strip_whitespace', True)
    writer = core._get_writer(writer_cls, fast_writer, **kwargs)

    # Corner case: the commented_header format *requires* a string for
    # `write_comment` because it is used for the header column row, so the
    # input `comment` cannot be None to disable table comments.  Tell the
    # user to remove meta['comments'] instead.
    commented = isinstance(writer,
                           (basic.CommentedHeader, fastbasic.FastCommentedHeader))
    if commented and not isinstance(kwargs.get('comment', ''), six.string_types):
        raise ValueError("for the commented_header writer you must supply a string\n"
                         "value for the `comment` keyword. In order to disable writing\n"
                         "table comments use `del t.meta['comments']` prior to writing.")
    return writer
0e51b930d90585905b37ddd585cdfd53de111aa9
3,646,037
def find_last_service(obj):
    """Identify last service event for instrument"""
    services = Service.objects.filter(equipment=obj)
    return services.order_by('-date').first()
5c7c74b376568fc57268e8fe7c1970c03d61ad2c
3,646,038
def SectionsMenu(base_title=_("Sections"), section_items_key="all", ignore_options=True):
    """
    displays the menu for all sections
    :return:
    """
    items = get_all_items("sections")
    container = SubFolderObjectContainer(title2=_("Sections"), no_cache=True, no_history=True)
    passthrough = {
        "base_title": base_title,
        "section_items_key": section_items_key,
        "ignore_options": ignore_options,
    }
    return dig_tree(container, items, None,
                    menu_determination_callback=determine_section_display,
                    pass_kwargs=passthrough,
                    fill_args={"title": "section_title"})
ea3766d923337e1dffc07dec5e5d042e2c85050c
3,646,039
def perform_save_or_create_role(is_professor, created_user, req_main, is_creating):
    """Performs update or create Student or Professor for user.

    :param is_professor: True to handle the 'professor' payload, otherwise
        the 'student' payload is used.
    :param created_user: user the role record is attached to.
    :param req_main: request payload dict possibly containing a
        'professor' or 'student' sub-dict.
    :param is_creating: True on create, False on update (affects wording
        of the error message only).
    :return: 'success', or a 400 Response when the payload is invalid.
    """
    response_verb = 'created' if is_creating else 'updated'
    if is_professor is True:
        return _save_role(req_main.get('professor'), True, 'Professor',
                          CreateUpdateProfessorSerializer, created_user,
                          is_creating, response_verb)
    return _save_role(req_main.get('student'), False, 'Student',
                      CreateUpdateStudentSerializer, created_user,
                      is_creating, response_verb)


def _save_role(role_data, is_professor_role, role_name, serializer_cls,
               created_user, is_creating, response_verb):
    """Validate one role payload and persist it; shared by both branches."""
    # A missing payload is not an error: there is simply nothing to save.
    if role_data is None:
        return 'success'
    serialized = serializer_cls(data=role_data)
    if serialized.is_valid():
        save_or_create_data_in_role(role_data, is_professor_role, is_creating,
                                    role_name, created_user)
        return 'success'
    # BUG FIX: the original message read "could not not be" (double negative).
    return Response(
        {"message": f"{role_name} account could not be {response_verb}."},
        status=status.HTTP_400_BAD_REQUEST)
6161aaf886b31209a8387426f812cda73b739df2
3,646,040
def ecg_rsp(ecg_rate, sampling_rate=1000, method="vangent2019"):
    """Extract ECG Derived Respiration (EDR).

    This implementation is far from being complete, as the information in
    the related papers prevents me from getting a full understanding of the
    procedure. Help is required!

    Parameters
    ----------
    ecg_rate : array
        The heart rate signal as obtained via `ecg_rate()`.
    sampling_rate : int
        The sampling frequency of the signal that contains the R-peaks
        (in Hz, i.e., samples/second). Defaults to 1000Hz.
    method : str
        Can be one of 'vangent2019' (default), 'soni2019', 'charlton2016'
        or 'sarkar2015'.

    Returns
    -------
    array
        A Numpy array containing the derived respiratory signal.

    References
    ----------
    - van Gent, Farah, van Nes & van Arem (2019). HeartPy: A novel heart
      rate algorithm for the analysis of noisy signals.
    - Sarkar, Bhattacherjee & Pal (2015). Extraction of respiration signal
      from ECG for respiratory rate estimation.
    - Charlton, Bonnici, Tarassenko, Clifton, Beale & Watkinson (2016). An
      assessment of algorithms to estimate respiratory rate from the
      electrocardiogram and photoplethysmogram.
    - Soni & Muniyandi (2019). Breath rate variability: a novel measure to
      study the meditation effects.
    """
    method = method.lower()
    # Each method is a particular band-pass filter of the heart-rate signal.
    if method == "sarkar2015":
        return signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.7, order=6)
    if method == "charlton2016":
        return signal_filter(ecg_rate, sampling_rate, lowcut=4 / 60, highcut=60 / 60, order=6)
    if method == "soni2019":
        return signal_filter(ecg_rate, sampling_rate, highcut=0.5, order=6)
    if method == "vangent2019":
        return signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.4, order=2)
    raise ValueError(
        "NeuroKit error: ecg_rsp(): 'method' should be "
        "one of 'sarkar2015', 'charlton2016', 'soni2019' or "
        "'vangent2019'."
    )
ec4ecdbf4489216124ef82399c548461968ca45b
3,646,041
def bootstrap(request):
    """Concatenates bootstrap.js files from all installed Hue apps."""
    accessible_apps = [app for app in appmanager.DESKTOP_APPS
                       if request.user.has_hue_permission(action="access", app=app.name)]

    # Apps without a bootstrap file return None and are skipped.
    pieces = []
    for app in accessible_apps:
        stream = app.get_bootstrap_file()
        if stream is not None:
            pieces.append("\n/* %s */\n%s" % (app.name, stream.read()))

    # HttpResponse accepts an iterable of strings as its first argument.
    return HttpResponse(pieces, content_type='text/javascript')
94be21562c383ad93c7a0530810bb08f41f3eb26
3,646,042
def get_selection(selection):
    """Return a valid model selection.

    Strings 'all' (any case) and '' normalize to None, 'topics...' strings
    are wrapped in a list, other strings and lists pass through unchanged.
    """
    if not isinstance(selection, (str, list)):
        raise TypeError('The selection setting must be a string or a list.')
    if not isinstance(selection, str):
        return selection
    if selection == '' or selection.lower() == 'all':
        return None
    if selection.startswith('topics'):
        return [selection]
    return selection
996d0af844e7c1660bcc67e24b33c31861296d93
3,646,043
def getAllImageFilesInHierarchy(path):
    """
    Returns a list of file paths relative to 'path' for all images under the
    given directory, recursively looking in subdirectories
    """
    return list(scan_tree(path))
821147ac2def3f04cb9ecc7050afca85d54b6543
3,646,044
def list_package(connection, args):
    """List information about package contents"""
    package = sap.adt.Package(connection, args.name)
    for pkg, subpackages, objects in sap.adt.package.walk(package):
        basedir = '/'.join(pkg)
        if basedir:
            basedir += '/'

        # Non-recursive mode lists sub-packages by name only.
        if not args.recursive:
            for subpkg in subpackages:
                print(f'{basedir}{subpkg}')

        for obj in objects:
            print(f'{basedir}{obj.name}')

        # Stop after the top-level package in non-recursive mode.
        if not args.recursive:
            break

        # Print the bare directory for packages with no content.
        if not subpackages and not objects:
            print(f'{basedir}')

    return 0
7c8e0cb8a6d5e80a95ae216ad2b85309f0b4d45c
3,646,045
def calc_hebrew_bias(probs):
    """
    :param probs: list of negative log likelihoods for a Hebrew corpus,
        laid out in groups of 16 entries
    :return: gender bias in corpus
    """
    total = 0.0
    for base in range(0, len(probs), 16):
        # Offsets 1/5/9/13 count against the bias, 2/6/10/14 in favor.
        total -= sum(probs[base + off] for off in (1, 5, 9, 13))
        total += sum(probs[base + off] for off in (2, 6, 10, 14))
    return total / 4
565be51b51d857c671ee44e090c5243e4d207942
3,646,046
import pickle


def load_wiki(size = 128, validate = True):
    """
    Return malaya pretrained wikipedia ELMO size N.

    Parameters
    ----------
    size: int, (default=128)
    validate: bool, (default=True)

    Returns
    -------
    dictionary: dictionary of dictionary, reverse dictionary and vectors
    """
    if not isinstance(size, int):
        raise ValueError('size must be an integer')
    if size not in [128, 256]:
        raise ValueError('size only support [128,256]')
    if validate:
        check_file(PATH_ELMO[size], S3_PATH_ELMO[size])
    elif not check_available(PATH_ELMO[size]):
        raise Exception(
            'elmo-wiki is not available, please `validate = True`'
        )
    with open(PATH_ELMO[size]['setting'], 'rb') as fopen:
        setting = pickle.load(fopen)
    g = load_graph(PATH_ELMO[size]['model'])
    tokens = g.get_tensor_by_name('import/tokens_characters:0')
    tokens_reverse = g.get_tensor_by_name('import/tokens_characters_reverse:0')
    softmax_score = g.get_tensor_by_name('import/softmax_score:0')
    return ELMO(
        tokens,
        tokens_reverse,
        softmax_score,
        generate_session(graph = g),
        setting['dictionary'],
        setting['char_maxlen'],
        setting['steps'],
        setting['softmax_weight'],
    )
6daaa592000f8cb4ac54632729bda60c7325548d
3,646,047
def generate_pibindex_rois_fs(aparc_aseg):
    """Given an aparc+aseg in PET space, generate white-matter, grey-matter
    and pibindex ROIs, made mutually non-overlapping (pibindex has priority
    over both, then wm over gm). Returns the three rois."""
    wm = mask_from_aseg(aparc_aseg, wm_aseg())
    gm = mask_from_aseg(aparc_aseg, gm_aseg())
    pibi = mask_from_aseg(aparc_aseg, pibindex_aseg())
    # Remove overlaps so each voxel belongs to at most one roi.
    wm[pibi == 1] = 0
    gm[pibi == 1] = 0
    gm[wm == 1] = 0
    return wm, gm, pibi
35023b9084bb90f6b74f3f39b8fd79f03eb825d9
3,646,048
import logging


def rescale(img, mask, factor):
    """Rescale image and mask."""
    logging.info('Scaling: %s', array_info(img))
    info = img.info
    # Nearest-neighbour zoom; the trailing channel axis keeps its size.
    zoomed = ndimage.interpolation.zoom(img, factor + (1,), order=0)
    info['spacing'] = [spacing / f for spacing, f in zip(info['spacing'], factor)]
    mask = rescale_mask(mask, factor)
    assert zoomed[..., 0].shape == mask.shape, (zoomed.shape, mask.shape)
    return dwi.image.Image(zoomed, info=info), mask
9f8676a34e58eec258227d8ba41891f4bab7e895
3,646,051
def get_data_all(path):
    """
    Get all data of Nest and reorder them.
    :param path: the path of the Nest folder
    :return: dict mapping each label to its reordered recording data
    """
    labels_file = path + 'labels.csv'
    data_pop = {}
    for i in range(count_number_of_label(labels_file)):
        label, kind = get_label_and_type(labels_file, i)
        field, data = get_data(label, path)
        if kind == 'spikes':
            data_pop[label] = reorder_data_spike_detector(data)
        else:
            data_pop[label] = reorder_data_multimeter(data)
    return data_pop
94409d671b287ce213088817a4ad41be86126508
3,646,052
def from_tfrecord_parse(record, pre_process_func=None, jpeg_encoded=False):
    """Parse a single tf.Example written by prepare_data.TFRecordWriter.

    Arguments:
        record          : the tf.Example record with the features of
                          prepare_data.TFRecordWriter
        pre_process_func: optional pre-processing function applied to the
                          decoded data and label.
        jpeg_encoded    : whether the image bytes are JPEG-compressed.

    Returns:
        image: a properly shaped and decoded 2D image.
        label: its corresponding label.
    """
    feature_spec = {
        'shape': tf.io.FixedLenFeature([3], tf.int64),
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([1], tf.int64),
    }
    features = tf.io.parse_single_example(record, features=feature_spec)

    if jpeg_encoded:
        data = tf.io.decode_jpeg(features['image'])
    else:
        data = tf.io.decode_raw(features['image'], tf.uint8)
    data = tf.reshape(data, features['shape'])
    labels = features['label']

    if pre_process_func:
        data, labels = pre_process_func(data, labels)
    return data, labels
2e32accfe7485058fa4aff7c683265b939184c94
3,646,053
import json
import re


def reader_json_totals(list_filenames):
    """Read json files with totals and return them as a list of dicts.

    Only filenames containing ``totals.json`` are processed, so the whole
    directory listing can be passed in and the appropriate files are
    selected here.  Reaction temperatures and sample mass are parsed out
    of the filename when present (nan otherwise).

    Returns
    ----------
    list_totals_dict: list dicts
        list of dictionaries with the totals
    """
    totals = []
    for filename in list_filenames:
        if "totals.json" not in filename:
            continue
        with open(filename, 'r') as fp:
            data = json.load(fp)
        temps = re.findall(r"(\d+)C", filename)
        data['1st_react_temp'] = float(temps[0]) if len(temps) > 0 else np.nan
        data['2nd_react_temp'] = float(temps[1]) if len(temps) > 1 else np.nan
        masses = re.findall(r"(\d+) ug", filename)
        data['mass ug'] = float(masses[0]) if masses else np.nan
        totals.append(data)
    return totals
57f27dbb3eabee014a23449b7975295b088b5e72
3,646,056
def is_hitachi(dicom_input):
    """
    Use this function to detect if a dicom series is a hitachi dataset

    :param dicom_input: directory with dicom files for 1 scan of a dicom_header
    """
    header = dicom_input[0]

    # Without these tags we fall back to generic conversion elsewhere.
    if 'Manufacturer' not in header or 'Modality' not in header:
        return False

    # Only MR scans can be hitachi datasets.
    if header.Modality.upper() != 'MR':
        return False

    return 'HITACHI' in header.Manufacturer.upper()
c039c0535823edda2f66c3e445a5800a9890f155
3,646,057
def index_of_first_signal(evt_index, d, qsets, MAXT3):
    """Return the running index within ``qsets`` of the first triplet
    flagged as signal (MC truth), or -1 if none is.

    Args:
        evt_index: event index into the truth array.
        d: dict holding the '_BToKEE_is_signal' truth labels per event.
        qsets: iterable of triplet sets; only the first alternative of
            each set is inspected.
        MAXT3: unused here, kept for interface compatibility.
    """
    labels = d['_BToKEE_is_signal'][evt_index]
    for k, tset in enumerate(qsets):
        for ind in tset:
            # Pick the first of the alternatives and break
            # [HERE ADD THE OPTION TO CHOOSE e.g. THE BEST RECONSTRUCTION QUALITY !!]
            y = np.asarray(labels)[ind]
            break
        if y == 1:
            return k
    return -1
cd156faceaf3cf3261b2ba217cda5a6c0e3ce4b8
3,646,058
import numpy


def readcrd(filename, REAL):
    """Read a crd file containing the charges information.

    Arguments
    ----------
    filename : name of the file that contains the charge information.
    REAL     : data type (e.g. ``float``) used to parse numeric fields.

    Returns
    -------
    pos : (Nqx3) array, positions of the charges.
    q   : (Nqx1) array, value of the charges.
    """
    pos = []
    q = []
    with open(filename, 'r') as f:
        for raw in f:
            fields = raw.split()
            # Data rows carry x, y, z in columns 4-6 and the charge in
            # column 9; comment lines start with '*'.  BUG FIX: the
            # original guard (len > 8) let 9-column rows through and then
            # crashed reading fields[9]; require the full 10 columns.
            if len(fields) >= 10 and fields[0] != '*':
                x = fields[4]
                y = fields[5]
                z = fields[6]
                q.append(REAL(fields[9]))
                pos.append([REAL(x), REAL(y), REAL(z)])
    pos = numpy.array(pos)
    q = numpy.array(q)
    return pos, q
efafc2e53eebeacbe6a1a5b1e346d0e121fa7a62
3,646,059
def load_and_initialize_hub_module(module_path, signature='default'):
    """Loads graph of a TF-Hub module and initializes it into a session.

    Args:
      module_path: string Path to TF-Hub module.
      signature: string Signature to use when creating the apply graph.

    Return:
      graph: tf.Graph Graph of the module.
      session: tf.Session Session with initialized variables and tables.
      inputs: dict Dictionary of input tensors.
      outputs: dict Dictionary of output tensors.

    Raises:
      ValueError: If signature contains a SparseTensor on input or output.
    """
    graph = tf.Graph()
    with graph.as_default():
        tf.compat.v1.logging.info('Importing %s', module_path)
        module = hub.Module(module_path)

        signature_inputs = module.get_input_info_dict(signature)
        signature_outputs = module.get_output_info_dict(signature)
        # SparseTensors are unsupported: reject them up front.
        for key, info in list(signature_inputs.items()) + list(
            signature_outputs.items()):
            if info.is_sparse:
                raise ValueError(
                    'Signature "%s" has a SparseTensor on input/output "%s".'
                    ' SparseTensors are not supported.' % (signature, key))

        # One placeholder per declared input of the provided signature.
        inputs = {
            input_key: tf.compat.v1.placeholder(
                shape=input_info.get_shape(), dtype=input_info.dtype,
                name=input_key)
            for input_key, input_info in signature_inputs.items()
        }
        outputs = module(inputs=inputs, signature=signature, as_dict=True)

    session = tf.compat.v1.Session(graph=graph)
    session.run(tf.compat.v1.global_variables_initializer())
    session.run(tf.compat.v1.tables_initializer())
    return graph, session, inputs, outputs
b04b5f77c7e0207d314ebb5910ec1c5e61f4755c
3,646,060
def get_mention_token_dist(m1, m2):
    """Return the distance in tokens between two mentions (0 if they touch
    or overlap)."""
    if m1.tokens[0].doc_index < m2.tokens[0].doc_index:
        earlier, later = m1, m2
    else:
        earlier, later = m2, m1
    gap = later.tokens[0].doc_index - earlier.tokens[-1].doc_index
    return gap if gap > 0 else 0
84052f805193b1d653bf8cc22f5d37b6f8de66f4
3,646,061
def shlcar3x3(x, y, z, ps):
    """
    This subroutine returns the shielding field for the earth's dipole,
    represented by 2x3x3=18 "cartesian" harmonics, tilted with respect
    to the z=0 plane (nb#4, p.74).

    :param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
    :param ps: geo-dipole tilt angle in radians.
    :return: bx,by,bz. Field components in GSM system, in nT.
    """
    # The 36 coefficients enter in pairs in the amplitudes of the "cartesian"
    # harmonics (A(1)-A(36)). The 14 nonlinear parameters (A(37)-A(50)) are the
    # scales Pi, Ri, Qi and Si entering the arguments of exponents, sines and
    # cosines in each of the 18 "cartesian" harmonics, plus two tilt angles for
    # the cartesian harmonics (one for the psi=0 mode, another for psi=90).
    # NOTE(review): coefficients appear to come from the Tsyganenko T96 Fortran
    # SHLCAR3X3 routine — confirm against the original source.
    a = np.array([
        -901.2327248,895.8011176,817.6208321,-845.5880889,-83.73539535,
        86.58542841,336.8781402,-329.3619944,-311.2947120,308.6011161,
        31.94469304,-31.30824526,125.8739681,-372.3384278,-235.4720434,
        286.7594095,21.86305585,-27.42344605,-150.4874688,2.669338538,
        1.395023949,-.5540427503,-56.85224007,3.681827033,-43.48705106,
        5.103131905,1.073551279,-.6673083508,12.21404266,4.177465543,
        5.799964188,-.3977802319,-1.044652977,.5703560010,3.536082962,
        -3.222069852,9.620648151,6.082014949,27.75216226,12.44199571,
        5.122226936,6.982039615,20.12149582,6.150973118,4.663639687,
        15.73319647,2.303504968,5.840511214,.8385953499E-01,.3477844929])

    # Unpack the nonlinear scale parameters and the two tilt angles.
    p1,p2,p3, r1,r2,r3, q1,q2,q3, s1,s2,s3 = a[36:48]
    t1,t2 = a[48:50]

    cps = np.cos(ps)
    sps = np.sin(ps)
    s2ps = 2*cps  # modified here (sin(2*ps) instead of sin(3*ps))

    # Rotate coordinates by the two fitted tilt angles (one per symmetry mode).
    st1 = np.sin(ps*t1)
    ct1 = np.cos(ps*t1)
    st2 = np.sin(ps*t2)
    ct2 = np.cos(ps*t2)

    x1 = x*ct1 - z*st1
    z1 = x*st1 + z*ct1
    x2 = x*ct2 - z*st2
    z2 = x*st2 + z*ct2

    # make the terms in the 1st sum ("perpendicular" symmetry):
    # i=1 (scale p1 with r1, r2, r3):
    sqpr = np.sqrt(1/p1**2 + 1/r1**2)
    cyp = np.cos(y/p1)
    syp = np.sin(y/p1)
    czr = np.cos(z1/r1)
    szr = np.sin(z1/r1)
    expr = np.exp(sqpr*x1)
    fx1 = -sqpr*expr*cyp*szr
    hy1 = expr/p1*syp*szr
    fz1 = -expr*cyp/r1*czr
    hx1 = fx1*ct1 + fz1*st1
    hz1 = -fx1*st1 + fz1*ct1

    sqpr = np.sqrt(1/p1**2 + 1/r2**2)
    cyp = np.cos(y/p1)
    syp = np.sin(y/p1)
    czr = np.cos(z1/r2)
    szr = np.sin(z1/r2)
    expr = np.exp(sqpr*x1)
    fx2 = -sqpr*expr*cyp*szr
    hy2 = expr/p1*syp*szr
    fz2 = -expr*cyp/r2*czr
    hx2 = fx2*ct1 + fz2*st1
    hz2 = -fx2*st1 + fz2*ct1

    # Third radial scale uses the "derivative" form of the harmonic.
    sqpr = np.sqrt(1/p1**2 + 1/r3**2)
    cyp = np.cos(y/p1)
    syp = np.sin(y/p1)
    czr = np.cos(z1/r3)
    szr = np.sin(z1/r3)
    expr = np.exp(sqpr*x1)
    fx3 = -expr*cyp*(sqpr*z1*czr + szr/r3*(x1 + 1/sqpr))
    hy3 = expr/p1*syp*(z1*czr + x1/r3*szr/sqpr)
    fz3 = -expr*cyp*(czr*(1 + x1/r3**2/sqpr) - z1/r3*szr)
    hx3 = fx3*ct1 + fz3*st1
    hz3 = -fx3*st1 + fz3*ct1

    # i=2 (scale p2 with r1, r2, r3):
    sqpr = np.sqrt(1/p2**2 + 1/r1**2)
    cyp = np.cos(y/p2)
    syp = np.sin(y/p2)
    czr = np.cos(z1/r1)
    szr = np.sin(z1/r1)
    expr = np.exp(sqpr*x1)
    fx4 = -sqpr*expr*cyp*szr
    hy4 = expr/p2*syp*szr
    fz4 = -expr*cyp/r1*czr
    hx4 = fx4*ct1 + fz4*st1
    hz4 = -fx4*st1 + fz4*ct1

    sqpr = np.sqrt(1/p2**2 + 1/r2**2)
    cyp = np.cos(y/p2)
    syp = np.sin(y/p2)
    czr = np.cos(z1/r2)
    szr = np.sin(z1/r2)
    expr = np.exp(sqpr*x1)
    fx5 = -sqpr*expr*cyp*szr
    hy5 = expr/p2*syp*szr
    fz5 = -expr*cyp/r2*czr
    hx5 = fx5*ct1 + fz5*st1
    hz5 = -fx5*st1 + fz5*ct1

    sqpr = np.sqrt(1/p2**2 + 1/r3**2)
    cyp = np.cos(y/p2)
    syp = np.sin(y/p2)
    czr = np.cos(z1/r3)
    szr = np.sin(z1/r3)
    expr = np.exp(sqpr*x1)
    fx6 = -expr*cyp*(sqpr*z1*czr + szr/r3*(x1 + 1/sqpr))
    hy6 = expr/p2*syp*(z1*czr + x1/r3*szr/sqpr)
    fz6 = -expr*cyp*(czr*(1 + x1/r3**2/sqpr) - z1/r3*szr)
    hx6 = fx6*ct1 + fz6*st1
    hz6 = -fx6*st1 + fz6*ct1

    # i=3 (scale p3 with r1, r2, r3):
    sqpr = np.sqrt(1/p3**2 + 1/r1**2)
    cyp = np.cos(y/p3)
    syp = np.sin(y/p3)
    czr = np.cos(z1/r1)
    szr = np.sin(z1/r1)
    expr = np.exp(sqpr*x1)
    fx7 = -sqpr*expr*cyp*szr
    hy7 = expr/p3*syp*szr
    fz7 = -expr*cyp/r1*czr
    hx7 = fx7*ct1 + fz7*st1
    hz7 = -fx7*st1 + fz7*ct1

    sqpr = np.sqrt(1/p3**2 + 1/r2**2)
    cyp = np.cos(y/p3)
    syp = np.sin(y/p3)
    czr = np.cos(z1/r2)
    szr = np.sin(z1/r2)
    expr = np.exp(sqpr*x1)
    fx8 = -sqpr*expr*cyp*szr
    hy8 = expr/p3*syp*szr
    fz8 = -expr*cyp/r2*czr
    hx8 = fx8*ct1 + fz8*st1
    hz8 = -fx8*st1 + fz8*ct1

    sqpr = np.sqrt(1/p3**2 + 1/r3**2)
    cyp = np.cos(y/p3)
    syp = np.sin(y/p3)
    czr = np.cos(z1/r3)
    szr = np.sin(z1/r3)
    expr = np.exp(sqpr*x1)
    fx9 = -expr*cyp*(sqpr*z1*czr + szr/r3*(x1 + 1/sqpr))
    hy9 = expr/p3*syp*(z1*czr + x1/r3*szr/sqpr)
    fz9 = -expr*cyp*(czr*(1 + x1/r3**2/sqpr) - z1/r3*szr)
    hx9 = fx9*ct1 + fz9*st1
    hz9 = -fx9*st1 + fz9*ct1

    # Tilt-dependent amplitudes for the perpendicular-symmetry harmonics.
    a1 = a[0] + a[1]*cps
    a2 = a[2] + a[3]*cps
    a3 = a[4] + a[5]*cps
    a4 = a[6] + a[7]*cps
    a5 = a[8] + a[9]*cps
    a6 = a[10] + a[11]*cps
    a7 = a[12] + a[13]*cps
    a8 = a[14] + a[15]*cps
    a9 = a[16] + a[17]*cps

    bx = a1*hx1 + a2*hx2 + a3*hx3 + a4*hx4 + a5*hx5 + a6*hx6 + a7*hx7 + a8*hx8 + a9*hx9
    by = a1*hy1 + a2*hy2 + a3*hy3 + a4*hy4 + a5*hy5 + a6*hy6 + a7*hy7 + a8*hy8 + a9*hy9
    bz = a1*hz1 + a2*hz2 + a3*hz3 + a4*hz4 + a5*hz5 + a6*hz6 + a7*hz7 + a8*hz8 + a9*hz9

    # make the terms in the 2nd sum ("parallel" symmetry):
    # i=1 (scale q1 with s1, s2, s3); all terms carry the sin(ps) factor.
    sqqs = np.sqrt(1/q1**2 + 1/s1**2)
    cyq = np.cos(y/q1)
    syq = np.sin(y/q1)
    czs = np.cos(z2/s1)
    szs = np.sin(z2/s1)
    exqs = np.exp(sqqs*x2)
    fx1 = -sqqs*exqs*cyq*czs*sps
    hy1 = exqs/q1*syq*czs*sps
    fz1 = exqs*cyq/s1*szs*sps
    hx1 = fx1*ct2 + fz1*st2
    hz1 = -fx1*st2 + fz1*ct2

    sqqs = np.sqrt(1/q1**2 + 1/s2**2)
    cyq = np.cos(y/q1)
    syq = np.sin(y/q1)
    czs = np.cos(z2/s2)
    szs = np.sin(z2/s2)
    exqs = np.exp(sqqs*x2)
    fx2 = -sqqs*exqs*cyq*czs*sps
    hy2 = exqs/q1*syq*czs*sps
    fz2 = exqs*cyq/s2*szs*sps
    hx2 = fx2*ct2 + fz2*st2
    hz2 = -fx2*st2 + fz2*ct2

    sqqs = np.sqrt(1/q1**2 + 1/s3**2)
    cyq = np.cos(y/q1)
    syq = np.sin(y/q1)
    czs = np.cos(z2/s3)
    szs = np.sin(z2/s3)
    exqs = np.exp(sqqs*x2)
    fx3 = -sqqs*exqs*cyq*czs*sps
    hy3 = exqs/q1*syq*czs*sps
    fz3 = exqs*cyq/s3*szs*sps
    hx3 = fx3*ct2 + fz3*st2
    hz3 = -fx3*st2 + fz3*ct2

    # i=2 (scale q2 with s1, s2, s3):
    sqqs = np.sqrt(1/q2**2 + 1/s1**2)
    cyq = np.cos(y/q2)
    syq = np.sin(y/q2)
    czs = np.cos(z2/s1)
    szs = np.sin(z2/s1)
    exqs = np.exp(sqqs*x2)
    fx4 = -sqqs*exqs*cyq*czs*sps
    hy4 = exqs/q2*syq*czs*sps
    fz4 = exqs*cyq/s1*szs*sps
    hx4 = fx4*ct2 + fz4*st2
    hz4 = -fx4*st2 + fz4*ct2

    sqqs = np.sqrt(1/q2**2 + 1/s2**2)
    cyq = np.cos(y/q2)
    syq = np.sin(y/q2)
    czs = np.cos(z2/s2)
    szs = np.sin(z2/s2)
    exqs = np.exp(sqqs*x2)
    fx5 = -sqqs*exqs*cyq*czs*sps
    hy5 = exqs/q2*syq*czs*sps
    fz5 = exqs*cyq/s2*szs*sps
    hx5 = fx5*ct2 + fz5*st2
    hz5 = -fx5*st2 + fz5*ct2

    sqqs = np.sqrt(1/q2**2 + 1/s3**2)
    cyq = np.cos(y/q2)
    syq = np.sin(y/q2)
    czs = np.cos(z2/s3)
    szs = np.sin(z2/s3)
    exqs = np.exp(sqqs*x2)
    fx6 = -sqqs*exqs*cyq*czs*sps
    hy6 = exqs/q2*syq*czs*sps
    fz6 = exqs*cyq/s3*szs*sps
    hx6 = fx6*ct2 + fz6*st2
    hz6 = -fx6*st2 + fz6*ct2

    # i=3 (scale q3 with s1, s2, s3):
    sqqs = np.sqrt(1/q3**2 + 1/s1**2)
    cyq = np.cos(y/q3)
    syq = np.sin(y/q3)
    czs = np.cos(z2/s1)
    szs = np.sin(z2/s1)
    exqs = np.exp(sqqs*x2)
    fx7 = -sqqs*exqs*cyq*czs*sps
    hy7 = exqs/q3*syq*czs*sps
    fz7 = exqs*cyq/s1*szs*sps
    hx7 = fx7*ct2 + fz7*st2
    hz7 = -fx7*st2 + fz7*ct2

    sqqs = np.sqrt(1/q3**2 + 1/s2**2)
    cyq = np.cos(y/q3)
    syq = np.sin(y/q3)
    czs = np.cos(z2/s2)
    szs = np.sin(z2/s2)
    exqs = np.exp(sqqs*x2)
    fx8 = -sqqs*exqs*cyq*czs*sps
    hy8 = exqs/q3*syq*czs*sps
    fz8 = exqs*cyq/s2*szs*sps
    hx8 = fx8*ct2 + fz8*st2
    hz8 = -fx8*st2 + fz8*ct2

    sqqs = np.sqrt(1/q3**2 + 1/s3**2)
    cyq = np.cos(y/q3)
    syq = np.sin(y/q3)
    czs = np.cos(z2/s3)
    szs = np.sin(z2/s3)
    exqs = np.exp(sqqs*x2)
    fx9 = -sqqs*exqs*cyq*czs*sps
    hy9 = exqs/q3*syq*czs*sps
    fz9 = exqs*cyq/s3*szs*sps
    hx9 = fx9*ct2 + fz9*st2
    hz9 = -fx9*st2 + fz9*ct2

    # Tilt-dependent amplitudes for the parallel-symmetry harmonics.
    a1 = a[18] + a[19]*s2ps
    a2 = a[20] + a[21]*s2ps
    a3 = a[22] + a[23]*s2ps
    a4 = a[24] + a[25]*s2ps
    a5 = a[26] + a[27]*s2ps
    a6 = a[28] + a[29]*s2ps
    a7 = a[30] + a[31]*s2ps
    a8 = a[32] + a[33]*s2ps
    a9 = a[34] + a[35]*s2ps

    bx = bx + a1*hx1 + a2*hx2 + a3*hx3 + a4*hx4 + a5*hx5 + a6*hx6 + a7*hx7 + a8*hx8 + a9*hx9
    by = by + a1*hy1 + a2*hy2 + a3*hy3 + a4*hy4 + a5*hy5 + a6*hy6 + a7*hy7 + a8*hy8 + a9*hy9
    bz = bz + a1*hz1 + a2*hz2 + a3*hz3 + a4*hz4 + a5*hz5 + a6*hz6 + a7*hz7 + a8*hz8 + a9*hz9

    return bx, by, bz
5729e0999ddefaf2ee39ad9588009cc58f983130
3,646,062
def raan2ltan(date, raan, type="mean"):
    """Conversion to Local Time at Ascending Node (LTAN).

    Args:
        date (Date): Date of the conversion
        raan (float): RAAN in radians, in EME2000
        type (str): Either "mean" (mean-sun approximation) or "true"
            (true sun position, propagated for ``date``)
    Return:
        float: LTAN in hours, in [0, 24)
    Raises:
        ValueError: If ``type`` is neither "mean" nor "true".
    """
    if type == "mean":
        # Angle between the ascending node and the mean sun direction.
        mean_solar_angle = raan - _mean_sun_raan(date)
        ltan = (12 + mean_solar_angle * 12 / np.pi) % 24
    elif type == "true":
        # Right ascension of the true sun at the given date.
        theta_sun = (
            get_body("Sun")
            .propagate(date)
            .copy(frame="EME2000", form="spherical")
            .theta
        )
        ltan = ((24 * (raan - theta_sun) / (2 * np.pi)) + 12) % 24
    else:  # pragma: no cover
        # Fixed typo in the error message ("Unknwon" -> "Unknown").
        raise ValueError("Unknown Local Time type : {}".format(type))

    return ltan
90956203d7b5787f5d49941f89b7871d021e5e74
3,646,063
def _extract_bbox_annotation(prediction, b, obj_i):
  """Constructs a COCO-format bounding box annotation dict.

  Args:
    prediction: dict of groundtruth arrays keyed by field name.
    b: batch index of the image.
    obj_i: index of the object within the image.

  Returns:
    dict with COCO annotation fields (id, image_id, category_id, bbox,
    iscrowd, area, segmentation).
  """
  bbox = _denormalize_to_coco_bbox(
      prediction['groundtruth_boxes'][b][obj_i, :],
      prediction['eval_height'][b],
      prediction['eval_width'][b])
  if 'groundtruth_area' in prediction:
    area = float(prediction['groundtruth_area'][b][obj_i])
  else:
    # Using the box area to replace the polygon area. This value will not
    # affect real evaluation but may fail the unit test.
    area = bbox[2] * bbox[3]
  return {
      'id': b * 1000 + obj_i,  # place holder of annotation id.
      'image_id': int(prediction['source_id'][b]),  # source_id,
      'category_id': int(prediction['groundtruth_classes'][b][obj_i]),
      'bbox': bbox,
      'iscrowd': int(prediction['groundtruth_is_crowd'][b][obj_i]),
      'area': area,
      'segmentation': [],
  }
c79a066b719e33704d50128f4d01420af0be27ce
3,646,064
def value_and_entropy(emax, F, bw, grid_size=1000):
    """
    Compute the value function and entropy levels for a θ path increasing
    until it reaches the specified target entropy value.

    Parameters
    ==========
    emax: scalar
        The target entropy value

    F: array_like
        The policy function to be evaluated

    bw: str
        A string specifying whether the implied shock path follows best
        or worst assumptions. The only acceptable values are 'best' and
        'worst'.

    Returns
    =======
    df: pd.DataFrame
        A pandas DataFrame containing the value function and entropy
        values up to the emax parameter. The columns are 'value' and
        'entropy'.
    """
    base_grid = np.linspace(1e-8, 1000, grid_size)
    if bw == 'worst':
        thetas = 1 / base_grid
    else:
        thetas = -1 / base_grid

    records = pd.DataFrame(index=thetas, columns=('value', 'entropy'))

    # Sweep θ until the entropy budget emax is exhausted.
    for theta in thetas:
        records.loc[theta] = evaluate_policy(theta, F)
        if records.loc[theta, 'entropy'] >= emax:
            break

    # Rows past the break point were never filled in; drop them.
    return records.dropna(how='any')
c9b215d91c6a0affbb4ad8f344614a1f2b6b9a13
3,646,065
def _biorthogonal_window_loopy(analysis_window, shift):
    """Compute the biorthogonal synthesis window for an analysis window.

    This version of the synthesis calculation is as close as possible to the
    Matlab implementation in terms of variable names. The results are equal.

    The implementation follows equation A.92 in
        Krueger, A. Modellbasierte Merkmalsverbesserung zur robusten
        automatischen Spracherkennung in Gegenwart von Nachhall und
        Hintergrundstoerungen
        Paderborn, Universitaet Paderborn, Diss., 2011, 2011
    """
    fft_size = len(analysis_window)
    assert np.mod(fft_size, shift) == 0
    number_of_shifts = fft_size // shift

    # Accumulate, per synthesis position within one shift, the squared
    # analysis-window samples of all overlapping frames.
    sum_of_squares = np.zeros(shift)
    for synthesis_index in range(shift):
        for block in range(number_of_shifts + 1):
            analysis_index = synthesis_index + block * shift
            # NOTE(review): the `+ 1` excludes the very last window sample;
            # kept as-is to match the reference implementation.
            if analysis_index + 1 < fft_size:
                sum_of_squares[synthesis_index] += \
                    analysis_window[analysis_index] ** 2

    tiled = np.kron(np.ones(number_of_shifts), sum_of_squares)

    # Why? Line created by Hai, Lukas does not know, why it exists.
    # (Divide by fft_size then multiply it back — a no-op kept for parity.)
    synthesis_window = analysis_window / tiled / fft_size
    synthesis_window *= fft_size
    return synthesis_window
5fc5dd23cb0b01af93a02812210d3b44b2fe84ab
3,646,067
def depthwise(data, N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW, block_size, use_bias=False):
    """
    Depthwise 5-D convolutions, every channel has its filter-kernel

    Args:
        data (list): a list, the size is 3 if use_bias else the size is 2;
              data[0] tvm.tensor.Tensor of type float16, shape 5D(N, CI//C0, C0, H, W)
              data[1] tvm.tensor.Tensor of type float16, shape 6D(CI//(CI//C0)//C0, KH, KW, k_ch*CI//C0, C0, C0)
              data[2] tvm.tensor.Tensor of type float16, shape 5D(N, CI*k_ch//C0, OH, OW, C0)
        N (int): batchsize
        H (int): height of featureMap
        W (int): width of featureMap
        CI (int): channel of featureMap
        k_ch (int): channel of Filter
        KH (int): height of Filter
        KW (int): width of Filter
        PAD_H (int): padding pixels in vertical direction
        PAD_W (int): padding pixels in horizontal direction
        SH (int): stride in vertical direction
        SW (int): stride in horizontal direction
        block_size (int): a int var also called "C0"
        use_bias (bool): If True need add bias, else bias equal to zero.

    Returns:
        akg.tvm.Tensor of same type as data, shape is 5D(N, CI*k_ch//C0, OH, OW, C0)
    """
    # Only float16 feature maps are supported by this schedule.
    check_list = ["float16"]
    dtype = data[0].dtype
    if not (dtype in check_list):
        raise RuntimeError("depthwise only support %s while dtype is %s" % (",".join(check_list), dtype))
    for i in range(len(data)):
        shape = data[i].shape
        utils.check_shape(shape)

    conv_dtype = 'float16'

    # One group per C0 block of input channels; depthwise means k_ch == 1
    # so CO == CI and each group convolves exactly one channel block.
    group = CI // block_size
    CO = CI * k_ch
    assert k_ch == 1
    assert CO % group == 0 and CI % group == 0
    assert CO % block_size == 0 and (CI // group) % block_size == 0

    clear = False  # if clear, use auto tiling

    # (N, CI, H, W) -> (N, C0, H, W, C1)
    A = data[0]
    # (CO, CI // group, KH, KW) -> (CI // group // block * KH * KW, CO // block, block, block)
    B = data[1]
    if use_bias:
        bias = data[2]
        bias_name = bias.op.name
    else:
        bias = None
        bias_name = "bias_name"

    # Look up hand-tuned tiling parameters; fall back to defaults + auto tiling.
    key = [N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW]
    hash_key = str((tuple(key)))
    if hash_key in depthwise_set_dim_map:
        cutH, cutCo, cutM, cutK, cutN = depthwise_set_dim_map[hash_key]
    else:
        # raise RuntimeError("other can not find cutH, cutCo, cutM, cutK, cutN")
        cutH = (KH - 1) * KH + 1
        cutCo = 16
        cutM = 16
        cutK = 16 * KH * KW
        cutN = 16
        clear = True  # use auto tiling

    # Standard convolution output size formula.
    OH = (H + 2 * PAD_H - KH) // SH + 1
    OW = (W + 2 * PAD_W - KW) // SW + 1

    # Reduction axes: channel block, kernel height/width, intra-block channel.
    kc1 = akg.tvm.reduce_axis((0, CI // block_size // group), name="kc1")
    kh = akg.tvm.reduce_axis((0, KH), name="kh")
    kw = akg.tvm.reduce_axis((0, KW), name="kw")
    kc0 = akg.tvm.reduce_axis((0, block_size), name="kc0")

    p_top, p_bottom, p_left, p_right = PAD_H, PAD_H, PAD_W, PAD_W

    output_name = "output"
    output_bias_name = "output_bias"

    # Scheduling pragmas consumed by the AKG/Ascend backend.
    attr = {
        "pragma_conv_kernel_n": CO,
        "pragma_conv_kernel_h": KH,
        "pragma_conv_kernel_w": KW,
        "pragma_conv_padding_top": p_top,
        "pragma_conv_padding_bottom": p_bottom,
        "pragma_conv_padding_left": p_left,
        "pragma_conv_padding_right": p_right,
        "pragma_conv_bypass_l1": 1,
        "pragma_conv_stride_h": SH,
        "pragma_conv_stride_w": SW,
        "pragma_conv_fm_n": N,
        "pragma_conv_fm_c": CI,
        "pragma_conv_fm_h": H,
        "pragma_conv_fm_w": W,
        "pragma_conv_dilation_h": 1,
        "pragma_conv_dilation_w": 1,
        "feature": A.op.name,
        "filter": B.op.name,
        "bias": bias_name,
        "res": output_name,
        "res_bias": output_bias_name
    }
    if not clear:
        # Manual tiling sizes from the set_dim map.
        attr["pragma_conv_h_cut"] = cutH
        attr["pragma_conv_w_cut"] = W + 2 * PAD_W
        attr["pragma_conv_co_cut"] = cutCo
        attr["pragma_conv_m_cut"] = cutM
        attr["pragma_conv_k_cut"] = cutK
        attr["pragma_conv_n_cut"] = cutN

    # Padded depthwise convolution: out-of-bounds taps contribute zero.
    C = akg.tvm.compute(
        (N, CO // block_size, OH, OW, block_size),
        lambda n, c1, h, w, c0: akg.lang.ascend.mmad(
            akg.tvm.if_then_else(
                akg.tvm.any((h * SH + kh) < p_top, (h * SH + kh) > (H + p_top - 1),
                            (w * SW + kw) < p_left, (w * SW + kw) > (W + p_left - 1)),
                akg.tvm.const(0.0, conv_dtype),
                A[n, c1 // ((CO // block_size) // group) * ((CI // block_size) // group) + kc1,
                  (h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
            # A[n, kc1, (h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
            * B[(kc1 * KH + kh) * KW + kw, c1, c0, kc0],
            axis=[kc1, kh, kw, kc0]),
        attrs=attr, name=output_name)

    if use_bias:
        # Broadcast-add the per-channel bias over the spatial dimensions.
        out = akg.tvm.compute(
            C.shape,
            lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias[0, c1, 0, 0, c0],
            name=output_bias_name)
    else:
        out = C

    return out
c47ef1cc929ecc9b26550f1eabe1e52073b82028
3,646,068
def FindDescendantComponents(config, component_def):
  """Return a list of all nested components under the given component."""
  # A descendant path always starts with "<parent path>>" (case-insensitive).
  prefix = component_def.path.lower() + '>'
  return list(filter(
      lambda cd: cd.path.lower().startswith(prefix),
      config.component_defs))
f9734442bbe3a01460970b3521827dda4846f448
3,646,070
def _get_source(loader, fullname):
    """
    This method is here as a replacement for SourceLoader.get_source.
    That method returns unicode, but we prefer bytes.
    """
    path = loader.get_filename(fullname)
    try:
        source_bytes = loader.get_data(path)
    except OSError:
        # Mirror importlib's behavior: surface the failure as ImportError.
        raise ImportError('source not available through get_data()',
                          name=fullname)
    return source_bytes
af43b79fa1d90abbbdb66d7d1e3ead480e27cdd1
3,646,071
from pathlib import Path
def get_source_files(sf: Path) -> list:
    """
    Search for files ending in .FLAC/.flac and add them to a list.

    Args:
        sf (str/pathlib.Path): Folder location to search for files.

    Returns:
        list: List of file locations matching the case-insensitive
        ``.flac`` extension.
    """
    # Case-insensitive match of a ".flac" suffix anchored to the end.
    flac_pattern = r".+\.[fF][lL][aA][cC]$"
    return re_file_search.get_list(sf, flac_pattern)
3828d81528b144367c3d5a74ee212caf2a01b111
3,646,072
import io
def extract_features(clip):
    """
    Feature extraction from an audio clip.

    Args:
        clip: raw WAV bytes of the clip.

    Returns:
        A list (one entry per frame) of feature-value vectors.
    """
    sample_rate, samples = wav_read(io.BytesIO(clip))
    if samples.ndim > 1:
        # Multi-channel audio: keep only the first channel.
        samples = samples[:, 0]

    frames = frame_breaker.get_frames(samples, sample_rate=sample_rate)
    encoded_frames = [np2base64(frame, sample_rate) for frame in frames]

    # One feature vector per encoded frame.
    return [
        [feat.feature_value
         for feat in extract_feats_for_segment(segment).features]
        for segment in encoded_frames
    ]
13c6c18be92067847eaabada17952a0dab142a3f
3,646,073
def comparison_func(target: TwoQubitWeylDecomposition,
                    basis: TwoQubitBasisDecomposer,
                    base_fid: float,
                    comp_method: str):
    """
    Decompose traces for arbitrary angle rotations.

    This assumes that the tq angles go from highest to lowest.

    Args:
        target: Weyl decomposition of the target two-qubit unitary.
        basis: basis-gate decomposer (used only by the 'fid' method).
        base_fid: base gate fidelity used to derive the depolarizing parameter.
        comp_method: one of 'fid', 'arb_fid', 'arb_total', 'arb_total_quad',
            'arb_total_sqrt', 'total_angle'.

    Returns:
        List of four comparison values (one per candidate decomposition depth).

    Raises:
        ValueError: If ``comp_method`` is not one of the supported methods.
            (The original fell through and raised a confusing NameError.)
    """
    dep_param = (4 * base_fid - 1) / 3

    def _expected_fids(traces, exponents):
        # Expected fidelity for each trace, discounted by the depolarizing
        # parameter raised to the method-specific exponent.
        return [((abs(tr) ** 2 - 1) * dep_param ** e + 1) / 16
                for tr, e in zip(traces, exponents)]

    # Cumulative Weyl angles, shared by all 'arb_total*' methods.
    def _total_angles():
        return [
            0,
            abs(target.a),
            abs(target.a) + abs(target.b),
            abs(target.a) + abs(target.b) + abs(target.c),
        ]

    if comp_method == 'fid':
        traces = fixed_traces(target, basis)
        values = _expected_fids(traces, range(len(traces)))
    elif comp_method == 'arb_fid':
        traces = arb_traces(target)
        values = _expected_fids(traces, range(len(traces)))
    elif comp_method == 'arb_total':
        traces = arb_traces(target)
        values = _expected_fids(traces, [a / np.pi for a in _total_angles()])
    elif comp_method == 'arb_total_quad':
        traces = arb_traces(target)
        values = _expected_fids(traces,
                                [(a / np.pi) ** 2 for a in _total_angles()])
    elif comp_method == 'arb_total_sqrt':
        traces = arb_traces(target)
        values = _expected_fids(traces,
                                [np.sqrt(a / np.pi) for a in _total_angles()])
    elif comp_method == 'total_angle':
        # negate to find smallest total angle (uses max later)
        values = [-10, -10, -10,
                  -abs(target.a) - abs(target.b) - abs(target.c)]
    else:
        raise ValueError(
            "Unknown comp_method: {!r}".format(comp_method))

    return values
a222138a8c3a01aaf6c3657c6bbbaca284332b76
3,646,074
from bs4 import BeautifulSoup
def create_bs4_obj(connection):
    """Create and return a BeautifulSoup object from the given markup."""
    # html.parser keeps us on the stdlib parser (no lxml dependency).
    return BeautifulSoup(connection, 'html.parser')
b3956b13756e29cd57a0e12457a2d665959fb03d
3,646,075
def __create_dataframe_from_cassandra(query, con):
    """
    Query Cassandra and build a pandas DataFrame from the result rows.

    Parameter
    ---------
    query : String - Cassandra query
    con : cassandra connection object

    Return
    ------
    pd.DataFrame - DataFrame built from the query's result rows
    """
    rows = con.execute(query)
    return pd.DataFrame(list(rows))
af839ce372af100b4a496350ed6e21ae12b82444
3,646,076
from typing import List
from typing import Tuple
from typing import Optional
import ssl
from typing import Iterable
from typing import Union
async def post(
        url: str,
        content: bytes,
        *,
        headers: List[Tuple[bytes, bytes]] = None,
        loop: Optional[AbstractEventLoop] = None,
        cafile: Optional[str] = None,
        capath: Optional[str] = None,
        cadata: Optional[str] = None,
        ssl_context: Optional[ssl.SSLContext] = None,
        protocols: Iterable[str] = DEFAULT_PROTOCOLS,
        ciphers: Iterable[str] = DEFAULT_CIPHERS,
        options: Iterable[int] = DEFAULT_OPTIONS,
        chunk_size: int = -1,
        connect_timeout: Optional[Union[int, float]] = None,
        middleware: Optional[List[HttpClientMiddlewareCallback]] = None
) -> Optional[bytes]:
    """Issue a POST request and return the raw response body.

    Args:
        url (str): The url.
        content (bytes): The body content.
        headers (List[Tuple[bytes, bytes]], optional): Extra headers.
        loop (Optional[AbstractEventLoop], optional): The asyncio event loop.
        cafile (Optional[str], optional): Path to a file of concatenated CA
            certificates in PEM format.
        capath (Optional[str], optional): Path to a directory of CA
            certificates in PEM format.
        cadata (Optional[str], optional): PEM-encoded certificate string or
            DER-encoded certificate bytes.
        ssl_context (Optional[ssl.SSLContext], optional): An ssl context used
            instead of generating one from the certificates.
        protocols (Iterable[str], optional): The supported protocols.
        ciphers (Iterable[str], optional): The supported ciphers.
        options (Iterable[int], optional): The ssl.SSLContext.options.
        chunk_size (int, optional): Size of each body chunk, or -1 to send the
            body as a single chunk.
        connect_timeout (Optional[Union[int, float]], optional): Seconds to
            wait for the connection.
        middleware (Optional[List[HttpClientMiddlewareCallback]], optional):
            Optional middleware.

    Raises:
        HTTPError: If the status code is not ok.
        asyncio.TimeoutError: If the connect times out.

    Returns:
        Optional[bytes]: The response body.
    """
    # Stream the body in chunks; an empty/None content means no body at all.
    if content:
        body = bytes_writer(content, chunk_size)
    else:
        body = None

    client = HttpClient(
        url,
        method='POST',
        headers=headers,
        body=body,
        loop=loop,
        cafile=cafile,
        capath=capath,
        cadata=cadata,
        ssl_context=ssl_context,
        protocols=protocols,
        ciphers=ciphers,
        options=options,
        connect_timeout=connect_timeout,
        middleware=middleware
    )
    async with client as response:
        await response.raise_for_status()
        return await response.raw()
8aa95bb43f3937ea09ffa0a55b107bf367ffa5bc
3,646,078
import toml
def parse_config_file(path):
    """Parse a TOML config file and return its contents as a dictionary.

    If the file is missing, unreadable, or contains invalid TOML, the file
    is created if absent (best-effort) and an empty dict is returned.

    :param path: path to the TOML configuration file
    :return: dict of parsed configuration (empty on failure)
    """
    try:
        with open(path, 'r') as f:
            return toml.loads(f.read())
    except (OSError, toml.TomlDecodeError):
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. Preserve the original best-effort
        # behavior: touch the file so it exists, then return empty config.
        open(path, 'a').close()
        return {}
599164f023c0db5bffa0b6c4de07654daae1b995
3,646,079
def wordnet_pos(tag):
    """
    Transforms nltk part-of-speech tag strings to wordnet part-of-speech
    tag strings.

    :param tag: nltk part-of-speech tag string
    :type: str
    :return: the corresponding wordnet tag, defaulting to the noun tag
        when the first letter of ``tag`` has no mapping
    :type: wordnet part-of-speech tag string
    """
    # Bug fix: nltk_wordnet_pos_dict is used as a mapping elsewhere in this
    # function (indexed with ["N"]), so getattr() could never find its keys
    # and always returned the default. Use dict-style lookup instead.
    return nltk_wordnet_pos_dict.get(tag[0], nltk_wordnet_pos_dict["N"])
66b915cb63036553d765af5474d690ea4e0f3859
3,646,080
import requests
def call_telegram_api(function: str, data: dict):
    """Make a raw call to Telegram API."""
    # The bot token is embedded in the URL path per the Telegram Bot API.
    endpoint = f'https://api.telegram.org/bot{TELEGRAM_TOKEN}/{function}'
    return requests.post(endpoint, data=data)
547626545942b290dc64cd4f1d75277205751eaf
3,646,081
def test_POMDP(POMDP, policy, test_data, status):
    """Simulate a POMDP policy over the given observation sequence.

    At each step the action whose alpha-vector maximizes the expected value
    under the current belief is taken, then the belief is updated from the
    next observation via Bayes' rule. The hidden state is held fixed.

    Returns the list of actions taken (one per step, ``len(test_data)`` total).
    """
    p = POMDP
    horizon = len(test_data)
    n_states = len(p.states)

    belief = p.init_belief
    state = status
    total_reward = 0

    action_set = []
    state_set = [state]
    observation_set = ["null"]

    # Snapshot the policy key order once; the dict is never modified.
    policy_keys = list(policy.keys())

    step = 0
    while True:
        # Pick the action of the alpha-vector with the highest expected value.
        scores = [np.dot(policy[key][:n_states], belief)
                  for key in policy_keys]
        best_key = policy_keys[int(np.argmax(scores))]
        action = policy[best_key][n_states]
        action_set.append(action)

        # Accumulate the (unreturned) reward for this step.
        total_reward = total_reward + p.reward_func(state=state, action=action)

        step += 1
        if step >= horizon:
            break

        # The hidden state does not change in this simulation.
        state_set.append(state)

        observation = test_data.iloc[step]
        observation_set.append(observation)

        # Bayesian belief update: P(s') ∝ P(o|s',a) * Σ_s P(s'|s,a) b(s)
        belief = [
            p.observ_func(observation, s_new, action) * np.sum([
                p.trans_func(s_new, s_old, action) *
                belief[p.states.index(s_old)]
                for s_old in p.states
            ])
            for s_new in p.states
        ]
        belief = np.multiply(belief, 1 / sum(belief))

    return action_set
0f2dfe7c18d254ca0b2953b11aaac2386d4fe920
3,646,082
def courses_to_take(input):
    """Return an order in which all courses can be taken, or None if none exists.

    Each key of ``input`` is a course mapped to the list of its prerequisites.
    Returns a list covering every course such that each course appears after
    all of its prerequisites, or ``None`` when a dependency cycle makes that
    impossible.

    Bug fix: the original deleted entries from ``course_with_deps`` while
    iterating ``course_with_deps.items()``, which raises
    ``RuntimeError: dictionary changed size during iteration`` for any input
    that has at least one dependency. We now iterate over a snapshot.

    Time complexity: O(n * m) over courses n and dependency edges m
    (each satisfied course rescans the pending map once).
    Space complexity: O(n + m) for the pending-dependency sets and result.
    """
    # Normalize the dependencies, using a set to track them efficiently.
    pending = {}
    to_take = []
    for course, deps in input.items():
        if deps:
            pending[course] = set(deps)
        else:
            # Course with no dependencies: candidate to start the search.
            to_take.append(course)

    result = []
    while to_take:
        course = to_take.pop()
        result.append(course)

        # Iterate over a snapshot so entries can be deleted safely.
        for prereq_course, prereq_deps in list(pending.items()):
            if course in prereq_deps:
                prereq_deps.remove(course)
                if not prereq_deps:
                    # All prerequisites satisfied: queue the course.
                    to_take.append(prereq_course)
                    del pending[prereq_course]

    # If some courses were never unblocked, there is a cycle.
    return result if len(result) == len(input) else None
eb0fe7271497fb8c5429360d37741d20f691ff3c
3,646,084
def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
    """Takes font and list of glyph lists (must be sorted by glyph id),
    and returns two things:
    - Combined glyph list,
    - If values_lst is None, return input glyph lists, but
      padded with None when a glyph was missing in a list.
      Otherwise, return values_lst list-of-list, padded with
      None to match combined glyph lists.
    """
    # Build a per-list lookup: plain sets when only membership matters,
    # glyph->value dicts when values need to be carried through.
    if values_lst is None:
        lookups = [set(glyphs) for glyphs in lst]
    else:
        lookups = [dict(zip(glyphs, values))
                   for glyphs, values in zip(lst, values_lst)]

    merged = set().union(*lookups) if lookups else set()

    # Combined order follows the font's glyph-id order.
    sortKey = font.getReverseGlyphMap().__getitem__
    order = sorted(merged, key=sortKey)

    # Make sure all input glyph lists were in proper (glyph-id) order.
    assert all(sorted(glyphs, key=sortKey) == glyphs for glyphs in lst)

    if values_lst is None:
        padded = [[glyph if glyph in lookup else default
                   for glyph in order]
                  for lookup in lookups]
    else:
        assert len(lst) == len(values_lst)
        padded = [[lookup[glyph] if glyph in lookup else default
                   for glyph in order]
                  for lookup in lookups]
    return order, padded
cc671625bcaa2016cb8562c5727d7afa624699a9
3,646,085
def sig_for_ops(opname):
    """sig_for_ops(opname : str) -> List[str]

    Returns signatures for operator special functions (__add__ etc.)

    Raises:
        ValueError: If ``opname`` is not in any of the known operator sets.
    """
    # we have to do this by hand, because they are hand-bound in Python
    assert opname.endswith('__') and opname.startswith('__'), "Unexpected op {}".format(opname)
    name = opname[2:-2]
    if name in binary_ops:
        return ['def {}(self, other: Any) -> Tensor: ...'.format(opname)]
    elif name in comparison_ops:
        # unsafe override https://github.com/python/mypy/issues/5704
        return ['def {}(self, other: Any) -> Tensor: ... # type: ignore'.format(opname)]
    elif name in unary_ops:
        return ['def {}(self) -> Tensor: ...'.format(opname)]
    elif name in to_py_type_ops:
        if name in {'bool', 'float', 'complex'}:
            tname = name
        elif name == 'nonzero':
            tname = 'bool'
        else:
            tname = 'int'
        if tname in {'float', 'int', 'bool', 'complex'}:
            tname = 'builtins.' + tname
        return ['def {}(self) -> {}: ...'.format(opname, tname)]
    else:
        # Narrowed from a generic `Exception` so callers can catch it
        # without masking unrelated failures.
        raise ValueError("unknown op", opname)
7f5850c5719ed631d4aabc22b757969d1161eee2
3,646,086
def lstm2(hidden_nodes, steps_in=5, steps_out=1, features=1):
    """
    A custom two-layer stacked LSTM model.

    :param hidden_nodes: number of hidden nodes per LSTM layer
    :param steps_in: number of (look back) time steps for each sample input
    :param steps_out: number of (look front) time steps for each sample output
    :param features: number of features for each sample input (e.g. 1 for
        univariate or 2+ for multivariate time series)
    :return: compiled stacked LSTM model
    """
    # Both LSTM layers use the default tanh activation; the output Dense
    # layer is linear (no activation).
    layers = [
        LSTM(hidden_nodes, input_shape=(steps_in, features),
             return_sequences=True),
        LSTM(hidden_nodes),
        Dense(steps_out),
    ]

    model = Sequential()
    for layer in layers:
        model.add(layer)

    model.compile(optimizer='adam', loss='mse')
    return model
c84c1d4a3cb31ca74fd3d135a9c48c72f5f6c715
3,646,088
def build_carousel_scroller(items):
    """Build a horizontally scrollable carousel of the given widgets.

    Usage:
        item_layout = widgets.Layout(height='120px', min_width='40px')
        items = [pn.Row(a_widget, layout=item_layout, margin=0,
                        background='black')
                 for a_widget in single_pf_output_panels]
        build_carousel_scroller(single_pf_output_panels)

    Returns a VBox containing a label and the scrolling carousel.
    """
    # NOTE(review): pn.widgets.Layout/Box/VBox/Label match the ipywidgets
    # API rather than panel's — confirm that `pn` aliases ipywidgets here.
    scroller_layout = pn.widgets.Layout(overflow_x='scroll',
                                        border='3px solid black',
                                        width='1024px',
                                        height='',
                                        flex_flow='row',
                                        display='flex')
    carousel = pn.widgets.Box(children=items, layout=scroller_layout)
    label = pn.widgets.Label('Scroll horizontally:')
    return pn.widgets.VBox([label, carousel])
2c48ef11de7647320833c74e5dc155365c0ae847
3,646,089
def base_round(x, base):
    """
    Round ``x`` to the nearest multiple of ``base``.

    Uses Python's ``round()``, so exact midpoints follow banker's rounding
    (ties go to the even multiple).

    Parameters
    ----------
    x : int
        Value to be rounded
    base : int
        Base for x to be rounded to

    Returns
    -------
    int
        The rounded value
    """
    nearest_multiple = round(x / base)
    return base * nearest_multiple
e5b1a1b81c7baf990b7921fe27a20075c0305935
3,646,091
def _update_schema_1_to_2(table_metadata, table_path):
    """
    Given a `table_metadata` of version 1, update it (in place) to version 2.

    :param table_metadata: Table Metadata
    :param table_path: [String, ...]
    :return: Table Metadata
    """
    table_metadata.update(path=tuple(table_path), schema_version=2)
    # Version 2 dropped table_mappings; remove it if present.
    table_metadata.pop('table_mappings', None)
    return table_metadata
6b0c8bc72100cceeb1b9da5552e53bc3c9bad3fa
3,646,092
def train_cnn_7layer(data, file_name, params, num_epochs=10, batch_size=256, train_temp=1,
                     init=None, lr=0.01, decay=1e-5, momentum=0.9, activation="relu",
                     optimizer_name="sgd"):
    """
    Train a 7-layer cnn network for MNIST and CIFAR (same as the cnn model in Clever)
    mnist: 32 32 64 64 200 200
    cifar: 64 64 128 128 256 256

    :param data: dataset object exposing train_data/train_labels and
        validation_data/validation_labels (assumed NHWC arrays — confirm)
    :param file_name: path to save the trained model, or None to skip saving
    :param params: six layer widths (4 conv filters + 2 dense sizes), coerced to int
    :param train_temp: softmax temperature applied to the logits in the loss
    :param init: optional path of initial weights to load
    :param activation: Keras activation name, or "arctan" for tf.atan
    :param optimizer_name: "sgd" or "adam"
    :return: dict with the trained 'model' and its training 'history'
    :raises ValueError: if optimizer_name is not recognised
    """

    def _activation_layer():
        # "arctan" has no Keras Activation entry, so wrap tf.atan in a Lambda.
        return Lambda(tf.atan) if activation == "arctan" else Activation(activation)

    model = Sequential()
    print("training data shape = {}".format(data.train_data.shape))
    params = [int(p) for p in params]

    # define model structure: conv-conv-pool, conv-conv-pool, dense head
    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(_activation_layer())
    model.add(Conv2D(params[1], (3, 3)))
    model.add(_activation_layer())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(params[2], (3, 3)))
    model.add(_activation_layer())
    model.add(Conv2D(params[3], (3, 3)))
    model.add(_activation_layer())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(_activation_layer())
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(_activation_layer())
    model.add(Dense(200))

    # load initial weights when given (fixed: was `init != None`)
    if init is not None:
        model.load_weights(init)

    # loss: cross entropy between prediction and true label, with temperature
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted / train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None,
                         decay=decay, amsgrad=False)
    else:
        # previously fell through and raised NameError on `optimizer` below
        raise ValueError("unknown optimizer_name: {}".format(optimizer_name))

    # compile the Keras model with the specified loss and optimizer
    model.compile(loss=fn, optimizer=optimizer, metrics=['accuracy'])
    model.summary()
    print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))

    # run training with given dataset, and print progress
    history = model.fit(data.train_data, data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data, data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file (fixed: was `file_name != None`)
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)

    return {'model': model, 'history': history}
c2d020b4390aca6bb18de6f7e83c801475ac1a03
3,646,093
def validate():
    """
    Check that every region has a full 52-week series for each season year.

    Reads two files:
      * ``data/SeasonClustersFinal`` — whitespace-separated ``year season``
        pairs (loaded into dicts but not otherwise used here).
      * ``processedDatafileName`` (module global) — CSV with two header
        lines, columns: ?, region, year, week, infection-rate, ...
        TODO confirm exact column layout against the file writer.

    Weeks <= 20 are attributed to the previous year's season cycle.
    Prints any (region, year) in 1998..2018 whose series is not exactly
    52 entries long.

    :return: True if all regions/years are complete, else False.
    """
    input_file = processedDatafileName
    clusters_file = 'data/SeasonClustersFinal'
    seasonDic = {}
    allSeasons = {}
    # Load the season clusters file (year -> season; set of seen seasons).
    # NOTE(review): neither dict is consulted below — looks vestigial.
    for line in open(clusters_file):
        arr = line.strip().split()
        year = int(arr[0])
        season = int(arr[1])
        seasonDic[year] = season
        allSeasons[season] = True
    # indexed by region
    all_data = {}
    in_f = open(input_file)
    # Skip the two header lines of the processed data file.
    in_f.readline()
    in_f.readline()
    for line in in_f:
        raw = line.strip().split(',')
        region = raw[1].strip()
        year = int(raw[2].strip())
        week = int(raw[3].strip())
        ## upto 20th week belongs to last years cycle
        if(week <= 20):
            year -= 1
        infection = raw[4].strip()
        # Non-numeric infection values (e.g. missing data markers) count as 0.
        # `is_number` is a module-level helper — not visible here.
        inf = 0
        if is_number(infection):
            inf = float(infection)
        if region not in all_data:
            all_data[region]={}
        if year not in all_data[region]:
            all_data[region][year] = []
        all_data[region][year].append(inf)
    isValid = True
    region_order = []
    # Walk regions and their years in sorted order; report incomplete years.
    for region, raw in all_data.items():
        region_order.append(region)
        keylist = list(raw.keys())
        keylist.sort()
        for year in keylist:
            # Only the 1998..2018 range is validated; the last/partial
            # season outside this window is intentionally ignored.
            if year>=1998 and year<=2018 and len(raw[year]) != 52:
                print(region, year)
                isValid = False
    return isValid
883d21b656730c3e26a94ba8581107f359b337b4
3,646,094
def get_registry():  # noqa: E501
    """Get registry information

    Get information about the registry # noqa: E501

    :rtype: Registry
    """
    # Any failure while counting the DB collections is reported as a 500
    # with an Error payload instead of propagating.
    try:
        payload = Registry(
            name="Challenge Registry",
            description="A great challenge registry",
            user_count=DbUser.objects.count(),
            org_count=DbOrg.objects.count(),
            challenge_count=DbChallenge.objects.count(),
        )
        http_status = 200
    except Exception as exc:
        http_status = 500
        payload = Error("Internal error", http_status, str(exc))
    return payload, http_status
7798da55bee2ad1d6edce37f7c00e4597412491d
3,646,095
def get_fraction_vaccinated(model, trajectories, area=None, include_recovered=True):
    """Get fraction of individuals that are vaccinated or immune (by area).

    Parameters
    ----------
    model : amici.model
        Amici model which should be evaluated.
    trajectories : pd.DataFrame
        Trajectories of the model simulation.
    area : str, optional
        Area name to restrict the computation to (None = all areas).
    include_recovered : bool
        If True, recovered individuals are counted as well.
        NOTE(review): currently unused by the implementation — kept for
        interface compatibility; confirm intended behavior.

    Returns
    -------
    percentage_vaccinated: pd.Series
        Trajectories of the fraction that is vaccinated or immune.
    """
    vaccinated_cols = get_vaccinated_model(model, area=area)
    alive_cols = get_alive_model(model, area=area)

    total_vaccinated = trajectories[vaccinated_cols].sum(axis=1)
    # Fixed: the old `sus_inf = vaccinated = df.sum(axis=1)` chained
    # assignment clobbered the `vaccinated` column list with a Series for
    # no reason; use a single clearly named total instead.
    total_alive = trajectories[alive_cols].sum(axis=1)

    return total_vaccinated / total_alive
f11cbeb737c8592528441293b9fd25fed4bee37f
3,646,096
def test_function(client: Client) -> str:
    """
    Perform a test-connectivity check by issuing a simple GET request.

    :param client: client object which is used to get response from api
    :return: 'ok' on success; the underlying http_request raises on any
        connection error, surfacing it to the caller
    """
    suffix = URL_SUFFIX['TEST_MODULE']
    client.http_request(method='GET', url_suffix=suffix)
    return 'ok'
8c773fd9a87a45270157f682cb3229b83ba4a9e0
3,646,097
import pkgutil
import doctest


def load_tests(loader, tests, ignore):
    """Create tests from all docstrings by walking the package hierarchy."""
    # Walk every (sub)module of the rowan package and collect its doctests,
    # exposing `rowan` itself in each doctest's globals.
    walker = pkgutil.walk_packages(rowan.__path__, rowan.__name__ + ".")
    for _finder, module_name, _is_pkg in walker:
        suite = doctest.DocTestSuite(module_name, globs={"rowan": rowan})
        tests.addTests(suite)
    return tests
d8495b32c6cb95a94857f611700f07b9183a9b63
3,646,098
def k_hot_array_from_string_list(context, typename, entity_names):
    """Create a numpy array encoding a k-hot set.

    Args:
      context: a NeuralExpressionContext
      typename: type of entity_names
      entity_names: list of names of type typename

    Returns:
      A k-hot-array representation of the set of entity_names. For frozen
      dictionaries, unknown entity names are mapped to the unknown_id of
      their type or discarded if the unknown_value of the type is None.
      Unknown entity names will throw an nql.EntityNameException for
      non-frozen dictionaries. It is possible for this method to return an
      all-zeros array.
    """
    # The empty string is never a valid entity name, so drop it up front.
    ids = [context.get_id(name, typename) for name in entity_names if name]
    # get_id may yield None for unknown entities; those are skipped.
    valid_ids = [entity_id for entity_id in ids if entity_id is not None]
    result = np.zeros((context.get_max_id(typename),), dtype='float32')
    if valid_ids:
        result[valid_ids] = 1.
    return result
66c987f7c5d1e3af2b419d0db301ad811a8df5b7
3,646,099
from typing import List
from typing import Tuple
def get_validation_data_iter(data_loader: RawParallelDatasetLoader,
                             validation_sources: List[str],
                             validation_target: str,
                             buckets: List[Tuple[int, int]],
                             bucket_batch_sizes: List[BucketBatchSize],
                             source_vocabs: List[vocab.Vocab],
                             target_vocab: vocab.Vocab,
                             max_seq_len_source: int,
                             max_seq_len_target: int,
                             batch_size: int,
                             fill_up: str) -> 'ParallelSampleIter':
    """
    Returns a ParallelSampleIter for the validation data.

    Pipeline: analyze sequence lengths -> read source/target sentences ->
    compute per-bucket statistics -> load and fill up batches -> wrap in a
    ParallelSampleIter.

    :param validation_sources: paths of the (factored) source files.
    :param validation_target: path of the target file.
    :param fill_up: policy for padding the last, partial batch of a bucket.
    """
    logger.info("=================================")
    logger.info("Creating validation data iterator")
    logger.info("=================================")
    # Length statistics (mean/std of target/source length ratio) are needed
    # for the bucket statistics below.
    validation_length_statistics = analyze_sequence_lengths(validation_sources, validation_target,
                                                            source_vocabs, target_vocab,
                                                            max_seq_len_source, max_seq_len_target)
    # One reader per source factor; BOS only on the target side.
    validation_sources_sentences = [SequenceReader(source, vocab, add_bos=False)
                                    for source, vocab in zip(validation_sources, source_vocabs)]
    validation_target_sentences = SequenceReader(validation_target, target_vocab, add_bos=True, limit=None)
    validation_data_statistics = get_data_statistics(validation_sources_sentences,
                                                     validation_target_sentences,
                                                     buckets,
                                                     validation_length_statistics.length_ratio_mean,
                                                     validation_length_statistics.length_ratio_std,
                                                     source_vocabs, target_vocab)
    validation_data_statistics.log(bucket_batch_sizes)
    # Load per-bucket data and pad incomplete batches per the fill_up policy.
    validation_data = data_loader.load(validation_sources_sentences,
                                       validation_target_sentences,
                                       validation_data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes,
                                                                                                fill_up)
    return ParallelSampleIter(data=validation_data,
                              buckets=buckets,
                              batch_size=batch_size,
                              bucket_batch_sizes=bucket_batch_sizes,
                              num_factors=len(validation_sources))
826b5847fb55e61ba3a0b416643fbbc356a23b07
3,646,100
def _serialize_property(
        target_expr: str,
        value_expr: str,
        a_property: mapry.Property,
        auto_id: _AutoID,
        cpp: mapry.Cpp) -> str:
    """
    Generate the code to serialize the property.

    The value given as ``value_expr`` is serialized into ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param a_property: the property definition
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated serialization code
    """
    # Mandatory properties are serialized directly.
    if not a_property.optional:
        return _serialize_value(
            target_expr=target_expr,
            value_expr=value_expr,
            a_type=a_property.type,
            auto_id=auto_id,
            cpp=cpp)

    # Optional properties hold an indirection: dereference the value for
    # serialization and wrap the result in a presence-check template.
    serialization = _serialize_value(
        target_expr=target_expr,
        value_expr="(*{})".format(value_expr),
        a_type=a_property.type,
        auto_id=auto_id,
        cpp=cpp)

    return _SERIALIZE_OPTIONAL_PROPERTY_TPL.render(
        value_expr=value_expr,
        serialization=serialization)
3a5a7d7795dd771224d1fd5de8304844cd260fad
3,646,101
def read_raw_data(pattern):
    """Load raw data rows from one or more text files.

    :param pattern: a glob pattern string, or an explicit list of file paths
    :return: list X with the rows of every matched file concatenated
    :raises TypeError: if pattern is neither a string nor a list
    """
    # Port to Python 3: `basestring` and the `print` statement no longer
    # exist; also fail fast instead of leaving `fpaths` unbound for
    # unsupported pattern types (previously a confusing NameError).
    if isinstance(pattern, str):
        fpaths = glob.glob(pattern)
    elif isinstance(pattern, list):
        fpaths = pattern
    else:
        raise TypeError('pattern must be a glob string or a list of paths')
    X = []
    for fpath in fpaths:
        print('loading file {} ... '.format(fpath))
        X.extend(loadtxt(fpath))
    return X
ae3f503db4b7f31a043dc4b611d9bf2393d7a352
3,646,102
def warp_affine_rio(src: np.ndarray,
                    dst: np.ndarray,
                    A: Affine,
                    resampling: Resampling,
                    src_nodata: Nodata = None,
                    dst_nodata: Nodata = None,
                    **kwargs) -> np.ndarray:
    """
    Perform Affine warp using rasterio as backend library.

    :param src: image as ndarray
    :param dst: image as ndarray (written in place and returned)
    :param A: Affine transform, maps from dst_coords to src_coords
    :param resampling: str|rasterio.warp.Resampling resampling strategy
    :param src_nodata: Value representing "no data" in the source image
    :param dst_nodata: Value to represent "no data" in the destination image
    **kwargs -- any other args to pass to ``rasterio.warp.reproject``

    :returns: dst
    """
    if isinstance(resampling, str):
        resampling = resampling_s2rio(resampling)

    # GDAL support for int8 is patchy and warp doesn't support it, so
    # promote int8 buffers to int16 for the duration of the call.
    if src.dtype.name == 'int8':
        src = src.astype('int16')
    _dst = dst.astype('int16') if dst.dtype.name == 'int8' else dst

    rasterio.warp.reproject(src, _dst,
                            src_transform=Affine.identity(),
                            dst_transform=A,
                            src_crs=_WRP_CRS,
                            dst_crs=_WRP_CRS,
                            resampling=resampling,
                            src_nodata=src_nodata,
                            dst_nodata=dst_nodata,
                            **kwargs)

    if dst is not _dst:
        # int8 workaround: copy the int16 result back into the caller's buffer.
        np.copyto(dst, _dst, casting='unsafe')

    return dst
4843ce222535a93b1fa7d0fee10161dadaba290b
3,646,103
def encode_integer_leb128(value: int) -> bytes:
    """Encode an integer with signed LEB128 encoding.

    :param int value: The value to encode.
    :return: ``value`` encoded as a variable-length integer in LEB128 format.
    :rtype: bytes
    """
    if value == 0:
        return b"\0"

    # Width in bits: the magnitude bits plus one sign bit, rounded up to a
    # whole number of 7-bit groups.
    width = value.bit_length() + 1
    leftover = width % 7
    if leftover:
        width += 7 - leftover
    n_groups = width // 7

    # Masking forces the (possibly negative) value into its unsigned
    # two's-complement form at this width — this both sign-extends it and
    # lets the plain unsigned (ULEB128) loop below handle negatives.
    twos_complement = value & ((1 << width) - 1)

    encoded = bytearray()
    for _ in range(n_groups):
        encoded.append(0x80 | (twos_complement & 0x7F))
        twos_complement >>= 7
    # The final byte must not carry the continuation bit.
    encoded[-1] &= 0x7F
    return bytes(encoded)
b74832115a58248f4a45a880f657de6dd38b0d8d
3,646,104
def google_sen_new(text_content):
    """
    Analyzing Entity Sentiment in a String via the Google NL API.

    Args:
      text_content: The text content to analyze

    Returns:
      dict mapping each returned entity object to a list of
      [name, entity_type_name, salience, sentiment_score,
      sentiment_magnitude].
    """
    client = language_v1.LanguageServiceClient()
    # Available types: PLAIN_TEXT, HTML; language pinned to English.
    document = {
        "content": text_content,
        "type": enums.Document.Type.PLAIN_TEXT,
        "language": "en",
    }
    # Available encodings: NONE, UTF8, UTF16, UTF32
    response = client.analyze_entity_sentiment(
        document, encoding_type=enums.EncodingType.UTF8)

    result_dict = {}
    for entity in response.entities:
        sentiment = entity.sentiment
        result_dict[entity] = [
            entity.name,
            # e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
            enums.Entity.Type(entity.type).name,
            # salience score in the [0, 1.0] range
            entity.salience,
            sentiment.score,
            sentiment.magnitude,
        ]
    return result_dict
57c4020f35a344d7f264453571e3f8825e00206f
3,646,105
def simplify_polygon_by(points, is_higher, should_stop, refresh_node):
    """
    Simplify the given polygon by greedily removing vertices using a given
    priority.

    This is generalized from Visvalingam's algorithm, which is described
    well here: http://bost.ocks.org/mike/simplify/

    is_higher = function(a,b) returns node higher in priority to be removed.
    should_stop = function(a) returns True if given highest priority node
        stops simplification.
    refresh_node = function(a) refreshes attributes dependent on adjacent
        vertices.

    NOTE: Python 2 code (uses ``xrange``); VertexNode and Heap are
    module-level helpers not visible here.
    """
    length = len(points)
    # build nodes
    nodes = [VertexNode(p) for p in points]
    # connect nodes into a circular doubly-linked list (modular indexing
    # wraps first/last around), then compute each node's priority metric.
    for i in xrange(length):
        prev_i = (i+length-1) % length
        next_i = (i+1) % length
        node = nodes[i]
        node.prev_node = nodes[prev_i]
        node.next_node = nodes[next_i]
        refresh_node(node)
        node.orig_index = i
    def on_index_change(node,i):
        """Callback that allows a node to know its location in the heap."""
        node.heap_index = i
    heap = Heap(nodes, is_higher, on_index_change)
    # Greedily remove the top-priority vertex until should_stop fires.
    while True:
        node = heap.peek()
        if should_stop(node):
            break
        heap.pop()
        # Close gap in doubly-linked list.
        prev_node, next_node = node.prev_node, node.next_node
        prev_node.next_node = next_node
        next_node.prev_node = prev_node
        # Refresh vertices that have new adjacents.
        refresh_node(prev_node)
        heap.reorder_node(prev_node.heap_index)
        refresh_node(next_node)
        heap.reorder_node(next_node.heap_index)
    # Return remaining points in their original order.
    return [node.point for node in sorted(heap.array, key=(lambda node: node.orig_index))]
b9ae05b2d146e78dbed36cc48df6cbd24c33fcbc
3,646,107
def get_builder(slug):
    """
    Get the Builder object for a given slug name.

    Args:
        slug - The slug name of the installable software

    Returns the matching builder, or ``False`` when no builder carries
    that slug (note: ``False`` rather than ``None`` — callers test
    truthiness).
    """
    matches = (builder for builder in Index().index if builder.slug == slug)
    return next(matches, False)
d6013fb55d11be7a153b7a9e9f2bdd991b2a6304
3,646,108
def get_contact_lookup_list():
    """Get contact lookup list as a JSON response.

    On failure, return the error text so the endpoint still responds.
    """
    try:
        return jsonify(Contact.get_contact_choices())
    except Exception as e:
        # Fixed: `e.message` was removed in Python 3 and would itself raise
        # AttributeError here; str(e) is the portable equivalent.
        return str(e)
e8d3a8f813366a16e86cf2eadf5acb7e235218de
3,646,110
def argmax(X, axis=None):
    """
    Return tuple (values, indices) of the maximum entries of matrix
    :param:`X` along axis :param:`axis`. Row major order.

    :param X: Target matrix.
    :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
        or :class:`numpy.matrix`
    :param axis: Specify axis along which to operate. If not specified, whole
        matrix :param:`X` is considered.
    :type axis: `int`
    """
    if sp.isspmatrix(X):
        # Sparse path: brute-force scan of every (row, col) entry.
        X = X.tocsr()
        # Axis is only validated on this branch; the dense branch below
        # passes it straight to numpy's argmax.
        assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number."
        # Per-column / per-row running maxima (value, index) pairs, or a
        # single global [value, flat_index] pair when axis is None.
        res = [[float('-inf'), 0] for _ in range(X.shape[1 - axis])] if axis is not None else [float('-inf'), 0]

        def _caxis(row, col):
            # column-wise maximum: remember the row of the best entry
            if X[row, col] > res[col][0]:
                res[col] = (X[row, col], row)

        def _raxis(row, col):
            # row-wise maximum: remember the column of the best entry
            if X[row, col] > res[row][0]:
                res[row] = (X[row, col], col)

        def _naxis(row, col):
            # global maximum with a flattened index
            # NOTE(review): flat index uses X.shape[0] as the stride —
            # looks like row-major would use X.shape[1]; confirm intent.
            if X[row, col] > res[0]:
                res[0] = X[row, col]
                res[1] = row * X.shape[0] + col

        check = _caxis if axis == 0 else _raxis if axis == 1 else _naxis
        # List comprehension used purely for its side effect of updating res.
        [check(row, col) for row in range(X.shape[0]) for col in range(X.shape[1])]
        if axis is None:
            return res
        elif axis == 0:
            t = list(zip(*res))
            return list(t[0]), np.mat(t[1])
        else:
            t = list(zip(*res))
            return list(t[0]), np.mat(t[1]).T
    else:
        # Dense path: let numpy compute the argmax, then gather the values.
        idxX = np.asmatrix(X).argmax(axis)
        if axis is None:
            # idxX is a flat index into the matrix.
            eX = X[idxX // X.shape[1], idxX % X.shape[1]]
        elif axis == 0:
            # NOTE(review): idx and col always coincide here (zip of two
            # identical ranges); kept as written.
            eX = [X[idxX[0, idx], col] for idx, col in zip(range(X.shape[1]), range(X.shape[1]))]
        else:
            eX = [X[row, idxX[idx, 0]] for row, idx in zip(range(X.shape[0]), range(X.shape[0]))]
        return eX, idxX
b97cb16798e0d726fc21a76a2a3c6a02d284e313
3,646,111
import hashlib
# NOTE(review): T and A are almost certainly the web2py/gluon HTML helpers
# (translator and anchor), not the re module's flags — these imports look
# auto-generated and wrong; confirm against the original controller.
from re import T
from re import A
def posts():
    """
    Function accessed by AJAX to handle a Series of Posts.

    Reads the series id (and optionally a result limit, default 5) from
    request.args, renders each post as a comment-box <li>, and returns the
    assembled <ul> as XML. web2py/S3 framework globals (request, db, s3db,
    UL, DIV, ...) are assumed to be in scope.
    """
    try:
        series_id = request.args[0]
    except:
        # Missing series id -> bad request
        raise HTTP(400)
    try:
        recent = request.args[1]
    except:
        recent = 5
    table = s3db.cms_post
    # List of Posts in this Series
    query = (table.series_id == series_id)
    posts = db(query).select(table.name,
                             table.body,
                             table.avatar,
                             table.created_by,
                             table.created_on,
                             limitby=(0, recent))
    output = UL(_id="comments")
    for post in posts:
        # Default author shown when the post has no creator record.
        author = B(T("Anonymous"))
        if post.created_by:
            utable = s3db.auth_user
            ptable = s3db.pr_person
            ltable = s3db.pr_person_user
            query = (utable.id == post.created_by)
            left = [ltable.on(ltable.user_id == utable.id),
                    ptable.on(ptable.pe_id == ltable.pe_id)]
            row = db(query).select(utable.email,
                                   ptable.first_name,
                                   ptable.middle_name,
                                   ptable.last_name,
                                   left=left,
                                   limitby=(0, 1)).first()
            if row:
                person = row.pr_person
                user = row[utable._tablename]
                username = s3_fullname(person)
                # Gravatar URLs are keyed on the md5 of the lowercased email.
                email = user.email.strip().lower()
                hash = hashlib.md5(email).hexdigest()
                url = "http://www.gravatar.com/%s" % hash
                author = B(A(username, _href=url, _target="top"))
        header = H4(post.name)
        if post.avatar:
            avatar = s3base.s3_avatar_represent(post.created_by)
        else:
            avatar = ""
        # Assemble one comment box: avatar, header, body, date, author.
        row = LI(DIV(avatar,
                     DIV(DIV(header,
                             _class="comment-header"),
                         DIV(XML(post.body),
                             _class="comment-body"),
                         _class="comment-text"),
                     DIV(DIV(post.created_on,
                             _class="comment-date"),
                         _class="fright"),
                     DIV(author,
                         _class="comment-footer"),
                     _class="comment-box"))
        output.append(row)
    return XML(output)
dc5b43016c3e50c52969c91505856941c951911b
3,646,112
def resolve_game_object_y_collision(moving, static):
    """Resolves a collision by moving an object along the y axis.

    Args:
        moving (:obj:`engine.game_object.PhysicalGameObject`):
            The object to move along the y axis.
        static (:obj:`engine.game_object.PhysicalGameObject`):
            The object to leave as-is.

    Returns:
        The change in the velocity of the object along the y axis
        (0 when the objects do not overlap on the x axis).
    """
    overlaps_on_x = geometry.detect_overlap_1d(
        moving.x, moving.width, static.x, static.width)
    if not overlaps_on_x:
        # No x-axis overlap means the two can't collide vertically.
        return 0
    # Overlap detected along x-axis, resolve collision on y-axis.
    return _resolve_game_object_axis_collision(moving, static, 'y')
24d63b4fd9e4a37d22aceee01e6accc2a74e8ee4
3,646,113
def filter_all(fn, *l):
    """
    Runs the filter function on all items in each of the given lists.

    :param fn: Filter predicate applied to each element
    :param l: lists to filter, passed as separate positional arguments
    :return: list of filtered lists, one per input list

    >>> filter_all(lambda x: x != "", ['a'], ['b'], [""], ["d"])
    [['a'], ['b'], [], ['d']]
    """
    # Fixed: the previous `[filter(fn, lst) for lst in chain(*l)]` flattened
    # the lists into individual elements and (on Python 3) returned lazy
    # filter objects — it could never produce the documented result above.
    return [[item for item in lst if fn(item)] for lst in l]
114b7b9bf9b22d55bd891a654318d4a49e30be51
3,646,114
def get_test_runners(args):
    """Collect all test runners implied by *args*.

    Sources, in order: a qitest.json in the current working directory,
    any --qitest-json arguments, and the build projects (deduplicated by
    working directory). With --coverage, only build-project runners are
    allowed and returned.

    :raises Exception: if --coverage is given without any build project
    :raises EmptyTestListException: if nothing at all was found to test
    """
    runners = []

    # first case: qitest.json in current working directory
    cwd_runner = get_test_runner(args)
    if cwd_runner:
        runners.append(cwd_runner)

    # second case: qitest.json specified with --qitest-json
    for json_path in (args.qitest_jsons or []):
        runners.append(get_test_runner(args, qitest_json=json_path))

    # third case: parsing build projects; skip any runner whose cwd is
    # already covered by a --qitest-json runner
    project_runners = parse_build_projects(args)
    seen_cwds = [runner.cwd for runner in runners]
    runners.extend(r for r in project_runners if r.cwd not in seen_cwds)

    if args.coverage:
        if not project_runners:
            raise Exception("--coverage can only be used from a qibuild CMake project\n")
        return project_runners

    if not runners:
        raise EmptyTestListException("Nothing found to test")
    return runners
73d9b4e73935cd2c41c24bd1376ade9ea274f23d
3,646,115
def get_preselected_facets(params, all_categories):
    """
    Resolve all facets that have been determined by the GET parameters.

    Args:
        params: Contains the categories/facets
        all_categories: 4-tuple/list of (iso, inspire, custom, org) categories
    Returns:
        dict: Contains all sorted facets (only non-empty groups are kept)
    """
    # Pair each facet group label with its GET parameter and category list,
    # then resolve all of them the same way.
    resolved = {
        "ISO 19115": __resolve_single_facet(
            params.get("isoCategories", ""), all_categories[0]),
        "INSPIRE": __resolve_single_facet(
            params.get("inspireThemes", ""), all_categories[1]),
        "Custom": __resolve_single_facet(
            params.get("customCategories", ""), all_categories[2]),
        "Organizations": __resolve_single_facet(
            params.get("registratingDepartments", ""), all_categories[3]),
    }
    # Drop empty groups so the caller only sees facets that matched.
    return {label: facets for label, facets in resolved.items() if len(facets) > 0}
878f3ef05aaaa643782c6d50c6796bc503c0f8e6
3,646,116
def has_joined(*args: list, **kwargs) -> str:
    """
    Build the (Persian) confirmation message shown after the user joins the
    required channel.

    :param args: *[0] -> first name of the user
    :param kwargs: unused; accepted for caller compatibility
    :return: Generated validation message (HTML, with emoji placeholders
        taken from module-level constants such as ``_star_struck``)
    """
    first_name = args[0]
    # The message interpolates the user's name and several module-level
    # emoji constants; text segments are joined via implicit line
    # continuation.
    text = f"{_star_struck}{_smiling_face_with_heart} بسیار خب " \
           f"<b>{first_name}</b> " \
           f", حالا تمام دسترسی ها رو داری{_party_popper}{_confetti_ball}\n\n" \
           f"تبریک از طرف @chromusic_fa {_red_heart}\n" \
           f"با خیال راحت هر فایل صوتی رو سرچ کن {_face_blowing_a_kiss}"
    return text
d446da88a362c3821e25d0bee0e110ec0a906423
3,646,117
def depth_residual_regresssion_subnet(x, flg, regular, subnet_num):
    """Build a U-Net architecture"""
    # NOTE(review): the string below is a stray literal (not the docstring);
    # kept as-is to leave the statement list unchanged.
    """
    Args:
        x is the input, 4-D tensor (BxHxWxC)
        flg represent weather add the BN
        regular represent the regularizer number
    Return:
        output is 4-D Tensor (BxHxWxC)
    """
    # Name prefix so variables of each subnet instance stay distinct.
    pref = 'depth_regression_subnet_' + str(subnet_num) + '_'
    # whether to train flag
    train_ae = flg
    # define initializer for the network
    keys = ['conv', 'upsample']
    keys_avoid = ['OptimizeLoss']
    inits = []
    # NOTE(review): init_net is hard-wired to None, so the whole
    # exec-based initializer block below is currently dead code.
    init_net = None
    if init_net != None:
        for name in init_net.get_variable_names():
            # select certain variables
            flag_init = False
            for key in keys:
                if key in name:
                    flag_init = True
            for key in keys_avoid:
                if key in name:
                    flag_init = False
            if flag_init:
                name_f = name.replace('/', '_')
                num = str(init_net.get_variable_value(name).tolist())
                # self define the initializer function
                exec(
                    "class " + name_f + "(Initializer):\n def __init__(self,dtype=tf.float32): self.dtype=dtype \n def __call__(self,shape,dtype=None,partition_info=None): return tf.cast(np.array(" + num + "),dtype=self.dtype)\n def get_config(self):return {\"dtype\": self.dtype.name}")
                inits.append(name_f)
    # autoencoder: per-layer hyperparameters (last layer is the 1-channel
    # depth output with no activation).
    n_filters = [
        128,
        96,
        64,
        32,
        16,
        1,
    ]
    filter_sizes = [
        3,
        3,
        3,
        3,
        3,
        3,
    ]
    pool_sizes = [ \
        1,
        1,
        1,
        1,
        1,
        1,
    ]
    pool_strides = [
        1,
        1,
        1,
        1,
        1,
        1,
    ]
    skips = [ \
        False,
        False,
        False,
        False,
        False,
        False,
    ]
    # change space
    ae_inputs = tf.identity(x, name='ae_inputs')
    # prepare input
    current_input = tf.identity(ae_inputs, name="input")
    ####################################################################################################################
    # convolutional layers: depth regression
    feature = []
    for i in range(0, len(n_filters)):
        name = pref + "conv_" + str(i)
        # define the initializer (falls back to defaults because `inits`
        # is empty while init_net is None — see note above)
        if name + '_bias' in inits:
            bias_init = eval(name + '_bias()')
        else:
            bias_init = tf.zeros_initializer()
        if name + '_kernel' in inits:
            kernel_init = eval(name + '_kernel()')
        else:
            kernel_init = None
        # Last layer is linear; `relu` is presumably a module-level alias
        # (e.g. tf.nn.relu) — TODO confirm.
        if i == (len(n_filters) - 1):
            activation = None
        else:
            activation = relu
        # convolution
        current_input = tf.layers.conv2d(
            inputs=current_input,
            filters=n_filters[i],
            kernel_size=[filter_sizes[i], filter_sizes[i]],
            padding="same",
            activation=activation,
            trainable=train_ae,
            kernel_initializer=kernel_init,
            bias_initializer=bias_init,
            name=name,
        )
        # With all pool sizes/strides set to 1, pooling is skipped for
        # every layer in the current configuration.
        if pool_sizes[i] == 1 and pool_strides[i] == 1:
            feature.append(current_input)
        else:
            feature.append(
                tf.layers.max_pooling2d( \
                    inputs=current_input,
                    pool_size=[pool_sizes[i], pool_sizes[i]],
                    strides=pool_strides[i],
                    name=pref + "pool_" + str(i)
                )
            )
        current_input = feature[-1]
    depth_coarse = tf.identity(feature[-1], name='depth_coarse_output')
    return depth_coarse
2c5d2cb03f60acc92f981d108b791a0e1215f5f6
3,646,118
def dist2(x, c):
    """
    Calculates squared distance between two sets of points.

    Parameters
    ----------
    x: numpy.ndarray
        Data of shape `(ndata, dim)`
    c: numpy.ndarray
        Centers of shape `(ncenters, dim)`; dim must match x

    Returns
    -------
    n2: numpy.ndarray
        Squared distances of shape `(ncenters, ndata)`, where
        ``n2[j, i]`` is the squared distance between ``c[j]`` and ``x[i]``.
        (Fixed docstring: the previous doc claimed `(ndata, ncenters)`,
        but broadcasting puts centers on the rows.)
    """
    assert x.shape[1] == c.shape[1], \
        'Data dimension does not match dimension of centers'
    # Broadcast (1, ndata, dim) against (ncenters, 1, dim) and sum the
    # squared differences over the feature axis.
    diff = x[np.newaxis, :, :] - c[:, np.newaxis, :]
    return np.sum(diff ** 2, axis=-1)
24a1b9a368d2086a923cd656923dc799726ed7f0
3,646,119
def process_name(i: int, of: int) -> str:
    """Return e.g. '| | 2 |': an n-track name with track `i` (here i=2) marked.

    This makes it easy to follow each process's log messages, because you
    just go down the line until you encounter the same number again.

    Example: The interleaved log of four processes that each simulate a car
    visiting a charging station. The processes have been named with
    `process_name()`, and their log messages start with their `self.name`.
    (Car #2 does not turn up in this snippet.)

        |   |   | 3 arriving at 6
        | 1 |   |   starting to charge at 7
        0   |   |   starting to charge at 7
        | 1 |   |   leaving the bcs at 9
    """
    # Every track shows '|' except track i, which shows its own number.
    tracks = [str(i) if pos == i else "|" for pos in range(of)]
    return " ".join(tracks)
bc3e0d06544b61249a583b6fa0a010ec917c0428
3,646,120
def cards_db(db):
    """
    CardsDB object that's empty.

    Clears the shared db before handing it to the test, so every test
    starts from a clean slate.
    """
    # Wipe any cards left over from previous tests.
    db.delete_all()
    return db
9649b309990325eca38ed89c6e9d499b41786dab
3,646,121
def _geo_connected(geo, rxn):
    """ Assess if geometry is connected. Right now only works for
        minima
    """
    # Count connected components of the molecular graph; when no reaction
    # object is given, assume a single fragment without building the graph.
    if rxn is None:
        num_components = 1
    else:
        graph = automol.geom.graph(geo)
        num_components = len(automol.graph.connected_components(graph))

    if num_components == 1:
        return True

    ioprinter.bad_conformer('disconnected')
    return False
d5993b7083703746214e70d6d100857da99c6c02
3,646,122
def scale_to_range(image, dest_range=(0,1)):
    """
    Linearly rescale an image's values onto the given range.

    The image's minimum maps to dest_range[0] and its maximum to
    dest_range[1].
    """
    lo, hi = image.min(), image.max()
    return np.interp(image, xp=(lo, hi), fp=dest_range)
a225b44dc05d71d8ccc380d26fb61d96116414da
3,646,123
def files():
    """Hypothesis strategy for generating objects pyswagger can use as file
    handles to populate `file` format parameters.

    Generated values take the format: `dict('data': <file object>)`
    """
    def _as_file_param(handle):
        # pyswagger expects file parameters wrapped in a {'data': ...} dict.
        return {"data": handle}

    return file_objects().map(_as_file_param)
04e787502a043ffba08912724c9e29f84a6a416c
3,646,124
from typing import Optional
from typing import Generator
def get_histograms(
    query: Optional[str] = None, delta: Optional[bool] = None
) -> Generator[dict, dict, list[Histogram]]:
    """Get Chrome histograms.

    Parameters
    ----------
    query: Optional[str]
        Requested substring in name. Only histograms which have query as a
        substring in their name are extracted. An empty or absent query
        returns all histograms.
    delta: Optional[bool]
        If true, retrieve delta since last call.

    Returns
    -------
    histograms: list[Histogram]
        Histograms.

    **Experimental**
    """
    # Send the CDP command (None-valued params are stripped) and parse the
    # reply sent back into the generator.
    params = filter_none({"query": query, "delta": delta})
    response = yield {"method": "Browser.getHistograms", "params": params}
    histograms = []
    for raw in response["histograms"]:
        histograms.append(Histogram.from_json(raw))
    return histograms
059792d045bdea84ff636a27de9a1d9812ae4c24
3,646,125
def energy_decay_curve_chu_lundeby(
        data,
        sampling_rate,
        freq='broadband',
        noise_level='auto',
        is_energy=False,
        time_shift=True,
        channel_independent=False,
        normalize=True,
        plot=False):
    """ This function combines Chu's and Lundeby's methods:
    The estimated noise level is subtracted before backward integration
    (Chu), the impulse response is truncated at the intersection time
    (Lundeby), and the correction for the truncation is applied [1, 2, 3]_

    Parameters
    ----------
    data : ndarray, double
        The room impulse response with dimension [..., n_samples]
    sampling_rate: integer
        The sampling rate of the room impulse response.
    freq: integer OR string
        The frequency band. If set to 'broadband',
        the time window of the Lundeby-algorithm will not be set in
        dependence of frequency.
    noise_level: ndarray, double OR string
        If not specified, the noise level is calculated based on the last
        10 percent of the RIR. Otherwise specify manually for each channel
        as array.
    is_energy: boolean
        Defines, if the data is already squared.
    time_shift : boolean
        Defines, if the silence at beginning of the RIR should be removed.
    channel_independent : boolean
        Defines, if the time shift and normalizsation is done
        channel-independently or not.
    normalize : boolean
        Defines, if the energy decay curve should be normalized in the end
        or not.
    plot: Boolean
        Specifies, whether the results should be visualized or not.

    Returns
    -------
    energy_decay_curve: ndarray, double
        Returns the noise handeled edc.

    References
    ----------
    .. [1] Lundeby, Virgran, Bietz and Vorlaender - Uncertainties of
           Measurements in Room Acoustics - ACUSTICA Vol. 81 (1995)
    .. [2] W. T. Chu. “Comparison of reverberation measurements using
           Schroeder’s impulse method and decay-curve averaging method”.
           In: Journal of the Acoustical Society of America 63.5 (1978),
           pp. 1444–1450.
    .. [3] M. Guski, “Influences of external error sources on measurements
           of room acoustic parameters,” 2015.
    """
    # Square (if needed), time-shift and flatten the RIR so that the last
    # axis is samples and all leading axes are folded into channels.
    energy_data, n_channels, data_shape = preprocess_rir(
        data,
        is_energy=is_energy,
        time_shift=time_shift,
        channel_independent=channel_independent)
    n_samples = energy_data.shape[-1]

    # Chu's method: subtract the estimated stationary noise power from the
    # squared IR before backward integration.
    subtraction = subtract_noise_from_squared_rir(
        energy_data,
        noise_level=noise_level)

    # Lundeby's method: estimate where the decay meets the noise floor
    # (intersection time), the late reverberation time, and the noise power.
    # NOTE(review): this rebinds `noise_level` to the estimated noise power,
    # which affects the 'auto' comparison inside the loop below — verify
    # that intersection_time_lundeby never returns the string 'auto'.
    intersection_time, late_reverberation_time, noise_level = \
        intersection_time_lundeby(
            energy_data,
            sampling_rate=sampling_rate,
            freq=freq,
            initial_noise_power=noise_level,
            is_energy=True,
            time_shift=False,
            channel_independent=False,
            plot=False)

    # Third return value of smooth_rir is the time axis in seconds.
    time_vector = smooth_rir(energy_data, sampling_rate)[2]

    energy_decay_curve = np.zeros([n_channels, n_samples])
    for idx_channel in range(0, n_channels):
        # Sample index closest to this channel's intersection time.
        intersection_time_idx = np.argmin(np.abs(
            time_vector - intersection_time[idx_channel]))
        if noise_level == 'auto':
            # Noise power estimated from the tail of the squared IR.
            p_square_at_intersection = estimate_noise_energy(
                energy_data[idx_channel],
                is_energy=True)
        else:
            p_square_at_intersection = noise_level[idx_channel]

        # calculate correction term according to DIN EN ISO 3382:
        # energy of the exponential decay truncated at the intersection.
        correction = (p_square_at_intersection
                      * late_reverberation_time[idx_channel]
                      * (1 / (6*np.log(10)))
                      * sampling_rate)

        # Backward (Schroeder) integration of the noise-subtracted IR, only
        # up to the intersection time; samples beyond it stay zero for now.
        energy_decay_curve[idx_channel, :intersection_time_idx] = \
            ra.schroeder_integration(
                subtraction[idx_channel, :intersection_time_idx],
                is_energy=True)
        # Truncation compensation is added to the whole channel.
        energy_decay_curve[idx_channel] += correction

    if normalize:
        # Normalize the EDC...
        if not channel_independent:
            # ...by the first element of each channel.
            energy_decay_curve = (energy_decay_curve.T /
                                  energy_decay_curve[..., 0]).T
        else:
            # ...by the maximum first element of each channel.
            max_start_value = np.amax(energy_decay_curve[..., 0])
            energy_decay_curve /= max_start_value

    # NOTE(review): `intersection_time_idx` here is the value from the LAST
    # loop iteration, so all channels are NaN-masked at the last channel's
    # intersection index — confirm this is intended for multichannel input.
    energy_decay_curve[..., intersection_time_idx:] = np.nan

    if plot:
        plt.figure(figsize=(15, 3))
        plt.subplot(131)
        plt.plot(time_vector, 10*np.log10(energy_data.T))
        plt.xlabel('Time [s]')
        plt.ylabel('Squared IR [dB]')
        plt.subplot(132)
        plt.plot(time_vector, 10*np.log10(subtraction.T))
        plt.xlabel('Time [s]')
        plt.ylabel('Noise subtracted IR [dB]')
        plt.subplot(133)
        plt.plot(time_vector[0:energy_decay_curve.shape[-1]],
                 10*np.log10(energy_decay_curve.T))
        plt.xlabel('Time [s]')
        plt.ylabel('Tr. EDC with corr. & subt. [dB]')
        plt.tight_layout()

    # Recover original data shape:
    energy_decay_curve = np.reshape(energy_decay_curve, data_shape)
    energy_decay_curve = np.squeeze(energy_decay_curve)

    return energy_decay_curve
3e9711fbc47442a27fc339b3fb18ad6f21a44c91
3,646,126
def sinkhorn(
    p,
    q,
    metric="euclidean",
):
    """
    Return an entropically regularized approximation of the earth mover's
    distance between two point clouds, computed with the Sinkhorn algorithm.

    Both clouds are weighted uniformly (each point carries mass 1/n).

    Parameters
    ----------
    p : 2-D array
        First point cloud, one point per row.
    q : 2-D array
        Second point cloud, one point per row.
    metric : str
        Pairwise distance metric passed to
        ``sklearn.metrics.pairwise_distances`` (default: "euclidean").

    Returns
    -------
    distance : float
        Square root of the regularized transport cost between the clouds.
    """
    # Uniform mass distribution over each cloud.
    p_weights = np.ones(len(p)) / len(p)
    q_weights = np.ones(len(q)) / len(q)
    # Cost matrix; contiguous layout is required by POT's C backend.
    pairwise_dist = np.ascontiguousarray(
        pairwise_distances(p, Y=q, metric=metric, n_jobs=-1)
    )
    # Entropic regularization reg=0.05 and numItermax=100 trade exactness
    # for speed; the result approximates (not equals) the true EMD.
    result = pot.sinkhorn2(
        p_weights,
        q_weights,
        pairwise_dist,
        reg=0.05,
        numItermax=100,
        return_matrix=False,
    )
    return np.sqrt(result)
93a4eb2383cfc4a2f462daf1b984d773c339aee7
3,646,127
def generate_s3_events(cluster_name, cluster_dict, config):
    """Add the S3 Events module to the Terraform cluster dict.

    Args:
        cluster_name (str): The name of the currently generating cluster
        cluster_dict (defaultdict): The dict containing all Terraform config for
                                    a given cluster.
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        bool: Result of applying the s3_events module
    """
    # Bucket configuration for this cluster's s3_events module.
    buckets = config['clusters'][cluster_name]['modules']['s3_events']
    generate_s3_events_by_bucket(cluster_name, cluster_dict, config, buckets)
    return True
93af74bcd9b0c16fdfd1a3495bb709d84edb10a6
3,646,128
def cluster_seg(bt, seg_list, radius):
    """ Fetch segments which align themself for a given tolerance. """
    clusters = []
    visited = set()
    for idx, segment in enumerate(seg_list):
        # Skip segments already absorbed into an earlier neighborhood.
        if idx in visited:
            continue
        neighbor_ix = list(bt.query_radius([segment], radius)[0])
        visited.update(neighbor_ix)
        clusters.append(neighbor_ix)
    # Merge neighborhoods that share members into connected components.
    return _find_connected_components(clusters)
37308331b41cd7d1e1b8717bf4c0d5a4ced55385
3,646,129