content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def get_bin_values(base_dataset, bin_value):
    """Gets the values to be used when sorting into bins for the given dataset,
    from the configured options.

    Args:
        base_dataset: dataset object exposing ``get_output()`` and
            ``get_number_of_samples()``.
        bin_value: either "results" (bin by the dataset's outputs) or "all"
            (a single shared bin).

    Returns:
        List of per-sample bin values.

    Raises:
        ValueError: for an unknown ``bin_value``.  (Was a bare ``Exception``;
        ValueError is a subclass, so existing ``except Exception`` callers
        still work.)
    """
    if bin_value == "results":
        return base_dataset.get_output()
    if bin_value == "all":
        # We set all values to 0, assuming single bin will also set its value to 0.
        return [0] * base_dataset.get_number_of_samples()
    raise ValueError(f"Invalid bin value configured: {bin_value}")
cf2419066d6e642e65d9a8747081ebfee417ed64
3,643,784
def get_reviews(revision_range):
    """Returns the list of reviews found in the commits in the revision range.

    Scans ``git log`` output (oldest first) for trailer lines of the form
    ``Review: <REVIEWBOARD_URL>/r/<id>`` and collects each numeric id.
    Exits the process on a malformed review URL.

    NOTE(review): Python 2 syntax (``print`` statement) — do not run under
    Python 3 without porting.
    """
    # Commit messages for the range, oldest-to-newest, as one text blob.
    log = check_output(['git', '--no-pager', 'log', '--no-color', '--reverse',
                        revision_range]).strip()
    review_ids = []
    for line in log.split('\n'):
        pos = line.find('Review: ')
        if pos != -1:
            # Anchor the expected review URL at end of line.
            pattern = re.compile('Review: ({url})$'.format(
                url=os.path.join(REVIEWBOARD_URL, 'r', '[0-9]+')))
            # A trailing '/' on the URL is tolerated by stripping it first.
            match = pattern.search(line.strip().strip('/'))
            if match is None:
                print "\nInvalid ReviewBoard URL: '{}'".format(line[pos:])
                sys.exit(1)
            url = match.group(1)
            # The review id is the last path component of the URL.
            review_ids.append(os.path.basename(url))
    return review_ids
0ff81eef45fb123e25dc7662f320e49fac7aa378
3,643,785
def create_cert_req(keyType=crypto.TYPE_RSA, bits=1024, messageDigest="md5"): """ Create certificate request. Returns: certificate request PEM text, private key PEM text """ # Create certificate request req = crypto.X509Req() # Generate private key pkey = crypto.PKey() pkey.generate_key(keyType, bits) req.set_pubkey(pkey) req.sign(pkey, messageDigest) return (crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req), crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
168fd8c7cde30730cdc9e74e5fbf7619783b29c9
3,643,786
def large_xyz_to_lab_star(large_xyz, white=const_d50_large_xyz):
    """Convert XYZ tristimulus values to CIE L*a*b*.

    (Translated from Japanese; the original docstring stated the reverse
    direction, but the code maps XYZ -> L*a*b*.)

    Input: numpy array, shape = (N, M, 3).
    Reference: https://en.wikipedia.org/wiki/Lab_color_space
    """
    if not common.is_img_shape(large_xyz):
        raise TypeError('large_xyz shape must be (N, M, 3)')
    x, y, z = np.dsplit(large_xyz, 3)
    # Normalize the white point so its Y component becomes 1.
    # (The comprehension reuses the name 'x'; in Python 3 it does not leak
    # and the channel 'x' above is unaffected.)
    white = [x / white[1] for x in white]
    l = 116 * _func_t(y/white[1]) - 16
    a = 500 * (_func_t(x/white[0]) - _func_t(y/white[1]))
    b = 200 * (_func_t(y/white[1]) - _func_t(z/white[2]))
    return np.dstack((l, a, b))
aec3cb423698954aa07a61bf484e1acd8e38d5db
3,643,787
from typing import Any


def return_value(value: Any) -> ObservableBase:
    """Returns an observable sequence that contains a single element, using
    the specified scheduler to send out observer messages.

    There is an alias called 'just'.

    Keyword arguments:
    value -- Single element in the resulting observable sequence.

    Returns an observable sequence containing the single specified element.
    """
    def subscribe(observer, scheduler=None):
        # Fall back to the current-thread scheduler when none is supplied.
        active_scheduler = scheduler or current_thread_scheduler

        def emit(sched, state=None):
            # Emit the single element, then signal completion.
            observer.on_next(value)
            observer.on_completed()

        return active_scheduler.schedule(emit)

    return AnonymousObservable(subscribe)
e14ac3a08a3f127b77f57b7192a8f362ec3485b2
3,643,788
def compare_policies(current_policy, new_policy):
    """Compares the existing policy and the updated policy.

    Returns True if there is a difference between policies.
    """
    hashed_new = set(_hashable_policy(new_policy, []))
    hashed_current = set(_hashable_policy(current_policy, []))
    return hashed_new != hashed_current
e69ecaa051602e2d9eab0695f62b391a9aca17ad
3,643,789
def meanPSD(d0,win=np.hanning,dx=1.,axis=0,irregular=False,returnInd=False,minpx=10): """Return the 1D PSD averaged over a surface. Axis indicates the axis over which to FFT If irregular is True, each slice will be stripped and then the power spectra interpolated to common frequency grid Presume image has already been interpolated internally If returnInd is true, return array of power spectra Ignores slices with less than minpx non-nans """ #Handle which axis is transformed if axis==0: d0 = np.transpose(d0) #Create list of slices if irregular is True: d0 = [stripnans(di) for di in d0] else: d0 = [di for di in d0] #Create power spectra from each slice pows = [realPSD(s,win=win,dx=dx,minpx=minpx) for s in d0 \ if np.sum(~np.isnan(s)) >= minpx] #Interpolate onto common frequency grid of shortest slice if irregular is True: #Determine smallest frequency grid ln = [len(s[0]) for s in pows] freq = pows[np.argmin(ln)][0] #Interpolate pp = [griddata(p[0],p[1],freq) for p in pows] else: pp = [p[1] for p in pows] freq = pows[0][0] #Average pa = np.mean(pp,axis=0) if returnInd is True: return freq,pp return freq,pa
99d6ab3e8ef505f031346db10762a195904b455e
3,643,790
async def get_temperatures(obj):
    """Get temperatures as read by the thermostat.

    Args:
        obj: context mapping; ``obj["madoka"]`` is the device handle whose
            ``temperatures.query()`` coroutine is awaited.

    Returns:
        Whatever the device's temperature query resolves to.
    """
    return await obj["madoka"].temperatures.query()
b4643d9c40f6aa8953c598dd572d291948ef34a4
3,643,791
import itertools


def get_zero_to_2pi_input(label, required, placeholder=None, initial=None, validators=()):
    """
    Method to get a custom positive float number field
    :param label: String label of the field
    :param required: Boolean to define whether the field is required or not
    :param placeholder: Placeholder to appear in the field
    :param initial: Default input value for the field
    :param validators: validators that should be attached with the field
    :return: A custom floating number field that accepts only numbers greater
             than zero and less than 2pi(Math.pi)
    """
    # Built-in range checks always run before any caller-supplied validators.
    combined_validators = [validate_positive_float, validate_less_than_2pi]
    combined_validators.extend(validators)
    return CustomFloatField(
        label=label,
        required=required,
        initial=initial,
        placeholder=placeholder,
        validators=combined_validators,
    )
d1349088d8b2c29ecc07bdb6900ff335384e3c30
3,643,792
def compile_math(math):
    """ Compile a mathematical expression

    Args:
        math (:obj:`str`): mathematical expression

    Returns:
        :obj:`_ast.Expression`: compiled expression
    """
    # FIX: 'Div' and 'Pow' were fused into the single invalid node name
    # 'DivPow' by implicit string concatenation (missing comma), so division
    # and exponentiation were silently rejected by evalidate.
    math_node = evalidate.evalidate(math, addnodes=[
        'Eq', 'NotEq', 'Gt', 'Lt', 'GtE', 'LtE',
        'Sub', 'Mult', 'Div', 'Pow',
        'And', 'Or', 'Not',
        'BitAnd', 'BitOr', 'BitXor',
        'Call',
    ], funcs=MATHEMATICAL_FUNCTIONS.keys())
    compiled_math = compile(math_node, '<math>', 'eval')
    return compiled_math
511c281a03591ed5b84e216f3edb1503537cbb86
3,643,793
from typing import Optional
from typing import Union
from typing import List

import click


def colfilter(
    data,
    skip: Optional[Union[str, List[str]]] = None,
    only: Optional[Union[str, List[str]]] = None,
):
    """
    Remove some variables (skip) or keep only certain variables (only)

    Parameters
    ----------
    data: pd.DataFrame
        The DataFrame to be processed and returned
    skip: str, list or None (default is None)
        List of variables to remove
    only: str, list or None (default is None)
        List of variables to keep

    Returns
    -------
    data: pd.DataFrame
        The filtered DataFrame
    """
    # Boolean mask over columns: True for columns to keep.
    keep_mask = _validate_skip_only(data, skip, only)
    column_dtypes = _get_dtypes(data)
    click.echo(f"Keeping {keep_mask.sum():,} of {len(data.columns):,} variables:")
    # Report how many of each variable kind survive the filter.
    for kind in ("binary", "categorical", "continuous", "unknown"):
        kind_mask = column_dtypes == kind
        kept_of_kind = kind_mask & keep_mask
        click.echo(f"\t{kept_of_kind.sum():,} of {kind_mask.sum():,} {kind} variables")
    return data.loc[:, keep_mask]
16c901f514afb1990e43c470c7e089eab5b4eb56
3,643,794
import math


def acos(x):
    """Return the arc cosine of ``x``, in radians.

    Thin wrapper around :func:`math.acos`; ``x`` must lie in [-1, 1].
    """
    return math.acos(x)
0a8ca8f716f0ea54b558ca27021830480dac662d
3,643,795
def get_callable_from_string(f_name):
    """Takes a string containing a function name (optionally module
    qualified) and returns a callable object.

    Raises RuntimeError (wrapping the original ImportError/AttributeError)
    when the name cannot be resolved.

    NOTE(review): Python 2 syntax (``except ..., exc``) — do not run under
    Python 3 without porting.
    """
    try:
        mod_name, func_name = get_mod_func(f_name)
        if mod_name == "" and func_name == "":
            raise AttributeError("%s couldn't be converted to a module or function name" % f_name)
        module = __import__(mod_name)
        if func_name == "":
            func_name = mod_name  # The common case is an eponymous class
        return getattr(module, func_name)
    except (ImportError, AttributeError), exc:
        raise RuntimeError("Unable to create a callable object for '%s': %s" % (f_name, exc))
ef1ae8d4c1da06e38a6029e0caa51b4e3fb5b95c
3,643,796
from typing import List

import bisect


def binary_get_bucket_for_node(buckets: List[KBucket], node: Node) -> KBucket:
    """Given a list of ordered buckets, returns the bucket for a given node.

    Args:
        buckets: buckets sorted by their (start, end) id ranges.
        node: the node whose id is searched for.

    Returns:
        The bucket whose [start, end] range contains ``node.id``.

    Raises:
        ValueError: if no bucket covers the node id.
    """
    bucket_ends = [bucket.end for bucket in buckets]
    bucket_position = bisect.bisect_left(bucket_ends, node.id)
    # FIX: the original used `assert` plus `except AssertionError` for
    # control flow, which silently breaks under `python -O` (asserts are
    # stripped).  Use explicit checks instead.
    if bucket_position < len(buckets):
        bucket = buckets[bucket_position]
        if bucket.start <= node.id <= bucket.end:
            return bucket
    raise ValueError("No bucket found for node with id {}".format(node.id))
ff1fc765c56e67af3c33798b403779f7aafb6bb0
3,643,797
def darken(color, factor=0.7):
    """Return darkened color as a ReportLab RGB color.

    Take a passed color and returns a Reportlab color that is darker by the
    factor indicated in the parameter.
    """
    darkened = color_to_reportlab(color)
    # Scale each RGB channel by the darkening factor.
    darkened.red = factor * darkened.red
    darkened.green = factor * darkened.green
    darkened.blue = factor * darkened.blue
    return darkened
bcb937409a6790c6ac04a1550654e9b4fc398f9f
3,643,798
def fetch_all_tiles(session):
    """Fetch all tiles.

    Args:
        session: SQLAlchemy-style session used to query the ``Tile`` model.

    Returns:
        List of all ``Tile`` rows.
    """
    return session.query(Tile).all()
15e21dff372859ad07f76d97944b9a002f44a35e
3,643,799
def transaction_update_spents(txs, address):
    """
    Update spent information for list of transactions for a specific address. This method assumes the list of
    transaction complete and up-to-date.

    This methods loops through all the transaction and update all transaction outputs for given address, checks
    if the output is spent and add the spending transaction ID and index number to the outputs.

    The same list of transactions with updates outputs will be returned

    :param txs: Complete list of transactions for given address
    :type txs: list of Transaction
    :param address: Address string
    :type address: str

    :return list of Transaction:
    """
    # Map (previous txid, output index) -> transaction that spends it,
    # built from every input belonging to the given address.
    spend_list = {}
    for t in txs:
        for inp in t.inputs:
            if inp.address == address:
                spend_list.update({(inp.prev_txid.hex(), inp.output_n_int): t})
    address_inputs = list(spend_list.keys())
    # Mark each of the address's outputs spent/unspent; for spent outputs,
    # record the spending txid and the index of the matching input inside
    # the spending transaction.
    for t in txs:
        for to in t.outputs:
            if to.address != address:
                continue
            spent = True if (t.txid, to.output_n) in address_inputs else False
            txs[txs.index(t)].outputs[to.output_n].spent = spent
            if spent:
                spending_tx = spend_list[(t.txid, to.output_n)]
                # Locate the input in the spending tx that references this
                # exact output (txid + output index).
                spending_index_n = \
                    [inp for inp in txs[txs.index(spending_tx)].inputs
                     if inp.prev_txid.hex() == t.txid and inp.output_n_int == to.output_n][0].index_n
                txs[txs.index(t)].outputs[to.output_n].spending_txid = spending_tx.txid
                txs[txs.index(t)].outputs[to.output_n].spending_index_n = spending_index_n
    return txs
6ac33306cafd5c75b37e73c405fff4bcc732226f
3,643,800
def count_tilings(n: int) -> int:
    """Returns the number of unique ways to tile a row of length n >= 1
    using tiles of length 1-4 (a tetranacci-style recurrence).

    Rewritten iteratively: the original recursion recomputed identical
    subproblems and was exponential in ``n``.
    """
    if n < 5:
        # Base cases, same as the original recursion.
        return 2 ** (n - 1)
    # Sliding window of counts for lengths n-4 .. n-1, seeded with 1, 2, 4, 8.
    window = [1, 2, 4, 8]
    for _ in range(n - 4):
        window.append(sum(window[-4:]))
    return window[-1]
70f9caa9a27c65c73862dd8c415d93f5a7122632
3,643,801
import math def _meters_per_pixel(zoom, lat=0.0, tilesize=256): """ Return the pixel resolution for a given mercator tile zoom and lattitude. Parameters ---------- zoom: int Mercator zoom level lat: float, optional Latitude in decimal degree (default: 0) tilesize: int, optional Mercator tile size (default: 256). Returns ------- Pixel resolution in meters """ return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / ( tilesize * 2 ** zoom )
467d23bd437f153345c67c8c1cab1a086fde4995
3,643,802
import time import random def _generate_submit_id(): """Generates a submit id in form of <timestamp>-##### where ##### are 5 random digits.""" timestamp = int(time()) return "%d-%05d" % (timestamp, random.randint(0, 99999))
285c975e626f0ef1ffe9482432c70b981c9bdea7
3,643,803
def draw_from_simplex(ndim: int, nsample: int = 1) -> np.ndarray:
    """Draw uniformly from an n-dimensional simplex.

    Uses the classic "sorted uniforms" construction: sort ndim-1 uniform
    draws between fixed endpoints 0 and 1; the gaps between consecutive
    order statistics are a uniform draw from the simplex.

    Args:
        ndim: Dimensionality of simplex to draw from.
        nsample: Number of samples to draw from the simplex.

    Returns:
        A matrix of shape (nsample, ndim) that sums to one along axis 1.

    Raises:
        ValueError: if ndim < 1 or nsample < 1.
    """
    if ndim < 1:
        raise ValueError("Cannot generate less than 1D samples")
    if nsample < 1:
        raise ValueError("Generating less than one sample doesn't make sense")
    rand = np.random.uniform(size=(nsample, ndim - 1))
    # Pad with the fixed endpoints 0 and 1 before sorting each row.
    padded = np.concatenate(
        [np.zeros(shape=(nsample, 1)), rand, np.ones(shape=(nsample, 1))],
        axis=1,
    )
    # Renamed from `sorted` (shadowed the builtin).
    sorted_vals = np.sort(padded, axis=1)
    # The original built a rolled +/-1 matrix and matrix-multiplied; that
    # product is exactly the consecutive difference along axis 1.
    return np.diff(sorted_vals, axis=1)
8dac53212a7ccdab7ed9e6cbbffdf437442de393
3,643,804
def manhattanDistance( xy1, xy2 ):
    """Compute the L1 (taxicab) distance between 2-D points xy1 and xy2."""
    dx = xy1[0] - xy2[0]
    dy = xy1[1] - xy2[1]
    return abs(dx) + abs(dy)
ce0ee21237f253b1af33fbf088292405fd046fe3
3,643,805
import math


def Linear(in_features, out_features, dropout=0.0, bias=True):
    """Weight-normalized Linear layer (input: B x T x C)

    Args:
        in_features: input feature size.
        out_features: output feature size.
        dropout: used only to scale the weight-init std, as in the original.
        bias: whether the layer includes a bias term.

    Returns:
        nn.Linear wrapped in weight normalization.
    """
    m = nn.Linear(in_features, out_features, bias=bias)
    m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
    # FIX: with bias=False, m.bias is None and the original
    # `m.bias.data.zero_()` raised AttributeError.
    if m.bias is not None:
        m.bias.data.zero_()
    return nn.utils.weight_norm(m)
38decbeda35ef9a6ab5d1397af224b77d49b3342
3,643,806
def homogeneous_type(obj):
    """
    Checks that the type is "homogeneous" in that all lists are of objects
    of the same type, etc.

    Implemented by comparing the object's type structure against itself via
    ``same_types`` (defined elsewhere in this module).
    """
    return same_types(obj, obj)
e44a29de0651175f543cb9dc0d64a01e5a495e42
3,643,807
def crosscorr(f, g):
    """
    Takes two vectors of the same size, subtracts the vector elements by
    their respective means, and passes one over the other to construct a
    cross-correlation vector of length 2N-1, normalized by N.

    Rewritten to accumulate the lag products in Python lists instead of
    repeated np.append calls, each of which re-allocated the whole array.
    The returned values are unchanged.
    """
    n = len(f)
    # Remove the mean from each signal before correlating.
    f = f - np.mean(f)
    g = g - np.mean(g)
    leading = []   # 'r1' in the original: tail of f against head of g
    trailing = []  # 'r2' in the original: head of f against tail of g
    for i in range(n - 1):
        leading.append(np.dot(f[n - i - 1:n], g[0:i + 1]))
        trailing.append(np.dot(f[0:n - i - 1], g[i + 1:n]))
    # Full correlation: partial overlaps, full overlap, partial overlaps.
    r = np.array(leading + [np.dot(f, g)] + trailing)
    return r / n
6a4fec358404b7ca4f1df764c38518d39f635ed9
3,643,808
def nearest_neighbors(point_cloud_A, point_cloud_B, alg='knn'):
    """Find the nearest (Euclidean) neighbor in point_cloud_B (model) for
    each point in point_cloud_A (data).

    Parameters
    ----------
    point_cloud_A: Nx3 numpy array
        data points
    point_cloud_B: Mx3 numpy array
        model points
    alg: str, 'knn' or 'hungarian'
        'knn' finds each point's independent nearest neighbor;
        'hungarian' solves the one-to-one assignment minimizing total cost.

    Returns
    -------
    distances: (N, ) numpy array
        Euclidean distances from each point in point_cloud_A to its
        nearest neighbor in point_cloud_B.
    indices: (N, ) numpy array
        indices in point_cloud_B of each point_cloud_A point's nearest
        neighbor - these are the c_i's
    """
    assert 3 == point_cloud_A.shape[1] and 3 == point_cloud_B.shape[1]
    n, m = point_cloud_A.shape[0], point_cloud_B.shape[0]
    # NOTE(review): this forces equal-sized clouds even for the 'knn'
    # branch, although the docstring allows N != M — confirm intended.
    assert n == m
    distances = np.zeros(n)
    indices = np.zeros(n)
    if alg == 'knn':
        nbrs = NearestNeighbors(n_neighbors=1).fit(point_cloud_B)
        d, ids = nbrs.kneighbors(point_cloud_A)
        distances = np.array(d).flatten()
        indices = np.array(ids).flatten()
    elif alg == 'hungarian':
        # Dense cost matrix of pairwise Euclidean distances.
        cost = np.zeros((n, m))
        for i, j in product(range(n), range(m)):
            cost[i,j] = norm(point_cloud_A[i,:]- point_cloud_B[j,:])
        row_ids, indices = linear_sum_assignment(cost)
        distances = cost[row_ids, indices]
    else:
        raise NotImplementedError('NN algorithm must be one of: {}'.format(NN_ALGS))
    return distances, indices
0849c372c6358ded16c7907631a3bdd3c53385c6
3,643,809
def us_1040(form_values, year="latest"):
    """Compute US federal tax return.

    Args:
        form_values: mapping of form field values fed to the OTS wrapper.
        year: tax year (str or int); "latest" maps to the 2020 logic.

    Returns:
        Parsed OTS return (see ``helpers.parse_ots_return``).

    Raises:
        KeyError: if ``year`` is not one of the supported years.
    """
    # Dispatch table: year -> (OTS main function, form schema).
    _dispatch = {
        "latest": (ots_2020.us_main, data.US_1040_2020),
        "2020": (ots_2020.us_main, data.US_1040_2020),
        "2019": (ots_2019.us_main, data.US_1040_2019),
        "2018": (ots_2018.us_main, data.US_1040_2018),
        "2017": (ots_2017.us_main, data.US_1040_2017),
    }
    main_fn, schema = _dispatch[str(year)]
    return helpers.parse_ots_return(
        main_fn(helpers.generate_ots_return(form_values, schema["input_wrap"])),
        schema["output_wrap"],
    )
8056ea5dfae8698dd1e695b96680251f1fb45b63
3,643,810
def resolve_service_deps(services: dict) -> dict:
    """loop through services and handle needed_by

    Note: the parameter was annotated ``list`` but the code calls
    ``services.get(name)``, i.e. it must be a mapping of name -> service;
    the annotation has been corrected to ``dict``.

    First pass collects, per needed task, the union of tasks that provide
    it; second pass pushes those requirements back into every service.
    """
    needed_by = {}
    for name in services:
        service = services.get(name)
        needs = service.get_tasks_needed_by()
        for need, provides in needs.items():
            # De-duplicate providers accumulated across services.
            needed_by[need] = list(set(needed_by.get(need, []) + provides))
    for name in services:
        service = services.get(name)
        service.update_task_requires(needed_by)
    return services
4979d24aa6105579c3208f2953f8bdc276ad127b
3,643,811
def rolling_window(series, window_size):
    """
    Transforms an array of series into an array of sliding window arrays.

    If the passed in series is a matrix, each column will be transformed
    into an array of sliding windows.
    """
    n_windows = series.shape[0] - window_size + 1
    windows = [series[start : start + window_size] for start in range(n_windows)]
    return np.array(windows)
dfa95d12f287aeeb2f328919979376c0c890c0eb
3,643,812
def ldns_key_set_inception(*args):
    """Presumably sets the inception timestamp on an ldns key.

    Thin SWIG wrapper around ``_ldns.ldns_key_set_inception``; the original
    auto-generated docstring ("LDNS buffer.") was unrelated.
    NOTE(review): see the ldns C API documentation for the exact argument
    semantics — not verifiable from this wrapper alone.
    """
    return _ldns.ldns_key_set_inception(*args)
0411dd40b6d61740d872f1e4ac4f50683540de57
3,643,813
def verifyIP(ip):
    """Return True if *ip* is a valid dotted-quad IPv4 address string."""
    try:
        # Split on dots and require every part to be an integer.
        octets = [int(part) for part in ip.split('.')]
    except ValueError:
        return False
    # Exactly four octets, each within 0..255.
    if len(octets) != 4:
        return False
    return all(0 <= octet <= 255 for octet in octets)
72c373099a75adb2a1e776c863b6a2d1cb2698df
3,643,814
from datetime import datetime


def get_datetime_now(t=None, fmt='%Y_%m%d_%H%M_%S'):
    """Return timestamp as a string; default: current time, format: YYYY_DDMM_hhmm_ss."""
    moment = datetime.now() if t is None else t
    return moment.strftime(fmt)
c4fc830b7ede9d6f52ee81c014c03bb2ef5552dc
3,643,815
def is_firstline(text, medicine, disease):
    """Detect if first-line treatment is mentioned with a medicine in a sentence.

    Case-insensitive keyword matching: returns True only when the medicine
    name, the disease name, and one of the phrases "first-line treatment" or
    "first-or second-line treatment" all occur in the sentence.

    Parameters
    ----------
    text : str
        A single sentence.
    medicine : str
        A medicine's name.
    disease : str
        A disease's name.

    Returns
    -------
    bool
        True if all three are mentioned, False otherwise.
    """
    lowered = text.lower()
    phrases = ('first-line treatment', 'first-or second-line treatment')
    mentions_firstline = any(phrase in lowered for phrase in phrases)
    return (medicine.lower() in lowered
            and mentions_firstline
            and disease.lower() in lowered)
c9f8a31c6089c4f7545780028ccb1a033372c284
3,643,816
def mac_address(addr):
    """
    mac_address checks that a given string is in MAC address format

    Uppercases the input, then validates it against the module-level
    ``_mac_address_pattern`` regex (full match required).

    Raises:
        TypeError: if the string does not match the pattern.
        NOTE(review): ValueError would be the conventional exception for a
        bad value — confirm callers before changing.

    Returns:
        The normalized (uppercase) MAC string.
    """
    mac = addr.upper()
    if not _mac_address_pattern.fullmatch(mac):
        raise TypeError('{} does not match a MAC address pattern'.format(addr))
    return mac
201d32bd73f50c2818feef7c9c9be5371739dfcf
3,643,817
def py3_classifiers():
    """Fetch the Python 3-related trove classifiers.

    Downloads the full classifier list from PyPI and returns a generator
    over the classifiers under 'Programming Language :: Python :: 3'.

    Raises:
        ValueError: if PyPI responds with a non-200 status.
    """
    url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
    response = urllib_request.urlopen(url)
    try:
        try:
            status = response.status
        except AttributeError: #pragma: no cover
            # Older urlopen results expose .code instead of .status.
            status = response.code
        if status != 200: #pragma: no cover
            msg = 'PyPI responded with status {0} for {1}'.format(status, url)
            raise ValueError(msg)
        data = response.read()
    finally:
        # Always release the HTTP connection, even on error.
        response.close()
    classifiers = data.decode('utf-8').splitlines()
    base_classifier = 'Programming Language :: Python :: 3'
    # Lazy generator filtering to the Python 3 family of classifiers.
    return (classifier for classifier in classifiers
            if classifier.startswith(base_classifier))
70e769811758bef05a9e3d8722eca13808acd514
3,643,818
def match(i, j):
    """
    returns (red, white) count, where red is matches in color and position,
    and white is a match in color but not position

    Mastermind-style scoring; ``color_inds`` (defined elsewhere) maps a code
    to a sequence of color indices in 0..5.
    """
    red_count = 0
    # these are counts only of the items that are not exact matches
    i_colors = [0]*6
    j_colors = [0]*6
    for i_c, j_c in zip(color_inds(i), color_inds(j)):
        if i_c == j_c:
            red_count += 1
        else:
            i_colors[i_c] += 1
            j_colors[j_c] += 1
    # Each color contributes white pegs equal to the overlap of its
    # leftover counts in both codes.
    white_count = 0
    for i_c, j_c in zip(i_colors, j_colors):
        white_count += min(i_c, j_c)
    return (red_count, white_count)
06ddf17b6de367cd9158a33834431f3bc1c9e821
3,643,819
def time_delay_runge_kutta_4(fun, t_0, y_0, tau, history=None, steps=1000, width=1):
    """
    Apply the classic Runge-Kutta method to a time delay differential
    equation  f: t, y(t), y(t-tau) -> y'(t).

    Args:
        fun: right-hand side taking (t, y(t), y(t - tau)).
        t_0: initial time.
        y_0: initial state, scalar or 1-D array.  FIX: the caller's array is
            no longer mutated in place (the original did ``y_0 += ...`` on
            the passed-in array).
        tau: delay; must be an integer multiple of ``width``.
        history: optional (tau/width, dim) array of pre-t_0 states
            (zeros when None).
        steps: number of RK4 steps.
        width: step size.

    Returns:
        (steps + 1, dim) array of states from y(t_0) onward.
    """
    width = float(width)
    if not isinstance(y_0, np.ndarray):
        # Promote scalars to a 1-element state vector.
        y_0 = np.ones((1,), dtype=float) * y_0
    else:
        # FIX: work on a copy so the caller's array is not modified.
        y_0 = y_0.copy()
    dim = len(y_0)
    hist_steps = np.floor(tau/width)
    assert tau/width == hist_steps, "tau must be a multiple of width"
    hist_steps = int(hist_steps)
    if history is None:
        # FIX: ``np.float`` was removed from modern NumPy; use the builtin.
        history = np.zeros((hist_steps, dim), dtype=float)
    else:
        assert len(history) == hist_steps
    fun_eval = np.zeros((steps+1+hist_steps, dim), dtype=y_0.dtype)
    fun_eval[:hist_steps] = history
    fun_eval[hist_steps] = y_0
    for step in range(steps):
        # fun_eval[step] is y(t - tau) at the start of this step; the
        # midpoint stages reuse the same delayed sample, as in the original.
        delayed = fun_eval[step]
        k_1 = fun(t_0, y_0, delayed)
        k_2 = fun(t_0 + width/2, y_0 + width/2*k_1, delayed)
        k_3 = fun(t_0 + width/2, y_0 + width/2*k_2, delayed)
        k_4 = fun(t_0 + width, y_0 + width*k_3, delayed)
        t_0 += width
        y_0 = y_0 + width*(k_1 + 2*k_2 + 2*k_3 + k_4)/6
        fun_eval[step+1+hist_steps] = y_0
    return fun_eval[hist_steps:]
02905a447e07857fdacc4c6b3e34ddf15726b141
3,643,820
def Vstagger_to_mass(V):
    """
    V are the data on the top and bottom of a grid box.

    A simple conversion of the V stagger grid to the mass points: the
    average of the top and bottom value of each grid box.  Useful for
    converting V, XLAT_V, and XLONG_V to mass points.  (The difference
    between XLAT_V and XLAT is usually small, on the order of 10e-5.)

    Input:  Vgrid with size (##+1, ##)
    Output: V on mass points with size (##, ##)

    Rewritten as one vectorized adjacent-row average: the original built
    the result row by row with np.row_stack — quadratic copying, and
    ``np.row_stack`` is removed in NumPy 2.0.  The output is now always
    2-D, including the (previously 1-D) two-row input case.
    """
    V = np.asarray(V)
    # (row_j + row_j+1) / 2 = masspoint row, for every adjacent pair.
    return (V[:-1, :] + V[1:, :]) / 2.
f3dbb75506f05acb9f65ff0fe0335f4fe139127b
3,643,821
import base64 

def verify_l4_block_pow(hash_type: SupportedHashes, block: "l4_block_model.L4BlockModel", complexity: int = 8) -> bool:
    """Verify a level 4 block with proof of work scheme

    Args:
        hash_type: SupportedHashes enum type
        block: L4BlockModel with appropriate data to verify
        complexity: required difficulty threshold passed to check_complexity

    Returns:
        Boolean if valid hashed block with appropriate nonce
    """
    # Get hash for PoW calculation to compare
    hash_bytes = hash_l4_block(hash_type, block, block.nonce)
    # Make sure it matches complexity requirements
    if not check_complexity(hash_bytes, complexity):
        return False
    # Check that the hash bytes match what the block provided
    # (block.proof is stored base64-encoded).
    return hash_bytes == base64.b64decode(block.proof)
301ea1c4e74ae34fb61610a7e614ac1af437a6c3
3,643,822
def file_reader(file_name):
    """Return the value of the *last* line of ``file_name``, evaluated as a
    Python expression, or None when the file is empty.

    Preserves the original behavior: every line is evaluated in order and
    only the last result is kept.

    Cleanups: iterate the file directly instead of ``readlines()`` (no full
    in-memory list) and drop the redundant ``f.close()`` — the ``with``
    block already closes the file.

    SECURITY NOTE: ``eval`` executes arbitrary code; only use this on
    trusted files.  Consider ``ast.literal_eval`` if the lines are plain
    literals.
    """
    data = None
    with open(file_name, "r") as f:
        for line in f:
            data = eval(line)
    return data
6d3d63840cc48ccfdd5beefedf0d3a60c0f44cf9
3,643,824
def check_auth(username, password):
    """This function is called to check if a username / password
    combination is valid.

    Returns:
        AuthResponse.no_account if authentication fails,
        AuthResponse.no_block if the account has no assigned block,
        AuthResponse.success otherwise.
    """
    account = model.authenticate(username, password)
    if account is None:
        return AuthResponse.no_account
    if not model.hasAssignedBlock(account):
        return AuthResponse.no_block
    return AuthResponse.success
5c735f354ed56a5bc3960de96a76eacbc5a3bdd1
3,643,826
def plot_energy_ratio(
    reference_power_baseline,
    test_power_baseline,
    wind_speed_array_baseline,
    wind_direction_array_baseline,
    reference_power_controlled,
    test_power_controlled,
    wind_speed_array_controlled,
    wind_direction_array_controlled,
    wind_direction_bins,
    confidence=95,
    n_boostrap=None,
    wind_direction_bin_p_overlap=None,
    axarr=None,
    base_color="b",
    con_color="g",
    label_array=None,
    label_pchange=None,
    plot_simple=False,
    plot_ratio_scatter=False,
    marker_scale=1.0,
    show_count=True,
    hide_controlled_case=False,
    ls="--",
    marker=None,
):
    """
    Plot the balanced energy ratio.

    Wrapper around calculate_balanced_energy_ratio that plots, on three
    stacked axes: the baseline/controlled energy ratios, the change in
    energy ratio, and the percent change in energy ratio.

    Args:
        reference_power_baseline (np.array): power of reference turbine,
            baseline conditions.
        test_power_baseline (np.array): power of test turbine, baseline.
        wind_speed_array_baseline (np.array): wind speeds, baseline.
        wind_direction_array_baseline (np.array): wind directions, baseline.
        reference_power_controlled (np.array): power of reference turbine,
            controlled conditions.
        test_power_controlled (np.array): power of test turbine, controlled.
        wind_speed_array_controlled (np.array): wind speeds, controlled.
        wind_direction_array_controlled (np.array): wind directions,
            controlled.
        wind_direction_bins (np.array): wind direction bins.
        confidence (int, optional): confidence level to use.  Defaults to 95.
        n_boostrap (int, optional): number of bootstraps; if None,
            _calculate_bootstrap_iterations is used.  Defaults to None.
        wind_direction_bin_p_overlap (np.array, optional): percentage
            overlap between wind direction bins.  Defaults to None.
        axarr ([axes], optional): list of 3 axes to plot to.  Defaults to None.
        base_color (str, optional): color of baseline in plots.
        con_color (str, optional): color of controlled in plots.
        label_array ([str], optional): labels for the two cases.
        label_pchange (str, optional): label for percentage change.
        plot_simple (bool, optional): plot only the ratio, no confidence.
        plot_ratio_scatter (bool, optional): unused; kept for interface
            compatibility.
        marker_scale (float, optional): marker scale for count scatter.
        show_count (bool, optional): show the counts as scatter plot.
        hide_controlled_case (bool, optional): hide the controlled case from
            plots, for demonstration.
        ls (str, optional): line style for simple plots.
        marker (str, optional): marker for simple plots.

    Returns:
        np.array: the change in energy ratio per wind direction bin.
    """
    if axarr is None:
        fig, axarr = plt.subplots(3, 1, sharex=True)
    if label_array is None:
        label_array = ["Baseline", "Controlled"]
    if label_pchange is None:
        label_pchange = "Energy Gain"
    (
        ratio_array_base,
        lower_ratio_array_base,
        upper_ratio_array_base,
        counts_ratio_array_base,
        ratio_array_con,
        lower_ratio_array_con,
        upper_ratio_array_con,
        counts_ratio_array_con,
        diff_array,
        lower_diff_array,
        upper_diff_array,
        counts_diff_array,
        p_change_array,
        lower_p_change_array,
        upper_p_change_array,
        counts_p_change_array,
    ) = calculate_balanced_energy_ratio(
        reference_power_baseline,
        test_power_baseline,
        wind_speed_array_baseline,
        wind_direction_array_baseline,
        reference_power_controlled,
        test_power_controlled,
        wind_speed_array_controlled,
        wind_direction_array_controlled,
        wind_direction_bins,
        # FIX: was hard-coded to 95, silently ignoring the `confidence`
        # argument supplied by the caller.
        confidence=confidence,
        n_boostrap=n_boostrap,
        wind_direction_bin_p_overlap=wind_direction_bin_p_overlap,
    )
    if plot_simple:
        # Ratio lines only, no confidence intervals or count scatter.
        ax = axarr[0]
        ax.plot(
            wind_direction_bins,
            ratio_array_base,
            label=label_array[0],
            color=base_color,
            ls=ls,
            marker=marker,
        )
        if not hide_controlled_case:
            ax.plot(
                wind_direction_bins,
                ratio_array_con,
                label=label_array[1],
                color=con_color,
                ls=ls,
                marker=marker,
            )
        ax.axhline(1, color="k")
        ax.set_ylabel("Energy Ratio (-)")
        ax = axarr[1]
        ax.plot(
            wind_direction_bins,
            diff_array,
            label=label_pchange,
            color=con_color,
            ls=ls,
            marker=marker,
        )
        ax.axhline(0, color="k")
        ax.set_ylabel("Change in Energy Ratio (-)")
        ax = axarr[2]
        ax.plot(
            wind_direction_bins,
            p_change_array,
            label=label_pchange,
            color=con_color,
            ls=ls,
            marker=marker,
        )
        ax.axhline(0, color="k")
        ax.set_ylabel("% Change in Energy Ratio (-)")
    else:
        # Full plot: lines, confidence bands, and optional count scatter.
        ax = axarr[0]
        ax.plot(
            wind_direction_bins,
            ratio_array_base,
            label=label_array[0],
            color=base_color,
            ls="-",
            marker=".",
        )
        ax.fill_between(
            wind_direction_bins,
            lower_ratio_array_base,
            upper_ratio_array_base,
            alpha=0.3,
            color=base_color,
            label="_nolegend_",
        )
        if show_count:
            ax.scatter(
                wind_direction_bins,
                ratio_array_base,
                s=counts_ratio_array_base * marker_scale,
                label="_nolegend_",
                color=base_color,
                marker="o",
                alpha=0.2,
            )
        if not hide_controlled_case:
            ax.plot(
                wind_direction_bins,
                ratio_array_con,
                label=label_array[1],
                color=con_color,
                ls="-",
                marker=".",
            )
            ax.fill_between(
                wind_direction_bins,
                lower_ratio_array_con,
                upper_ratio_array_con,
                alpha=0.3,
                color=con_color,
                label="_nolegend_",
            )
            if show_count:
                ax.scatter(
                    wind_direction_bins,
                    ratio_array_con,
                    s=counts_ratio_array_con * marker_scale,
                    label="_nolegend_",
                    color=con_color,
                    marker="o",
                    alpha=0.2,
                )
        ax.axhline(1, color="k")
        ax.set_ylabel("Energy Ratio (-)")
        ax = axarr[1]
        ax.plot(
            wind_direction_bins,
            diff_array,
            label=label_pchange,
            color=con_color,
            ls="-",
            marker=".",
        )
        ax.fill_between(
            wind_direction_bins,
            lower_diff_array,
            upper_diff_array,
            alpha=0.3,
            color=con_color,
            label="_nolegend_",
        )
        if show_count:
            ax.scatter(
                wind_direction_bins,
                diff_array,
                s=counts_diff_array * marker_scale,
                label="_nolegend_",
                color=con_color,
                marker="o",
                alpha=0.2,
            )
        ax.axhline(0, color="k")
        ax.set_ylabel("Change in Energy Ratio (-)")
        ax = axarr[2]
        ax.plot(
            wind_direction_bins,
            p_change_array,
            label=label_pchange,
            color=con_color,
            ls="-",
            marker=".",
        )
        ax.fill_between(
            wind_direction_bins,
            lower_p_change_array,
            upper_p_change_array,
            alpha=0.3,
            color=con_color,
            label="_nolegend_",
        )
        if show_count:
            ax.scatter(
                wind_direction_bins,
                p_change_array,
                s=counts_p_change_array * marker_scale,
                label="_nolegend_",
                color=con_color,
                marker="o",
                alpha=0.2,
            )
        ax.axhline(0, color="k")
        ax.set_ylabel("% Change in Energy Ratio (-)")
    for ax in axarr:
        ax.grid(True)
        ax.set_xlabel("Wind Direction (Deg)")
    return diff_array
2ccdfa20dc8a475ab6c65086ab1f39d6db5e211f
3,643,827
def first_position():
    """Sets up two positions in the
    Upper left
    .X.Xo.
    X.Xoo.
    XXX...
    ......
    Lower right
    ......
    ..oooo
    .oooXX
    .oXXX.
    (X = black, o = white)
    They do not overlap as the Positions are size_limit 9 or greater.

    Returns a factory ``position_moves(s)`` that builds the position for a
    board of side length ``s``.
    """
    def position_moves(s):
        # Board string of side s with the two fixed corner patterns; '.'
        # pads the unused middle of each row and the empty middle rows.
        rest_of_row = '.'*(s-5)
        first_three = rest_of_row.join([ '.X.Xo', 'X.Xoo', 'XXX..',''])
        last_three = rest_of_row.join(['', '.oooo', 'oooXX', 'oXXX.',])
        board = first_three + '.'*s*(s-6) + last_three
        position = go.Position(size=s)
        # defaultdict() with no factory behaves like a plain dict here.
        moves_played = defaultdict()
        # Replay every stone onto the position, recording point -> colour.
        for pt, symbol in enumerate(board):
            if symbol == 'X':
                position.move(move_pt=pt, colour=go.BLACK)
                moves_played[pt] = go.BLACK
            elif symbol == 'o':
                position.move(move_pt=pt, colour=go.WHITE)
                moves_played[pt] = go.WHITE
        return position, moves_played
    return position_moves
029e965fe20f550030ece305975e96f7d1cd9115
3,643,829
def _create_teams( pool: pd.DataFrame, n_iterations: int = 500, n_teams: int = 10, n_players: int = 10, probcol: str = 'probs' ) -> np.ndarray: """Creates initial set of teams Returns: np.ndarray of shape axis 0 - number of iterations axis 1 - number of teams in league axis 2 - number of players on team """ # get the teams, which are represented as 3D array # axis 0 = number of iterations (leagues) # axis 1 = number of teams in league # axis 2 = number of players on team arr = _multidimensional_shifting( elements=pool.index.values, num_samples=n_iterations, sample_size=n_teams * n_players, probs=pool[probcol] ) return arr.reshape(n_iterations, n_teams, n_players)
5889cc356a812c65ca7825e26c835b520cad1680
3,643,830
def calculate_magnitude(data: np.ndarray) -> np.ndarray:
    """Return the per-sample magnitude of (x, y, z) axis data.

    *data* must have shape (n_samples, 3); the result has shape (n_samples,).
    """
    assert data.shape[1] == 3, f"Numpy array should have 3 axes, got {data.shape[1]}"
    squared = data * data
    return np.sqrt(squared.sum(axis=1))
6493660467154d3e45c10a7a4350e87fa73c9719
3,643,831
def clean_str(string: str) -> str:
    """Sanitise *string* for SQL insertion.

    Newlines become spaces and ASCII apostrophes become typographic
    apostrophes so the value can be embedded in a quoted literal.
    """
    table = str.maketrans({'\n': ' ', "'": '’'})
    return string.translate(table)
d3833293163114642b4762ee25ea7c8f850e9d54
3,643,832
def zeros(shape, name=None):
    """Create a trainable float32 variable initialized to all zeros."""
    initializer = tf.zeros_initializer()
    return tf.get_variable(
        name=name,
        shape=shape,
        dtype=tf.float32,
        initializer=initializer,
    )
2c20b960bd17a0dc752883e65f7a18e77a7cde32
3,643,833
import io
def parseTemplate(bStream):
    """Parse the Template in current byte stream, it terminates when meets an object.

    :param bStream: Byte stream positioned at the start of the template.
    :return: The parsed Template when an OBJECT descriptor is reached.
        NOTE(review): if the stream ends before any OBJECT is seen, the
        function logs a warning and falls off the end, implicitly
        returning None — confirm callers handle that case.
    """
    template = Template()
    eof = endPos(bStream)
    while True:
        currPos = bStream.tell()
        if currPos < eof:
            # Peek at the next descriptor byte pair as a bit string,
            # then rewind so the attribute parser re-reads it.
            desc = '{0:08b}'.format(readUSHORT(bStream))
            bStream.seek(currPos, io.SEEK_SET)
            # The top 3 bits encode the component role; an OBJECT marks
            # the end of the template section.
            if ComponentRole[desc[:3]] == OBJECT:
                return template
            else:
                assert(int(desc[3]))  # all components in Template must have label.
                template._attrList.append(parseAttributeInTemplate(bStream))
        else:
            logger.warning("Encounter a Set without Objects")
            break
716858cde357be4036b62824ac17ba60cf71eea1
3,643,834
def load_circuit(filename:str):
    """
    Reads a MNSensitivity cicuit file (.mc) and returns a Circuit list (format is 1D array of
    tuples, the first element contains a Component object, the 2nd a SER/PAL string). Format of
    the .mc file is:
        * each line contains a Component object init string (See Component class doc string
        to see format) after an orientation string (SER or PAL, specifies if the component is
        series or parallel to ground).
        * Comments can be specified by '#'
        * Blank lines are skipped
        * Components with earliest line number is assumed closest to source, last line number
        closest to load, and progressively inbetween.

    Returns an empty list on any parse error (after printing a diagnostic).
    """
    circuit = []
    lnum = 0

    #Open file...
    with open(filename) as file:

        #For each line...
        while True:

            #Read line...
            line = file.readline()
            lnum += 1;
            if not line:
                break;

            #Break into tokens...
            words = line.split()
            if len(words) == 0:
                continue

            #Skip comments
            if words[0] == "#" or words[0][0] == '#':
                continue

            # A valid line needs at least the orientation token plus the
            # component fields.
            if len(words) < 5:
                print(f"ERROR: Fewer than 5 words on line {lnum}.")
                print(words)
                return []

            # Everything after the first space is the Component init string.
            # NOTE(review): the bare except hides the real parse error from
            # Component() — consider narrowing it.
            try:
                idx = line.find(" ")
                new_comp = Component(line[idx+1:])
            except:
                print(f"Failed to interpret component string on line {lnum}.")
                return []

            if words[0].upper() == "SER":
                circuit.append( (new_comp, "SER") )
            elif words[0].upper() == "PAL":
                circuit.append( (new_comp, "PAL") )
            else:
                unrectok = words[0]
                print(f"ERROR: Unrecognized orientation token '{unrectok}' on line {lnum}. Acceptable tokens are 'SER' and 'PAL'.")
                return []

    return circuit
c77aa31f9a1c1f6803795c19de509ea967f65077
3,643,837
def get_output_attribute(out, attribute_name, cuda_device, reduction="sum"):
    """
    This function handles processing/reduction of output for both
    DataParallel or non-DataParallel situations.
    For the case of multiple GPUs, this function will sum all values for a
    certain output attribute in various batches together.

    Parameters
    ---------------------
    :param out: Dictionary, output of model during forward pass,
    :param attribute_name: str,
    :param cuda_device: list or int
    :param reduction: (string, optional) reduction to apply to the output.
        Default: 'sum'.
    """
    value = out[attribute_name]
    # A list of devices means DataParallel: the attribute holds one entry
    # per replica and must be reduced; a single device passes through.
    if not isinstance(cuda_device, list):
        return value
    if reduction == "sum":
        return value.sum()
    if reduction == "mean":
        return value.sum() / float(len(value))
    raise ValueError("invalid reduction type argument")
c09ff6a3dd4ae2371b1bbec12d4617e9ed6c6e1e
3,643,838
def get_ref_aidxs(df_fs):
    """Part of the hotfix for redundant FCGs.

    I did not record the occurrence id in the graphs, which was stupid. So
    now I need to use the df_fs to get the information instead. Needs to be
    used with fid col, which is defined in filter_out_fcgs_ffs_all.
    """
    # Pair each fid with its _aidxf value; later duplicates win, exactly
    # as in a comprehension over zip.
    return dict(zip(df_fs['fid'], df_fs['_aidxf']))
9b57d7297d96f6b711bb9d3c37f85a17c4ccacd5
3,643,839
def format_info(info): """ Print info neatly """ sec_width = 64 eq = ' = ' # find key width key_widths = [] for section, properties in info.items(): for prop_key, prop_val in properties.items(): if type(prop_val) is dict: key_widths.append(len(max(list(prop_val.keys()), key=len)) + 4) else: key_widths.append(len(prop_key)) key_width = max(key_widths) # format items msg = [] for section, properties in info.items(): n0 = (sec_width - 2 - len(section)) // 2 n1 = n0 if n0 * 2 + 2 + len(section) == sec_width else n0 + 1 msg.append('\n' + '=' * n0 + f' {section} ' + '=' * n1) for prop_key, prop_val in properties.items(): if type(prop_val) is dict: msg.append((prop_key + ' ').ljust(sec_width, '_')) for sub_key, sub_val in prop_val.items(): msg.append(' ' * 4 + sub_key.ljust(key_width - 4) + eq + str(sub_val)) else: msg.append(prop_key.ljust(key_width) + eq + str(prop_val)) msg.append('=' * (n0 + n1 + 2 + len(section))) return '\n'.join(msg)
9dd3a6ef15909230725f2be6eb698e7ca08a2d8b
3,643,840
import itertools
import copy
def server_handle_hallu_message(
        msg_output, controller, mi_info, options, curr_iter):
    """
    Petridish server handles the return message of a forked process
    that watches over a halluciniation job.

    :param msg_output: tuple (model_str, model_iter, parent_iter,
        search_depth) describing the finished hallucination job.
    :param controller: owns the child-model queue ``q_child``.
    :param mi_info: list of ModelSearchInfo, indexed/appended by model iter.
    :param options: search options (selection counts and sizes are read).
    :param curr_iter: current global model-iteration counter.
    :return: the updated ``curr_iter`` after enqueueing any child models.
    """
    log_dir_root = logger.get_logger_dir()
    q_child = controller.q_child
    model_str, model_iter, _parent_iter, search_depth = msg_output
    # Record performance in the main log
    jr = parse_remote_stop_file(_mi_to_dn(log_dir_root, model_iter))
    if jr is None:
        # job failure: reap the virtual resource and move on.
        logger.info('Failed mi={}'.format(model_iter))
        return curr_iter
    (fp, ve, te, hallu_stats, l_op_indices, l_op_omega) = (
        jr['fp'], jr['ve'], jr['te'], jr['l_stats'],
        jr['l_op_indices'], jr['l_op_omega']
    )
    logger.info(
        ("HALLU : mi={} val_err={} test_err={} "
         "Gflops={} hallu_stats={}").format(
            model_iter, ve, te, fp * 1e-9, hallu_stats))
    # Store validation error and flop count on the finished model's record.
    mi_info[model_iter].ve = ve
    mi_info[model_iter].fp = fp

    ## compute hallucination related info in net_info
    net_info = net_info_from_str(model_str)
    hallu_locs = net_info.contained_hallucination()  # contained
    hallu_indices = net_info.sorted_hallu_indices(hallu_locs)
    # feature selection based on params
    l_fs_ops, l_fs_omega = feature_selection_cutoff(
        l_op_indices, l_op_omega, options)
    separated_hallu_info = net_info.separate_hallu_info_by_cname(
        hallu_locs, hallu_indices, l_fs_ops, l_fs_omega)

    ## Select a subset of hallucination to add to child model
    l_selected = []
    # sort by -cos(grad, hallu) for the indices, 0,1,2,...,n_hallu-1.
    processed_stats = [process_hallu_stats_for_critic_feat([stats]) \
        for stats in hallu_stats]
    logger.info('processed_stats={}'.format(processed_stats))
    logger.info('separated_hallu_info={}'.format(separated_hallu_info))
    # greedy select with gradient boosting
    l_greedy_selected = []
    if options.n_greed_select_per_init:
        # Greedy: rank hallucinations by their (first) critic feature and
        # take growing prefixes of the ranking as candidate subsets.
        greedy_order = sorted(
            range(len(hallu_indices)),
            key=lambda i : - processed_stats[i][0])
        min_select = options.n_hallus_per_select
        max_select = max(min_select, len(hallu_indices) // 2)
        for selected_len in range(min_select, max_select + 1):
            selected = greedy_order[:selected_len]
            l_greedy_selected.append(selected)
        n_greedy_select = len(l_greedy_selected)
        if n_greedy_select > options.n_greed_select_per_init:
            # random choose
            l_greedy_selected = list(np.random.choice(
                l_greedy_selected, options.n_greed_select_per_init,
                replace=False))
    # random select a subset
    l_random_selected = []
    if options.n_rand_select_per_init:
        # also try some random samples
        l_random_selected = online_sampling(
            itertools.combinations(
                range(len(hallu_indices)),
                options.n_hallus_per_select
            ),
            options.n_rand_select_per_init)
        np.random.shuffle(l_random_selected)
    l_selected = l_greedy_selected + l_random_selected

    ## for each selected subset of hallu, make a model for q_child
    # since more recent ones tend to be better,
    # we insert in reverse order, so greedy are inserted later.
    for selected in reversed(l_selected):
        # new model description
        child_info = copy.deepcopy(net_info)
        l_hi = [ hallu_indices[s] for s in selected ]
        child_info = child_info.select_hallucination(
            l_hi, separated_hallu_info)
        # Compute initialization stat
        stat = process_hallu_stats_for_critic_feat(
            [hallu_stats[s] for s in selected])
        # update mi_info
        curr_iter += 1
        child_str = child_info.to_str()
        mi_info.append(ModelSearchInfo(
            curr_iter, model_iter, search_depth+1,
            None, None, child_str, stat))
        controller.add_one_to_queue(
            q_child, mi_info, curr_iter, child_info)
    return curr_iter
a4dc3da855066d719ca8a798a691864ed9d04e7f
3,643,841
def pBottleneckSparse_model(inputs, train=True, norm=True, **kwargs): """ A pooled shallow bottleneck convolutional autoencoder model.. """ # propagate input targets outputs = inputs # dropout = .5 if train else None input_to_network = inputs['images'] shape = input_to_network.get_shape().as_list() stride = 16 hidden_size = 2#np.ceil(shape[1]/stride) deconv_size = 12#(shape[1]/hidden_size).astype(int) ### YOUR CODE HERE with tf.variable_scope('conv1') as scope: convweights = tf.get_variable(shape=[7, 7, 3, 64], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(), name='weights') conv = tf.nn.conv2d(input_to_network, convweights,[1, 4, 4, 1], padding='SAME') biases = tf.get_variable(initializer=tf.constant_initializer(0), shape=[64], dtype=tf.float32, trainable=True, name='biases') bias = tf.nn.bias_add(conv, biases) relu = tf.nn.relu(bias, name='relu') pool = tf.nn.max_pool(value=relu, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME', name='pool') # assign layers to output outputs['input'] = input_to_network outputs['conv1_kernel'] = convweights outputs['conv1'] = relu outputs['pool1'] = pool outputs['convweights'] = convweights print(outputs['input'].shape) print(outputs['conv1'].shape) print(outputs['pool1'].shape) with tf.variable_scope('deconv2') as scope: deconvweights = tf.get_variable(shape=[deconv_size, deconv_size, 3, 64], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(), name='weights') deconvRegularizer = tf.nn.l2_loss(deconvweights) deconv = tf.nn.conv2d_transpose(outputs['pool1'], deconvweights, outputs['input'].shape, [1, 12, 12, 1], padding='VALID', name=None) # assign layers to output outputs['deconv2'] = deconv outputs['deconvweights'] = deconvweights ### END OF YOUR CODE for k in ['input','conv1', 'deconv2']: assert k in outputs, '%s was not found in outputs' % k return outputs, {}
0a9609b776a9373f28bacf10f9f6aa9dcfbb17d2
3,643,842
def CoarseDropout(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None,
                  deterministic=False, random_state=None, mask=None):
    """
    Augmenter that sets rectangular areas within images to zero.

    In contrast to Dropout, these areas can have larger sizes.
    (E.g. you might end up with three large black rectangles in an image.)

    Note that the current implementation leads to correlated sizes,
    so when there is one large area that is dropped, there is a high likelihood
    that all other dropped areas are also large.

    This method is implemented by generating the dropout mask at a lower
    resolution (than the image has) and then upsampling the mask before
    dropping the pixels.

    dtype support::

        See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.

    Parameters
    ----------
    p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The probability of any pixel being dropped (i.e. set to zero).

            * If a float, then that value will be used for all pixels. A value
              of 1.0 would mean, that all pixels will be dropped. A value of
              0.0 would lead to no pixels being dropped.
            * If a tuple ``(a, b)``, then a value p will be sampled from the
              range ``a <= p <= b`` per image and be used as the pixel's
              dropout probability.
            * If a StochasticParameter, then this parameter will be used to
              determine per pixel whether it should be dropped (sampled value
              of 0) or shouldn't (sampled value of 1).

    size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the dropout
        mask in absolute pixel dimensions.

            * If an integer, then that size will be used for both height and
              width. E.g. a value of 3 would lead to a ``3x3`` mask, which is
              then upsampled to ``HxW``, where ``H`` is the image size and W
              the image width.
            * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be
              sampled from the range ``[a..b]`` and the mask will be generated
              at size ``MxN``, then upsampled to ``HxW``.
            * If a StochasticParameter, then this parameter will be used to
              determine the sizes. It is expected to be discrete.

    size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the dropout
        mask *in percent* of the input image.

            * If a float, then that value will be used as the percentage of the
              height and width (relative to the original size). E.g. for value
              p, the mask will be sampled from ``(p*H)x(p*W)`` and later
              upsampled to ``HxW``.
            * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be
              sampled from the interval ``(a, b)`` and used as the percentages,
              i.e the mask size will be ``(m*H)x(n*W)``.
            * If a StochasticParameter, then this parameter will be used to
              sample the percentage values. It is expected to be continuous.

    per_channel : bool or float, optional
        Whether to use the same value (is dropped / is not dropped)
        for all channels of a pixel (False) or to sample a new value for each
        channel (True).
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as True, otherwise as False.

    min_size : int, optional
        Minimum size of the low resolution mask, both width and height. If
        `size_percent` or `size_px` leads to a lower value than this, `min_size`
        will be used instead. This should never have a value of less than 2,
        otherwise one may end up with a ``1x1`` low resolution mask, leading
        easily to the whole image being dropped.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)

    drops 2 percent of all pixels on an lower-resolution image that has
    50 percent of the original image's size, leading to dropped areas that
    have roughly 2x2 pixels size.

    >>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))

    generates a dropout mask at 5 to 50 percent of image's size. In that mask,
    0 to 5 percent of all pixels are dropped (random per image).

    >>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))

    same as previous example, but the lower resolution image has 2 to 16 pixels
    size.

    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)

    drops 2 percent of all pixels at 50 percent resolution (2x2 sizes)
    in a channel-wise fashion, i.e. it is unlikely
    for any pixel to have all channels set to zero (black pixels).

    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)

    same as previous example, but the `per_channel` feature is only active
    for 50 percent of all images.

    """
    # The Binomial parameter stores the KEEP probability, hence 1 - p.
    if ia.is_single_number(p):
        p2 = iap.Binomial(1 - p)
    elif ia.is_iterable(p):
        ia.do_assert(len(p) == 2)
        ia.do_assert(p[0] < p[1])
        ia.do_assert(0 <= p[0] <= 1.0)
        ia.do_assert(0 <= p[1] <= 1.0)
        # Range (a, b) of drop probabilities maps to keep probabilities
        # Uniform(1 - b, 1 - a).
        p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
    elif isinstance(p, iap.StochasticParameter):
        p2 = p
    else:
        raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))

    # Wrap the keep/drop mask so it is sampled at low resolution and
    # upscaled, which produces the coarse rectangular dropout areas.
    if size_px is not None:
        p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px, min_size=min_size)
    elif size_percent is not None:
        p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent, min_size=min_size)
    else:
        raise Exception("Either size_px or size_percent must be set.")

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    return MultiplyElementwise(p3, per_channel=per_channel, name=name, deterministic=deterministic,
                               random_state=random_state, mask=mask)
c60828aa2a81459ef0a84440305f6d73939e2eb5
3,643,843
def chenneling(x):
    """
    This function makes the dataset suitable for training.
    Especially, gray scale image does not have channel information.
    This function forces one channel to be created for gray scale images.

    Accepts array-likes (lists/tuples) as well as ndarrays; grayscale
    input of shape (N, H, W) gains a trailing channel before the NHWC ->
    NCHW transpose.  Returns a float ndarray of shape (N, C, H, W).
    """
    # FIX: convert up front so color inputs given as plain lists no
    # longer crash on .shape / .transpose (previously only the grayscale
    # branch called np.asarray).
    x = np.asarray(x)
    if len(x.shape) == 3:
        # grayscale: (N, H, W) -> (N, H, W, 1)
        N, H, W = x.shape
        x = x.reshape((N, H, W, 1))
    # NHWC -> NCHW
    x = x.transpose(0, 3, 1, 2)
    return x.astype(float)
c47c1690affbb52c98343185cae7e0679bfff41a
3,643,844
import collections def _get_ordered_label_map(label_map): """Gets label_map as an OrderedDict instance with ids sorted.""" if not label_map: return label_map ordered_label_map = collections.OrderedDict() for idx in sorted(label_map.keys()): ordered_label_map[idx] = label_map[idx] return ordered_label_map
4c5e56789f57edda61409f0693c3bccb57ddc7cf
3,643,845
def eight_interp(x, a0, a1, a2, a3, a4, a5, a6, a7):
    """``Approximation degree = 8``

    Evaluates a0 + a1*x + a2*x^2 + ... + a7*x^7.
    """
    coefficients = (a0, a1, a2, a3, a4, a5, a6, a7)
    return sum(c * x ** power for power, c in enumerate(coefficients))
98be2259c9e0fae214234b635a3ff55608f707d1
3,643,846
import logging
def create_ec2_instance(image_id, instance_type, keypair_name, user_data):
    """Provision and launch an EC2 instance

    The method returns without waiting for the instance to reach
    a running state.

    :param image_id: ID of AMI to launch, such as 'ami-XXXX'
    :param instance_type: string, such as 't2.micro'
    :param keypair_name: string, name of the key pair
    :param user_data: string, user-data script passed to the instance
    :return Dictionary describing the single launched instance.
        If error, returns None.
    """
    ec2_client = boto3.client('ec2')
    # Keep the try body to the one call that can raise; everything else
    # is plain dict access.
    try:
        response = ec2_client.run_instances(
            ImageId=image_id,
            InstanceType=instance_type,
            KeyName=keypair_name,
            MinCount=1,
            MaxCount=1,
            UserData=user_data,
            SecurityGroups=[
                'AllowSSHandOSB',
            ],
        )
    except ClientError as e:
        logging.error(e)
        return None
    # FIX: the original assigned `instance = response['Instances'][0]`
    # inside the try and then re-indexed the response on return; the
    # duplicate/dead local is removed.
    return response['Instances'][0]
4c1edda4b2aed0179026aacb6f5a95a0b550ef66
3,643,847
def get_pop(state):
    """Returns the population of the passed in state

    Args:
    - state: state in which to get the population

    Returns -1 when no abbreviation is known for *state*.
    """
    abbrev = get_abbrev(state)
    if abbrev == '':
        return -1
    return int(us_areas[abbrev][1])
0d44a033eaff65c1430aab806a93686c68f5c490
3,643,848
import requests
import json
def GitHub_post(data, url, *, headers):
    """
    POST the JSON-encoded ``data`` to GitHub at ``url``.

    Returns the decoded json response from the server, or raises on an
    error status.
    """
    payload = json.dumps(data)
    response = requests.post(url, headers=headers, data=payload)
    GitHub_raise_for_status(response)
    return response.json()
7dbdbd3beed6e39ff3e20509114a11761a05ab52
3,643,849
def subsample(inputs, factor, scope=None):
    """Subsample the input along the spatial dimensions.

    Args:
      inputs: A `Tensor` of size [batch, height_in, width_in, channels].
      factor: The subsampling factor.
      scope: Optional variable_scope.

    Returns:
      output: A `Tensor` of size [batch, height_out, width_out, channels]
        with the input, either intact (if factor == 1) or subsampled
        (if factor > 1) via a stride-`factor` 1x1 max pool.
    """
    if factor == 1:
        return inputs
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
32df6bccbb016d572bbff227cf42aadeb07c6242
3,643,850
def password_reset(*args, **kwargs):
    """
    Override view to use a custom Form

    Delegates to the base password-reset view, forcing our
    ``PasswordResetFormAccounts`` as the form class.
    """
    return password_reset_base(
        *args, **dict(kwargs, password_reset_form=PasswordResetFormAccounts)
    )
a2764365118cc0264fbeddf0b79457a0f7bf3c62
3,643,851
def update_tab_six_two(
    var,
    time_filter,
    month,
    hour,
    data_filter,
    filter_var,
    min_val,
    max_val,
    normalize,
    global_local,
    df,
):
    """Update the contents of tab size. Passing in the info from the dropdown and the general info.

    :param df: JSON-serialized dataframe (orient="split") carried through the app store.
    :return: 4-tuple (heatmap figure, heatmap-notes style, barchart figure,
        barchart style); the bar chart is only rendered when ``data_filter``
        is truthy, otherwise empty placeholders are returned hidden.
    """
    df = pd.read_json(df, orient="split")
    time_filter_info = [time_filter, month, hour]
    data_filter_info = [data_filter, filter_var, min_val, max_val]
    heat_map = custom_heatmap(df, global_local, var, time_filter_info, data_filter_info)
    # Style dict that hides a component in the layout.
    no_display = {"display": "none"}
    if data_filter:
        return (
            heat_map,
            {},
            barchart(df, var, time_filter_info, data_filter_info, normalize),
            {},
        )
    # No data filter: show the heatmap only and hide the bar chart slot.
    return heat_map, no_display, {"data": [], "layout": {}, "frames": []}, no_display
0ce47fc30c088eae245de1da8bc4392408f16e26
3,643,852
import json
async def blog_api(request: Request, year: int, month: int, day: int, title: str) -> json:
    """Handle blog.

    Looks up the blog entry by the xxh64 digest of the URL-decoded title.
    Returns the entry as JSON when the requested date matches the entry's
    date, redirects to the entry's canonical path otherwise, and raises
    ``BlogNotFound`` when no entry exists.

    NOTE(review): the ``json`` called below appears to be a response
    helper (e.g. sanic.response.json) shadowing the stdlib import above,
    and the ``-> json`` annotation inherits that ambiguity — confirm.
    """
    blog_date = {"year": year, "month": month, "day": day}
    req_blog = app.blog.get(xxh64(unquote(title)).hexdigest())
    if req_blog:
        # Compare only the date fields present on the stored entry.
        if all(
            map(lambda x: req_blog["date"][x] == blog_date[x], req_blog["date"])):
            return json(
                {
                    "message": f"Hope you enjoy \"{unquote(title)}\"",
                    # NOTE(review): "status" is populated with the request
                    # headers here — looks accidental; verify intent.
                    "status": request.headers,
                    "error": None,
                    "results": req_blog
                },
                status = 200)
        else:
            return redirect(f"/{req_blog['blog_path']}")
    else:
        raise BlogNotFound(f"Blog \"{unquote(title)}\" Not Found!")
6c497a9280c8c8a1301f407c06065846267743f8
3,643,853
def coherence_score_umass(X, inv_vocabulary, top_words, normalized=False):
    """
    Extrinsic UMass coherence measure

    Parameter
    ----------
    X : array-like, shape=(n_samples, n_features)
        Document word matrix.
    inv_vocabulary: dict
        Dictionary of index and vocabulary from vectorizer.
    top_words: list
        List of top words for each topic-sentiment pair
    normalized: bool
        If true, return to NPMI

    Returns
    -----------
    score: float
    """
    # Binary document-word occurrence matrix.
    wordoccurances = (X > 0).astype(int)
    N = X.shape[0]
    totalcnt = 0
    PMI = 0
    NPMI = 0
    for allwords in top_words:
        for word1 in allwords:
            for word2 in allwords:
                if word1 != word2:
                    ind1 = inv_vocabulary[word1]
                    ind2 = inv_vocabulary[word2]
                    # ind1 > ind2 restricts to each unordered pair once.
                    if ind1 > ind2:
                        # Product of marginal document frequencies p(w1)*p(w2).
                        denominator = (np.count_nonzero(wordoccurances > 0, axis=0)[
                            ind1]/N) * (np.count_nonzero(wordoccurances > 0, axis=0)[ind2]/N)
                        # Smoothed co-occurrence probability (p(w1,w2) + 1/N).
                        numerator = (
                            (np.matmul(wordoccurances[:, ind1], wordoccurances[:, ind2])) + 1) / N
                        PMI += np.log(numerator) - np.log(denominator)
                        # NOTE(review): textbook NPMI is
                        # PMI / (-log p(w1,w2)); this log(den)/log(num) - 1
                        # form looks non-standard — confirm it is intended.
                        NPMI += (np.log(denominator) / np.log(numerator)) - 1
                        totalcnt += 1
    if normalized:
        score = NPMI / totalcnt
    else:
        score = PMI / totalcnt
    return score
185cfa1e6df64e799ae07116c8f88ef9cd37c94b
3,643,854
def _splitaddr(addr): """ splits address into character and decimal :param addr: :return: """ col='';rown=0 for i in range(len(addr)): if addr[i].isdigit(): col = addr[:i] rown = int(addr[i:]) break elif i==len(addr)-1: col=addr return col,rown
6f4ef43ed926a468ae5ae22fc062fe2b2701a18a
3,643,855
def checksum(data):
    """
    Compute the one-byte additive checksum of *data*.

    :return: bytes of length 1 — the value that makes the byte sum of
        data + checksum a multiple of 256.
    """
    assert isinstance(data, bytes)
    assert len(data) >= MINIMUM_MESSAGE_SIZE - 2
    assert len(data) <= MAXIMUM_MESSAGE_SIZE - 2
    # (-sum) % 256 is 256 - (sum % 256) with the 256 case wrapping to 0,
    # matching the original's ValueError fallback to bytes([0]).
    return bytes([(-sum(data)) % 256])
105bb5a9fe748ee352c080939ea33936c661e77b
3,643,856
def as_character(
    x,
    str_dtype=str,
    _na=np.nan,
):
    """Convert an object or elements of an iterable into string

    Aliases ``as_str`` and ``as_string``.

    Args:
        x: The object
        str_dtype: The string dtype to convert to
        _na: How NAs should be casted; ``np.nan`` keeps them unchanged
            (the resulting dtype becomes object in that case).

    Returns:
        The converted object (array/series cast to str dtype, iterable
        elements stringified, or *x* itself converted).
    """
    # All the heavy lifting lives in the shared _as_type dispatcher.
    return _as_type(x, str_dtype, na=_na)
ed8653f5c713fd257062580e03d26d48aaac3421
3,643,857
def test_logger(request: HttpRequest) -> HttpResponse:
    """
    Generate a log to test logging setup.

    Use a GET parameter to specify level, default to INFO if absent.
    Value can be INFO, WARNING, ERROR, EXCEPTION, UNCATCHED_EXCEPTION.
    Use a GET parameter to specify message, default to "Test logger"

    Example: test_logger?level=INFO&message=Test1

    :param request: HttpRequest request
    :return: HttpResponse web response
    """
    message = request.GET.get("message", "Test logger")
    level = request.GET.get("level", "INFO")
    # Anything outside the known set degrades to INFO.
    if level not in ("INFO", "WARNING", "ERROR", "EXCEPTION", "UNCATCHED_EXCEPTION"):
        level = "INFO"
    if level == "UNCATCHED_EXCEPTION":
        # Deliberately propagate so upstream error handling is exercised.
        raise Exception(message)
    if level == "EXCEPTION":
        try:
            raise Exception(message)
        except Exception:
            logger.exception("test_logger")
    elif level == "WARNING":
        logger.warning(message)
    elif level == "ERROR":
        logger.error(message)
    else:
        logger.info(message)
    return HttpResponse("ok")
04ef0d03d85402b5005660d9a06ae6ec775cb712
3,643,858
def remoteness(N):
    """
    Compute the remoteness of N.

    Parameters
    ----------
    N : Nimber
        The nimber of interest.

    Returns
    -------
    remote : int
        The remoteness of N.
    """
    if N.n == 0:
        return 0
    child_remotes = {remoteness(child) for child in N.left}
    even_remotes = [r for r in child_remotes if r % 2 == 0]
    # With an even option available, move to the smallest even remoteness;
    # otherwise (all options odd) take the largest.
    if even_remotes:
        return 1 + min(even_remotes)
    return 1 + max(child_remotes)
6ea40df2a79a2188b3d7c9db69ee9038ec2e6462
3,643,860
def breakfast_analysis_variability(in_path, identifier, date_col, time_col, min_log_num=2, min_separation=4, plot=True):
    """
    Description:\n
    This function calculates the variability of loggings in good logging day by subtracting 5%,10%,25%,50%,75%,90%,95% quantile of breakfast time from the 50% breakfast time. It can also make a histogram that represents the 90%-10% interval for all subjects.\n

    Input:\n
    - in_path (str, pandas df): input path, file in pickle, csv or panda dataframe format.
    - identitfier(str) : participants' unique identifier such as id, name, etc.
    - date_col(str) : the column that represents the dates.
    - time_col(str) : the column that represents the float time.
    - min_log_num (count,int): filtration criteria on the minimum number of loggings each day.
    - min_seperation(hours,int): filtration criteria on the minimum separations between the earliest and latest loggings each day.
    - plot(bool) : Whether generating a histogram for breakfast variability. Default = True.

    Return:\n
    - A dataframe that contains 5%,10%,25%,50%,75%,90%,95% quantile of breakfast time minus 50% time for each subjects from the in_path file.\n

    Requirements:\n
    in_path file must have the following columns:\n
    - unique_code\n
    - date\n
    - local_time\n

    NOTE(review): ``date_col``/``time_col`` are passed to the good-day
    filter but the quantile computation below hard-codes the
    'unique_code'/'date'/'local_time' columns — confirm that is intended.
    """
    df = universal_key(in_path)
    # leave only the loggings in a good logging day
    df['in_good_logging_day'] = in_good_logging_day(df, identifier, time_col, min_log_num, min_separation)
    df = df[df['in_good_logging_day']==True]
    # Breakfast = earliest logging per subject per day; then per-subject
    # quantiles of those daily breakfast times.
    breakfast_series = df.groupby(['unique_code', 'date'])['local_time'].min().groupby('unique_code').quantile([0.05, 0.10, 0.25, 0.5, 0.75, 0.90, 0.95])
    breakfast_df = pd.DataFrame(breakfast_series)
    # Pivot the (subject, quantile) multi-index into one row per subject.
    all_rows = []
    for index in breakfast_df.index:
        tmp_dict = dict(breakfast_series[index[0]])
        tmp_dict['id'] = index[0]
        all_rows.append(tmp_dict)
    breakfast_summary_df = pd.DataFrame(all_rows, columns = ['id', 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95])\
    .rename(columns = {0.05: '5%', 0.1: '10%', 0.25: '25%', 0.5: '50%', 0.75: '75%', 0.9: '90%', 0.95: '95%'})\
    .drop_duplicates().reset_index(drop = True)
    # Center each quantile column on the subject's median breakfast time.
    breakfast_variability_df = breakfast_summary_df.copy()
    for col in breakfast_variability_df.columns:
        if col == 'id' or col == '50%':
            continue
        breakfast_variability_df[col] = breakfast_variability_df[col] - breakfast_variability_df['50%']
    breakfast_variability_df['50%'] = breakfast_variability_df['50%'] - breakfast_variability_df['50%']
    if plot == True:
        # Histogram (KDE) of the 90%-10% spread across subjects.
        fig, ax = plt.subplots(1, 1, figsize = (10, 10), dpi=80)
        sns_plot = sns.distplot( breakfast_variability_df['90%'] - breakfast_variability_df['10%'] )
        ax.set(xlabel='Variation Distribution for Breakfast (90% - 10%)', ylabel='Kernel Density Estimation')
    return breakfast_variability_df
e174f57fd146e07d41f0fc21c028711ae581a580
3,643,861
def _sdss_wcs_to_log_wcs(old_wcs):
    """
    The WCS in the SDSS files does not appear to follow the WCS standard - it
    claims to be linear, but is logarithmic in base-10.

    The wavelength is given by:
    λ = 10^(w0 + w1 * i)
    with i being the pixel index starting from 0.

    The FITS standard uses a natural log with a sightly different formulation,
    see WCS Paper 3 (which discusses spectral WCS).

    This function does the conversion from the SDSS WCS to FITS WCS.

    :param old_wcs: astropy-style WCS whose crval/cd encode the base-10 log
        wavelength solution described above.
    :return: a new 1-axis WCS with ctype 'WAVE-LOG'.
    """
    w0 = old_wcs.wcs.crval[0]
    w1 = old_wcs.wcs.cd[0,0]
    # Reference wavelength and its per-pixel increment in natural-log form:
    # d(10^(w0 + w1 i))/di at i=0 is 10^w0 * w1 * ln(10).
    crval = 10 ** w0
    cdelt = crval * w1 * np.log(10)
    # Fall back to Angstrom when the source WCS has no unit recorded.
    cunit = old_wcs.wcs.cunit[0] or Unit('Angstrom')
    ctype = "WAVE-LOG"

    w = WCS(naxis=1)
    w.wcs.crval[0] = crval
    w.wcs.cdelt[0] = cdelt
    w.wcs.ctype[0] = ctype
    w.wcs.cunit[0] = cunit
    w.wcs.set()
    return w
b4b4427d5563e85f80ddc2200e9c323098ad35ae
3,643,862
def request_records(request):
    """show the datacap request records

    Reads ``address``, ``page_index`` and ``page_size`` from POST,
    paginates the matching request records and returns a ``format_return``
    payload with the serialized rows plus total page/record counts.
    """
    address = request.POST.get('address')
    page_index = request.POST.get('page_index', '1')
    page_size = request.POST.get('page_size', '5')
    # Sanitise pagination inputs, falling back to size 5 / page 1.
    page_size = interface.handle_page(page_size, 5)
    page_index = interface.handle_page(page_index, 1)
    msg_code, msg_data = interface.request_record(address=address)
    obj = Page(msg_data, page_size).page(page_index)
    data_list = []
    for i in obj.get('objects'):
        msg_cid = i.msg_cid
        assignee = i.assignee
        comments_url = i.comments_url
        data_list.append({
            'assignee': assignee,
            'created_at': i.created_at.strftime('%Y-%m-%d %H:%M:%S') if i.created_at else i.created_at,
            'region': i.region,
            'request_datacap': i.request_datacap,
            'status': i.status,
            'allocated_datacap': i.allocated_datacap,
            'msg_cid': msg_cid,
            'url': interface.get_req_url(i.comments_url),
            'height': get_height(msg_cid),
            'name': i.name,
            'media': i.media,
            'github_url': get_github_url(comments_url),
            'issue_id': get_api_issue_id(comments_url),
            'notary': get_notary_by_github_account(assignee),
        })
    # NOTE(review): msg_code from request_record is ignored — confirm a
    # non-zero code shouldn't short-circuit the response.
    return format_return(0, data={"objs": data_list, "total_page": obj.get('total_page'),
                                  "total_count": obj.get('total_count')})
6eac819ab78afa6e7df00be8e47b87344a129abc
3,643,863
def extendCorrespondingAtomsDictionary(names, str1, str2):
    """
    Extend the symmetric atom-correspondence mapping *names* with the
    pairs encoded in *str1* and *str2* (first token = key, remaining
    tokens matched position by position in both directions).
    """
    key1, *atoms1 = str1.split()
    key2, *atoms2 = str2.split()
    for idx, atom1 in enumerate(atoms1):
        atom2 = atoms2[idx]
        names[key1][key2].append([atom1, atom2])
        names[key2][key1].append([atom2, atom1])
    return None
cb586be8dcf7a21af556b332cfedbdce0be6882a
3,643,864
def _device_name(data):
    """Return name of device tracker.

    Beacon payloads get the beacon prefix prepended to their name;
    everything else reports its 'device' field.
    """
    if ATTR_BEACON_ID not in data:
        return data['device']
    return "{}_{}".format(BEACON_DEV_PREFIX, data['name'])
7a3dd5765d12c7f1b78c87c6188d3afefd4228ee
3,643,865
def get_share_path(
    storage_server: StorageServer, storage_index: bytes, sharenum: int
) -> FilePath:
    """
    Get the path to the given storage server's storage for the given share.
    """
    bucket_dir = FilePath(storage_server.sharedir).preauthChild(
        storage_index_to_dir(storage_index)
    )
    return bucket_dir.child("{}".format(sharenum))
e37566e0cb09bf6c490e6e0faf024cedf91c4576
3,643,866
import torch
def focal_loss_with_prob(prob,
                         target,
                         weight=None,
                         gamma=2.0,
                         alpha=0.25,
                         reduction='mean',
                         avg_factor=None):
    """A variant of Focal Loss used in TOOD.

    :param prob: (N, num_classes) predicted probabilities (already sigmoid-ed).
    :param target: (N,) integer class labels; the value ``num_classes``
        acts as the background label (its one-hot column is sliced off).
    :param weight: optional per-sample weights, reshaped to (N, 1).
    :param gamma: focusing exponent on (1 - pt).
    :param alpha: weight for the positive class; negatives get 1 - alpha.
    :param reduction: passed through to ``weight_reduce_loss``.
    :param avg_factor: passed through to ``weight_reduce_loss``.
    """
    # One extra column absorbs background labels, then is dropped.
    target_one_hot = prob.new_zeros(len(prob), len(prob[0]) + 1)
    target_one_hot = target_one_hot.scatter_(1, target.unsqueeze(1), 1)[:, :-1]
    # alpha on positive entries, 1 - alpha elsewhere.
    flatten_alpha = torch.empty_like(prob).fill_(1 - alpha)
    flatten_alpha[target_one_hot == 1] = alpha
    # pt = model's probability for the true outcome of each entry.
    pt = torch.where(target_one_hot == 1, prob, 1 - prob)
    ce_loss = F.binary_cross_entropy(prob, target_one_hot, reduction='none')
    loss = flatten_alpha * torch.pow(1 - pt, gamma) * ce_loss

    if weight is not None:
        weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
0c730a1eef5487d3ce5b79c06fda5d8a0e8542a7
3,643,867
def root_key_from_seed(seed):
    """This derives your master key the given seed.

    Implemented in ripple-lib as ``Seed.prototype.get_key``, and further
    is described here:
    https://ripple.com/wiki/Account_Family#Root_Key_.28GenerateRootDeterministicKey.29
    """
    # Rejection-sample a private generator: hash (seed || seq) and retry
    # with an incremented sequence number until the value is acceptable
    # for the SECP256k1 curve.
    # NOTE(review): the acceptance test is `order >= private_gen`, i.e. it
    # admits values equal to the curve order and zero; a strict
    # `0 < private_gen < order` check is the usual requirement — confirm
    # against ripple-lib before changing.
    seq = 0
    while True:
        private_gen = from_bytes(first_half_of_sha512(
            b''.join([seed, to_bytes(seq, 4)])))
        seq += 1
        if curves.SECP256k1.order >= private_gen:
            break
    public_gen = curves.SECP256k1.generator * private_gen

    # Now that we have the private and public generators, we apparently
    # have to calculate a secret from them that can be used as a ECDSA
    # signing key.
    secret = i = 0
    # The public generator is serialized in compressed point form before
    # being hashed (same rejection-sampling pattern as above).
    public_gen_compressed = ecc_point_to_bytes_compressed(public_gen)
    while True:
        secret = from_bytes(first_half_of_sha512(
            b"".join([
                public_gen_compressed, to_bytes(0, 4), to_bytes(i, 4)])))
        i += 1
        if curves.SECP256k1.order >= secret:
            break

    # Combine the sampled value with the private generator, reduced mod
    # the curve order, to obtain the final signing exponent.
    secret = (secret + private_gen) % curves.SECP256k1.order

    # The ECDSA signing key object will, given this secret, then expose
    # the actual private and public key we are supposed to work with.
    key = SigningKey.from_secret_exponent(secret, curves.SECP256k1)

    # Attach the generators as supplemental data for callers that need
    # the family generator material alongside the key.
    key.private_gen = private_gen
    key.public_gen = public_gen
    return key
b93cfa8c31ab061f6496f8e12f5c3d7ba5f0d7a7
3,643,868
def fake_login(request):
    """Contrived version of a login form.

    Raises RateLimitError when the request has been flagged as rate-limited
    (``request.limited``).  For POST requests, returns False unless the
    submitted password is exactly 'correct'; all other methods return True.
    """
    if getattr(request, 'limited', False):
        raise RateLimitError
    if request.method == 'POST':
        password = request.POST.get('password', 'fail')
        # BUG FIX: the original used `is not` (object identity) to compare
        # strings, which only works by accident of CPython interning and
        # emits a SyntaxWarning on 3.8+; use value equality instead.
        if password != 'correct':
            return False
    return True
41b2621b38a302837c9f8ab1fafa0a4f45ca2c26
3,643,870
def split_to_sentences(data):
    """
    Split data by linebreak "\n"

    Args:
        data: str

    Returns:
        A list of sentences
    """
    # Split on newlines, trim surrounding whitespace, and drop blank lines
    # in a single pass.
    return [line.strip() for line in data.split('\n') if line.strip()]
56540da88e982615e3874ab9f6fd22229a076565
3,643,871
def read_config_file(fp: str, mode='r', encoding='utf8', prefix='#') -> dict:
    """Read a ``key=value`` text file into a dict.

    Blank lines and comment lines (those starting with ``prefix`` after
    stripping leading whitespace) are ignored.

    :param fp: path of the config file
    :param mode: file open mode
    :param encoding: file encoding
    :param prefix: comment-line prefix
    :return: dict mapping stripped keys to stripped values
    """
    params = {}
    with open(fp, mode, encoding=encoding) as f:
        for raw in f:
            line = raw.strip()
            # Skip blank lines and comment lines.
            if not line or line.startswith(prefix):
                continue
            # BUG FIX: split on the FIRST '=' only, so values that
            # themselves contain '=' (URLs, base64, ...) stay intact.
            # partition also tolerates a missing '=' instead of crashing.
            key, _, value = line.partition('=')
            params[key.strip()] = value.strip()
    # Debug print of the parsed parameters removed.
    return params
94e6130de22b05ca9dd6855206ec748e63dad8ad
3,643,872
def PrepareForMakeGridData(
    allowed_results, starred_iid_set, x_attr,
    grid_col_values, y_attr, grid_row_values, users_by_id, all_label_values,
    config, related_issues, hotlist_context_dict=None):
  """Return all data needed for EZT to render the body of the grid view."""

  # Factory that converts an issue into the lightweight EZT item placed in
  # a grid tile.  `starred` and `data_idx` are placeholders filled in below.
  def IssueViewFactory(issue):
    return template_helpers.EZTItem(
        summary=issue.summary, local_id=issue.local_id, issue_id=issue.issue_id,
        status=issue.status or issue.derived_status, starred=None, data_idx=0,
        project_name=issue.project_name)

  # Build the raw row/column grid of tiles from the allowed issues.
  grid_data = MakeGridData(
      allowed_results, x_attr, grid_col_values, y_attr, grid_row_values,
      users_by_id, IssueViewFactory, all_label_values, config, related_issues,
      hotlist_context_dict=hotlist_context_dict)

  # Index issues by id so each tile can be annotated in O(1).
  issue_dict = {issue.issue_id: issue for issue in allowed_results}
  # Post-process every tile in place: mark starred issues and attach the
  # relative detail URL plus a "project:id" reference string.
  for grid_row in grid_data:
    for grid_cell in grid_row.cells_in_row:
      for tile in grid_cell.tiles:
        if tile.issue_id in starred_iid_set:
          tile.starred = ezt.boolean(True)
        issue = issue_dict[tile.issue_id]
        tile.issue_url = tracker_helpers.FormatRelativeIssueURL(
            issue.project_name, urls.ISSUE_DETAIL, id=tile.local_id)
        tile.issue_ref = issue.project_name + ':' + str(tile.local_id)

  return grid_data
a8e8a70f56001398e75f1ab2e82c8e995e164203
3,643,873
def custom_address_validator(value, context):
    """No-op validator: the address field is optional in this example, so
    the default 'required' check is intentionally bypassed and the value is
    accepted unchanged.
    """
    return value
06ec3af3b6103c06be5fc9cf30d1af28bd072193
3,643,874
from typing import Tuple
def get_model(args) -> Tuple:
    """Instantiate the VQC variant selected by the hyperparameters.

    The plain vqc consumes latent-space data produced by a chosen
    auto-encoder, while the hybrid vqc embeds an encoder (or a full
    auto-encoder) and therefore takes the raw auto-encoder input directly.

    Args:
        args: Dictionary of hyperparameters for the vqc.

    Returns:
        A VQC or VQCHybrid instance built from the given hyperparameters.
    """
    qdevice = get_qdevice(
        args["run_type"],
        wires=args["nqubits"],
        backend_name=args["backend_name"],
        config=args["config"],
    )

    if not args["hybrid"]:
        return VQC(qdevice, args)
    return VQCHybrid(qdevice, device="cpu", hpars=args)
fb50a114efdd1f4f358edf2906aad861688056de
3,643,876
def tail_ratio(returns):
    """
    Determines the ratio between the right (95%) and left tail (5%).

    For example, a ratio of 0.25 means that losses are four times
    as bad as profits.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.

    Returns
    -------
    float
        tail ratio
    """
    # Thin wrapper: the computation is delegated entirely to empyrical.
    return ep.tail_ratio(returns)
620fa7b5f5887f80b3fd56e2fb24077cbc3dcf86
3,643,877
def get_trajectory_for_weight(simulation_object, weight):
    """
    Compute the optimal trajectory of a simulation for a reward weight vector.

    :param simulation_object: environment exposing ``find_optimal_path``
    :param weight: reward weight vector
    :return: dict with the weight ``w``, feature counts ``phi`` and the
        optimal ``controls``
    """
    print(simulation_object.name + " - get trajectory for w=", weight)
    controls, features, _ = simulation_object.find_optimal_path(weight)
    return {"w": list(weight), "phi": list(features), "controls": controls}
e68827fc3631d4467ae1eb82b3c319a4e45d6a9b
3,643,878
def UnNT(X, Z, N, T, sampling_type):
    """Computes reshuffled block-wise complete U-statistic."""
    # Average T independent reshuffled estimates of the block statistic.
    estimates = [UnN(X, Z, N, sampling_type=sampling_type) for _ in range(T)]
    return np.mean(estimates)
e250de27fc9bfcd2244269630591ab8f925b29af
3,643,879
def boolean_matrix_of_image(image_mat, cutoff=0.5):
    """
    Make a bool matrix from the input image_mat

    :param image_mat: a 2d or 3d matrix of ints or floats
    :param cutoff: The threshold to use to make the image pure black and white.
        Is applied to the max-normalized matrix.
    :return: a 2d boolean ndarray
    """
    if not isinstance(image_mat, np.ndarray):
        image_mat = np.array(image_mat)
    if image_mat.ndim == 3:
        # Collapse channels (e.g. RGB) into a single intensity plane.
        image_mat = image_mat.sum(axis=2)
    elif image_mat.ndim != 2:
        # BUG FIX: the original only rejected ndim > 3 or ndim == 1, so a
        # 0-d scalar input slipped through; reject everything but 2d/3d.
        raise ValueError("The image_mat needs to have 2 or 3 dimensions")
    if image_mat.dtype != np.dtype('bool'):
        image_mat = image_mat.astype('float')
        max_val = image_mat.max()
        # BUG FIX: guard against division by zero (NaN + RuntimeWarning)
        # for an all-zero image; skipping normalization yields the same
        # all-False result without the warning.
        if max_val != 0:
            image_mat = image_mat / max_val
        image_mat = image_mat > cutoff
    return image_mat
3b23c946709cde552a8c2c2e2bee0a3c91107e85
3,643,880
import torch
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
    """Pool elements across the middle (sequence) dimension.

    Useful to convert a list of vectors into a single vector so as to get a
    representation of a set.

    Args:
        inputs: A tensor of shape [batch_size, sequence_length, input_dims]
            containing the sequences of input vectors.
        pooling_type: the pooling type to use, MAX or AVR
        mask: A tensor of shape [batch_size, sequence_length] containing a
            mask for the inputs with 1's for existing elements, and 0's elsewhere.

    Returns:
        A tensor of shape [batch_size, input_dims] containing the sequences of
        transformed vectors.
    """
    if mask is not None:
        # BUG FIX: the original did torch.matmul(inputs, mask), whose shapes
        # ([B,S,D] x [B,S,1]) are incompatible; broadcasting an elementwise
        # multiply zeroes out the masked positions as intended.
        mask = mask.unsqueeze(2)
        inputs = inputs * mask
    if pooling_type == "MAX":
        # NOTE: with a mask, masked positions are zeroed first, so MAX
        # pooling assumes non-negative inputs (zeros can win otherwise) —
        # this matches the original's intent; confirm for signed features.
        output, _ = torch.max(inputs, 1, keepdim=False)
    elif pooling_type == "AVR":
        if mask is not None:
            output = torch.sum(inputs, 1, keepdim=False)
            num_elems = torch.sum(mask, 1, keepdim=False)
            # BUG FIX: the original divided by torch.max(num_elems, 1),
            # which is a (values, indices) tuple -> TypeError; clamp to 1
            # instead to avoid division by zero on empty sequences.
            output = output / torch.clamp(num_elems, min=1)
        else:
            output = torch.mean(inputs, dim=1)
    return output
a8c7d51c76efaaae64a8725ae9296894fdc9b933
3,643,881
def _monte_carlo_trajectory_sampler(
    time_horizon: int = None,
    env: DynamicalSystem = None,
    policy: BasePolicy = None,
    state: np.ndarray = None,
):
    """Monte-Carlo trajectory sampler.

    Rolls the system forward from ``state`` for ``time_horizon`` steps under
    ``policy`` and yields the visited state sequence.

    Args:
        env: The system to sample from.
        policy: The policy applied to the system during sampling.
        state: Initial state of the rollout.

    Returns:
        A generator function that yields trajectories (lists of states).
    """

    @sample_generator
    def _sample_generator():
        # Start the trajectory at the given initial condition.
        trajectory = [state]
        env.state = state

        for step in range(time_horizon):
            action = policy(time=step, state=env.state)
            next_state, cost, done, _ = env.step(time=step, action=action)
            trajectory.append(next_state)

        yield trajectory

    return _sample_generator
9107289e89a37bd29bc96d2d549b74f15d3008e0
3,643,882
def pi_mult(diff: float) -> int:
    """
    Compute the integer multiple of 2*pi needed to compensate a phase jump.

    :param diff: phase difference between two cells of the matrix
    :return: integer multiplier
    """
    # The rounding offset depends on the sign of the phase difference.
    offset = 1 if diff > 0 else -1
    return int(0.5 * (diff / pi + offset))
041c4740fba4b9983ec927d3fb3d8f5421e4919c
3,643,883
import warnings
def get_integer(val=None, name="value", min_value=0, default_value=0):
    """Returns integer value from input, with basic validation

    Parameters
    ----------
    val : `float` or None, default None
        Value to convert to integer.
    name : `str`, default "value"
        What the value represents (used in error/warning messages).
    min_value : `float`, default 0
        Minimum allowed value.
    default_value : `float` , default 0
        Value to be used if ``val`` is None.

    Returns
    -------
    val : `int`
        Value parsed as an integer.

    Raises
    ------
    ValueError
        If ``val`` cannot be converted to an integer or is below
        ``min_value``.
    """
    if val is None:
        val = default_value
    try:
        orig = val
        val = int(val)
    # BUG FIX: int() raises TypeError (not ValueError) for non-numeric,
    # non-string input such as a list, which escaped the documented
    # ValueError contract; catch both and normalize.
    except (ValueError, TypeError):
        raise ValueError(f"{name} must be an integer") from None
    else:
        # Warn when conversion was lossy (e.g. 2.5 -> 2).
        if val != orig:
            warnings.warn(f"{name} converted to integer {val} from {orig}")
    if not val >= min_value:
        raise ValueError(f"{name} must be >= {min_value}")
    return val
9c967a415eaac58a4a4778239859d1f6d0a87820
3,643,884
def release(cohesin, occupied, args):
    """
    An opposite to capture - releasing cohesins from CTCF
    """
    # Fast exit when neither leg is bound to CTCF.
    if not cohesin.any("CTCF"):
        return cohesin

    # Try to release each leg independently.  The random number is drawn
    # before checking the bound flag so the RNG stream is consumed
    # identically for bound and unbound legs.
    for leg in (-1, 1):
        draw = np.random.random()
        release_prob = args["ctcfRelease"][leg].get(cohesin[leg].pos, 0)
        if (draw < release_prob) and cohesin[leg].attrs["CTCF"]:
            cohesin[leg].attrs["CTCF"] = False
    return cohesin
89d0d1446f1c5ee45a8e190dff76b91ea59a3bcf
3,643,886
def cosine(u, v):
    """
    d = cosine(u, v)

    Computes the Cosine distance between two n-vectors u and v,
    (1-uv^T)/(||u||_2 * ||v||_2).
    """
    u = np.asarray(u)
    v = np.asarray(v)
    # Idiom: np.linalg.norm replaces the hand-rolled sqrt(dot(x, x.T)),
    # which is clearer and numerically equivalent for 1-d vectors.
    return 1.0 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
139b38f674bc19e50bf37714b3593e7f055c5b7f
3,643,887
from typing import Iterator
from typing import Tuple
from typing import Any
def _train_model(
    train_iter: Iterator[DataBatch],
    test_iter: Iterator[DataBatch],
    model_type: str,
    num_train_iterations: int = 10000,
    learning_rate: float = 1e-5
) -> Tuple[Tuple[Any, Any], Tuple[onp.ndarray, onp.ndarray]]:
  """Train a model and return weights and train/test loss."""
  # Initialize parameters/state from one warm-up batch.
  batch = next(train_iter)
  key = jax.random.PRNGKey(0)
  loss_fns = _loss_fns_for_model_type(model_type)
  p, s = loss_fns.init(key, batch["feats"], batch["time"])
  opt = opt_base.Adam(learning_rate=learning_rate)
  opt_state = opt.init(p, s)

  # One jitted optimization step: split the RNG, take the gradient of the
  # loss (which also returns updated model state), and apply the optimizer.
  @jax.jit
  def update(opt_state, key, feats, times):
    key, key1 = jax.random.split(key)
    p, s = opt.get_params_state(opt_state)
    value_and_grad_fn = jax.value_and_grad(loss_fns.apply, has_aux=True)
    (loss, s), g = value_and_grad_fn(p, s, key1, feats, times)
    next_opt_state = opt.update(opt_state, g, loss=loss, model_state=s, key=key)
    return next_opt_state, key, loss

  train_loss = []
  test_loss = []
  for i in range(num_train_iterations):
    batch = next(train_iter)
    opt_state, key, unused_loss = update(opt_state, key, batch["feats"],
                                         batch["time"])
    # Evaluate densely early on (every 10 steps for the first 100), then
    # every 100 steps thereafter.
    if (i < 100 and i % 10 == 0) or i % 100 == 0:
      p, s = opt.get_params_state(opt_state)
      train_loss.append(
          onp.asarray(eval_many(p, s, key, train_iter, model_type=model_type)))
      test_loss.append(
          onp.asarray(eval_many(p, s, key, test_iter, model_type=model_type)))
      print(i, train_loss[-1], test_loss[-1])

  # Return the final (params, state) and the recorded loss curves.
  return (p, s), (onp.asarray(train_loss), onp.asarray(test_loss))
46043beaf170f164f13e91fec3a30d024ede6dc8
3,643,889
def swig_base_TRGBPixel_getMin():
    """swig_base_TRGBPixel_getMin() -> CRGBPixel"""
    # Auto-generated SWIG stub: delegates directly to the native _Core
    # extension module.  Do not edit by hand.
    return _Core.swig_base_TRGBPixel_getMin()
454de4b9f3014b950ebe609ab80d15f0c71cd175
3,643,891
def archive_deleted_rows(context, max_rows=None):
    """Move up to max_rows rows from production tables to the corresponding
    shadow tables.

    :param context: request context (only used by the session decorator).
    :param max_rows: maximum total number of rows to archive across all
        tables; None means archive with no limit.
    :returns: Number of rows archived.
    """
    # Collect every mapped table name from the models module.
    # (itervalues: this file targets Python 2.)
    tablenames = []
    for model_class in models.__dict__.itervalues():
        if hasattr(model_class, "__tablename__"):
            tablenames.append(model_class.__tablename__)
    rows_archived = 0
    for tablename in tablenames:
        # BUG FIX: the original computed ``max_rows - rows_archived``
        # unconditionally, which raised TypeError when max_rows was None
        # (its documented default).  Pass None through as "no limit".
        remaining = None if max_rows is None else max_rows - rows_archived
        rows_archived += archive_deleted_rows_for_table(context, tablename,
                                                       max_rows=remaining)
        if max_rows is not None and rows_archived >= max_rows:
            break
    return rows_archived
c2c26191824edfe3d31ed5b0f321022f5bac85a5
3,643,892
from typing import TextIO
import json
def load_wavefunction(file: TextIO) -> Wavefunction:
    """Load a qubit wavefunction from a file.

    Args:
        file (str or file-like object): the name of the file, or a file-like object.

    Returns:
        wavefunction (pyquil.wavefunction.Wavefunction): the wavefunction object
    """
    # Accept either a path or an already-open file-like object.
    if isinstance(file, str):
        with open(file, 'r') as handle:
            payload = json.load(handle)
    else:
        payload = json.load(file)
    return Wavefunction(convert_dict_to_array(payload['amplitudes']))
23b38e0739f655e5625775c80baa81874b48d45f
3,643,893
import requests
def delete_alias(request, DOMAIN, ID):
    """
    Delete Alias based on ID
    ENDPOINT : /api/v1/alias/:domain/:id
    """
    endpoint = f"https://api.forwardemail.net/v1/domains/{DOMAIN}/aliases/{ID}"
    # Basic auth with an empty password, per the forwardemail API.
    response = requests.delete(endpoint, auth=(USERNAME, ''))
    if response.status_code == 200:
        print("Deleted")
    # The upstream JSON body is relayed to the client regardless of status.
    return JsonResponse(response.json())
ca59eccef303461b3be562c6167753959ad3eb67
3,643,894