Columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M)
import plotly.graph_objects as go

def plot_heatmap(df, title=""):
    """Plotly heatmap wrapper

    :param df: pd.DataFrame
    :param title: str
    """
    fig = go.Figure(
        data=go.Heatmap(z=df.values, x=df.columns, y=df.index, colorscale="RdBu")
    )
    fig.update_layout(template=_TEMPLATE, title=title, legend_orientation="h")
    return fig
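A minimal usage sketch (hypothetical data; assumes plotly is installed, that the example lives in the same module as plot_heatmap, and that the _TEMPLATE global is set to a valid template name):

import numpy as np
import pandas as pd

_TEMPLATE = "plotly_white"  # assumption: stand-in for the module's template global

df = pd.DataFrame(np.random.randn(4, 4), index=list("ABCD"), columns=list("WXYZ"))
fig = plot_heatmap(df, title="Random values")
fig.show()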
46c3d362bdbe742b54ad09a56f4638ef1497bcc2
3,646,357
def shift_transactions_forward(index, tindex, file, pos, opos):
    """Copy transactions forward in the data file

    This might be done as part of a recovery effort
    """
    # Cache a bunch of methods
    seek = file.seek
    read = file.read
    write = file.write
    index_get = index.get

    # Initialize,
    pv = z64
    p1 = opos
    p2 = pos
    offset = p2 - p1

    # Copy the data in two stages. In the packing stage,
    # we skip records that are non-current or that are for
    # unreferenced objects. We also skip undone transactions.
    #
    # After the packing stage, we copy everything but undone
    # transactions, however, we have to update various back pointers.
    # We have to have the storage lock in the second phase to keep
    # data from being changed while we're copying.
    pnv = None
    while 1:
        # Read the transaction record
        seek(pos)
        h = read(TRANS_HDR_LEN)
        if len(h) < TRANS_HDR_LEN:
            break
        tid, stl, status, ul, dl, el = unpack(TRANS_HDR, h)
        status = as_text(status)
        if status == 'c':
            break  # Oops. we found a checkpoint flag.
        tl = u64(stl)
        tpos = pos
        tend = tpos + tl

        otpos = opos  # start pos of output trans

        thl = ul + dl + el
        h2 = read(thl)
        if len(h2) != thl:
            raise PackError(opos)

        # write out the transaction record
        seek(opos)
        write(h)
        write(h2)

        thl = TRANS_HDR_LEN + thl
        pos = tpos + thl
        opos = otpos + thl

        while pos < tend:
            # Read the data records for this transaction
            seek(pos)
            h = read(DATA_HDR_LEN)
            oid, serial, sprev, stloc, vlen, splen = unpack(DATA_HDR, h)
            assert not vlen
            plen = u64(splen)
            dlen = DATA_HDR_LEN + (plen or 8)

            tindex[oid] = opos

            if plen:
                p = read(plen)
            else:
                p = read(8)
                p = u64(p)
                if p >= p2:
                    p = p - offset
                elif p >= p1:
                    # Ick, we're in trouble. Let's bail
                    # to the index and hope for the best
                    p = index_get(oid, 0)
                p = p64(p)

            # WRITE
            seek(opos)
            sprev = p64(index_get(oid, 0))
            write(pack(DATA_HDR, oid, serial, sprev, p64(otpos), 0, splen))
            write(p)

            opos = opos + dlen
            pos = pos + dlen

        # skip the (intentionally redundant) transaction length
        pos = pos + 8

        if status != 'u':
            index.update(tindex)  # Record the position
        tindex.clear()

        write(stl)
        opos = opos + 8

    return opos
c19009c15a04b4a55389b584fad1744ebde03187
3,646,360
import numpy as np

def draw_disturbances(seed, shocks_cov, num_periods, num_draws):
    """Creates desired number of draws of a multivariate standard normal distribution."""
    # Set seed
    np.random.seed(seed)

    # Input parameters of the distribution
    mean = [0, 0, 0]
    shocks_cov_matrix = np.zeros((3, 3), float)
    np.fill_diagonal(shocks_cov_matrix, shocks_cov)

    # Create draws from the standard normal distribution
    draws = np.random.multivariate_normal(
        mean, shocks_cov_matrix, (num_periods, num_draws)
    )

    # Return function output
    return draws
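A quick sanity check of the returned shape (the covariance is hard-coded to 3x3, so shocks_cov must supply three diagonal entries):

draws = draw_disturbances(seed=42, shocks_cov=[1.0, 1.0, 1.0],
                          num_periods=5, num_draws=100)
print(draws.shape)  # (5, 100, 3)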
d467a1d5fde3eb32debca2711597ef24dc117aaa
3,646,361
def wheel(pos):
    """Generate rainbow colors across 0-1279 positions."""
    # Clamp to 1279: values of 1280 and above would fall through every
    # branch below and leave r, g, b unbound.
    if pos > 1279:
        pos = 1279
    if pos <= 255:
        r = 255 - pos
        g = 0
        b = 255
    else:
        pos = pos - 256
        if pos <= 255:
            r = 0
            g = pos
            b = 255
        else:
            pos = pos - 256
            if pos <= 255:
                r = 0
                g = 255
                b = 255 - pos
            else:
                pos = pos - 256
                if pos <= 255:
                    r = pos
                    g = 255
                    b = 0
                else:
                    pos = pos - 256
                    if pos <= 255:
                        r = 255
                        g = 255 - pos
                        b = 0
    return (r, g, b)
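Sampling the start of each of the five color segments shows the rainbow progression:

for pos in (0, 256, 512, 768, 1024):
    print(pos, wheel(pos))
# 0 (255, 0, 255), 256 (0, 0, 255), 512 (0, 255, 255),
# 768 (0, 255, 0), 1024 (255, 255, 0)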
765df4262ce3b04fb8b06f9256ca51670e2f5bfb
3,646,363
import numpy as np
from scipy import optimize

def optimize_profile(diff_matrix, x_points, dc_init, exp_norm_profiles,
                     display_result=True, labels=None):
    """
    Fit the diffusion matrix

    Parameters
    ----------
    diff_matrix : tuple
        tuple of (eigenvalues, eigenvectors) in reduced basis (dim n-1)

    x_points : 1-D array_like
        spatial coordinates

    dc_init : array
        concentration difference between endmembers

    exp_norm_profiles : list of arrays
        profiles to be fitted, of length the nb of experiments, with n
        profiles for each experiment. Profiles are normalized, that is, an
        estimate of the mean concentration should be subtracted.
    """
    n_comp = len(dc_init[0]) - 1
    n_exp = len(x_points)

    def cost_function(coeffs, x_points, dc_init, exp_norm_profiles):
        n_comp = len(dc_init[0]) - 1
        diag = coeffs[:n_comp]
        n_exp = len(x_points)
        P = np.matrix(coeffs[n_comp: n_comp + n_comp**2].reshape((n_comp, n_comp)))
        adjust_cmeans = coeffs[n_comp + n_comp**2:
                               n_comp + n_comp**2 +
                               n_comp * n_exp].reshape((n_exp, n_comp))
        adjust_dc = coeffs[n_comp + n_comp**2 + n_comp * n_exp:
                           n_comp + n_comp**2 +
                           2 * n_comp * n_exp].reshape((n_exp, n_comp))
        errors = np.array([])
        for i in range(n_exp):
            dc_corr = np.copy(dc_init[i])
            dc_corr[:-1] -= adjust_dc[i]
            profile_corr = np.copy(exp_norm_profiles[i])
            profile_corr[:-1, :] -= adjust_cmeans[i][:, None]
            error = evolve_profile((diag, P), x_points[i], dc_corr,
                                   profile_corr, plot=False)
            errors = np.concatenate((errors, error))
        return errors

    diag, P = diff_matrix
    coeffs = np.concatenate((diag, np.array(P).ravel(),
                             np.zeros(2 * n_exp * n_comp)))
    res = optimize.leastsq(cost_function, coeffs,
                           args=(x_points, dc_init, exp_norm_profiles),
                           full_output=True, factor=10)[0]
    diags, eigvecs, shifts = res[:n_comp], \
        res[n_comp: n_comp + n_comp**2].reshape((n_comp, n_comp)), \
        res[n_comp + n_comp**2:].reshape((2, n_exp, n_comp))
    if display_result:
        for i in range(n_exp):
            dc_corr = np.copy(dc_init[i])
            dc_corr[:-1] -= shifts[1, i]
            prof_corr = np.copy(exp_norm_profiles[i])
            prof_corr[:-1] -= shifts[0, i][:, None]
            _ = evolve_profile((diags, eigvecs), x_points[i], dc_corr,
                               exp_norm_profiles=prof_corr, labels=labels)
    return diags, eigvecs, shifts
f2550f6fe4cb267559676d30ef0156ce528178cf
3,646,365
def getargsfromdoc(obj):
    """Get arguments from object doc"""
    if obj.__doc__ is not None:
        return getargsfromtext(obj.__doc__, obj.__name__)
d49510388be36a60259683f4560b1d01fe9f9bf6
3,646,366
def nms(dets, thresh):
    """Dispatch to either CPU or GPU NMS implementations.
    Accept dets as tensor
    """
    return pth_nms(dets, thresh)
e6dbe7b44e1975c080e58d02d6e07ef22b2d3711
3,646,367
def QFont_from_Font(font):
    """ Convert the given Enaml Font into a QFont.

    Parameters
    ----------
    font : Font
        The Enaml Font object.

    Returns
    -------
    result : QFont
        The QFont instance for the given Enaml font.
    """
    qfont = QFont(font.family, font.pointsize, font.weight)
    qfont.setStyle(FONT_STYLES[font.style])
    qfont.setCapitalization(FONT_CAPS[font.caps])
    qfont.setStretch(FONT_STRETCH[font.stretch])
    return qfont
bb62daf4d46315a7a55135894dc78e1d2898fee2
3,646,370
from collections import OrderedDict

def _find_in_iterable_case_insensitive(iterable, name):
    """
    Return the value matching ``name``, case insensitive, from an iterable.
    """
    # Deduplicate while preserving order; collections.OrderedDict (not
    # typing.OrderedDict) provides the usable fromkeys constructor.
    iterable = list(OrderedDict.fromkeys(iterable))
    iterupper = [k.upper() for k in iterable]
    try:
        match = iterable[iterupper.index(name.upper())]
    except (ValueError, AttributeError):
        match = None
    return match
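Example with hypothetical values:

print(_find_in_iterable_case_insensitive(["Alpha", "Beta"], "ALPHA"))  # 'Alpha'
print(_find_in_iterable_case_insensitive(["Alpha", "Beta"], "gamma"))  # None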
548c951b08fb07251fda1b8918282462c8d0351a
3,646,371
import numpy as np

def predict_all_points(data, order, coefficients):
    """
    :param data: input data to create least squares prediction of order(order) of
    :param order: order for least squares prediction
    :param coefficients: coefficients of LPC
    :return: returns estimation of entire data set. Will be of length (len(data) - order)
    """
    predicted_set = np.zeros((1, len(data) - order))
    index = 0
    for i in np.arange(order, len(data)):
        window = data[i - order:i]
        predicted_set[0][index] = np.sum(np.multiply(window, -coefficients))
        index += 1
    return predicted_set[0]
4725c735241f439bf986743cafdee0e995373966
3,646,373
def _unpack(msg, decode=True):
    """Unpack and decode a FETCHed message dictionary."""
    if 'UID' in msg and 'BODY[]' in msg:
        uid = msg['UID']
        body = msg['BODY[]']
        if decode:
            idate = msg.get('INTERNALDATE', None)
            flags = msg.get('FLAGS', ())
            return (uid, IMAP4Message(body, uid, idate, flags))
        else:
            return (uid, body)
    return (None, None)
5c027dcd54d29f6d95647b66ad2d28998866dc3c
3,646,374
import logging

import cv2
from PIL import Image

def video_in(filename=INPUTPATH):
    """reads (max. 20 sec!) video file and stores every frame as PNG image
    for processing; returns image name and image files (as np array?)"""
    # create video capture object
    cap = cv2.VideoCapture(filename)
    name = filename.split('/')[-1].split('.')[0]
    i = 0
    if not cap.isOpened():
        logging.error('Error opening video stream or file')
    while cap.isOpened():
        # capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            i = i + 1
            cv2.imshow('Frame', frame)
            Image.fromarray(frame).save(f"images/{name}_{i}.png")
            # Press Q on keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break
    return f'Frame count of {name}: {i}'
cb82d7c6865c3bfe5f3f52f9cb7adc55a8d2e002
3,646,375
from typing import List

def convert_all_timestamps(results: List[ResponseResult]) -> List[ResponseResult]:
    """Replace all date/time info with datetime objects, where possible"""
    results = [convert_generic_timestamps(result) for result in results]
    results = [convert_observation_timestamps(result) for result in results]
    return results
f81121fcd387626a2baa0ecfb342d3381f6def7f
3,646,376
def convert(s):
    """
    Take full markdown string and swap all math spans with img.
    """
    matches = find_inline_equations(s) + find_display_equations(s)
    for match in matches:
        full = match[0]
        latex = match[1]
        img = makeimg(latex)
        s = s.replace(full, img)
    return s
684a6be3812aad8b602631c45af407ca878f9453
3,646,377
def _amplify_ep(text):
    """
    check for added emphasis resulting from exclamation points (up to 4 of them)
    """
    ep_count = text.count("!")
    if ep_count > 4:
        ep_count = 4
    # (empirically derived mean sentiment intensity rating increase for
    #  exclamation points)
    ep_amplifier = ep_count * 0.292
    return ep_amplifier
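Worked values, showing the cap at four exclamation points:

for text in ("great", "great!", "great!!!", "great!!!!!!"):
    print(repr(text), _amplify_ep(text))
# 'great' 0.0, 'great!' 0.292, 'great!!!' 0.876 (3 * 0.292),
# 'great!!!!!!' 1.168 (capped at 4 * 0.292)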
8f78a5f24aa22b5f2b4927131bfccf22ccc69ff3
3,646,379
def inline_singleton_lists(dsk):
    """ Inline lists that are only used once

    >>> d = {'b': (list, 'a'),
    ...      'c': (f, 'b', 1)}     # doctest: +SKIP
    >>> inline_singleton_lists(d)  # doctest: +SKIP
    {'c': (f, (list, 'a'), 1)}

    Pairs nicely with lazify afterwards
    """
    dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
    dependents = reverse_dict(dependencies)

    keys = [k for k, v in dsk.items()
            if istask(v) and v and v[0] is list and len(dependents[k]) == 1]
    return inline(dsk, keys, inline_constants=False)
a4c2a8b6d96d0bfac8e9ba88a4bed301c3054f0a
3,646,380
def vegasflowplus_sampler(*args, **kwargs):
    """Convenience wrapper for sampling random numbers

    Parameters
    ----------
        `integrand`: tf.function
        `n_dim`: number of dimensions
        `n_events`: number of events per iteration
        `training_steps`: number of training_iterations

    Returns
    -------
        `sampler`: a reference to the generate_random_array method of the integrator class
    """
    return sampler(VegasFlowPlus, *args, **kwargs)
1b53d83bd010a8113640858d46d66c9c0ef76ff8
3,646,381
def remove_recalculated_sectors(df, prefix='', suffix=''):
    """Return df with Total gas (sum of all sectors) removed
    """
    # Forward prefix/suffix instead of hard-coded '' so the arguments
    # actually take effect.
    idx = recalculated_row_idx(df, prefix=prefix, suffix=suffix)
    return df[~idx]
54272933f72d45cf555f76086c809eba14713242
3,646,382
def unparse_headers(hdrs):
    """Serialize a dictionary of headers to a string.

    Args:
        hdrs: A dictionary of headers.

    Returns:
        The headers as a string that can be used in an NNTP POST.
    """
    return "".join([unparse_header(n, v) for n, v in hdrs.items()]) + "\r\n"
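A sketch of how this composes with a companion unparse_header; the helper is assumed here, presumably rendering one "Name: value" line:

def unparse_header(name, value):  # assumed helper
    return "%s: %s\r\n" % (name, value)

print(repr(unparse_headers({"From": "a@example.org", "Subject": "hi"})))
# 'From: a@example.org\r\nSubject: hi\r\n\r\n'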
7c06127752d0c6be19894703ba95f2e827e89b8f
3,646,383
import numpy as np

def modify_natoms(row, BBTs, fg):
    """This function takes a row of a pandas data frame and calculates the
    new number of atoms based on the atom difference indicated in its
    functional groups

    BBTs : list of instances of BBT class
    fg : instance of the Parameters class (fg parameters)

    returns : n_atoms (int)"""
    n_atoms = row['N_ATOMS']
    for i in BBTs[row['BBT']].BBT:
        n_atoms += fg.par[i]['atom_dif']
    if n_atoms < 1:
        return np.nan
    return n_atoms
2c2df3d2859d33128f982b936011c73bafb723bc
3,646,384
def recreate_cursor(collection, cursor_id, retrieved, batch_size):
    """
    Creates and returns a Cursor object based on an existing cursor in the
    server. If cursor_id is invalid, the returned cursor will raise
    OperationFailure on read. If batch_size is -1, then all remaining
    documents on the cursor are returned.
    """
    if cursor_id == 0:
        return None

    cursor_info = {'id': cursor_id, 'firstBatch': []}
    _logger.info(
        "collection: {0} cursor_info: {1} retrieved {2} batch_size {3}"
        .format(collection, cursor_id, retrieved, batch_size))
    cursor = CommandCursor(collection, cursor_info, 0, retrieved=retrieved)
    cursor.batch_size(batch_size)
    return cursor
1a4987715e35f1cf09ac3046c36c752289797ee6
3,646,385
def nut00b(date1, date2):
    """
    Wrapper for ERFA function ``eraNut00b``.

    Parameters
    ----------
    date1 : double array
    date2 : double array

    Returns
    -------
    dpsi : double array
    deps : double array

    Notes
    -----
    The ERFA documentation is below.

    - - - - - - - - - -
     e r a N u t 0 0 b
    - - - - - - - - - -

    Nutation, IAU 2000B model.

    Given:
       date1,date2   double    TT as a 2-part Julian Date (Note 1)

    Returned:
       dpsi,deps     double    nutation, luni-solar + planetary (Note 2)

    Notes:

    1) The TT date date1+date2 is a Julian Date, apportioned in any
       convenient way between the two arguments. For example,
       JD(TT)=2450123.7 could be expressed in any of these ways,
       among others:

              date1          date2

           2450123.7           0.0       (JD method)
           2451545.0       -1421.3       (J2000 method)
           2400000.5       50123.2       (MJD method)
           2450123.5           0.2       (date & time method)

       The JD method is the most natural and convenient to use in
       cases where the loss of several decimal digits of resolution
       is acceptable. The J2000 method is best matched to the way
       the argument is handled internally and will deliver the
       optimum resolution. The MJD method and the date & time methods
       are both good compromises between resolution and convenience.

    2) The nutation components in longitude and obliquity are in radians
       and with respect to the equinox and ecliptic of date. The
       obliquity at J2000.0 is assumed to be the Lieske et al. (1977)
       value of 84381.448 arcsec. (The errors that result from using
       this function with the IAU 2006 value of 84381.406 arcsec can be
       neglected.)

       The nutation model consists only of luni-solar terms, but
       includes also a fixed offset which compensates for certain long-
       period planetary terms (Note 7).

    3) This function is an implementation of the IAU 2000B abridged
       nutation model formally adopted by the IAU General Assembly in
       2000. The function computes the MHB_2000_SHORT luni-solar
       nutation series (Luzum 2001), but without the associated
       corrections for the precession rate adjustments and the offset
       between the GCRS and J2000.0 mean poles.

    4) The full IAU 2000A (MHB2000) nutation model contains nearly 1400
       terms. The IAU 2000B model (McCarthy & Luzum 2003) contains only
       77 terms, plus additional simplifications, yet still delivers
       results of 1 mas accuracy at present epochs. This combination of
       accuracy and size makes the IAU 2000B abridged nutation model
       suitable for most practical applications.

       The function delivers a pole accurate to 1 mas from 1900 to 2100
       (usually better than 1 mas, very occasionally just outside
       1 mas). The full IAU 2000A model, which is implemented in the
       function eraNut00a (q.v.), delivers considerably greater accuracy
       at current dates; however, to realize this improved accuracy,
       corrections for the essentially unpredictable free-core-nutation
       (FCN) must also be included.

    5) The present function provides classical nutation. The
       MHB_2000_SHORT algorithm, from which it is adapted, deals also
       with (i) the offsets between the GCRS and mean poles and (ii) the
       adjustments in longitude and obliquity due to the changed
       precession rates. These additional functions, namely frame bias
       and precession adjustments, are supported by the ERFA functions
       eraBi00 and eraPr00.

    6) The MHB_2000_SHORT algorithm also provides "total" nutations,
       comprising the arithmetic sum of the frame bias, precession
       adjustments, and nutation (luni-solar + planetary). These total
       nutations can be used in combination with an existing IAU 1976
       precession implementation, such as eraPmat76, to deliver GCRS-
       to-true predictions of mas accuracy at current epochs. However,
       for symmetry with the eraNut00a function (q.v. for the reasons),
       the ERFA functions do not generate the "total nutations"
       directly. Should they be required, they could of course easily
       be generated by calling eraBi00, eraPr00 and the present function
       and adding the results.

    7) The IAU 2000B model includes "planetary bias" terms that are
       fixed in size but compensate for long-period nutations. The
       amplitudes quoted in McCarthy & Luzum (2003), namely
       Dpsi = -1.5835 mas and Depsilon = +1.6339 mas, are optimized for
       the "total nutations" method described in Note 6. The Luzum
       (2001) values used in this ERFA implementation, namely -0.135 mas
       and +0.388 mas, are optimized for the "rigorous" method, where
       frame bias, precession and nutation are applied separately and in
       that order. During the interval 1995-2050, the ERFA
       implementation delivers a maximum error of 1.001 mas (not
       including FCN).

    References:

       Lieske, J.H., Lederle, T., Fricke, W., Morando, B., "Expressions
       for the precession quantities based upon the IAU /1976/ system of
       astronomical constants", Astron.Astrophys. 58, 1-2, 1-16. (1977)

       Luzum, B., private communication, 2001 (Fortran code
       MHB_2000_SHORT)

       McCarthy, D.D. & Luzum, B.J., "An abridged model of the
       precession-nutation of the celestial pole",
       Cel.Mech.Dyn.Astron. 85, 37-49 (2003)

       Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M.,
       Francou, G., Laskar, J., Astron.Astrophys. 282, 663-683 (1994)

    Copyright (C) 2013-2017, NumFOCUS Foundation.
    Derived, with permission, from the SOFA library. See notes at end of file.
    """
    dpsi, deps = ufunc.nut00b(date1, date2)
    return dpsi, deps
a5235543aca0d6de6e79878ac3db1d208d237a0d
3,646,386
import numpy as np
import pandas as pd

def Join_Factors(*factor_data, merge_names=None, new_name=None, weight=None,
                 style='SAST'):
    """Merge factors by weighted sum. Weights are re-normalized over the
    non-missing factors only.

    Parameters:
    ===========
    factor_data: dataframe or tuple of dataframes
    merge_names: list
        names of the factors to merge; must be a subset of the columns of
        the dataframes
    new_name: str
        name of the merged factor
    weight: list or None
        weights of the factors to merge
    style : str, 'SAST' or 'AST'
        layout of the attribute/symbol/time dimensions in factor_data.
        SAST (Stack Attribute-Symbol-Time) is the most common: the index is
        a Time-Symbol MultiIndex and the columns are attributes. In AST
        (Attribute-Symbol-Time) the index is time and the columns are
        symbols.
    """
    def nansum(a, w):
        nanind = np.isfinite(a)
        if np.sum(nanind) == 0.0:
            return np.nan
        return np.sum(a[nanind] * w[nanind]) / np.sum(w[nanind])

    if new_name is None:
        new_name = 'new'
    if isinstance(merge_names, str):
        merge_names = [merge_names]
    if len(factor_data) == 1:
        if merge_names is None:
            factor_values = factor_data[0].values
        else:
            factor_values = factor_data[0][merge_names].values
    elif style == 'SAST':
        factor_data = align_dataframes(*factor_data)
        # hstack/stack need a sequence, not a generator
        factor_values = np.hstack([x.values for x in factor_data])
    else:
        factor_data = align_dataframes(*factor_data, axis='both')
        factor_values = np.stack([x.values for x in factor_data])

    nfactors = factor_values.shape[1] if factor_values.ndim == 2 else factor_values.shape[0]
    if weight is None:
        weight = np.asarray([1.0 / nfactors] * nfactors)
    else:
        weight = np.asarray(weight) / np.sum(weight)

    if factor_values.ndim == 2:
        weight_array = np.tile(weight, (factor_values.shape[0], 1))
        na_ind = np.isnan(factor_values)
        weight_array[na_ind] = 0.0
        weight_array = weight_array / weight_array.sum(axis=1)[:, np.newaxis]
        new_values = np.nansum(factor_values * weight_array, axis=1)
        new_values[np.all(na_ind, axis=1)] = np.nan
        return pd.DataFrame(new_values, index=factor_data[0].index,
                            columns=[new_name])
    else:
        new_values = np.apply_along_axis(nansum, 0, factor_values, w=weight)
        return pd.DataFrame(new_values, index=factor_data[0].index,
                            columns=factor_data[0].columns)
95db1eda297cb8cb05a1db9b1fae9c25a034685f
3,646,387
from pathlib import Path

def _check_for_file_changes(filepath: Path, config: Config) -> bool:
    """Returns True if a file was modified in a working dir."""
    # Run 'git add' to avoid false negatives, as 'git diff --staged' is used for
    # detection. This is important when there are external factors that impact the
    # committing process (like pre-commit).
    _call_git(config, "add", [filepath.as_posix()])
    git_diff_out = _get_git_output(config, "diff", ["--staged", filepath.as_posix()])
    # If 'git diff' output is empty, the file wasn't modified.
    return git_diff_out != b""
c99da7e993e74f7dbe5789c48832afc59638762c
3,646,388
import time

def wait_or_cancel(proc, title, message):
    """
    Display status dialog while process is running and allow user to cancel

    :param proc: subprocess object
    :param title: title for status dialog
    :param message: message for status dialog
    :return: (process exit code, stdout output or None)
    """
    pDialog = xbmcgui.DialogProgress()
    pDialog.create(title, "")

    # Initialize so the return statement is safe even if communicate() fails.
    exitcode = 1
    stdout = None

    while proc and proc.poll() is None and not pDialog.iscanceled():
        pDialog.update(50, message)

    try:
        if not pDialog.iscanceled():
            msg = proc.communicate()[0]
            exitcode = proc.returncode
            if exitcode == 0:
                stdout = msg
                pDialog.update(100, "Complete!")
                time.sleep(3)
            else:
                xbmcgui.Dialog().ok(
                    "Error during {desc}".format(desc=title.lower()), msg)
                stdout = msg
        else:
            proc.terminate()
            stdout = None
            exitcode = 1
    except Exception:
        pass

    pDialog.close()
    return (exitcode, stdout)
8b60e459523933ee205210d4761b6b7d9d8acbfb
3,646,389
def getg_PyInteractiveBody_one_in_two_out():
    """Return a graph that has a PyInteractiveBody with one input
    and two outputs.
    """
    @dl.Interactive(
        [("num", dl.Int(dl.Size(32)))],
        [('num_out', dl.Int(dl.Size(32))),
         ('val_out', dl.Bool())]
    )
    def interactive_func(node: dl.PythonNode):
        for _ in range(2):
            num = node.receive("num")
            print(f"received num: {num}")
            node.send(num_out=None, val_out=False)
        node.send(num_out=14, val_out=False)

    s0 = dl.lib.StateSaver(bool, condition=lambda x: x)
    s1 = dl.lib.StateSaver(int, verbose=True)

    with dl.DeltaGraph() as graph:
        int_func = interactive_func.call(4)
        s0.save_and_exit_if(int_func.val_out)
        s1.save_and_exit(int_func.num_out)

    return graph
31af32a5ece2f4c76635a8f37a0ac644c5f0e364
3,646,390
def batch_norm_relu(inputs, is_training):
    """Performs a batch normalization followed by a ReLU."""
    # We set fused=True for a performance boost.
    inputs = tf.layers.batch_normalization(
        inputs=inputs,
        axis=FLAGS.input_layout.find('C'),
        momentum=FLAGS.batch_norm_decay,
        epsilon=FLAGS.batch_norm_epsilon,
        center=True,
        scale=True,
        training=is_training,
        fused=FLAGS.use_fused_batchnorm)
    return tf.nn.relu(inputs)
ab771b9d8747bc27d747dd9dce42a6bc9a1d59d3
3,646,391
def knn(points, p, k):
    """
    Calculates the k nearest neighbours of a point.

    :param points: list of points
    :param p: reference point
    :param k: amount of neighbours
    :return: list of k neighbours
    """
    return sorted(points, key=lambda x: distance(p, x))[:k]
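Usage sketch, assuming a Euclidean distance helper (the snippet's own distance function is not shown):

import math

def distance(a, b):  # assumed helper
    return math.dist(a, b)

points = [(0, 0), (1, 1), (5, 5), (2, 2)]
print(knn(points, p=(0, 0), k=2))  # [(0, 0), (1, 1)]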
e1a806cd4c16b5ecbf66301406dafeb2b12c46db
3,646,392
def ruleset_detail(request, slug):
    """
    View that returns the specific ruleset identified by its slug, in JSON
    format.

    :param request: WSGI request from user
    :return: Specific ruleset metadata in JSON format.
    """
    # try to fetch ruleset from database
    try:
        ruleset = Ruleset.objects.get(slug=slug)
    except Ruleset.DoesNotExist:
        return HttpResponse(status=404)

    if request.method == 'GET':
        serializer = RulesetSerializer(ruleset)
        return JsonResponse(serializer.data)
a122a2e20641a13d6a934c0261f199ff304ae622
3,646,393
import requests
import json

def send_slack_notification(message):
    """
    Send slack notification

    Arguments:
        message {string} -- Slack notification message

    Returns:
        response {Response} -- Http response object
    """
    response = requests.post(
        SLACK_WEBHOOK,
        data=json.dumps(
            {
                "text": message,
                "username": USERNAME,
                "channel": CHANNEL,
                "icon_emoji": ICON_EMOJI,
            }
        ),
    )
    return response
6c5f0e51c1bfce19ff9a4aec77c1e4c98cd359fa
3,646,394
def method_detect(method: str):
    """Detects which method to use and returns its object"""
    if method in POSTPROCESS_METHODS:
        if method == "rtb-bnb":
            return RemovingTooTransparentBordersHardAndBlurringHardBorders()
        elif method == "rtb-bnb2":
            return RemovingTooTransparentBordersHardAndBlurringHardBordersTwo()
        else:
            return None
    else:
        return False
cb1dafba5a7c225c093ab602c6e383cb7f499bba
3,646,396
def approve_pipelines_for_publishing(pipeline_ids):  # noqa: E501
    """approve_pipelines_for_publishing  # noqa: E501

    :param pipeline_ids: Array of pipeline IDs to be approved for publishing.
    :type pipeline_ids: List[]

    :rtype: None
    """
    return util.invoke_controller_impl()
585d4972955e240f146c3d06d5a181dcad36d111
3,646,397
def get_x(document_id, word2wid, corpus_termfrequency_vector):
    """
    Get the feature vector of a document.

    Parameters
    ----------
    document_id : int
    word2wid : dict
    corpus_termfrequency_vector : list of int

    Returns
    -------
    list of int
    """
    word_list = list(reuters.words(document_id))
    word_count = float(len(word_list))
    assert word_count > 0
    document_tf_vec = get_termfrequency_vector(word2wid, word_list)
    x = []
    for i, wd_count in enumerate(document_tf_vec):
        x.append(wd_count / (word_count * corpus_termfrequency_vector[i]))
    return x
fca6e5a6071a6b48b83effb37d3b77a88ddf4046
3,646,398
def process_chain_of_trust(host: str, image: Image, req_delegations: list):
    """
    Processes the whole chain of trust, provided by the notary server
    (`host`) for any given `image`. The 'root', 'snapshot', 'timestamp',
    'targets' and potentially 'targets/releases' are requested in this order
    and afterwards validated, also according to the `policy_rule`.

    Returns the signed image targets, which contain the digests.

    Raises `NotFoundException` should no required delegations be present in
    the trust data, or no image targets be found.
    """
    tuf_roles = ["root", "snapshot", "timestamp", "targets"]
    trust_data = {}
    key_store = KeyStore()

    # get all trust data and collect keys (from root and targets), as well as
    # hashes (from snapshot and timestamp)
    for role in tuf_roles:
        trust_data[role] = get_trust_data(host, image, TUFRole(role))
        key_store.update(trust_data[role])

    # if the 'targets.json' has delegation roles defined, get their trust data
    # as well
    if trust_data["targets"].has_delegations():
        for delegation in trust_data["targets"].get_delegations():
            trust_data[delegation] = get_trust_data(host, image, TUFRole(delegation))

    # validate all trust data's signatures, expiry dates and hashes
    for role in trust_data:
        trust_data[role].validate(key_store)

    # validate needed delegations
    if req_delegations:
        if trust_data["targets"].has_delegations():
            delegations = trust_data["targets"].get_delegations()
            req_delegations_set = set(req_delegations)
            delegations_set = set(delegations)
            delegations_set.discard("targets/releases")
            # make an intersection between required delegations and actually
            # present ones
            if not req_delegations_set.issubset(delegations_set):
                missing = list(req_delegations_set - delegations_set)
                raise NotFoundException(
                    "could not find delegation roles {} in trust data.".format(
                        str(missing)
                    )
                )
        else:
            raise NotFoundException("could not find any delegations in trust data.")

    # if certain delegations are required, then only take the targets fields of the
    # required delegation JSON's. otherwise take the targets field of the targets JSON, as
    # long as no delegations are defined in the targets JSON. should there be delegations
    # defined in the targets JSON the targets field of the releases JSON will be used.
    if req_delegations:
        image_targets = [
            trust_data[target_role].signed.get("targets", {})
            for target_role in req_delegations
        ]
    else:
        targets_key = (
            "targets/releases"
            if trust_data["targets"].has_delegations()
            else "targets"
        )
        image_targets = [trust_data[targets_key].signed.get("targets", {})]

    if not any(image_targets):
        raise NotFoundException("could not find any image digests in trust data.")

    return image_targets
391024aeaa814f3159c8f45a925afce105b7b339
3,646,399
# Starlark (Bazel .bzl) helper; `struct`, `depset` and `fail` are Bazel
# built-ins, so no Python import is needed.
def collect_js(
        deps,
        closure_library_base = None,
        has_direct_srcs = False,
        no_closure_library = False,
        css = None):
    """Aggregates transitive JavaScript source files from unfurled deps."""
    srcs = []
    direct_srcs = []
    ijs_files = []
    infos = []
    modules = []
    descriptors = []
    stylesheets = []
    js_module_roots = []
    has_closure_library = False
    for dep in deps:
        srcs += [getattr(dep.closure_js_library, "srcs", depset())]
        ijs_files += [getattr(dep.closure_js_library, "ijs_files", depset())]
        infos += [getattr(dep.closure_js_library, "infos", depset())]
        modules += [getattr(dep.closure_js_library, "modules", depset())]
        descriptors += [getattr(dep.closure_js_library, "descriptors", depset())]
        stylesheets += [getattr(dep.closure_js_library, "stylesheets", depset())]
        js_module_roots += [getattr(dep.closure_js_library, "js_module_roots", depset())]
        has_closure_library = (
            has_closure_library or
            getattr(dep.closure_js_library, "has_closure_library", False)
        )
    if no_closure_library:
        if has_closure_library:
            fail("no_closure_library can't be used when Closure Library is " +
                 "already part of the transitive closure")
    elif has_direct_srcs and not has_closure_library:
        direct_srcs += closure_library_base
        has_closure_library = True
    if css:
        direct_srcs += closure_library_base + [css.closure_css_binary.renaming_map]
    return struct(
        srcs = depset(direct_srcs, transitive = srcs),
        js_module_roots = depset(transitive = js_module_roots),
        ijs_files = depset(transitive = ijs_files),
        infos = depset(transitive = infos),
        modules = depset(transitive = modules),
        descriptors = depset(transitive = descriptors),
        stylesheets = depset(transitive = stylesheets),
        has_closure_library = has_closure_library,
    )
7a243401280646103522ed339ff20c35f05e031d
3,646,400
import termios
import struct
import fcntl

def send_control(uuid, type, data):
    """
    Sends control data to the terminal, as for example resize events
    """
    sp = sessions[uuid]
    if type == 'resize':
        winsize = struct.pack("HHHH", data['rows'], data['cols'], 0, 0)
        fcntl.ioctl(sp['ptymaster'].fileno(), termios.TIOCSWINSZ, winsize)
        return True
    else:
        serverboards.warning("Unknown control type: %s" % (type))
        return False
262ef0ccffac80c0293d1446eb0e38e50b2ce687
3,646,401
def dgausscdf(x):
    """ Derivative of the cumulative distribution function for the
    normal distribution.
    """
    return gausspdf(x)
e968f20ca28555eb50d5766440c5f3f47522c1ff
3,646,404
import flopy
import numpy as np
import xarray as xr
from tqdm import tqdm

def model_datasets_to_rch(gwf, model_ds, print_input=False):
    """convert the recharge data in the model dataset to a recharge package
    with time series.

    Parameters
    ----------
    gwf : flopy.mf6.modflow.mfgwf.ModflowGwf
        groundwater flow model.
    model_ds : xr.DataSet
        dataset containing relevant model grid information
    print_input : bool, optional
        value is passed to flopy.mf6.ModflowGwfrch() to determine if input
        should be printed to the lst file. Default is False

    Returns
    -------
    rch : flopy.mf6.modflow.mfgwfrch.ModflowGwfrch
        recharge package
    """
    # check for nan values
    if model_ds['recharge'].isnull().any():
        raise ValueError('please remove nan values in recharge data array')

    # get stress period data
    if model_ds.steady_state:
        mask = model_ds['recharge'] != 0
        if model_ds.gridtype == 'structured':
            rch_spd_data = mdims.data_array_2d_to_rec_list(
                model_ds, mask, col1='recharge',
                first_active_layer=True,
                only_active_cells=False)
        elif model_ds.gridtype == 'vertex':
            rch_spd_data = mdims.data_array_1d_vertex_to_rec_list(
                model_ds, mask, col1='recharge',
                first_active_layer=True,
                only_active_cells=False)

        # create rch package
        rch = flopy.mf6.ModflowGwfrch(gwf, filename=f'{gwf.name}.rch',
                                      pname=f'{gwf.name}',
                                      fixed_cell=False,
                                      maxbound=len(rch_spd_data),
                                      print_input=print_input,
                                      stress_period_data={0: rch_spd_data})
        return rch

    # transient recharge
    if model_ds.gridtype == 'structured':
        empty_str_array = np.zeros_like(model_ds['idomain'][0], dtype="S13")
        model_ds['rch_name'] = xr.DataArray(empty_str_array,
                                            dims=('y', 'x'),
                                            coords={'y': model_ds.y,
                                                    'x': model_ds.x})
        model_ds['rch_name'] = model_ds['rch_name'].astype(str)

        # dimension check
        if model_ds['recharge'].dims == ('time', 'y', 'x'):
            axis = 0
            rch_2d_arr = model_ds['recharge'].data.reshape(
                (model_ds.dims['time'],
                 model_ds.dims['x'] * model_ds.dims['y'])).T

            # check if reshaping is correct
            if not (model_ds['recharge'].values[:, 0, 0] == rch_2d_arr[0]).all():
                raise ValueError(
                    'reshaping recharge to calculate unique time series did '
                    'not work out as expected')
        elif model_ds['recharge'].dims == ('y', 'x', 'time'):
            axis = 2
            rch_2d_arr = model_ds['recharge'].data.reshape(
                (model_ds.dims['x'] * model_ds.dims['y'],
                 model_ds.dims['time']))

            # check if reshaping is correct
            if not (model_ds['recharge'].values[0, 0, :] == rch_2d_arr[0]).all():
                raise ValueError(
                    'reshaping recharge to calculate unique time series did '
                    'not work out as expected')
        else:
            raise ValueError('expected dataarray with 3 dimensions '
                             f'(time, y and x) or (y, x and time), not {model_ds["recharge"].dims}')

        rch_unique_arr = np.unique(rch_2d_arr, axis=0)
        rch_unique_dic = {}
        for i, unique_rch in enumerate(rch_unique_arr):
            model_ds['rch_name'].data[np.isin(
                model_ds['recharge'].values, unique_rch).all(axis=axis)] = f'rch_{i}'
            rch_unique_dic[f'rch_{i}'] = unique_rch

        mask = model_ds['rch_name'] != ''
        rch_spd_data = mdims.data_array_2d_to_rec_list(
            model_ds, mask, col1='rch_name',
            first_active_layer=True,
            only_active_cells=False)

    elif model_ds.gridtype == 'vertex':
        empty_str_array = np.zeros_like(model_ds['idomain'][0], dtype="S13")
        model_ds['rch_name'] = xr.DataArray(empty_str_array,
                                            dims=('cid'),
                                            coords={'cid': model_ds.cid})
        model_ds['rch_name'] = model_ds['rch_name'].astype(str)

        # dimension check
        if model_ds['recharge'].dims == ('cid', 'time'):
            rch_2d_arr = model_ds['recharge'].values
        elif model_ds['recharge'].dims == ('time', 'cid'):
            rch_2d_arr = model_ds['recharge'].values.T
        else:
            raise ValueError('expected dataarray with 2 dimensions '
                             f'(time, cid) or (cid, time), not {model_ds["recharge"].dims}')

        rch_unique_arr = np.unique(rch_2d_arr, axis=0)
        rch_unique_dic = {}
        for i, unique_rch in enumerate(rch_unique_arr):
            model_ds['rch_name'][(rch_2d_arr == unique_rch).all(
                axis=1)] = f'rch_{i}'
            rch_unique_dic[f'rch_{i}'] = unique_rch

        mask = model_ds['rch_name'] != ''
        rch_spd_data = mdims.data_array_1d_vertex_to_rec_list(
            model_ds, mask, col1='rch_name',
            first_active_layer=True,
            only_active_cells=False)

    # create rch package
    rch = flopy.mf6.ModflowGwfrch(gwf, filename=f'{gwf.name}.rch',
                                  pname='rch',
                                  fixed_cell=False,
                                  maxbound=len(rch_spd_data),
                                  print_input=print_input,
                                  stress_period_data={0: rch_spd_data})

    # get timesteps
    tdis_perioddata = mfpackages.get_tdis_perioddata(model_ds)
    perlen_arr = [t[0] for t in tdis_perioddata]
    time_steps_rch = [0.0] + np.array(perlen_arr).cumsum().tolist()

    # create timeseries packages
    for i, key in tqdm(enumerate(rch_unique_dic.keys()),
                       total=len(rch_unique_dic.keys()),
                       desc="Building ts packages rch"):
        # add extra time step to the time series object (otherwise flopy fails)
        recharge_val = list(rch_unique_dic[key]) + [0.0]
        recharge = list(zip(time_steps_rch, recharge_val))
        if i == 0:
            rch.ts.initialize(filename=f'{key}.ts',
                              timeseries=recharge,
                              time_series_namerecord=key,
                              interpolation_methodrecord='stepwise')
        else:
            rch.ts.append_package(filename=f'{key}.ts',
                                  timeseries=recharge,
                                  time_series_namerecord=key,
                                  interpolation_methodrecord='stepwise')
    return rch
b32442c508e17205737ddb8168fe323b57cfbb2f
3,646,406
from datetime import datetime
from time import sleep
from typing import List

def create_events_to_group(
    search_query: str,
    valid_events: bool,
    group: Group,
    amount: int = 1,
    venue: bool = False,
) -> List[Event]:
    """
    Create random test events and save them to a group

    Arguments:
        search_query {str} -- query param to use for the search request
        valid_events {bool} -- whether the events should be findable by the query term
        group {Group} -- group to which the events are added

    Keyword Arguments:
        amount {int} -- how many events should be created (default: {1})
        venue {bool} -- if a venue should be added to events (default: {False})

    Returns:
        List[Event] -- created & saved events
    """
    created_events: List[Event] = []
    for i in range(0, amount):
        event_name: str = random_string(search_query=search_query, valid=valid_events)
        event: Event = Event(
            meetup_id=event_name,
            time=datetime.now(),
            name=event_name,
            link="http://none",
            date_in_series_pattern=False,
        )

        if venue:
            event.venue_name = event_name
            event.venue_location = {"lat": i + 1, "lon": i + 1}

        created_events.append(event)

    group.add_events(events=created_events)
    group.save()
    sleep(1)
    return created_events
31045c8f9311d677d766d87ed9fc1d6848cc210d
3,646,407
def alt_stubbed_receiver() -> PublicKey:
    """Arbitrary known public key to be used as receiver."""
    return PublicKey("J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i98")
c07461fc060f9dc637e93cadd32604aae892f924
3,646,408
import base64

def create_api_headers(token):
    """
    Create the API header. This is going to be sent along with the
    request for verification.
    """
    # Encode the token before base64-encoding; bytes(str) without an
    # encoding raises TypeError on Python 3.
    auth_type = 'Basic ' + base64.b64encode((token + ":").encode()).decode('ascii')
    return {
        'Authorization': auth_type,
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
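Example with a hypothetical token; the header carries "token:" base64-encoded, i.e. HTTP Basic auth with an empty password:

headers = create_api_headers("my-secret-token")
print(headers["Authorization"])  # Basic bXktc2VjcmV0LXRva2VuOg==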
41ba1e22898dab2d42dde52e4458abc40640e957
3,646,409
def _combine(bundle, transaction_managed=False, rollback=False,
             use_reversion=True):
    """
    Returns one sreg and DHCP output for that SREG.

    If rollback is True the sreg will be created and then rolled back, but
    before the rollback all its HWAdapters will be polled for their DHCP
    output.
    """
    bundle['errors'] = None
    bundle['old-dhcp-output'] = get_all_dhcp_for_system(bundle['system'])
    sreg = StaticReg(
        label=bundle['a'].label, domain=bundle['a'].domain,
        ip_str=bundle['ip'], system=bundle['system'],
        description='Migrated SREG', ip_type=bundle['a'].ip_type
    )
    try:
        bundle['new-dhcp-output'] = (
            "<span class='no-dhcp-output'>No new DHCP output</span>"
        )
        view_names = [v.name for v in bundle['a'].views.all()]
        try:
            bundle['a'].delete(check_cname=False, call_prune_tree=False)
        except ValidationError, e:
            rollback = True
            bundle['errors'] = 'Error while deleting the A record.' + str(e)
            return

        try:
            bundle['ptr'].delete()
        except ValidationError, e:
            rollback = True
            bundle['errors'] = 'Error while deleting the PTR record.' + str(e)
            return

        try:
            sreg.save()
            for name in view_names:
                sreg.views.add(View.objects.get(name=name))
            if use_reversion:
                reversion.set_comment('Migrated via combine()')
        except ValidationError, e:
            rollback = True
            bundle['errors'] = 'Error while creating the SREG record.' + str(e)
            return

        for nic in bundle['hwadapters']:
            hw_info, kvs = nic.emit_hwadapter()
            if not hw_info['mac']:
                rollback = True
                return
            try:
                hw, _ = HWAdapter.objects.get_or_create(
                    sreg=sreg, mac=hw_info['mac']
                )
                # HWAdapter class does this for us.
                #hw.name = hw_info['name'].replace
                hw.save()
            except ValidationError, e:
                rollback = True
                bundle['errors'] = 'Error while creating HW Adapter'
                return
            try:
                for kv in kvs:
                    if kv['key'] in ('hostname', 'option_hostname'):
                        # If the option host-name value matches the SREG fqdn
                        # we don't need to add the option, it will be added by
                        # default. all other cases it will be overriden.
                        if kv['value'] == sreg.fqdn:
                            continue
                        else:
                            key = 'host_name'
                    else:
                        key = kv['key']

                    if HWAdapterKeyValue.objects.filter(key=key, obj=hw).exists():
                        pass
                    else:
                        kv_ = HWAdapterKeyValue(
                            key=key, value=kv['value'], obj=hw
                        )
                        kv_.clean()
                        kv_.save()
                for kv in nic._nic:
                    SystemKeyValue.objects.filter(pk=kv.pk).delete()
            except ValidationError, e:
                transaction.rollback()
                bundle['errors'] = (
                    'Error while creating HW Adapter KeyValue. ' + str(e)
                )
                return

        bundle['new-dhcp-output'] = get_all_dhcp_for_system(bundle['system'])
        return sreg
    finally:
        if not transaction_managed:
            if rollback:
                transaction.rollback()
            else:
                transaction.commit()
0171e804e4f10167d85e92608a09bca55308edfa
3,646,410
def get_node_session(*args, **kwargs):
    """Creates a NodeSession instance using the provided connection data.

    Args:
        *args: Variable length argument list with the connection data used
               to connect to the database. It can be a dictionary or a
               connection string.
        **kwargs: Arbitrary keyword arguments with connection data used to
                  connect to the database.

    Returns:
        mysqlx.XSession: XSession object.
    """
    settings = _get_connection_settings(*args, **kwargs)
    if "routers" in settings:
        raise InterfaceError("NodeSession expects only one pair of host and port")

    return NodeSession(settings)
bb992b7e49a698dfb7b54b1492616913a6b5df27
3,646,411
def edit_role(payload, search_term):
    """Find and edit the role."""
    role = Role.query.get(search_term)
    # if edit request == stored value
    if not role:
        return response_builder(dict(status="fail",
                                     message="Role does not exist."), 404)
    try:
        if payload["name"] == role.name:
            return response_builder(dict(
                data=dict(path=role.serialize()),
                message="No change specified."
            ), 200)
        else:
            old_role_name = role.name
            role.name = payload["name"]
            role.save()

            return response_builder(dict(
                data=dict(path=role.serialize()),
                message="Role {} has been changed"
                        " to {}.".format(old_role_name, role.name)
            ), 200)
    except KeyError:
        return response_builder(
            dict(status="fail",
                 message="Name to edit to must be provided."), 400)
8690c8fc1c1aea5245d9cef540c355a2903a8484
3,646,413
def use_redis_cache(key, ttl_sec, work_func):
    """Attempts to return the value by key, otherwise caches and returns the
    result of `work_func`"""
    redis = redis_connection.get_redis()
    cached_value = get_pickled_key(redis, key)
    if cached_value:
        return cached_value
    to_cache = work_func()
    pickle_and_set(redis, key, to_cache, ttl_sec)
    return to_cache
a2c631466aef18c7bb640b17e57421e257ad7314
3,646,414
def counting_sort(array, low, high):
    """Counting sort.

    Sort the given list 'array', knowing that it contains only values in
    the interval from 'low' to 'high' (both endpoints included).
    Return the sorted list.
    """
    counts = [0 for i in range(high - low + 1)]
    for elem in array:
        counts[elem - low] += 1

    current = 0
    for i in range(high - low + 1):
        for j in range(current, current + counts[i]):
            array[j] = i + low
        current += counts[i]

    return array
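Example run:

print(counting_sort([5, 3, 8, 3, 5, 6], low=3, high=8))
# [3, 3, 5, 5, 6, 8]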
bd4ccccdb24786ec3f3d867afe1adf340c9e53b5
3,646,415
import re

def normalize_archives_url(url):
    """
    Normalize url.

    will try to infer, find or guess the most useful archives URL, given
    a URL.

    Return normalized URL, or the original URL if no improvement is found.
    """
    # change new IETF mailarchive URLs to older, still available text .mail archives
    new_ietf_exp = re.compile(
        "https://mailarchive\\.ietf\\.org/arch/search/"
        "\\?email_list=(?P<list_name>[\\w-]+)"
    )
    ietf_text_archives = (
        r"https://www.ietf.org/mail-archive/text/\g<list_name>/"
    )
    new_ietf_browse_exp = re.compile(
        r"https://mailarchive.ietf.org/arch/browse/(?P<list_name>[\w-]+)/?"
    )

    match = new_ietf_exp.match(url)
    if match:
        return re.sub(new_ietf_exp, ietf_text_archives, url)

    match = new_ietf_browse_exp.match(url)
    if match:
        return re.sub(new_ietf_browse_exp, ietf_text_archives, url)

    return url
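Example of both rewrites (the list name "quic" is illustrative):

print(normalize_archives_url(
    "https://mailarchive.ietf.org/arch/search/?email_list=quic"))
print(normalize_archives_url(
    "https://mailarchive.ietf.org/arch/browse/quic/"))
# both print: https://www.ietf.org/mail-archive/text/quic/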
e8a5351af28338c77c3e94fdf2b81e22c7a6edfd
3,646,416
def getIsolatesFromIndices(indices):
    """
    Extracts the isolates from the indices of a df_X.

    :param pandas.index indices: cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
    :return dict: keyed by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
        values correspond to rows element in the index
    """
    keys = [n for n in indices.names]
    result = {}
    for idx, key in enumerate(keys):
        result[key] = [v[idx] for v in indices.values]
    return result
4e9200c722ce0c478d13eddcc799f4a8f7cab6db
3,646,418
def save_group_geo_org(user_id, group_id, area_id, org_unit_id):
    """Method for attaching org units and sub-counties."""
    try:
        if org_unit_id:
            geo_org_perm, ctd = CPOVCUserRoleGeoOrg.objects.update_or_create(
                user_id=user_id, group_id=group_id,
                org_unit_id=org_unit_id, is_void=False,
                defaults={'area_id': area_id, 'org_unit_id': org_unit_id,
                          'user_id': user_id, 'group_id': group_id,
                          'is_void': False},)
        geo_org_perm, ctd = CPOVCUserRoleGeoOrg.objects.update_or_create(
            user_id=user_id, group_id=group_id, area_id=area_id,
            is_void=False,
            defaults={'area_id': area_id, 'org_unit_id': org_unit_id,
                      'user_id': user_id, 'group_id': group_id,
                      'is_void': False},)
    except Exception, e:
        error = 'Error searching org unit -%s' % (str(e))
        print error
        return None
    else:
        return geo_org_perm, ctd
ed7750760405e12f790454e247e54917184e7044
3,646,419
def tf_efficientnet_lite0(pretrained=False, **kwargs):
    """ EfficientNet-Lite0 """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet_lite(
        'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
    return model
49ea1c68f168ad613222808e2fbb1ead52190243
3,646,420
import ast
from typing import Optional

def get_qualname(node: ast.AST) -> Optional[str]:
    """
    If node represents a chain of attribute accesses, return its qualified name.
    """
    parts = []
    while True:
        if isinstance(node, ast.Name):
            parts.append(node.id)
            break
        elif isinstance(node, ast.Attribute):
            parts.append(node.attr)
            node = node.value
        else:
            return None
    return '.'.join(reversed(parts))
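Example with a parsed expression:

import ast

node = ast.parse("os.path.join", mode="eval").body
print(get_qualname(node))  # os.path.join
print(get_qualname(ast.parse("f()", mode="eval").body))  # None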
0d08b25a50b7d159f5df3b0b17282725eb748f38
3,646,422
def traceUsage(addr, register, steps):
    """
    Given a start address, a register which holds a value and the number of
    steps, this function disassembles forward #steps instructions and traces
    the value of <register> until it is used in a call instruction. It then
    returns the offset added to <register> and the address of the call.

    Note that this tracing is very basic and does neither handle multiple
    registers at the same time nor any other modification than adding
    constants.

    e.g.:
    00401622 mov eax, g_IAT      //start at addr = 0x00401622, register = "eax"
    00401627 mov ecx, [eax+0Ch]  //trace ecx, forget eax from now on. Save offset "0x0C"
    0040162A push edx            //ignore
    0040162B call ecx            //return offset 0x0c and address 0x0040162B
    """
    potentialOffset = -1
    localRegister = register
    for step in range(steps):
        addr = NextHead(addr)
        dis = GetMnem(addr)
        if dis == 'mov' and localRegister in GetOpnd(addr, 1):
            # look for e.g. "mov eax, [<register>+1CCh]"
            potentialOffset = GetOpnd(addr, 1)
            if potentialOffset[0] != '[' or potentialOffset[-1] != ']':
                continue
            potentialOffset = potentialOffset[1:-1]  # "<register>+1CCh"
            if '+' in potentialOffset:
                # we might have had "mov ecx, [eax]", so there is no plus
                potentialOffset = potentialOffset.split(register + '+')[1]  # "1CCh"
            else:
                potentialOffset = "0"
            if potentialOffset.endswith('h'):
                potentialOffset = int(potentialOffset[:-1], 16) / 4
            else:
                potentialOffset = int(potentialOffset) / 4
            # get new register to search for upcoming call-instruction
            localRegister = GetOpnd(addr, 0)
        elif dis == 'call' and GetOpnd(addr, 0) == localRegister:
            return potentialOffset, addr
    if potentialOffset != -1:
        print "[-] Error: Got potentialOffset %s but no corresponding call - maybe increase the steps range?" % (str(potentialOffset))
    return -1, -1
78c805af660b5e98348de1bd1ae4b7ce9a57238b
3,646,423
def array3d(surface):
    """pygame.surfarray.array3d (Surface): return array

    Copy pixels into a 3d array.

    Copy the pixels from a Surface into a 3D array. The bit depth of the
    surface will control the size of the integer values, and will work
    for any type of pixel format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    global numpysf
    try:
        return numpysf.array3d(surface)
    except AttributeError:
        return numpysf.array3d(surface)
a2079a540453d5ba69f5b10e292341ef6fcfb972
3,646,425
import torch
import torch.nn.functional as F

def masked_kl_div(input, target, mask):
    """Evaluate masked KL divergence between input activations and target distribution.

    Parameters:
        input (tensor) - NxD batch of D-dimensional activations (un-normalized log distribution).
        target (tensor) - NxD normalized target distribution.
        mask (tensor, torch.bool) - NxD mask of elements to include in calculation.

    Returns:
        Nx1 tensor of cross-entropy calculation results.
    """
    input = input.clone()
    input[~mask] = -float('inf')
    log_q = F.log_softmax(input, dim=1)
    log_q[~mask] = 0
    log_p = torch.log(target)
    log_p[~mask] = 0
    KLi = target * (log_p - log_q)
    KLi[target == 0] = 0
    KL = torch.sum(KLi, dim=1, keepdim=True)
    return KL
afdd704bac7caabd7d0cbbd2599af6c1a440ae1c
3,646,426
import numpy as np

def find_peaks(ts, mindist=100):
    """
    Find peaks in time series
    :param ts:
    :return:
    """
    extreme_value = -np.inf
    extreme_idx = 0
    peakvalues = []
    peaktimes = []
    find_peak = True
    idx = 0
    for r in ts.items():  # .iteritems() was removed in pandas 2.0
        # print(r)
        if find_peak:
            # look for maximum
            if r[1] > extreme_value:
                # update current maximum point
                extreme_value = r[1]
                extreme_idx = idx
            elif r[1] + mindist < extreme_value:
                # consider current maximum a peak
                peakvalues.append(extreme_value)
                peaktimes.append(extreme_idx)
                # update current maximum
                extreme_value = r[1]
                extreme_idx = idx
                find_peak = False
        else:
            # look for minimum
            if r[1] < extreme_value:
                # update value
                extreme_value = r[1]
                extreme_idx = idx
            elif r[1] - mindist > extreme_value:
                extreme_value = r[1]
                extreme_idx = idx
                find_peak = True
        idx += 1
    return peakvalues, peaktimes
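A small demonstration with a pandas Series (the returned peak indices are positional, not the Series index):

import pandas as pd

ts = pd.Series([0, 200, 50, 300, 20, 250, 10])
print(find_peaks(ts, mindist=100))  # ([200, 300, 250], [1, 3, 5])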
5f4dbf0b6c9e4e8961c14b1ba255ebcdf210c50b
3,646,428
from Bio import SeqIO

def get_flanking_seq(genome, scaffold, start, end, flanking_length):
    """
    Get flanking based on Blast hit
    """
    for rec in SeqIO.parse(genome, "fasta"):
        if rec.id == scaffold:
            return str(
                rec.seq[int(start) - int(flanking_length): int(end) + int(flanking_length)]
            )
509002a7099ad62b0449e1c5de9a1a7dd875bc0c
3,646,430
import re

def d(vars):
    """List of variables starting with string "df" in reverse order.

    Usage: d(dir())

    @vars list of variables output by dir() command
    """
    list_of_dfs = [item for item in vars
                   if (item.find('df') == 0
                       and item.find('_') == -1
                       and item != 'dfs')]
    list_of_dfs.sort(key=lambda x: int(re.sub("[^0-9]", "", x.replace('df', '')))
                     if len(x) > 2 else 0,
                     reverse=True)
    return list_of_dfs
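Example in an interactive session (the stand-in names are hypothetical):

df1 = df2 = df10 = "placeholder"
print(d(dir()))  # ['df10', 'df2', 'df1']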
4961ae70a61e45b81e06e55ee9553ff61fd45d18
3,646,431
import inspect

def get_class_namespaces(cls: type) -> tuple[Namespace, Namespace]:
    """
    Return the module a class is defined in and its internal dictionary

    Returns:
        globals, locals
    """
    return inspect.getmodule(cls).__dict__, cls.__dict__ | {cls.__name__: cls}
46f275bcc328d9ca87ffdebf616d42096705d3fb
3,646,432
from .io import select_driver

def write_stream(path, sync=True, *args, **kwargs):
    """Creates a writer object (context manager) to write multiple dataframes
    into one file. Must be used as context manager.

    Parameters
    ----------
    path : str, filename or path to database table
    sync : bool, default True
        Set to `False` to run the writer in the background process.
    args, kwargs : parameters passed to writer driver (see erde.io modules)

    Example:

        with write_stream('/tmp/my_file.gpkg') as write:
            for df in data_generator():
                write(df)
    """
    dr, pm = select_driver(path)
    return dr.write_stream(path, sync=sync, *args, **kwargs)
8e2274e102b60b139b6e40f425682d06268e10a5
3,646,433
from typing import Dict

def diff(
    df: DataFrame,
    columns: Dict[str, str],
    periods: int = 1,
    axis: PandasAxis = PandasAxis.ROW,
) -> DataFrame:
    """
    Calculate row-by-row or column-by-column difference for select columns.

    :param df: DataFrame on which the diff will be based.
    :param columns: columns on which to perform diff, mapping source column to
           target column. For instance, `{'y': 'y'}` will replace the column `y`
           with the diff value in `y`, while `{'y': 'y2'}` will add a column `y2`
           based on diff values calculated from `y`, leaving the original column
           `y` unchanged.
    :param periods: periods to shift for calculating difference.
    :param axis: 0 for row, 1 for column. default 0.
    :return: DataFrame with diffed columns
    :raises QueryObjectValidationError: If the request is incorrect
    """
    df_diff = df[columns.keys()]
    df_diff = df_diff.diff(periods=periods, axis=axis)
    return _append_columns(df, df_diff, columns)
38ed83fc7e1847a2c9e31abb217990becc1bc04f
3,646,434
import base64

def decodeTx(data: bytes) -> Transaction:
    """Function to convert base64 encoded data into a transaction object

    Args:
        data (bytes): the data to convert

    Returns a transaction object
    """
    data = base64.b64decode(data)
    if data[:1] != tx_flag:
        return None
    timestamp = float(data[1:21].decode('utf-8'))
    hash = data[21:53].hex()
    script_sig = data[53:117].hex()
    inputs = []
    outputs = []
    io = data[117:].split(array_flag)
    for x in io:
        if x[:1] == tx_in:
            pub_key = x[1:34].hex()
            sig = x[34:98].hex()
            utxoRef = x[98:].decode('utf-8')
            inputs.append(Input(utxoRef, pub_key, sig))
        elif x[:1] == tx_out:
            addr = x[1:35].decode('utf-8')
            amount = float(x[35:].decode('utf-8'))
            outputs.append(Output(addr, amount))
    tx = Transaction(inputs, outputs)
    tx.timestamp = timestamp
    tx.hash = hash
    tx.script_sig = script_sig
    return tx
da52e9dcb641d2986fa47d15f9da8d1edea28659
3,646,435
def create_package_from_datastep(table):
    """Create an importable model package from a score code table.

    Parameters
    ----------
    table : swat.CASTable
        The CAS table containing the score code.

    Returns
    -------
    BytesIO
        A byte stream representing a ZIP archive which can be imported.

    See Also
    --------
    :meth:`model_repository.import_model_from_zip <.ModelRepository.import_model_from_zip>`
    """
    assert 'DataStepSrc' in table.columns
    sess = table.session.get_connection()

    dscode = table.to_frame().loc[0, 'DataStepSrc']

    file_metadata = [{'role': 'score', 'name': 'dmcas_scorecode.sas'}]

    zip_file = _build_zip_from_files({
        'fileMetadata.json': file_metadata,
        'dmcas_scorecode.sas': dscode
    })
    return zip_file
0874f1a755ed73af09091a7c0f1b3fb3e5e861e4
3,646,436
import numpy as np

def _test_diff(diff: list[float]) -> tuple[float, float, float]:
    """Sequential test on the median difference, adjusted for multiple testing.

    The test is one-sided, so the p-value must be doubled, but the test is
    run twice.
    """
    _, upper = seq.median_conf_bound(diff, config.P_VALUE / population.count())
    return float(np.median(diff)), upper, np.max(diff)
024d0eaba612361e4fef39839bfd31474d5be5a6
3,646,437
def get_repo_of_app_or_library(app_or_library_name):
    """This function takes an app or library name and will return the
    corresponding repo for that app or library"""
    specs = get_specs()
    repo_name = specs.get_app_or_lib(app_or_library_name)['repo']
    if not repo_name:
        return None
    return Repo(repo_name)
72c0349354fdc11da3ff16f2dfa3126eb02fa381
3,646,438
import datetime

def get_index_price_change_by_ticker(fromdate: str, todate: str,
                                     market: str = "KOSPI") -> DataFrame:
    """Price change of all indices over the given period

    Args:
        fromdate (str          ): query start date (YYMMDD)
        todate   (str          ): query end date (YYMMDD)
        market   (str, optional): market to query (KOSPI/KOSDAQ/RKX/themes)

    Returns:
        DataFrame:
            >> get_index_price_change_by_ticker("20210101", "20210130")

                                  open    close     change      volume            value
            index name
            코스피              2873.47  3152.18   9.703125  7162398637  149561467924511
            코스피 200           389.29   430.22  10.507812  2221276866  119905899468167
            코스피 100          2974.06  3293.96  10.757812  1142234783   95023508273187
            코스피 50           2725.20  3031.59  11.242188   742099360   79663247553065
            코스피 200 중소형주  1151.78  1240.92   7.738281  1079042083   24882391194980
    """
    if isinstance(fromdate, datetime.datetime):
        fromdate = _datetime2string(fromdate)

    if isinstance(todate, datetime.datetime):
        todate = _datetime2string(todate)

    fromdate = fromdate.replace("-", "")
    todate = todate.replace("-", "")

    # business-day check, required by a limitation of the KRX web server
    fromdate = get_nearest_business_day_in_a_week(fromdate, prev=False)
    todate = get_nearest_business_day_in_a_week(todate)

    return krx.get_index_price_change_by_ticker(fromdate, todate, market)
6d65ffeaccd1e5fe307e1e5387e413db3c2eb5fe
3,646,439
def axpy(alpha, x, y, stream=None):
    """y <- alpha*x + y"""
    global _blas
    if not isinstance(alpha, Number):
        raise ValueError('alpha is not a numeric type')
    validate_argument_dtype(x, 'x')
    validate_argument_dtype(y, 'y')

    if not _blas:
        _blas = Blas()
    _blas.stream = stream

    dtype = promote(promote(type(alpha), x.dtype), y.dtype)
    yf = colmajor(y, dtype, 'y')
    _blas.axpy(dtype.type(alpha), x.astype(dtype), yf)

    if y.dtype == yf.dtype and not alias(y, yf):
        y[:] = yf
        return y
    else:
        return yf
10b8c46b1fc160d637241750c408957b8f184ee9
3,646,440
def _unenroll_get_hook(app_context):
    """Add field to unenroll form offering data removal, if policy supports."""
    removal_policy = _get_removal_policy(app_context)
    return removal_policy.add_unenroll_additional_fields(app_context)
6c8e6a06d45fecfa8828ce8a24ca9e1e910b1e9c
3,646,441
from typing import Union

import pandas as pd

def query_fetch_bom_df(search_key: str, size: int) -> Union[pd.DataFrame, None]:
    """Fetch and return bom dataframe of the article

    Runs recursive query on database to fetch the bom.
    """
    # Recursive query
    raw_query = f"""WITH cte AS
    (
        SELECT *
        FROM [{DB_NAME}].[dbo].[{SQL_T_BOM}]
        WHERE father = '{search_key}'

        UNION ALL

        SELECT p.*
        FROM [{DB_NAME}].[dbo].[{SQL_T_BOM}] p
        INNER JOIN cte ON cte.child = p.father
        WHERE cte.child Like '%{size}'
            OR cte.child Like '%l'
            OR cte.child Like '%g'
            OR cte.child Like '%x'
            OR cte.child Like '%b'
            OR cte.child Like '%r'
            OR cte.child Like '%k'
            OR cte.child Like '%c'
            OR cte.child Like '4-pux%'
            OR cte.child Like '4-cca-ang%'
    )
    SELECT *
    FROM cte
    ORDER BY cte.process_order, cte.father, cte.child
    option (maxrecursion 100);"""

    df = None
    try:
        df = pd.read_sql(raw_query, engine)
    except Exception:
        df = None
    return df
753f0378590df1c2b3e50f7bad8d2b15490ae488
3,646,442
def zscore(collection, iteratee=None):
    """Calculate the standard score assuming normal distribution. If iteratee
    is passed, each element of `collection` is passed through an iteratee
    before the standard score is computed.

    Args:
        collection (list|dict): Collection to process.
        iteratee (mixed, optional): Iteratee applied per iteration.

    Returns:
        float: Calculated standard score.

    Example:

        >>> results = zscore([1, 2, 3])
        >>> # [-1.224744871391589, 0.0, 1.224744871391589]

    .. versionadded:: 2.1.0
    """
    array = pyd.map_(collection, iteratee)
    avg = mean(array)
    sig = std_deviation(array)

    return pyd.map_(array, lambda item: (item - avg) / sig)
a813295f6cce309b936b94a9d70f082f435a4b89
3,646,443
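The same standardization computed by hand for the docstring example (pure Python, independent of the pydash helpers used above; mean and population standard deviation):

data = [1, 2, 3]
avg = sum(data) / len(data)                                    # 2.0
sig = (sum((x - avg) ** 2 for x in data) / len(data)) ** 0.5   # ~0.8165
print([(x - avg) / sig for x in data])
# [-1.224744871391589, 0.0, 1.224744871391589]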
from typing import Tuple


def AND(
        *logicals: Tuple[func_xltypes.XlExpr]
) -> func_xltypes.XlBoolean:
    """Determine if all conditions in a test are TRUE

    https://support.office.com/en-us/article/
    and-function-5f19b2e8-e1df-4408-897a-ce285a19e9d9
    """
    if not logicals:
        raise xlerrors.NullExcelError('logical1 is required')

    # Use delayed evaluation to minimize the amount of values to evaluate.
    for logical in logicals:
        val = logical()
        for item in xl.flatten([val]):
            if func_xltypes.Blank.is_blank(item):
                continue
            if not bool(item):
                return False

    return True
ebdc5c4f2c3cab31a78507923eded284eb679fd4
3,646,444
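The delayed-evaluation idea in AND above, sketched with plain lambdas standing in for func_xltypes.XlExpr (hypothetical values; the real call sites pass compiled Excel expressions):

exprs = [lambda: True, lambda: 1, lambda: 0]
# Each logical is only called when its turn comes, so an early False
# short-circuits the remaining, potentially expensive, expressions.
result = all(bool(e()) for e in exprs)
print(result)   # False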
def check_mask(mask): """Check if mask is valid by its area""" area_ratio = np.sum(mask) / float(mask.shape[0] * mask.shape[1]) return (area_ratio > MASK_THRES_MIN) and (area_ratio < MASK_THRES_MAX)
a82f415d95ea07571da2aabeeddc6837b0a80f8d
3,646,445
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB


def supported_estimators():
    """Return a `dict` of supported estimators, keyed by class name."""
    allowed = {
        'LogisticRegression': LogisticRegression,
        'RandomForestClassifier': RandomForestClassifier,
        'DecisionTreeClassifier': DecisionTreeClassifier,
        'KNeighborsClassifier': KNeighborsClassifier,
        'MultinomialNB': MultinomialNB,
        'GaussianNB': GaussianNB,
        'BernoulliNB': BernoulliNB
    }
    return allowed
1bb76e81252c3b959a376f23f2462d4faef234a9
3,646,446
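A usage sketch for supported_estimators above, resolving an estimator class from a configuration string (the 'model' key is hypothetical):

config = {"model": "RandomForestClassifier"}
estimators = supported_estimators()
if config["model"] not in estimators:
    raise ValueError(f"unsupported estimator: {config['model']}")
clf = estimators[config["model"]]()   # instantiate with default hyperparameters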
from hiicart.gateway.base import GatewayError
from hiicart.gateway.amazon.gateway import AmazonGateway
from hiicart.gateway.google.gateway import GoogleGateway
from hiicart.gateway.paypal.gateway import PaypalGateway
from hiicart.gateway.paypal2.gateway import Paypal2Gateway
from hiicart.gateway.paypal_adaptive.gateway import PaypalAPGateway
from hiicart.gateway.braintree.gateway import BraintreeGateway
from hiicart.gateway.authorizenet.gateway import AuthorizeNetGateway
from hiicart.gateway.paypal_express.gateway import PaypalExpressCheckoutGateway
from hiicart.gateway.stripe.gateway import StripeGateway


def validate_gateway(gateway):
    """Test that a gateway is correctly set up.

    Returns True if successful, or an error message."""
    gateways = {
        'amazon': AmazonGateway,
        'google': GoogleGateway,
        'paypal': PaypalGateway,
        'paypal2': Paypal2Gateway,
        'paypal_adaptive': PaypalAPGateway,
        'paypal_express': PaypalExpressCheckoutGateway,
        'braintree': BraintreeGateway,
        'authorizenet': AuthorizeNetGateway,
        'stripe': StripeGateway
    }
    try:
        cls = gateways[gateway]
        obj = cls()
        return obj._is_valid() or "Authentication Error"
    except GatewayError as err:
        return str(err)
c60e3e88cf6bb919208821d8ee214368d39dc7f6
3,646,447
import sqlite3
from contextlib import closing


def execute_query(db, query):
    """Get data from the database as a list of dicts, one per row."""
    result = []
    with closing(sqlite3.connect(db)) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        for row in cur.execute(query):
            result.append({name: row[name] for name in row.keys()})
    return result
75476c8a9f14751eb46fc2891ba5e7bddecd3c0e
3,646,448
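A quick usage sketch for execute_query above, against a throwaway SQLite file (schema is hypothetical):

import os
import sqlite3
import tempfile

path = os.path.join(tempfile.gettempdir(), "demo.db")
with sqlite3.connect(path) as conn:
    conn.execute("CREATE TABLE IF NOT EXISTS users (id INTEGER, name TEXT)")
    conn.execute("INSERT INTO users VALUES (1, 'ada')")
print(execute_query(path, "SELECT id, name FROM users"))
# [{'id': 1, 'name': 'ada'}]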
def to_mgb_supported_dtype(dtype_): """get the dtype supported by megbrain nearest to given dtype""" if ( dtype.is_lowbit(dtype_) or dtype.is_quantize(dtype_) or dtype.is_bfloat16(dtype_) ): return dtype_ return _detail._to_mgb_supported_dtype(dtype_)
864b5bb7099771705ad478e5e89db8f3035f1c4f
3,646,450
def get_reset_state_name(t_fsm): """ Returns the name of the reset state. If an .r keyword is specified, that is the name of the reset state. If the .r keyword is not present, the first state defined in the transition table is the reset state. :param t_fsm: blifparser.BlifParser().blif.fsm object :return str reset_state: name of the reset state """ reset_state = None if t_fsm.r is None: if len(t_fsm.transtable) > 0: reset_state = t_fsm.transtable[0][1] else: reset_state = t_fsm.r.name return reset_state
c65ea80f94f91b31a179faebc60a97f7260675c4
3,646,451
def gridmake(*arrays):
    """
    Expands one or more vectors (or matrices) into a matrix where rows span the
    cartesian product of combinations of the input arrays. Each column of the
    input arrays will correspond to one column of the output matrix.

    Parameters
    ----------
    *arrays : tuple/list of np.ndarray
        Tuple/list of vectors to be expanded.

    Returns
    -------
    out : np.ndarray
        The cartesian product of combinations of the input arrays.

    Notes
    -----
    Based on the original function ``gridmake`` in the CompEcon toolbox by
    Miranda and Fackler.

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
    and Finance, MIT Press, 2002.

    """
    if all([i.ndim == 1 for i in arrays]):
        d = len(arrays)
        if d == 1:
            # A single vector expands to a one-column matrix
            return arrays[0].reshape(-1, 1)
        out = _gridmake2(arrays[0], arrays[1])
        for arr in arrays[2:]:
            out = _gridmake2(out, arr)
        return out
    else:
        raise NotImplementedError("gridmake currently supports 1-d input arrays only")
56c5375024170fbd599500c0603e0e3dcc7f53d4
3,646,452
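A quick check of gridmake's cartesian-product contract using NumPy directly (this sketch assumes the conventional CompEcon ordering, where the first input varies fastest; the exact row order depends on _gridmake2):

import numpy as np

a = np.array([1, 2])
b = np.array([10, 20, 30])
expected = np.column_stack([np.tile(a, b.size), np.repeat(b, a.size)])
print(expected)
# [[ 1 10]
#  [ 2 10]
#  [ 1 20]
#  [ 2 20]
#  [ 1 30]
#  [ 2 30]]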
import logging
import math


def pagerotate(document: vp.Document, clockwise: bool):
    """Rotate the page by 90 degrees.

    This command rotates the page by 90 degrees counter-clockwise. If the
    `--clockwise` option is passed, it rotates the page clockwise instead.

    Note: if the page size is not defined, a warning is logged and the page
    is left unrotated.
    """
    page_size = document.page_size
    if page_size is None:
        logging.warning("pagerotate: page size is not defined, page not rotated")
        return document
    w, h = page_size
    if clockwise:
        document.rotate(math.pi / 2)
        document.translate(h, 0)
    else:
        document.rotate(-math.pi / 2)
        document.translate(0, w)
    document.page_size = h, w
    return document
37f0a9e726f490c357afb48ace49484cfcae84ce
3,646,453
import torch
from torch.autograd import Variable


def gauss_reparametrize(mu, logvar, n_sample=1):
    """Gaussian reparametrization: draw z ~ N(mu, exp(logvar)) as eps * std + mu"""
    std = logvar.mul(0.5).exp_()
    size = std.size()
    eps = Variable(std.data.new(size[0], n_sample, size[1]).normal_())
    z = eps.mul(std[:, None, :]).add_(mu[:, None, :])
    z = torch.clamp(z, -4., 4.)
    return z.view(z.size(0) * z.size(1), z.size(2), 1, 1)
5c4fa87c5287aae3727608a003c3c91c2ba5c1a9
3,646,456
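The same reparameterization trick in modern PyTorch, without the legacy Variable wrapper (an independent sketch, not this module's API):

import torch

mu = torch.zeros(4, 8)
logvar = torch.zeros(4, 8)
n_sample = 3
std = (0.5 * logvar).exp()
eps = torch.randn(4, n_sample, 8)
z = (eps * std[:, None, :] + mu[:, None, :]).clamp(-4., 4.)
print(z.shape)   # torch.Size([4, 3, 8])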
def forward_pass(img, session, images_placeholder, phase_train_placeholder, embeddings, image_size):
    """Feeds an image to the FaceNet model and returns a 128-dimension embedding
    for facial recognition.

    Args:
        img: image file (numpy array).
        session: The active Tensorflow session.
        images_placeholder: placeholder of the 'input:0' tensor of the pre-trained FaceNet model graph.
        phase_train_placeholder: placeholder of the 'phase_train:0' tensor of the pre-trained FaceNet model graph.
        embeddings: placeholder of the 'embeddings:0' tensor from the pre-trained FaceNet model graph.
        image_size: (int) required square image size.

    Returns:
         embedding: (numpy array) of 128 values after the image is fed to the FaceNet model.
    """
    # If there is a human face
    if img is not None:
        # Prewhiten (normalize) the pixel values to reduce noise and improve
        # accuracy, and resize the image to the required square size
        image = load_img(
            img=img, do_random_crop=False, do_random_flip=False,
            do_prewhiten=True, image_size=image_size
        )
        # Run forward pass on the FaceNet model to calculate the embedding
        feed_dict = {images_placeholder: image, phase_train_placeholder: False}
        embedding = session.run(embeddings, feed_dict=feed_dict)
        return embedding

    else:
        return None
846c05a167e116ca4efbe3888486a3ee740d33ef
3,646,458
import urllib.request

# Default request headers; a browser-like User-Agent avoids trivial bot blocks
# (assumed module-level value -- the original relied on an undefined `headers` global).
headers = {"User-Agent": "Mozilla/5.0"}


def check_url(url):
    """Returns True if the url returns a response code in the 2xx range,
    otherwise returns False.
    """
    try:
        req = urllib.request.Request(url, headers=headers)
        response = urllib.request.urlopen(req)
        return response.code in range(200, 300)
    except Exception:
        return False
79f20eeb14724b728f020ff4c680e49f6a1a2473
3,646,459
import sklearn.inspection


def build_permutation_importance(
        data, data_labels, feature_names, model, metrics,
        repeats=100, random_seed=42
):
    """Calculates permutation feature importance for each metric.

    Returns a dict mapping each metric name to a list of
    (feature name, mean importance, importance std) tuples.
    """
    pi_results = {}
    for metric in metrics:
        pi = sklearn.inspection.permutation_importance(
            model, data, data_labels, n_repeats=repeats,
            scoring=metric, random_state=random_seed)

        pi_results[metric] = []
        for feature_id, feature_name in enumerate(feature_names):
            pi_results[metric].append((
                feature_name,
                pi.importances_mean[feature_id],
                pi.importances_std[feature_id]
            ))

        # Debug helper: print features whose importance is significantly > 0
        # for i in pi.importances_mean.argsort()[::-1]:
        #     if pi.importances_mean[i] - 2 * pi.importances_std[i] > 0:
        #         print(f'{feature_names[i]:<8}'
        #               f'{pi.importances_mean[i]:.3f}'
        #               f' +/- {pi.importances_std[i]:.3f}')

    return pi_results
3b0b87ddf53446156b20189dad7c3d0b3ae2a1c2
3,646,460
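A minimal end-to-end sketch for build_permutation_importance above on toy data (dataset, model and metric are illustrative):

import sklearn.inspection
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=4, random_state=0)
model = LogisticRegression().fit(X, y)
results = build_permutation_importance(
    X, y, [f"f{i}" for i in range(4)], model,
    metrics=["accuracy"], repeats=10)
print(results["accuracy"][0])   # (feature name, mean importance, std)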
def _load_parent(collection, meta):
    """Determine the parent document for the document that is to be ingested."""
    parent = ensure_dict(meta.get("parent"))
    parent_id = meta.get("parent_id", parent.get("id"))
    if parent_id is None:
        return
    parent = Document.by_id(parent_id, collection=collection)
    if parent is None:
        # BadRequest already yields a 400; jsonify must not be given both a
        # positional payload and keyword arguments, so no status is passed here.
        raise BadRequest(
            response=jsonify(
                {"status": "error", "message": "Cannot load parent document"}
            )
        )
    return parent
2f53440fa9610f9e8ca494ec8ec27bf9d6a09273
3,646,461
import requests def get_latest_sensor_reading(sensor_serial, metric): """ Get latest sensor reading from MT sensor metrics: 'temperature', 'humidity', 'water_detection' or 'door' """ headers = { "Content-Type": "application/json", "Accept": "application/json", "X-Cisco-Meraki-API-Key": meraki_api_key } params = { "serials[]": sensor_serial, "metric": metric } try: msg = requests.request('GET', f"{base_url}/networks/{network_id}/sensors/stats/latestBySensor", headers=headers, params=params) if msg.ok: data = msg.json() return data except Exception as e: print("API Connection error: {}".format(e))
88de9d770f3be91700e3c86ff6460e2fdaa35d01
3,646,462
def border_msg(msg: str):
    """ This function draws borders above and below the given text """
    row = len(msg)
    h = ''.join(['+'] + ['-' * row] + ['+'])
    return h + "\n" + msg + "\n" + h
cdd9d17ba76014f4c80b9c429aebbc4ca6f959c3
3,646,463
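Example output of border_msg above:

print(border_msg("hello"))
# +-----+
# hello
# +-----+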
def create_app(config_name='development'): """Returns flask app based on the configuration""" flask_app = Flask(__name__) flask_app.config.from_object(app_config[config_name]) flask_app.config['JSON_SORT_KEYS'] = False flask_app.url_map.strict_slashes = False flask_app.register_error_handler(400, handle_bad_request) flask_app.register_error_handler(404, handle_not_found) flask_app.register_blueprint(v1_bp) flask_app.register_blueprint(party_bp) flask_app.register_blueprint(office_bp) flask_app.register_blueprint(user_bp) return flask_app
783edefb40c2f3cc0aefa0788b0c1c04d581aa39
3,646,464
import numpy as np
import svgpathtools


def auto_merge_paths(data, auto_merge_distance, auto_close_paths=True):
    """
    This function connects all paths in the given dataset whose start or end points are closer than
    auto_merge_distance.
    :param data: Should be a list or tuple containing paths, attributes, svg_attributes.
    :param auto_merge_distance: If the start or end point of a pair of paths is closer than this distance in
                                millimeters, they are automatically merged. If one of the paths has to be reversed
                                to do so, this is done automatically. A line is added to the path to bridge the gap.
    :param auto_close_paths: If set, the paths are automatically closed after the merging operation if the start and
                             end point of one path are closer than the auto_merge_distance. The path is closed by a
                             line and its closed flag is set.
    :return paths, attributes, svg_attributes, iters, numclosed: Modified paths, modified attributes, svg_attributes,
                                                                 number of pairs connected and number of paths that
                                                                 were closed.
    """
    paths, attributes, svg_attributes = data

    def fix_first_pair(paths_, attributes_):
        """
        Helper function that merges the next best pair of paths, if they fulfill the distance condition.
        :rtype: None when no pair within auto_merge_distance remains; otherwise the merged paths_ and attributes_.
        """
        for i_ in range(len(paths_)):
            # Get start and end points
            start1 = paths_[i_][0].start
            end1 = paths_[i_][-1].end
            for j in range(len(paths_)):
                if i_ != j:
                    start2 = paths_[j][0].start
                    end2 = paths_[j][-1].end

                    # Calculate all relevant distances for this pair
                    distance_ = px2mm(np.abs(start2 - end1))
                    distance_r1 = px2mm(np.abs(start2 - start1))
                    distance_r2 = px2mm(np.abs(end2 - end1))

                    # Perform merger
                    if distance_ < auto_merge_distance or distance_r2 < auto_merge_distance:
                        first = i_
                        second = j
                    else:
                        first = j
                        second = i_
                    if distance_r1 < auto_merge_distance or distance_r2 < auto_merge_distance:
                        # Reverse paths_[j] if necessary
                        paths_[j] = svgpathtools.path.Path(
                            *[svgpathtools.path.bpoints2bezier(segment.bpoints()[::-1])
                              for segment in paths_[j]])
                    if min([distance_, distance_r1, distance_r2]) < auto_merge_distance:
                        # Merge both paths
                        paths_[first] = svgpathtools.path.Path(
                            *[segment for segment in paths_[first]] +
                            [svgpathtools.path.Line(paths_[first][-1].end, paths_[second][0].start)] +
                            [segment for segment in paths_[second]])
                        return paths_[:second] + paths_[second + 1:], attributes_[:second] + attributes_[second + 1:]
        return None

    iters = 0
    while True:
        ret = fix_first_pair(paths, attributes)
        if ret is not None:
            paths, attributes = ret
            iters += 1
        else:
            break

    # Make sure paths are closed...
    numclosed = 0
    if auto_close_paths:
        for i, path in enumerate(paths):
            # Get the start and end point distance
            start = path[0].start
            end = path[-1].end
            distance = px2mm(np.abs(start - end))
            if distance < auto_merge_distance:
                # Close the path
                paths[i] = svgpathtools.path.Path(*[segment for segment in path] +
                                                  [svgpathtools.path.Line(end, start)])
                paths[i].closed = True
                numclosed += 1
    return paths, attributes, svg_attributes, iters, numclosed
34ec7d0b853a70159ebef6244236475375a3ca9d
3,646,465
def is_authorized(secure: AccessRestriction): """Returns authorization status based on the given access restriction. :param secure: access restriction :type secure: AccessRestriction :return: authorization status (``True`` or ``False``) """ if secure == AccessRestriction.ALL: return True elif secure == AccessRestriction.STAFF: return is_staff(get_course()) elif secure == AccessRestriction.STUDENT: return is_enrolled(get_course()) else: raise Exception(f"{secure} is not a valid AccessRestriction")
e070ae5521db1079426b80b6ff8a3fc5c9a9ba09
3,646,466
def create_link_forum(**attrs): """Save a new link forum.""" link = build_link_forum(**attrs) link.save() return link
e94e1001e42f46cd1c1803fbff35d0eded89858e
3,646,467
def prepare_scan():
    """
    Returns a lexical scanner for HTSQL grammar.
    """
    # Start a new grammar.
    grammar = LexicalGrammar()

    # Regular context.
    query = grammar.add_rule('query')

    # Whitespace characters and comments (discarded).
    query.add_token(r'''
        SPACE:  [\s]+ | [#] [^\0\r\n]*
    ''', is_junk=True)

    # A sequence of characters enclosed in single quotes.
    query.add_token(r'''
        STRING: ['] ( [^'\0] | [']['] )* [']
    ''', unquote=(lambda t: t[1:-1].replace("''", "'")))

    # An opening quote character without a closing quote.
    query.add_token(r'''
        BAD_STRING: [']
    ''', error="cannot find a matching quote mark")

    # A number in exponential notation.
    query.add_token(r'''
        FLOAT:  ( [0-9]+ ( [.] [0-9]* )? | [.] [0-9]+ ) [eE] [+-]? [0-9]+
    ''')

    # A number with a decimal point.
    query.add_token(r'''
        DECIMAL: [0-9]+ [.] [0-9]* | [.] [0-9]+
    ''')

    # An unsigned integer number.
    query.add_token(r'''
        INTEGER: [0-9]+
    ''')

    # A sequence of alphanumeric characters (not starting with a digit).
    query.add_token(r'''
        NAME:   [\w]+
    ''')

    # Operators and punctuation characters.  The token code coincides
    # with the token value.
    query.add_token(r'''
        SYMBOL: [~] | [!][~] | [<][=] | [<] | [>][=] | [>] | [=][=] | [=] |
                [!][=][=] | [!][=] | [\^] | [?] | [-][>] | [@] | [:][=] |
                [!] | [&] | [|] | [+] | [-] | [*] | [/] |
                [(] | [)] | [{] | [}] | [.] | [,] | [:] | [;] | [$]
    ''', is_symbol=True)

    # The `[` character starts an identity constructor.
    query.add_token(r'''
        LBRACKET: [\[]
    ''', is_symbol=True, push='identity')

    # An unmatched `]`.
    query.add_token(r'''
        BAD_RBRACKET: [\]]
    ''', error="cannot find a matching '['")

    # The input end.
    query.add_token(r'''
        END:    $
    ''', is_symbol=True, pop=1)

    # Identity constructor context.
    identity = grammar.add_rule('identity')

    # Whitespace characters (discarded).
    identity.add_token(r'''
        SPACE:  [\s]+
    ''', is_junk=True)

    # Start of a nested label group.
    identity.add_token(r'''
        LBRACKET: [\[] | [(]
    ''', is_symbol=True, push='identity')

    # End of a label group or the identity constructor.
    identity.add_token(r'''
        RBRACKET: [\]] | [)]
    ''', is_symbol=True, pop=1)

    # Label separator.
    identity.add_token(r'''
        SYMBOL: [.]
    ''', is_symbol=True)

    # Unquoted sequence of alphanumeric characters and dashes.
    identity.add_token(r'''
        LABEL:  [\w-]+
    ''')

    # A sequence of characters enclosed in single quotes.
    identity.add_token(r'''
        STRING: ['] ( [^'\0] | [']['] )* [']
    ''', unquote=(lambda t: t[1:-1].replace("''", "'")))

    # An opening quote character without a closing quote.
    identity.add_token(r'''
        BAD_STRING: [']
    ''', error="cannot find a matching quote mark")

    # A reference indicator.
    identity.add_token(r'''
        REFERENCE: [$]
    ''', is_symbol=True, push='name')

    # Unexpected end of input.
    identity.add_token(r'''
        END:    $
    ''', error="cannot find a matching ']'")

    # A context for an identifier following the `$` indicator
    # in an identity constructor.  We need a separate rule because
    # `%NAME` and `%LABEL` productions intersect.
    name = grammar.add_rule('name')

    # Whitespace characters (discarded).
    name.add_token(r'''
        SPACE:  [\s]+
    ''', is_junk=True)

    # An integer number; not expected here, but ensures that the following
    # `%NAME` production does not start with a digit.
    name.add_token(r'''
        INTEGER: [0-9]+
    ''', pop=1)

    # A sequence of alphanumeric characters (not starting with a digit).
    name.add_token(r'''
        NAME:   [\w]+
    ''', pop=1)

    # Anything else.
    name.add_token(r'''
        OTHER:  ()
    ''', is_junk=True, pop=1)

    # Add a `%DIRSIG` token in front of `+` and `-` direction indicators
    # to distinguish them from addition/subtraction operators.
grammar.add_signal(''' DIRSIG: ( `+` | `-` )+ ( `:` | `,` | `;` | `)` | `}` ) ''') # Add `%PIPESIG` in front of `/:` pipe indicator to prevent it from # being recognized as a division operator. grammar.add_signal(''' PIPESIG: `/` `:` ''') # Add `%LHSSIG` in front of a left-hand side of an assignment expression. grammar.add_signal(''' LHSSIG: `$`? %NAME ( `.` `$`? %NAME )* ( `(` ( `$`? %NAME ( `,` `$`? %NAME )* `,`? )? `)` )? `:=` ''') # Generate and return the scanner. return grammar()
ffc30354378a03f95be988b7ee62b01708795f41
3,646,469
def get_test_server(ctxt, **kw): """Return a Server object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ kw['object_type'] = 'server' get_db_server_checked = check_keyword_arguments( db_utils.get_test_server) db_server = get_db_server_checked(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_server['id'] server = objects.Server(ctxt, **db_server) return server
03d754223274282b15aeb9b5cf636f6acd90024c
3,646,470
from keras.models import Sequential
from keras.layers import Dense


def keras_model(optimizer="Adamax", activation="softplus", units=32):
    """Builds and compiles the model; required as build_fn for KerasClassifier"""
    model = Sequential()
    model.add(Dense(units, activation="relu", input_dim=2500))
    model.add(Dense(2, activation=activation))
    model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
    return model
ccd1cc5652a207e3c4c2bc170d43fe22b4375c0b
3,646,471
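A usage sketch with the scikit-learn wrapper that the docstring refers to (the wrapper import path shown is the legacy standalone-Keras one and varies across versions; data shapes follow from the model: inputs of width 2500, two one-hot classes):

from keras.wrappers.scikit_learn import KerasClassifier

clf = KerasClassifier(build_fn=keras_model, optimizer="Adamax", units=32,
                      epochs=5, batch_size=32, verbose=0)
# clf.fit(X_train, y_train)   # X_train: (n, 2500), y_train: (n, 2) one-hot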
def start_end_key(custom_cmp): """ Compare models with start and end dates. """ class K(object): """ Define comparison operators. http://code.activestate.com/recipes/576653-convert-a-cmp-function-to-a-key-function/ """ def __init__(self, obj, *args): self.obj = obj def __lt__(self, other): return custom_cmp(self.obj, other.obj) < 0 def __gt__(self, other): return custom_cmp(self.obj, other.obj) > 0 def __eq__(self, other): return custom_cmp(self.obj, other.obj) == 0 def __le__(self, other): return custom_cmp(self.obj, other.obj) <= 0 def __ge__(self, other): return custom_cmp(self.obj, other.obj) >= 0 def __ne__(self, other): return custom_cmp(self.obj, other.obj) != 0 return K
b1d7b48cc3e9926b6138850ad3b8307adbb4f2f3
3,646,472
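A usage sketch for start_end_key above, with a hypothetical cmp function that orders objects by (start, end); note that functools.cmp_to_key in the standard library implements the same recipe:

def cmp_dates(a, b):
    ka, kb = (a.start, a.end), (b.start, b.end)
    return (ka > kb) - (ka < kb)   # classic cmp: -1, 0 or 1

# models.sort(key=start_end_key(cmp_dates))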
def get_previous_release_date(): """ Fetch the previous release date (i.e. the release date of the current live database) """ releases = Release.objects.all().order_by('-date') return str(releases[1].date)
764d90daaf5c60460f22e56063a40c261cb6b45e
3,646,473
def readLensModeParameters(calibfiledir, lensmode='WideAngleMode'):
    """ Retrieve the calibrated lens correction parameters
    """
    # For wide angle mode
    if lensmode == 'WideAngleMode':

        LensModeDefaults, LensParamLines = [], []
        with open(calibfiledir, 'r') as fc:

            # Read the full file as a line-split string block
            calib = fc.read().splitlines()
            # Move the read cursor back to the beginning
            fc.seek(0)
            # Scan through the calibration file, find and append line indices
            # (lind) to specific lens settings
            for lind, line in enumerate(fc):
                if '[WideAngleMode defaults' in line:
                    LensModeDefaults.append(lind)
                elif '[WideAngleMode@' in line:
                    LensParamLines.append(lind)

        # Specify the regular expression pattern for retrieving numbers
        numpattern = r'[-+]?\d*\.\d+|[-+]?\d+'

        # Read detector settings at the specific lens mode
        aRange, eShift = [], []
        for linum in LensModeDefaults:
            # Collect the angular range
            aRange = parsenum(
                numpattern, calib, aRange,
                linenumber=linum, offset=2, Range='all')
            # Collect the eShift
            eShift = parsenum(
                numpattern, calib, eShift,
                linenumber=linum, offset=3, Range='all')

        # Read the list of calibrated Da coefficients at all retardation ratios
        rr, aInner, Da1, Da3, Da5, Da7 = [], [], [], [], [], []
        for linum in LensParamLines:
            # Collect the retardation ratio (rr)
            rr = parsenum(
                numpattern, calib, rr, linenumber=linum, offset=0, Range='all')
            # Collect the aInner coefficient
            aInner = parsenum(
                numpattern, calib, aInner, linenumber=linum, offset=1, Range='all')
            # Collect the Da1 coefficients
            Da1 = parsenum(
                numpattern, calib, Da1, linenumber=linum, offset=2, Range='1:4')
            # Collect the Da3 coefficients
            Da3 = parsenum(
                numpattern, calib, Da3, linenumber=linum, offset=3, Range='1:4')
            # Collect the Da5 coefficients
            Da5 = parsenum(
                numpattern, calib, Da5, linenumber=linum, offset=4, Range='1:4')
            # Collect the Da7 coefficients
            Da7 = parsenum(
                numpattern, calib, Da7, linenumber=linum, offset=5, Range='1:4')

        aRange, eShift, rr, aInner = list(map(lambda x: np.asarray(
            x, dtype='float').ravel(), [aRange, eShift, rr, aInner]))
        Da1, Da3, Da5, Da7 = list(
            map(lambda x: np.asarray(x, dtype='float'), [Da1, Da3, Da5, Da7]))

        return aRange, eShift, rr, aInner, Da1, Da3, Da5, Da7

    else:
        print('This mode is currently not supported!')
51245aa19f32ebb31df5748e0b40022ccae01e24
3,646,474
def scale(boxlist, y_scale, x_scale, scope=None): """scale box coordinates in x and y dimensions. Args: boxlist: BoxList holding N boxes y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: boxlist: BoxList holding N boxes """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) y_min, x_min, y_max, x_max = tf.split( value=boxlist.boxes, num_or_size_splits=4, axis=1) y_min = y_scale * y_min y_max = y_scale * y_max x_min = x_scale * x_min x_max = x_scale * x_max scaled_boxlist = BoxList( tf.concat([y_min, x_min, y_max, x_max], 1)) return _copy_extra_datas(scaled_boxlist, boxlist)
adffbdce632470852e0499bb93915f93a7695d5a
3,646,475
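The coordinate arithmetic performed by scale above, for a single box (a NumPy sketch independent of the BoxList/TensorFlow plumbing):

import numpy as np

box = np.array([0.2, 0.1, 0.8, 0.5])    # [y_min, x_min, y_max, x_max]
y_scale, x_scale = 2.0, 0.5
scaled = box * np.array([y_scale, x_scale, y_scale, x_scale])
print(scaled)                            # [0.4  0.05 1.6  0.25]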
import requests


def fetch(uri: str, method: str = 'get', token: str = None):
    """:rtype: (dict|None, int)"""
    uri = 'https://api.github.com/{0}'.format(uri)
    auth = app.config['GITHUB_AUTH']
    headers = {'Accept': 'application/vnd.github.mercy-preview+json'}
    json = None
    if token:
        headers['Authorization'] = 'token {}'.format(token)
        auth = None
    try:
        result = getattr(requests, method.lower())(uri, auth=auth, headers=headers)
        result.raise_for_status()
        json = result.json() if result.status_code != 204 else None
    except requests.HTTPError as e:
        app.logger.info(
            "Request to {} failed ({}, {}): {}\n{}\n"
            .format(result.url, method, e.strerror, result.status_code, result.text)
        )
    return json, result.status_code
14cde2808108173e6ab86f3eafb4c8e35daf4b40
3,646,476
from typing import OrderedDict from typing import Mapping from typing import Sequence from typing import Container from typing import Iterable from typing import Sized def nested_tuple(container): """Recursively transform a container structure to a nested tuple. The function understands container types inheriting from the selected abstract base classes in `collections.abc`, and performs the following replacements: `Mapping` `tuple` of key-value pair `tuple`s. The order is preserved in the case of an `OrderedDict`, otherwise the key-value pairs are sorted if orderable and otherwise kept in the order of iteration. `Sequence` `tuple` containing the same elements in unchanged order. `Container and Iterable and Sized` (equivalent to `Collection` in python >= 3.6) `tuple` containing the same elements in sorted order if orderable and otherwise kept in the order of iteration. The function recurses into these container types to perform the same replacement, and leaves objects of other types untouched. The returned container is hashable if and only if all the values contained in the original data structure are hashable. Parameters ---------- container Data structure to transform into a nested tuple. Returns ------- tuple Nested tuple containing the same data as `container`. """ if isinstance(container, OrderedDict): return tuple(map(nested_tuple, container.items())) if isinstance(container, Mapping): return tuple(sorted_if_possible(map(nested_tuple, container.items()))) if not isinstance(container, (str, bytes)): if isinstance(container, Sequence): return tuple(map(nested_tuple, container)) if ( isinstance(container, Container) and isinstance(container, Iterable) and isinstance(container, Sized) ): return tuple(sorted_if_possible(map(nested_tuple, container))) return container
60dac69865d753b14558d7156e40703e26fb57a1
3,646,477
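A quick demonstration of nested_tuple above (assuming the module's sorted_if_possible helper sorts orderable elements, so dict items and set members come out sorted):

data = {"b": [1, 2], "a": {3, 1}}
t = nested_tuple(data)
print(t)    # (('a', (1, 3)), ('b', (1, 2)))
hash(t)     # now hashable, e.g. usable as a cache key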
from typing import OrderedDict


def _validate_args(func, args, kwargs):
    """Validate customer function args and convert them to kwargs."""
    # Validate positional arguments
    all_parameters = [param for _, param in signature(func).parameters.items()]
    # Implicit parameters are *args and **kwargs
    if any(param.kind in {param.VAR_KEYWORD, param.VAR_POSITIONAL} for param in all_parameters):
        raise UnsupportedParameterKindError(func.__name__)

    all_parameter_keys = [param.name for param in all_parameters]
    empty_parameters = {param.name: param for param in all_parameters if param.default is Parameter.empty}
    min_num = len(empty_parameters)
    max_num = len(all_parameters)
    if len(args) > max_num:
        raise TooManyPositionalArgsError(func.__name__, min_num, max_num, len(args))

    provided_args = OrderedDict({param.name: args[idx] for idx, param in enumerate(all_parameters) if idx < len(args)})
    for _k in kwargs.keys():
        if _k not in all_parameter_keys:
            raise UnexpectedKeywordError(func.__name__, _k, all_parameter_keys)
        if _k in provided_args.keys():
            raise MultipleValueError(func.__name__, _k)
        provided_args[_k] = kwargs[_k]

    if len(provided_args) < len(empty_parameters):
        missing_keys = empty_parameters.keys() - provided_args.keys()
        raise MissingPositionalArgsError(func.__name__, missing_keys)

    for pipeline_input_name in provided_args:
        data = provided_args[pipeline_input_name]
        if data is not None and not isinstance(data, SUPPORTED_INPUT_TYPES):
            msg = (
                "Pipeline input expected an azure.ai.ml.Input or primitive types (str, bool, int or float), "
                "but got type {}."
            )
            raise UserErrorException(
                message=msg.format(type(data)),
                no_personal_data_message=msg.format("[type(pipeline_input_name)]"),
            )
    return provided_args
51d357d032dc0b26aeb32d1850b1a630bafab508
3,646,478
def _qual_arg(user_value, python_arg_name, gblock_arg_name, allowable): """ Construct and sanity check a qualitative argument to send to gblocks. user_value: value to try to send to gblocks python_arg_name: name of python argument (for error string) gblock_arg_name: name of argument in gblocks allowable: dictionary of allowable values mapping python to whatever should be jammed into gblocks """ if user_value in allowable.keys(): return "-{}={}".format(gblock_arg_name,allowable[user_value]) else: err = "\n\n{} '{}' not recognized\n".format(python_arg_name, user_value) err += "must be one of:\n" allowed = list(allowable) allowed.sort() for a in allowed: err += " {}\n".format(a) raise ValueError(err)
7bf6717ee3dbeb533902773c86316d2bbdcd59a9
3,646,479
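A usage sketch for _qual_arg above, with a hypothetical option map:

allowed = {"all": "a", "half": "h", "none": "n"}
print(_qual_arg("half", "allowed_gaps", "b5", allowed))   # -> '-b5=h'
# _qual_arg("most", "allowed_gaps", "b5", allowed) raises ValueError,
# listing the allowed values.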