content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
def escape_html(text: str) -> str:
    """Replaces all angle brackets with HTML entities."""
    return text.replace('<', '&lt;').replace('>', '&gt;')
f853bcb3a69b8c87eb3d4bcea5bbca66376c7db4
3,646,244
import random


def pptest(n):
    """
    Simple implementation of the Miller-Rabin test for determining
    probable primality.
    """
    bases = [random.randrange(2, 50000) for _ in range(90)]

    # if any of the bases is a proper factor, we're done
    if n <= 1:
        return 0
    for b in bases:
        # guard b < n so a prime n that happens to equal a base is not
        # wrongly reported composite
        if b < n and n % b == 0:
            return 0

    tests, s = 0, 0
    m = n - 1
    # turning (n - 1) into (2**s) * m
    while not m & 1:  # while m is even
        m >>= 1
        s += 1
    for b in bases:
        tests += 1
        isprob = algP(m, s, b, n)
        if not isprob:
            break
    if isprob:
        return 1 - (1. / (4 ** tests))
    else:
        return 0
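The helper algP called above is not included in this row. A minimal sketch of a single Miller-Rabin witness round consistent with the call site algP(m, s, b, n); the name and contract are assumptions, not the dataset's definition:

def algP(m, s, b, n):
    # Hypothetical helper: one Miller-Rabin round for witness b, where
    # n - 1 == (2**s) * m with m odd. Returns 1 for "probable prime",
    # 0 when b proves n composite.
    x = pow(b, m, n)  # b**m mod n
    if x == 1 or x == n - 1:
        return 1
    for _ in range(s - 1):
        x = (x * x) % n
        if x == n - 1:
            return 1
    return 0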
3a74cfebb6b14659a34ab0b6c761efd16d2736fa
3,646,245
def schedule_conv2d_NCHWc(outs):
    """Schedule for conv2d_NCHW[x]c

    Parameters
    ----------
    outs : Array of Tensor
        The computation graph description of conv2d_NCHWc
        in the format of an array of tensors.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
a24cb4f6e1dd3d8891bc82df75f53c8afe709727
3,646,246
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t,
                    W_dash_ba1_d_t, W_dash_b2_d_t, theta_ex_d_Ave_d,
                    L_dashdash_ba2_d_t):
    """Hourly electricity consumption of the hot water heater (1)

    Args:
        W_dash_k_d_t(ndarray): hourly hot-water-saving-corrected supply volume at the kitchen tap (L/h)
        W_dash_s_d_t(ndarray): hourly hot-water-saving-corrected supply volume at the bathroom shower tap (L/h)
        W_dash_w_d_t(ndarray): hourly hot-water-saving-corrected supply volume at the washbasin tap (L/h)
        W_dash_b1_d_t(ndarray): hourly hot-water-saving-corrected supply volume at the bathtub tap while filling the tub (L/h)
        W_dash_ba1_d_t(ndarray): hourly hot-water-saving-corrected supply volume at the bathtub tap while adding hot water (L/h)
        W_dash_b2_d_t(ndarray): hourly hot-water-saving-corrected supply volume while reheating the bathtub (L/h)
        theta_ex_d_Ave_d: daily average outdoor temperature (℃)
        L_dashdash_ba2_d_t(ndarray): hourly solar-corrected hot water heat load while reheating the bathtub (MJ/h)

    Returns:
        ndarray: hourly electricity consumption of the hot water heater (kWh/h)
    """
    # auxiliary electricity consumption during standby and tap hot-water supply (2)
    E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t,
                                          W_dash_b1_d_t, W_dash_ba1_d_t, theta_ex_d_Ave_d)
    # auxiliary electricity consumption while filling the tub (3)
    E_E_hs_aux2_d_t = calc_E_E_hs_aux2_d_t(W_dash_b2_d_t)
    # auxiliary electricity consumption while keeping the bath warm (4)
    E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)

    # hourly electricity consumption of the hot water heater (1)
    E_E_hs_d_t = E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t

    return E_E_hs_d_t
00cf40b221d2a24081d9c362fb5e8474057ddb93
3,646,247
import functools


def keras_quantile_loss(q):
    """Return keras loss for quantile `q`."""
    func = functools.partial(_tilted_loss_scalar, q)
    func.__name__ = f'quantile loss, q={q}'
    return func
173a9410c2994bd02e5845a85cc2050489ce2d12
3,646,248
from typing import Mapping
from typing import Union


def _reactions_table(reaction: reaction_pb2.Reaction,
                     dataset_id: str) -> Mapping[str, Union[str, bytes, None]]:
    """Builds a row for a Reaction in the 'reactions' table.

    Args:
        reaction: Reaction proto.
        dataset_id: Dataset ID.

    Returns:
        Dict mapping string column names to values.
    """
    values = {
        "dataset_id": dataset_id,
        "reaction_id": reaction.reaction_id,
        "serialized": reaction.SerializeToString().hex(),
    }
    try:
        reaction_smiles = message_helpers.get_reaction_smiles(
            reaction, generate_if_missing=True)
        # Control for REACTION_CXSMILES.
        values["reaction_smiles"] = reaction_smiles.split()[0]
    except ValueError:
        values["reaction_smiles"] = None
    if reaction.provenance.doi:
        values["doi"] = reaction.provenance.doi
    else:
        values["doi"] = None
    return values
b09df06a13d1f1d42ab22da1c6bcc00c48c2e81d
3,646,250
from typing import List
from typing import Dict
from typing import Any
import time
import json


def get_incidents_for_alert(**kwargs) -> list:
    """
    Return list of incidents for alert.

    :param kwargs: Contains all required arguments.
    :return: Incident list for alert.
    """
    incidents: List[Dict[str, Any]] = []
    headers = {
        'X-FeApi-Token': kwargs['client'].get_api_token(),
        'Accept': CONTENT_TYPE_JSON
    }
    params = {
        'start_time': time.strftime(API_SUPPORT_DATE_FORMAT,
                                    time.localtime(kwargs['start_time'])),
        'duration': '48_hours'
    }
    if kwargs['malware_type']:
        params['malware_type'] = kwargs['malware_type']

    # http call
    resp = kwargs['client'].http_request(method="GET",
                                         url_suffix=URL_SUFFIX['GET_ALERTS'],
                                         params=params, headers=headers)

    total_records = resp.get('alertsCount', 0)
    if total_records > 0:
        if kwargs['replace_alert_url']:
            replace_alert_url_key_domain_to_instance_url(resp.get('alert', []),
                                                         kwargs['instance_url'])
        count = kwargs['fetch_count']
        for alert in resp.get('alert', []):
            # set incident
            context_alert = remove_empty_entities(alert)
            context_alert['incidentType'] = ALERT_INCIDENT_TYPE
            if count >= kwargs['fetch_limit']:
                break
            occurred_date = dateparser.parse(context_alert.get('occurred', ''))
            assert occurred_date is not None
            incident = {
                'name': context_alert.get('name', ''),
                'occurred': occurred_date.strftime(DATE_FORMAT_WITH_MICROSECOND),
                'rawJSON': json.dumps(context_alert)
            }
            if not kwargs['is_test'] and alert.get('uuid', '') and kwargs['fetch_artifacts']:
                set_attachment_file(client=kwargs['client'], incident=incident,
                                    uuid=alert.get('uuid', ''), headers=headers)
            remove_nulls_from_dictionary(incident)
            incidents.append(incident)
            count += 1

    return incidents
48d2519d5e5aa25d6b0fc6a6e2c959489e861e1c
3,646,251
def pbar(*args, **kwargs):
    """
    Progress bar.

    This function is an alias of :func:`dh.thirdparty.tqdm.tqdm()`.
    """
    return dh.thirdparty.tqdm.tqdm(*args, **kwargs)
3de7101becc015e402aa067c676104f34679e549
3,646,252
import torch


def calc_driver_mask(n_nodes, driver_nodes: set, device='cpu', dtype=torch.float):
    """
    Calculates a binary vector mask over graph nodes with unit value at the
    driver indices.

    :param n_nodes: number of nodes in the graph
    :param driver_nodes: driver node indices.
    :param device: the device of the `torch.Tensor`
    :param dtype: the data type of the `torch.Tensor`
    :return: the driver mask vector.
    """
    driver_mask = torch.zeros(n_nodes, device=device, dtype=dtype)
    driver_mask[list(driver_nodes)] = 1
    return driver_mask
2d2a08a86629ece190062f68dd25fc450d0fd84e
3,646,253
from typing import List


def all_fermions(fields: List[Field]) -> bool:
    """Checks if all fields are fermions."""
    boolean = True
    for f in fields:
        boolean = boolean and f.is_fermion
    return boolean
eb54d5ad5b3667e67634b06d2943e2d14c8a0c61
3,646,254
def open_file(name):
    """
    Return an open file object.
    """
    return open(name, 'r')
8921ee51e31ac6c64d9d9094cedf57502a2aa436
3,646,255
import math


def _bit_length(n):
    """Return the number of bits necessary to store the number in binary."""
    try:
        return n.bit_length()
    except AttributeError:  # pragma: no cover (Python 2.6 only)
        return int(math.log(n, 2)) + 1
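A couple of sanity checks for the function above (int.bit_length exists since Python 2.7, so the fallback only triggers on 2.6):

assert _bit_length(1) == 1
assert _bit_length(255) == 8
assert _bit_length(256) == 9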
bea6cb359c7b5454bdbb1a6c29396689035592d7
3,646,256
import numpy as np


def read_dwd_percentile_old(filename):
    """
    Read data from .txt file into Iris cube

    :param str filename: file to process

    :returns: cube
    """
    # use header to hard code the final array shapes
    longitudes = np.arange(-179.5, 180.5, 1.)
    latitudes = np.arange(89.5, -90.5, -1.)
    data = np.ma.zeros((latitudes.shape[0], longitudes.shape[0]))

    # read in the data
    indata = np.genfromtxt(filename, dtype=(float))

    this_lat = []
    tl = 0
    # process each row, append until we have a complete latitude band
    for row in indata:
        this_lat += [row]
        if len(this_lat) == longitudes.shape[0]:
            # copy into final array and reset
            data[tl, :] = this_lat
            tl += 1
            this_lat = []

    # mask the missing values
    data = np.ma.masked_where(data <= -999.000, data)

    cube = utils.make_iris_cube_2d(data, latitudes, longitudes, "R90p", "%")

    return cube
4d8366606c4e00eb43aa2c6a50a735617c7ca242
3,646,257
import base64

from flask import jsonify, request
from mutagen.mp3 import MP3


def media_post():
    """API call to store new media on the BiBli"""
    data = request.get_json()
    fname = "%s/%s" % (MUSIC_DIR, data["name"])
    with open(fname, "wb") as file:
        # base64.decodestring was removed in Python 3.9; b64decode also
        # accepts str input directly
        file.write(base64.b64decode(data["b64"]))
    audiofile = MP3(fname)
    track = {"file": data["name"], "title": "", "artist": "?"}
    tags = audiofile.tags
    if tags:
        track["artist"] = tags["artist"][0] if "artist" in tags else "?"
        track["title"] = tags["title"][0] if "title" in tags else None
    if audiofile.info:
        seconds = int(audiofile.info.length)
        minutes = seconds // 60  # integer division so the format gets an int
        seconds = seconds % 60
        track["duration"] = "%s:%02d" % (minutes, seconds)
    # make sure there's a title
    if not track["title"]:
        track["title"] = fname.replace(".mp3", "")
    return jsonify({"music": track})
ff2aa7df2cdc6ea9bf3d657c7bb675e824639107
3,646,258
from pathlib import Path


def obtain_stores_path(options, ensure_existence=True) -> Path:
    """
    Gets the store path if present in options, or asks the user to input it
    if it is not present among the parsed arguments.

    :param options: the parsed arguments
    :param ensure_existence: whether to abort if the path does not exist
    :return: the store path
    """
    path = Path(get_option_or_default(options, Options.STORE_PATH, DEFAULT_SECRETS_PATH))
    if ensure_existence and not path.exists():
        abort(f"Error: path does not exist ({path})")
    return path
8bb3ff96cdc57f85058ad7cd3c96552462b8de9f
3,646,259
import numpy as np


def compute_roc(distrib_noise, distrib_signal):
    """compute ROC given the two distributions

    assuming the distributions are the output of np.histogram

    example:
        dist_l, _ = np.histogram(acts_l, bins=n_bins, range=histrange)
        dist_r, _ = np.histogram(acts_r, bins=n_bins, range=histrange)
        tprs, fprs = compute_roc(dist_l, dist_r)

    Parameters
    ----------
    distrib_noise : 1d array
        the noise distribution
    distrib_signal : 1d array
        the noise+signal distribution

    Returns
    -------
    1d array, 1d array
        the roc curve: true positive rate, and false positive rate
    """
    # assert len(distrib_noise) == len(distrib_signal)
    # assert np.sum(distrib_noise) == np.sum(distrib_signal)
    n_pts = len(distrib_noise)
    tpr, fpr = np.zeros(n_pts), np.zeros(n_pts)
    # slide the decision boundary from left to right
    for b in range(n_pts):
        fn, tp = np.sum(distrib_signal[:b]), np.sum(distrib_signal[b:])
        tn, fp = np.sum(distrib_noise[:b]), np.sum(distrib_noise[b:])
        # calculate TP rate and FP rate
        tpr[b] = tp / (tp + fn)
        fpr[b] = fp / (tn + fp)
    return tpr, fpr
d9a970435fd7b0dc79cfb4eca24a6e6779ce9300
3,646,261
def zoom(clip, screensize, show_full_height=False):
    """Zooms (preferably image) clip a little over the clip duration,
    to make a slideshow more dynamic.

    Parameters
    ----------
    clip
        ImageClip on which to work, with duration
    screensize
        Wanted (width, height) tuple
    show_full_height
        Should this image be shown in full height. This is useful when 4:3
        images are shown in a 16:9 video and need to be shown in full.
        Otherwise they are shown in full width and the top and bottom are
        cut off.

    Returns
    -------
    VideoClip in desired size
    """
    # We need to resize tall images differently
    if clip.h > clip.w or show_full_height:
        clip_resized = (clip.fx(resize, width=screensize[0] * 2)
                        .fx(resize, lambda t: 1 + 0.02 * t)
                        .set_position(('center', 'center')))
        clip_composited = CompositeVideoClip([clip_resized]) \
            .fx(resize, height=screensize[1])
    else:
        clip_resized = (clip.fx(resize, height=screensize[1] * 2)
                        .fx(resize, lambda t: 1 + 0.02 * t)
                        .set_position(('center', 'center')))
        clip_composited = CompositeVideoClip([clip_resized]) \
            .fx(resize, width=screensize[0])
    vid = CompositeVideoClip([clip_composited.set_position(('center', 'center'))],
                             size=screensize)
    return vid
668ef2b598432fc18510a5a69b73e400eec42b17
3,646,262
import typing


def create(host_address: str, topics: typing.Sequence[str]) -> Subscriber:
    """
    Create a subscriber.

    :param host_address: The notify_server address.
    :param topics: The topics to subscribe to.
    :return: A Subscriber instance.
    """
    return Subscriber(create_subscriber(host_address, topics))
40221a3be496528115afdc2eda063a006e08aadd
3,646,263
def solution(n):
    """
    Return the product of a, b, c which are the Pythagorean Triplet that
    satisfies the following:
      1. a < b < c
      2. a**2 + b**2 = c**2
      3. a + b + c = 1000

    >>> solution(1000)
    31875000
    """
    product = -1
    d = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = N,
        # eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            d = a * b * c
            if d >= product:
                product = d
    return product
a0bf0f0bde50f536f6c91f2b52571be38e494cea
3,646,264
def with_metaclass(meta, *bases):
    """A Python 2/3 compatible way of declaring a metaclass.

    Taken from `Jinja 2
    <https://github.com/mitsuhiko/jinja2/blob/master/jinja2/_compat.py>`_
    via `python-future <http://python-future.org>`_. License: BSD.

    Use it like this::

        class MyClass(with_metaclass(MyMetaClass, BaseClass)):
            pass
    """

    class _Metaclass(meta):
        """Inner class"""
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, attrs):
            if this_bases is None:
                return type.__new__(cls, name, (), attrs)
            return meta(name, bases, attrs)

    return _Metaclass(str('temporary_class'), None, {})
0fe8e95fe29821e4cda8b66ff54ddd1b73e51243
3,646,266
import numpy as np


def energy(particles):
    """total kinetic energy up to a constant multiplier"""
    return np.sum([particle.size ** 2 * np.linalg.norm(particle.speed) ** 2
                   for particle in particles])
29cae5c46d053f6fa558ba7a839d8b647c86d236
3,646,267
def post_adaptation_non_linear_response_compression_matrix(P_2, a, b):
    """
    Returns the post adaptation non linear response compression matrix.

    Parameters
    ----------
    P_2 : numeric or array_like
        Point :math:`P_2`.
    a : numeric or array_like
        Opponent colour dimension :math:`a`.
    b : numeric or array_like
        Opponent colour dimension :math:`b`.

    Returns
    -------
    ndarray
        Points :math:`P`.

    Examples
    --------
    >>> P_2 = 24.2372054671
    >>> a = -0.000624112068243
    >>> b = -0.000506270106773
    >>> post_adaptation_non_linear_response_compression_matrix(P_2, a, b)
    ... # doctest: +ELLIPSIS
    array([ 7.9463202...,  7.9471152...,  7.9489959...])
    """
    P_2 = as_float_array(P_2)
    a = as_float_array(a)
    b = as_float_array(b)

    R_a = (460 * P_2 + 451 * a + 288 * b) / 1403
    G_a = (460 * P_2 - 891 * a - 261 * b) / 1403
    B_a = (460 * P_2 - 220 * a - 6300 * b) / 1403

    RGB_a = tstack([R_a, G_a, B_a])

    return RGB_a
6b7f8bcc62142e99c63c0e7a9b073e25f3c36e8c
3,646,268
def forward(network, x):
    """Propagates the input signal through the network to the output.

    Args:
        network: dict holding the network parameters
        x: input array

    Returns:
        the output signal
    """
    w1, w2, w3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['B1'], network['B2'], network['B3']

    # layer 1
    a1 = np.dot(x, w1) + b1
    z1 = sigmoid(a1)
    # layer 2
    a2 = np.dot(z1, w2) + b2
    z2 = sigmoid(a2)
    # layer 3
    a3 = np.dot(z2, w3) + b3
    y = identity(a3)
    return y
93c79a049e4c45f31a502aa81f840e48ff41d229
3,646,269
from typing import List


def join_with_and(words: List[str]) -> str:
    """Joins list of strings with "and" between the last two."""
    if len(words) > 2:
        return ", ".join(words[:-1]) + ", and " + words[-1]
    elif len(words) == 2:
        return " and ".join(words)
    elif len(words) == 1:
        return words[0]
    else:
        return ""
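A quick usage sketch of the Oxford-comma behavior above:

assert join_with_and(["red", "green", "blue"]) == "red, green, and blue"
assert join_with_and(["red", "green"]) == "red and green"
assert join_with_and(["red"]) == "red"
assert join_with_and([]) == ""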
ecb2c1fa060657f2ea4173c4382a81c9b42beeb9
3,646,270
from nipype.interfaces.base import Bunch


def condition_generator(single_sub_data, params_name, duration=2):
    """Build a bunch describing the relationship between each onset and parameter.

    Build a bunch used to construct a design matrix for the next analysis.
    This bunch describes the relationship between each onset and parameter.

    Args:
        single_sub_data: A pandas DataFrame which contains data for one subject.
            It must contain information about run, onsets, and parameters.
        params_name: A list of names of parameters which you want to analyse.
            The order of the names is inherited by the design matrix.
        duration: The duration of a TR.

    Returns:
        subject_info: A list of Bunch objects which can be resolved by the
            SpecifySPMModel interface in nipype.
    """
    run_num = set(single_sub_data.run)
    subject_info = []
    for i in run_num:
        tmp_table = single_sub_data[single_sub_data.run == i]
        tmp_onset = tmp_table.onset.values.tolist()
        pmod_names = []
        pmod_params = []
        pmod_poly = []
        for param in params_name:
            pmod_params.append(tmp_table[param].values.tolist())
            pmod_names.append(param)
            pmod_poly.append(1)
        tmp_Bunch = Bunch(conditions=["trial_onset_run" + str(i)],
                          onsets=[tmp_onset],
                          durations=[[duration]],
                          pmod=[Bunch(name=pmod_names,
                                      poly=pmod_poly,
                                      param=pmod_params)])
        subject_info.append(tmp_Bunch)
    return subject_info
6a4743043a49b6a1703c3b42840256a58e07f3bd
3,646,271
from typing import Callable
from typing import Any

import numpy as np


def one_hot(
    encoding_size: int, mapping_fn: Callable[[Any], int] = None, dtype="bool"
) -> DatasetTransformFn:
    """Transform data into a one-hot encoded label.

    Arguments:
        encoding_size {int} -- The size of the encoding
        mapping_fn {Callable[[Any], int]} -- A function transforming the input
            data to an integer label. If not specified, labels are
            automatically inferred from the data.

    Returns:
        DatasetTransformFn -- A function to be passed to the Dataset.transform()
    """
    mem, maxcount = {}, -1

    def auto_label(x: Any) -> int:
        nonlocal mem, maxcount, encoding_size
        h = hash(str(x))
        if h not in mem:
            maxcount += 1
            if maxcount >= encoding_size:
                raise ValueError(
                    "More unique labels found than were specified by the "
                    "encoding size ({} given)".format(encoding_size)
                )
            mem[h] = maxcount
        return mem[h]

    label_fn = mapping_fn or auto_label

    def encode(x):
        nonlocal encoding_size, dtype, label_fn
        o = np.zeros(encoding_size, dtype=dtype)
        o[label_fn(x)] = True
        return o

    return _dataset_element_transforming(fn=encode)
48758666885969c10b5e6ef46f2d392cd06800a2
3,646,273
def is_url_relative(url):
    """
    True if a URL is relative (an absolute path with no scheme or host),
    False otherwise.
    """
    # startswith avoids an IndexError on single-character URLs like "/"
    return url.startswith("/") and not url.startswith("//")
91e1cb756a4554973e53fd1f607515577bc63294
3,646,274
import numpy as np


def _split_link_ends(link_ends):
    """
    Examples
    --------
    >>> from landlab.grid.unstructured.links import _split_link_ends
    >>> _split_link_ends(((0, 1, 2), (3, 4, 5)))
    (array([0, 1, 2]), array([3, 4, 5]))
    >>> _split_link_ends([(0, 3), (1, 4), (2, 5)])
    (array([0, 1, 2]), array([3, 4, 5]))
    >>> _split_link_ends((0, 3))
    (array([0]), array([3]))
    """
    # np.int was removed from recent NumPy releases; plain int is equivalent
    links = np.array(list(link_ends), ndmin=2, dtype=int)
    if len(links) != 2:
        links = links.transpose()

    if links.size == 0:
        return (np.array([], dtype=int), np.array([], dtype=int))
    else:
        return links[0], links[1]
3aee58b5e4e928d45a33026c0b9e554c859d0d6f
3,646,276
from heapq import heappop, heappush


def dijkstra(vertex_count: int, source: int, edges):
    """Uses Dijkstra's algorithm to find the shortest path in a graph.

    Args:
        vertex_count: The number of vertices.
        source     : Vertex number (0-indexed).
        edges      : List of (cost, edge) (0-indexed).

    Returns:
        costs  : List of the shortest distance.
        parents: List of parent vertices.

    Landau notation: O(|Edges|log|Vertices|).

    See:
    https://atcoder.jp/contests/abc191/submissions/19964078
    https://atcoder.jp/contests/abc191/submissions/19966232
    """
    hq = [(0, source)]  # weight, vertex number (0-indexed)
    costs = [float("inf") for _ in range(vertex_count)]
    costs[source] = 0
    pending = -1
    parents = [pending for _ in range(vertex_count)]
    while hq:
        cost, vertex = heappop(hq)
        if cost > costs[vertex]:
            continue
        for weight, edge in edges[vertex]:
            new_cost = cost + weight
            if new_cost < costs[edge]:
                costs[edge] = new_cost
                parents[edge] = vertex
                heappush(hq, (new_cost, edge))
    return costs, parents
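A small usage sketch; the adjacency list pairs are (weight, neighbor) as the docstring describes:

# 0 -1-> 1 -2-> 2, plus a direct 0 -5-> 2 edge; the two-hop path wins
edges = [[(1, 1), (5, 2)], [(2, 2)], []]
costs, parents = dijkstra(3, 0, edges)
assert costs == [0, 1, 3]
assert parents == [-1, 0, 1]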
d33f8dc28bf07154ffd7582a5bdd7161e195f331
3,646,277
def plot_labels(labels, lattice=None, coords_are_cartesian=False, ax=None, **kwargs):
    """
    Adds labels to a matplotlib Axes

    Args:
        labels: dict containing the label as a key and the coordinates as value.
        lattice: Lattice object used to convert from reciprocal to Cartesian coordinates
        coords_are_cartesian: Set to True if you are providing coordinates in
            Cartesian coordinates. Defaults to False. Requires lattice if False.
        ax: matplotlib :class:`Axes` or None if a new figure should be created.
        kwargs: kwargs passed to the matplotlib function 'text'. Color defaults
            to blue and size to 25.

    Returns:
        matplotlib figure and matplotlib ax
    """
    ax, fig, plt = get_ax3d_fig_plt(ax)

    if "color" not in kwargs:
        kwargs["color"] = "b"
    if "size" not in kwargs:
        kwargs["size"] = 25

    for k, coords in labels.items():
        label = k
        if k.startswith("\\") or k.find("_") != -1:
            label = "$" + k + "$"
        off = 0.01
        if coords_are_cartesian:
            coords = np.array(coords)
        else:
            if lattice is None:
                raise ValueError("coords_are_cartesian False requires the lattice")
            coords = lattice.get_cartesian_coords(coords)
        ax.text(*(coords + off), s=label, **kwargs)

    return fig, ax
b0172061e043fcaef38d2503be67333862da3acf
3,646,279
def contains(poly0, poly1):
    """
    Does poly0 contain poly1?

    As an initial implementation, returns True if any vertex of poly1 is
    within poly0.
    """
    # check for bounding box overlap
    bb0 = (min(p[0] for p in poly0), min(p[1] for p in poly0),
           max(p[0] for p in poly0), max(p[1] for p in poly0))
    bb1 = (min(p[0] for p in poly1), min(p[1] for p in poly1),
           max(p[0] for p in poly1), max(p[1] for p in poly1))
    if ((bb0[0] > bb1[2]) or (bb0[2] < bb1[0]) or
            (bb0[1] > bb1[3]) or (bb0[3] < bb1[1])):
        return False

    # check each vertex using the winding number
    def _isleft(p, p0, p1):
        return ((p1[0] - p0[0]) * (p[1] - p0[1])
                - (p[0] - p0[0]) * (p1[1] - p0[1])) > 0

    for p in poly1:
        wn = 0
        for i in range(len(poly0) - 1):
            p0 = poly0[i]
            p1 = poly0[i + 1]
            if p0[1] <= p[1] < p1[1]:
                # upward crossing
                if _isleft(p, p0, p1):
                    wn += 1
            elif p0[1] >= p[1] > p1[1]:
                # downward crossing
                if not _isleft(p, p0, p1):
                    wn -= 1
        if wn != 0:
            return True
    return False
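A usage sketch: the winding loop walks consecutive vertex pairs, so each polygon must be a closed ring with the first vertex repeated at the end:

outer = [(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]
inner = [(1, 1), (2, 1), (2, 2), (1, 2), (1, 1)]
assert contains(outer, inner) is True
assert contains(inner, outer) is False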
26ea4bd17a55ed05afa049a9aaab5237f0965674
3,646,280
def new_halberd(game_state):
    """
    A composite component representing a Halberd item.
    """
    c = Composite()
    set_item_components(c, game_state)
    set_melee_weapon_component(c)
    c.set_child(Description("Halberd",
                            "A long stick with an axe-head at one end. "
                            "It's a useful weapon when you want to keep danger at bay."))
    c.set_child(GraphicChar(None, colors.GRAY, icon.HALBERD))
    c.set_child(DataPoint(DataTypes.WEIGHT, 8))
    c.set_child(accuracy_item_stat(10))
    c.set_child(damage_item_stat(1, 5))
    c.set_child(CritChanceBonusEffect(0.1))
    c.set_child(crit_multiplier_item_stat(2))
    c.set_child(DefenciveAttackEffect(0.75))
    c.set_child(OffenciveAttackEffect(0.20))
    return c
1e6ccdce08a5e4e26c6dc8d09db38ef4b6d7b2f0
3,646,282
from typing import List


def get_regional_services(service_list: List[AWSService] = None) -> List[AWSService]:
    """List all services which are tied to specific regions."""
    services = service_list or get_services()
    return [s for s in services if s.is_regional]
d856acfc24430102ccb72a76eedbc47ace842894
3,646,283
import pandas as pd


def f_setup_config(v_config_filename):
    """This function reads the configuration file"""
    df_conf_file = pd.read_csv(v_config_filename, delimiter="|", header=0)
    api_key = df_conf_file[df_conf_file.CONFIG_VAR == 'API_KEY']['VALUE'].values[0]
    data_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'DATA_DIR']['VALUE'].values[0]
    json_log_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'JSON_DIR']['VALUE'].values[0]
    gcs_bucket = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_BUCKET']['VALUE'].values[0]
    # gcs_service_account_key = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_SERVICE_ACOUNT_KEY']['VALUE'].values[0]
    # aws_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_ACCESS_KEY']['VALUE'].values[0]
    # aws_secret_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_SECRET_ASSES_KEY']['VALUE'].values[0]
    aws_s3 = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_S3_BUCKET']['VALUE'].values[0]
    export_csv = df_conf_file[df_conf_file.CONFIG_VAR == 'EXPORT_CSV']['VALUE'].values[0]
    cleanup_days = df_conf_file[df_conf_file.CONFIG_VAR == 'CLEANUP_DAYS']['VALUE'].values[0]

    # return api_key, gcs_bucket, gcs_service_account_key, aws_key, aws_secret_key, \
    #     aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
    return api_key, gcs_bucket, aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
b2e9a8e822a2c582549055184cc8096f174fdb3b
3,646,285
def choose_username(email):
    """
    Chooses a unique username for the provided user.

    Sets the username to the email parameter unmodified if possible,
    otherwise adds a numerical suffix to the email.
    """
    def get_suffix(number):
        return "" if number == 1 else "_" + str(number).zfill(3)

    user_model = get_user_model()
    num = 1
    while user_model.objects.filter(username=email + get_suffix(num)).exists():
        num += 1
    return email + get_suffix(num)
594c060df6df5c89c7c08a2e3979960866cc5688
3,646,286
def lms2rgb(image):
    """
    Convert an array of pixels from the LMS colorspace to the RGB colorspace.

    This function assumes that each pixel is an array of LMS values.

    :param image: An np.ndarray containing the image data
    :return: An np.ndarray containing the transformed image data
    """
    return np.clip(apply_matrix_to_image(lms_matrix_inverse, image), 0.0, 1.0)
736d7101a4c4256725fd4f09c6a453c418c1ae81
3,646,287
def __apply_to_property_set(f, property_set):
    """ Transform property_set by applying f to each component property.
    """
    properties = feature.split(property_set)
    return '/'.join(f(properties))
5091065f90b602a775c24eca9e2ab3bc6861e0c8
3,646,288
def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):
    """
    Efficiency for operation of a SOFC (based on LHV of NG) including all
    auxiliary losses. Valid for Q_load in range of 1-10 [kW_el].

    Modeled after:
        - **Approach A (NREL Approach)**:
          http://energy.gov/eere/fuelcells/distributedstationary-fuel-cell-systems
          and NREL: p.5 of [M. Zolot et al., 2004]_
        - **Approach B (Empiric Approach)**: [Iain Staffell]_

    :type Q_load_W : float
    :param Q_load_W: Load at each time step
    :type Q_design_W : float
    :param Q_design_W: Design Load of FC
    :type phi_threshold : float
    :param phi_threshold: where Maximum Efficiency is reached, used for Approach A
    :type approach_call : string
    :param approach_call: choose "A" or "B": A = NREL-Approach, B = Empiric Approach

    :rtype eta_el : float
    :returns eta_el: electric efficiency of FC (Lower Heating Value), in abs. numbers
    :rtype Q_fuel : float
    :returns Q_fuel: Heat demand from fuel (in Watt)

    ..[M. Zolot et al., 2004] M. Zolot et al., Analysis of Fuel Cell Hybridization
      and Implications for Energy Storage Devices, NREL, 4th International Advanced
      Automotive Battery. http://www.nrel.gov/vehiclesandfuels/energystorage/pdfs/36169.pdf

    ..[Iain Staffell, 2009] Iain Staffell, For Domestic Heat and Power: Are They
      Worth It?, PhD Thesis, Birmingham: University of Birmingham.
      http://etheses.bham.ac.uk/641/1/Staffell10PhD.pdf
    """
    phi = 0.0

    ## Approach A - NREL Approach
    if approach_call == "A":
        phi = float(Q_load_W) / float(Q_design_W)
        eta_max = 0.425  # from energy.gov

        if phi >= phi_threshold:  # from NREL-Shape
            eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)

        if phi < phi_threshold:
            if phi <= 118 / 520.0 * phi_threshold:
                eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))

            if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:
                eta_el = eta_max * 2 / 3.0 + \
                         eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))

            if phi > 0.5 * phi_threshold and phi < phi_threshold:
                eta_el = eta_max * (2 / 3.0 + 0.25) + \
                         1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))

        eta_therm_max = 0.45  # constant, after energy.gov

        if phi < phi_threshold:
            eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)
        else:
            eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))

    ## Approach B - Empiric Approach
    if approach_call == "B":
        if Q_design_W > 0:
            phi = float(Q_load_W) / float(Q_design_W)
        else:
            phi = 0

        eta_el_max = 0.39
        eta_therm_max = 0.58  # * 1.11 as this source gives eff. of HHV

        eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4
        eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2

        eta_el = eta_el_max * eta_el_score
        eta_therm = eta_therm_max * eta_therm_score

        if phi < 0.2:
            eta_el = 0

    return eta_el, eta_therm
0cd14d976d773dc34d7ea96e80db4267e33aac1f
3,646,291
from typing import Tuple

import numpy as np


def erdos_renyi(
    num_genes: int, prob_conn: float, spec_rad: float = 0.8
) -> Tuple[np.ndarray, float]:
    """Initialize an Erdos Renyi network as in Sun–Taylor–Bollt 2015.

    If the spectral radius is positive, the matrix is normalized to a spectral
    radius of spec_rad and the scale shows the normalization. If the spectral
    radius is zero, the returned matrix will have entries of 0, 1, and -1,
    and the scale is set to zero.

    Args:
        num_genes: Number of genes/nodes.
        prob_conn: Probability of connection.
        spec_rad: The desired spectral radius.

    Returns:
        Adjacency matrix and its scale.
    """
    signed_edges = erdos_renyi_ternary(num_genes, prob_conn)
    return scale_by_spec_rad(signed_edges, spec_rad)
87e29376ec79ea9198bb3c668fdc31fc61216a26
3,646,292
import _ctypes


def IMG_LoadTextureTyped_RW(renderer, src, freesrc, type):
    """Loads an image file from a file object to a texture as a specific format.

    This function allows you to explicitly specify the format type of the
    image to load. The different possible format strings are listed in the
    documentation for :func:`IMG_LoadTyped_RW`.

    See :func:`IMG_LoadTexture` for more information.

    Args:
        renderer (:obj:`SDL_Renderer`): The SDL rendering context with which
            to create the texture.
        src (:obj:`SDL_RWops`): The file object from which to load the image.
        freesrc (int): If non-zero, the input file object will be closed and
            freed after it has been read.
        type (bytes): A bytestring indicating the image format with which the
            file object should be loaded.

    Returns:
        POINTER(:obj:`SDL_Texture`): A pointer to the new texture containing
        the image, or a null pointer if there was an error.
    """
    return _ctypes["IMG_LoadTextureTyped_RW"](renderer, src, freesrc, type)
ef9f963e71b7419ec790bd3fdb06eb470d30972b
3,646,293
import numpy as np


def soft_l1(z: np.ndarray, f_scale):
    """
    rho(z) = 2 * ((1 + z)**0.5 - 1)

    The smooth approximation of l1 (absolute value) loss.
    Usually a good choice for robust least squares.

    :param z: z = f(x)**2
    :param f_scale: rho_(f**2) = C**2 * rho(f**2 / C**2), where C is f_scale
    :return: array of shape (3, len(z)) with the loss value and its first
        and second derivatives
    """
    loss = np.empty((3, z.shape[0]), dtype=np.float64)
    c2 = f_scale * f_scale
    ic2 = 1.0 / c2

    z = ic2 * z
    sqrt_1pz = np.sqrt(z + 1)

    loss[0, :] = c2 * 2 * (sqrt_1pz - 1)
    loss[1, :] = 1 / sqrt_1pz
    loss[2, :] = -ic2 * 0.5 * np.power(loss[1, :], 3)
    return loss
95813cd59c99ab94e6b4693237dc85f5b7d31b14
3,646,294
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
    """
    Given an image, this returns points that make up a hit box around it.
    Attempts to trim out transparent pixels.

    :param Image image: Image to get hit box from.
    :param int hit_box_detail: How detailed to make the hit box. There's a
        trade-off in number of points vs. accuracy.

    :Returns: List of points
    """

    def sample_func(sample_point):
        """ Method used to sample image. """
        if sample_point[0] < 0 \
                or sample_point[1] < 0 \
                or sample_point[0] >= image.width \
                or sample_point[1] >= image.height:
            return 0

        point_tuple = sample_point[0], sample_point[1]
        color = image.getpixel(point_tuple)
        if color[3] > 0:
            return 255
        else:
            return 0

    # Do a quick check if it is a full tile
    p1 = 0, 0
    p2 = 0, image.height - 1
    p3 = image.width - 1, image.height - 1
    p4 = image.width - 1, 0

    if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
        # It is a full tile; return the four corners
        p1 = (-image.width / 2, -image.height / 2)
        p2 = (image.width / 2, -image.height / 2)
        p3 = (image.width / 2, image.height / 2)
        p4 = (-image.width / 2, image.height / 2)
        return p1, p2, p3, p4

    # Get the bounding box
    logo_bb = pymunk.BB(-1, -1, image.width, image.height)

    # How often to sample?
    downres = 1
    horizontal_samples = int(image.width / downres)
    vertical_samples = int(image.height / downres)

    # Run the trace
    # Get back one or more sets of lines covering stuff.
    line_sets = pymunk.autogeometry.march_soft(
        logo_bb,
        horizontal_samples, vertical_samples,
        99,
        sample_func)

    if len(line_sets) == 0:
        return []

    selected_line_set = line_sets[0]
    selected_range = None
    # (was `len(line_set)`, which referenced the wrong variable and made
    # this branch unreachable)
    if len(line_sets) > 1:
        # We have more than one line set. Try and find one that covers most of
        # the sprite.
        for line in line_sets:
            min_x = None
            min_y = None
            max_x = None
            max_y = None
            for point in line:
                if min_x is None or point.x < min_x:
                    min_x = point.x
                if max_x is None or point.x > max_x:
                    max_x = point.x
                if min_y is None or point.y < min_y:
                    min_y = point.y
                if max_y is None or point.y > max_y:
                    max_y = point.y

            if min_x is None or max_x is None or min_y is None or max_y is None:
                raise ValueError("No points in bounding box.")

            my_range = max_x - min_x + max_y + min_y
            if selected_range is None or my_range > selected_range:
                selected_range = my_range
                selected_line_set = line

    # Reduce number of vertices
    # original_points = len(selected_line_set)
    selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
                                                            hit_box_detail)
    # downsampled_points = len(selected_line_set)

    # Convert to normal points, offset so 0,0 is the center, flip the y
    hh = image.height / 2
    hw = image.width / 2
    points = []
    for vec2 in selected_line_set:
        point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
        points.append(point)

    if len(points) > 1 and points[0] == points[-1]:
        points.pop()

    # print(f"{sprite.texture.name} Line-sets={len(line_sets)}, Original points={original_points}, Downsampled points={downsampled_points}")
    return points
dd74e18fac1fe96728837ce8af62c38461592baa
3,646,295
async def open_local_endpoint(host="0.0.0.0", port=0, *, queue_size=None, **kwargs):
    """Open and return a local datagram endpoint.

    An optional queue size argument can be provided.
    Extra keyword arguments are forwarded to `loop.create_datagram_endpoint`.
    """
    return await open_datagram_endpoint(
        host, port, remote=False,
        endpoint_factory=lambda: LocalEndpoint(queue_size),
        **kwargs
    )
a3b03408bbe35972b0588912a0628df2be9cddc5
3,646,296
from typing import Union


def parse_bool(value: Union[str, bool]) -> bool:
    """Parse a string value into a boolean.

    Uses the sets ``CONSIDERED_TRUE`` and ``CONSIDERED_FALSE`` to determine
    the boolean value of the string.

    Args:
        value (Union[str, bool]): the string to parse (is converted to
            lowercase and stripped of surrounding whitespace)

    Raises:
        ValueError: if the string cannot reliably be determined true or false

    Returns:
        bool: the parsed result
    """
    if value is True or value is False:
        return value
    val = value.strip().lower()
    if val in CONSIDERED_TRUE:
        return True
    if val in CONSIDERED_FALSE:
        return False
    raise ValueError(f"Value {value} is not compatible with boolean!")
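A usage sketch; the module-level constant sets are not shown in this row, so the values here are assumptions:

# assuming, e.g., CONSIDERED_TRUE = {"true", "1", "yes", "on"}
# and CONSIDERED_FALSE = {"false", "0", "no", "off"}
assert parse_bool("  Yes ") is True
assert parse_bool("off") is False
assert parse_bool(True) is True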
86bb61b82eb71627f3563584779f3a17ea1bc8b7
3,646,297
from datetime import datetime, timezone
import traceback

import requests


def send_rocketchat_notification(text: str, exc_info: Exception) -> dict:
    """
    Sends a message with the specified text to the configured Rocketchat channel.

    We don't want this method to raise any exceptions, as we don't want to
    unintentionally break any kind of error management flow. (We only use
    rocket chat notification when something goes wrong.) If you want to know
    whether this method worked or not, you'll have to inspect the response.
    """
    full_message = (
        f"{datetime.now(tz=timezone.utc).isoformat()}\n{text}\n"
        f"{config.get('HOSTNAME')}: {exc_info}\n"
        f"{traceback.format_exception(etype=type(exc_info), value=exc_info, tb=exc_info.__traceback__)}"
    )
    result = None
    try:
        response = requests.post(
            config.get('ROCKET_URL_POST_MESSAGE'),
            headers={
                'X-Auth-Token': config.get('ROCKET_AUTH_TOKEN'),
                'X-User-Id': config.get('ROCKET_USER_ID'),
                'Content-Type': 'application/json'
            },
            json={
                'channel': config.get('ROCKET_CHANNEL'),
                'text': full_message
            }
        )
        result = response.json()
    except Exception as exception:  # pylint: disable=broad-except
        # not doing exc_info=exception - as this causes a lot of noise, and
        # we're more interested in the main code!
        logger.error('failed to send rocket chat notification %s', exception)
    return result
c466621cfd8ead8f6773bc1c461fb779c0374937
3,646,298
def get_number_of_forms_all_domains_in_couch():
    """
    Return number of non-error, non-log forms total across all domains
    specifically as stored in couch.

    (Can't rewrite to pull from ES or SQL; this function is used as a point
    of comparison between row counts in other stores.)
    """
    all_forms = (
        XFormInstance.get_db().view('couchforms/by_xmlns').one()
        or {'value': 0}
    )['value']
    device_logs = (
        XFormInstance.get_db().view('couchforms/by_xmlns',
                                    key=DEVICE_LOG_XMLNS).one()
        or {'value': 0}
    )['value']
    return all_forms - device_logs
a30f5e6410dc3b91c3a962169fad20e2a8d4a8fb
3,646,299
def compute_wolfe_gap(point_x, objective_function, feasible_region):
    """Compute the Wolfe gap given a point."""
    grad = objective_function.evaluate_grad(point_x.cartesian_coordinates)
    v = feasible_region.lp_oracle(grad)
    wolfe_gap = grad.dot(point_x.cartesian_coordinates - v)
    return wolfe_gap
f2b09a232063599aa7525a70e6d3a0d8bafb57e7
3,646,300
async def get(ip, community, oid, port=161, timeout=DEFAULT_TIMEOUT):
    # type: (str, str, str, int, int) -> PyType
    """
    Delegates to :py:func:`~puresnmp.aio.api.raw.get` but returns simple
    Python types.

    See the "raw" equivalent for detailed documentation & examples.
    """
    raw_value = await raw.get(ip, community, oid, port, timeout=timeout)
    return raw_value.pythonize()
6682d9877ac4d5b287088fd17d626011b95b6c31
3,646,301
def preprocess_image(image, params):
    """Preprocess image tensor.

    Args:
        image: tensor, input image with shape
            [cur_batch_size, height, width, depth].
        params: dict, user passed parameters.

    Returns:
        Preprocessed image tensor with shape
            [cur_batch_size, height, width, depth].
    """
    func_name = "preprocess_image"
    # Convert from [0, 255] -> [-1.0, 1.0] floats.
    image = tf.cast(x=image, dtype=tf.float32) * (2. / 255) - 1.0
    print_obj(func_name, "image", image)

    return image
4e5a563610c2ecdcd29fa5c077025100625b767b
3,646,302
def get_vlim(xarr: xr.DataArray, alpha: float) -> dict:
    """Get vmin, vmax using mean and std."""
    mean = xarr.mean()
    std = xarr.std()
    return {"vmin": max(0., mean - alpha * std), "vmax": mean + alpha * std}
4f6c87f290ab23db56fe67f700136a49e2b52363
3,646,303
def count_consumed_symbols(e):
    """Count how many symbols are consumed from each sequence by a single
    sequence diff entry."""
    op = e.op
    if op == DiffOp.ADDRANGE:
        return (0, len(e.valuelist))
    elif op == DiffOp.REMOVERANGE:
        return (e.length, 0)
    elif op == DiffOp.PATCH:
        return (1, 1)
    else:
        raise NBDiffFormatError("Invalid op '{}'".format(op))
63a3d97840fae49a7ff3279e10e553d82dfcf801
3,646,304
def maha_dist_sq(cols, center, cov):
    """Calculate squared Mahalanobis distance of all observations (rows in the
    vectors contained in the list cols) from the center vector with respect to
    the covariance matrix cov"""
    n = len(cols[0])
    p = len(cols)
    assert len(center) == p

    # observation matrix
    obs = flex.double(flex.grid(n, p))
    for i, col in enumerate(cols):
        obs.matrix_paste_column_in_place(col, i)

    d2 = maha_dist_sq_cpp(obs, flex.double(center), cov)
    return d2
9e54a7f49ed3b977b351007991f3fe263306a20a
3,646,305
def get_form_target():
    """
    Returns the target URL for the comment form submission view.
    """
    if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form_target"):
        return get_comment_app().get_form_target()
    else:
        return urlresolvers.reverse("comments.views.comments.post_comment")
d7e6ad126a35109d589d7f2734a4bd3e56df748f
3,646,306
def secret_page(username=None, password=None):
    """
    Returns the HTML for the page visited after the user has logged-in.
    """
    if username is None or password is None:
        raise ValueError("You need to pass both username and password!")

    return _wrapper("""
        <h1> Welcome, {username}! </h1>
        <p> <small> Pst! I know your password is
            <span class="spoilers"> {password}</span>. </small> </p>
    """.format(username=escape(username.capitalize()),
               password=escape(password)))
    # ==== Edit username and pw in secret.py ====
3bd81f30f0bf63290c6ee24cf3bccb7090fd406c
3,646,307
import collections


def user(username):
    """ displays a single user """
    all_badgers = loads(r_server.get('all_badgers'))
    this_badger = all_badgers[username]
    this_badger_sorted = collections.OrderedDict(
        sorted(this_badger.items(), reverse=True))
    days = days_in_a_row(this_badger)
    kwargs = {'badgers': {username: this_badger_sorted},
              'days': days}
    return render_template('index.html', **kwargs)
27cf03175184cc839a64d931aa3477a0196c24aa
3,646,308
import numpy


def ifft(a, axis):
    """
    Fourier transformation from grid to image space, along a given axis.
    (inverse Fourier transform)

    :param a: numpy array, 1D or 2D (`uv` grid to transform)
    :param axis: int; axes over which to calculate
    :return: numpy array (an image in `lm` coordinate space)
    """
    return numpy.fft.fftshift(
        numpy.fft.ifft(numpy.fft.ifftshift(a, axis), axis=axis), axis
    )
3a96d6b615c8da63deaeca5e98a4f82f18fec8dd
3,646,309
import random

import numpy as np


def transitions_and_masks_to_proposals(t1, t2, m1, m2, max_samples=10, max_ccs=6):
    """
    assumes set-based s and a... so shape should be (n_components, *component_shape)

    Takes two transitions with their masks, and combines them using
    connected-component relabeling to form proposals.

    Returns a list of tuples of ((s1, a1, s2) proposal, disconnected_component_idxs).
    """
    sa1, s21 = t1
    sa2, s22 = t2

    # get_dcs_from_mask should return a set of tuples of indices, inc. the empty
    # tuple, where the subgraph represented by each tuple is disconnected from
    # the rest of the graph. Note that mask should be square, so columns
    # corresp. to action idxs are dummy columns.
    #
    # E.g., if mask is [[1,0,0,0],[0,1,0,0],[0,0,1,1],[0,0,1,1]],
    # this function should return:
    # set([ (,), (0,), (1,), (0,1), (2, 3), (0, 2, 3), (1, 2, 3), (0, 1, 2, 3) ])
    dc1 = get_dcs_from_mask(m1, max_ccs)
    dc2 = get_dcs_from_mask(m2, max_ccs)

    # get shared connected components in random order
    shared_dc = list(dc1.intersection(dc2))
    random.shuffle(shared_dc)

    # subsample shared_dc down to max_samples
    if len(shared_dc) > max_samples:
        shared_dc = shared_dc[:max_samples]

    all_idxs = set(range(len(sa1)))
    res = []
    for dc in shared_dc:
        not_dc = list(all_idxs - set(dc))
        dc = list(dc)  # (0, 2)
        proposed_sa = np.zeros_like(sa1)
        proposed_s2 = np.zeros_like(sa1)
        proposed_sa[dc] = sa1[dc]
        proposed_sa[not_dc] = sa2[not_dc]
        proposed_s2[dc] = s21[dc]
        proposed_s2[not_dc] = s22[not_dc]
        proposed_t = (proposed_sa, proposed_s2)
        res.append((proposed_t, tuple(dc)))
    return res
146b937e7a46d6d051b10f900574378874535932
3,646,310
import requests
import re
from urllib.parse import urlparse

import boto.exception
import boto.regioninfo
import boto.s3


def exists(url):
    """Check based on protocol if url exists."""
    parsed_url = urlparse(url)
    if parsed_url.scheme == "":
        raise RuntimeError("Invalid url: %s" % url)
    if parsed_url.scheme in ('http', 'https'):
        r = requests.head(url, verify=False)
        if r.status_code == 200:
            return True
        elif r.status_code == 404:
            return False
        else:
            r.raise_for_status()
    elif parsed_url.scheme in ('s3', 's3s'):
        s3_eps = boto.regioninfo.load_regions()['s3']
        region = None
        for r, e in list(s3_eps.items()):
            if re.search(e, parsed_url.netloc):
                region = r
                break
        if region is None:
            raise RuntimeError("Failed to find region for endpoint %s." % parsed_url.netloc)
        conn = boto.s3.connect_to_region(region,
                                         aws_access_key_id=parsed_url.username,
                                         aws_secret_access_key=parsed_url.password)
        match = re.search(r'/(.*?)/(.*)$', parsed_url.path)
        if not match:
            raise RuntimeError("Failed to parse bucket & key from %s." % parsed_url.path)
        bn, kn = match.groups()
        try:
            bucket = conn.get_bucket(bn)
        except boto.exception.S3ResponseError as e:
            if e.status == 404:
                return False
            else:
                raise
        key = bucket.get_key(kn)
        return key is not None
    else:
        raise NotImplementedError("Failed to check existence of %s url." % parsed_url.scheme)
bb91fd5fb93ec6441125a1aa4874ad6d7f103535
3,646,312
def inverse_chirality_symbol(symbol):
    """
    Inverts a chirality symbol, e.g., the 'R' character to 'S', or 'NS' to 'NR'.
    Note that chiral double bonds ('E' and 'Z') must not be inverted
    (they are not mirror images of each other).

    Args:
        symbol (str): The chirality symbol.

    Returns:
        str: The inverse chirality symbol.

    Raises:
        InputError: If ``symbol`` could not be recognized.
    """
    inversion_dict = {'R': 'S', 'S': 'R', 'NR': 'NS', 'NS': 'NR', 'E': 'E', 'Z': 'Z'}
    if symbol not in inversion_dict:
        raise InputError(f"Recognized chirality symbols are 'R', 'S', 'NR', 'NS', "
                         f"'E', and 'Z', got {symbol}.")
    return inversion_dict[symbol]
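A quick usage sketch:

assert inverse_chirality_symbol('R') == 'S'
assert inverse_chirality_symbol('NS') == 'NR'
assert inverse_chirality_symbol('E') == 'E'  # double-bond chirality is unchanged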
e87fae6ad9169efac0b3c95f53dfb92e0c450909
3,646,313
from typing import Optional


def delete(
    request: HttpRequest,
    wid: Optional[int] = None,
    workflow: Optional[Workflow] = None,
) -> JsonResponse:
    """Delete a workflow."""
    if request.method == 'POST':
        # Log the event
        Log.objects.register(
            request.user,
            Log.WORKFLOW_DELETE,
            None,
            {'id': workflow.id, 'name': workflow.name})

        # Nuke the logs pointing to the workflow
        for litem in workflow.logs.all():
            litem.workflow = None
            litem.save()

        # Perform the delete operation
        workflow.delete()

        # In this case, the form is valid anyway
        return JsonResponse({'html_redirect': reverse('home')})

    return JsonResponse({
        'html_form': render_to_string(
            'workflow/includes/partial_workflow_delete.html',
            {'workflow': workflow},
            request=request),
    })
07b6de0d66a5101660f1bf4aa37abe4be71568ff
3,646,315
import numpy as np


def at_threshold(FPR, TPR, parameter, threshold):
    """
    False positive rate (FPR) and true positive rate (TPR) at the selected threshold.

    :param FPR: False positive rates of given receiver operating characteristic (ROC) curve
    :param TPR: True positive rates of given receiver operating characteristic (ROC) curve
    :param parameter: possible thresholds
    :param threshold: selected threshold
    """
    index = np.argmin(np.abs(parameter - threshold))
    FPR_at_threshold = FPR[index]
    TPR_at_threshold = TPR[index]
    return FPR_at_threshold, TPR_at_threshold
d66edc0e43a18a5fdf8b6d216e4130aef8a7b17b
3,646,316
import chex
import jax
import numpy as np


def _check_kl_estimator(estimator_fn, distribution_fn, num_samples=10000,
                        rtol=1e-1, atol=1e-3,
                        grad_rtol=2e-1, grad_atol=1e-1):
    """Compares the estimator_fn output and gradient to exact KL."""
    rng_key = jax.random.PRNGKey(0)

    def expected_kl(params):
        distribution_a = distribution_fn(**params[0])
        distribution_b = distribution_fn(**params[1])
        return distribution_a.kl_divergence(distribution_b)

    def estimate_kl(params):
        distribution_a = distribution_fn(**params[0])
        distribution_b = distribution_fn(**params[1])
        return estimator_fn(distribution_a, distribution_b,
                            rng_key=rng_key, num_samples=num_samples)

    params = (
        dict(loc=0.0, scale=1.0),
        dict(loc=0.1, scale=1.0),
    )
    expected_value, expected_grad = jax.value_and_grad(expected_kl)(params)
    value, grad = jax.value_and_grad(estimate_kl)(params)

    np.testing.assert_allclose(expected_value, value, rtol=rtol, atol=atol)
    chex.assert_tree_all_close(expected_grad, grad, rtol=grad_rtol, atol=grad_atol)
b4e34f35f6531f795c8621fee2082993c3b518bd
3,646,317
import numpy as np


def relative_bias(simu, reco, relative_scaling_method='s1'):
    """
    Compute the relative bias of a reconstructed variable as
    `median(reco-simu)/relative_scaling(simu, reco)`

    Parameters
    ----------
    simu: `numpy.ndarray`
    reco: `numpy.ndarray`
    relative_scaling_method: str
        see `ctaplot.ana.relative_scaling`

    Returns
    -------
    float
        the relative bias
    """
    assert len(reco) == len(simu)
    if len(simu) == 0:
        return 0
    return np.median((reco - simu) / relative_scaling(simu, reco, method=relative_scaling_method))
1bc611b1ea135d593bc9b8c83a02a50eeaf18a7e
3,646,318
from typing import Dict
from typing import Any


def addon_config() -> Dict[str, Any]:
    """Sample addon config."""
    return {
        "package-name": "djangocms-blog",
        "installed-apps": [
            "filer",
            "easy_thumbnails",
            "aldryn_apphooks_config",
            "parler",
            "taggit",
            "taggit_autosuggest",
            "meta",
            "djangocms_blog",
            "sortedm2m",
        ],
        "settings": {
            "META_SITE_PROTOCOL": "https",
            "META_USE_SITES": True,
            "MIDDLEWARE": ["django.middleware.gzip.GZipMiddleware"],
        },
        "urls": [["", "djangocms_blog.taggit_urls"]],
        "message": "Please check documentation to complete the setup",
    }
f4266735ef2f0809e5802abed54dfde4c1cbd708
3,646,319
def join_mutations_regions(
    out_path: str, sample1_id: int, sample2_id: int,
    mutations_file: File, regions_file: File
) -> File:
    """
    Join mutations and regions together to compute an allele frequency.
    """
    def iter_mut_points(muts):
        for pos, count in muts:
            yield pos, "mut", count

    def iter_region_points(regions):
        for start, end, depth in regions:
            yield start - 0.5, "region", depth

    def iter_allele_freqs(points):
        denom = 0
        for pos, kind, count in points:
            if kind == "region":
                denom = count
            elif kind == "mut":
                yield pos, count, denom, count / denom

    points1 = iter_mut_points(read_mutations(mutations_file))
    points2 = iter_region_points(read_regions(regions_file))
    points = iter_merge(points1, points2)
    allele_freqs = iter_allele_freqs(points)

    allele_freqs_path = f"{out_path}/allele_freqs/{sample1_id}_{sample2_id}.allele_freqs"
    return write_allele_freqs(allele_freqs_path, allele_freqs)
4d712a914e2f4c221df982fbd3352eb4f572ad11
3,646,320
import scipy as sp
from scipy.special import erf


def credibility_interval(post, alpha=1.):
    """Calculate bayesian credibility interval.

    Parameters:
    -----------
    post : array_like
        The posterior sample over which to calculate the bayesian credibility
        interval.
    alpha : float, optional
        Confidence level.

    Returns:
    --------
    med : float
        Median of the posterior.
    low : float
        Lower part of the credibility interval.
    up : float
        Upper part of the credibility interval.
    """
    z = erf(alpha / sp.sqrt(2))

    lower_percentile = 100 * (1 - z) / 2
    upper_percentile = 100 * (1 + z) / 2
    low, med, up = sp.percentile(
        post, [lower_percentile, 50, upper_percentile]
    )
    return med, low, up
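A usage sketch on a normal posterior sample; with alpha=1 the interval recovers roughly the +/-1 sigma band, since erf(1/sqrt(2)) ~ 0.683:

import numpy as np

post = np.random.default_rng(0).normal(loc=5.0, scale=2.0, size=100_000)
med, low, up = credibility_interval(post, alpha=1.0)
# med ~ 5.0, low ~ 3.0, up ~ 7.0
assert low < med < up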
b31009918324980ba2ffc53a1f29af1f4e421f95
3,646,321
def svn_ra_invoke_replay_revstart_callback(*args):
    """
    svn_ra_invoke_replay_revstart_callback(svn_ra_replay_revstart_callback_t _obj,
        svn_revnum_t revision, void replay_baton, svn_delta_editor_t editor,
        void edit_baton, apr_hash_t rev_props, apr_pool_t pool) -> svn_error_t
    """
    # apply() was removed in Python 3; unpacking the argument tuple is equivalent
    return _ra.svn_ra_invoke_replay_revstart_callback(*args)
4c792a16d6dcdbb588062f1f47b3caed84bbd610
3,646,322
import click


def tree(ctx, rootpage):
    """Export metadata of a page tree."""
    if not rootpage:
        click.serror("No root page selected via --entity!")
        return 1

    outname = getattr(ctx.obj.outfile, 'name', None)
    with api.context() as cf:
        results = []
        try:
            #page = content.ConfluencePage(cf, rootpage, expand='metadata.labels,metadata.properties')
            #results.append(page.json)
            pagetree = cf.walk(rootpage, depth_1st=True,
                               expand='metadata.labels,metadata.properties,version')
            for depth, data in pagetree:
                data.update(dict(depth=depth))
                results.append(data)
        except api.ERRORS as cause:
            # Just log and otherwise ignore any errors
            api.diagnostics(cause)
        else:
            ctx.obj.log.info('Got {} results.'.format(len(results)))

    if results:
        print_result(ctx, results)
d055d8dc5fc5a3a267500362ca89b6e895d9d50f
3,646,323
import math


def to_half_life(days):
    """
    Return the decay constant [1/s] from the half-life length [day]
    """
    s = days * 3600 * 24
    return -math.log(1 / 2) / s
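A worked example: a 5-day half-life gives lambda = ln(2) / (5 * 86400 s), about 1.6e-6 per second:

lam = to_half_life(5)
assert abs(lam - 1.604e-06) < 1e-08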
af7724dfb9442bf1f5e931df5dd39b31d0e78091
3,646,324
import struct


def Send(dst_ip, data, sequence=0, spoof_source=False,
         dst_port=MDNS_PORT, src_port=MDNS_PORT, dns_name=TEST_QUERY):
    """
    Send one packet of MDNS with data.

    :param dst_ip: IP as string.
    :param data: Data as bytes/string.
    :param sequence: Number to use for sequence. Int.
    :param spoof_source: Default: False. Set as IP for spoofing.
    :param dst_port: Destination port.
    :param src_port: Source port.
    :param dns_name: DNS name to put in the MDNS request.
    :return: semper vera!!!
    """
    # build the payload as bytes so the struct.pack concatenations work on Python 3
    payload = b""
    payload += b"\x00"  # TransID is 2 bytes. Using one for sequence.
    payload += struct.pack('B', sequence)
    payload += b"\x00\x00"  # Standard query
    payload += b"\x00\x01"  # 1 question
    payload += b"\x00\x00"  # 0 answer RRs
    payload += b"\x00\x00"  # 0 authority RRs
    payload += b"\x00\x00"  # 0 additional RRs
    # Start of query:
    payload += struct.pack('B', len(dns_name))  # length prefix
    payload += dns_name if isinstance(dns_name, bytes) else dns_name.encode()  # name
    payload += b"\x00"  # query terminator
    payload += b"\x00\x0c"  # PTR request
    payload += b"\x00\x01"  # class IN

    if spoof_source is False:
        pkt = IP(
            dst=dst_ip
            # src = "1.1.1.1"
        ) / UDP(
            sport=src_port,
            dport=dst_port
        ) / Raw(
            load=payload
        )
    else:
        pkt = IP(
            dst=dst_ip,
            src=spoof_source
        ) / UDP(
            sport=src_port,
            dport=dst_port
        ) / Raw(
            load=data
        )
    send(pkt)
    return True
9541c71d52dcbaa09ffba1aa1bf4d4d422d66ed6
3,646,325
from units.models import Unit


def accreds_logs_list(request):
    """Display the list of accreds"""
    main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
    main_unit.set_rights_can_select(
        lambda unit: Accreditation.static_rights_can('LIST', request.user, unit))
    main_unit.set_rights_can_edit(
        lambda unit: Accreditation.static_rights_can('CREATE', request.user, unit))
    main_unit.check_if_can_use_hidden(request.user)

    if request.GET.get('upk'):
        update_current_unit(request, request.GET.get('upk'))

    return render(request, 'units/accreds/logs_list.html', {'main_unit': main_unit})
41961a3cd4f351d13ae5132cfb37e83be7050cc5
3,646,326
from typing import Counter


def build_dict(file_name, max_vocab_size):
    """
    reads a list of sentences from a file and returns
    - a dictionary which maps the most frequent words to indices and
    - a table which maps indices to the most frequent words
    """
    word_freq = Counter()
    with open(file_name) as file:
        for line in file:
            word_freq.update(line.split())
    if max_vocab_size <= 0:
        max_vocab_size = len(word_freq)
    words, _ = zip(*word_freq.most_common(max_vocab_size))
    # ID of pad_string must be 0
    words = [pad_string, unk_string] + list(words)
    word2ID = {w: i for i, w in enumerate(words)}
    return word2ID, words
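A usage sketch; pad_string and unk_string are module-level constants not shown in this row (assumed to be something like "<pad>" and "<unk>"):

with open("tiny.txt", "w") as f:
    f.write("the cat sat\nthe cat ran\n")
word2ID, words = build_dict("tiny.txt", max_vocab_size=2)
assert words[:2] == [pad_string, unk_string]  # IDs 0 and 1 are reserved
assert words[2:] == ["the", "cat"]  # the two most frequent words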
ec2067e1fbf8d0f6845024ae69f8531c1f776348
3,646,327
import functools
import logging


def from_net(func):
    """
    Decorator for functions that collect similarity data. It overrides the
    data fetch mode configured in env and forces network-only fetching for the
    data collection; once the whole task finishes, the previous fetch mode is
    restored.

    :param func: a similarity function that performs data collection
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # temporarily save g_data_fetch_mode from the env settings
        fetch_mode = ABuEnv.g_data_fetch_mode
        # force the data fetch mode to network-only mode
        ABuEnv.g_data_fetch_mode = ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET
        if fetch_mode != ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET:
            # warn if the original setting was not forced network mode
            logging.warning('data from net!!!')
        result = func(*args, **kwargs)
        # restore the previous g_data_fetch_mode
        ABuEnv.g_data_fetch_mode = fetch_mode
        return result

    return wrapper
68c3a9302d83cf0e02a74c104fd2b5894b85020a
3,646,328
import tensorflow as tf


def embedding_lookup(ids, params):
    """
    Returns the embeddings lookups.

    The difference between this function and TensorFlow's is that this
    function expects the ids as the first argument and the parameters as the
    second, while in TensorFlow it is the other way around.

    :param ids: the ids
    :type ids: tf.Tensor
    :param params: the parameters
    :type params: tf.Tensor
    :return: the lookup
    :rtype: tf.Tensor
    """
    return tf.nn.embedding_lookup(params, ids)
ef85f95cfa5d2a426616ee9203707877ae202051
3,646,330
def in_auto_mode(conx: Connection) -> bool:
    """Determine whether the controller is in AUTO or one of the MANUAL modes.

    Wraps the Karel IN_AUTO_MODE routine.

    NOTE: this method is moderately expensive, as it executes a Karel program
    on the controller.

    :returns: True if the controller is in AUTO mode
    :rtype: bool
    """
    ret = exec_karel_prg(conx, prg_name='dmh_autom')
    if not ret[JSON_SUCCESS]:
        raise DominhException("Select_TPE error: " + ret[JSON_REASON])
    return ret['in_auto_mode']
c2819344130a1562fab5a9ece177f8b400a15fbc
3,646,331
def pref(pref_name, default=None):
    """Return a preference value.

    Since this uses CFPreferencesCopyAppValue, preferences can be defined
    in several places. Precedence is:
        - MCX
        - /var/root/Library/Preferences/com.github.salopensource.sal.plist
        - /Library/Preferences/com.github.salopensource.sal.plist
        - default_prefs defined here.
    """
    default_prefs = {
        'ServerURL': 'http://sal',
        'osquery_launchd': 'com.facebook.osqueryd.plist',
        'SkipFacts': [],
        'SyncScripts': True,
        'BasicAuth': True,
        'GetGrains': False,
        'GetOhai': False,
        'LastRunWasOffline': False,
        'SendOfflineReport': False,
    }

    pref_value = CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)
    if pref_value is None and default:
        pref_value = default
    elif pref_value is None and pref_name in default_prefs:
        pref_value = default_prefs.get(pref_name)
        # we're using a default value. We'll write it out to
        # /Library/Preferences/<BUNDLE_ID>.plist for admin discoverability
        set_pref(pref_name, pref_value)

    if isinstance(pref_value, NSDate):
        # convert NSDate/CFDates to strings
        pref_value = str(pref_value)

    return pref_value
10102f3dde316e473d5943fee059f729d6e9454c
3,646,332
import datetime as dt


def tRange(tStart, tStop, *, timedelta=300):
    """
    Generate datetime list between tStart and tStop with fixed timedelta.

    Parameters
    ----------
    tStart: datetime
        start time.
    tStop: datetime
        stop time.

    Keywords
    --------
    timedelta: int
        time delta in seconds (default: 300).

    Returns
    -------
    tList: list
        datetime between tStart and tStop with fixed timedelta.

    Examples
    --------
    >>> import datetime as dt
    >>> tList = tRange(dt.datetime(2011, 1, 1), dt.datetime(2011, 1, 2),
    ...                timedelta=3600 * 12)
    >>> tList
    [datetime.datetime(2011, 1, 1, 0, 0),
     datetime.datetime(2011, 1, 1, 12, 0),
     datetime.datetime(2011, 1, 2, 0, 0)]

    History
    -------
    2020-02-25 First version.
    """
    nTimedelta = int((tStop - tStart) / dt.timedelta(seconds=timedelta)) + 1
    tList = [tStart + dt.timedelta(seconds=timedelta * i)
             for i in range(0, nTimedelta)
             if tStart + dt.timedelta(seconds=timedelta * i) <= tStop]
    return tList
4dec7a624bcd2b349d361831993b8108e99725a8
3,646,333
import numpy


def TransformInversePoints(T, points):
    """Transforms a Nxk array of points by the inverse of an affine matrix"""
    kminus = T.shape[1] - 1
    return numpy.dot(points - numpy.tile(T[0:kminus, kminus], (len(points), 1)),
                     T[0:kminus, 0:kminus])
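A round-trip sketch: the function multiplies by the linear block of T rather than its true inverse, so the round trip below holds when that block is orthonormal (a rotation):

import numpy as np

# 2D rotation by 90 degrees plus a translation, in homogeneous form
T = np.array([[0.0, -1.0, 1.0],
              [1.0,  0.0, 2.0],
              [0.0,  0.0, 1.0]])
pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
transformed = pts.dot(T[0:2, 0:2].T) + T[0:2, 2]
assert np.allclose(TransformInversePoints(T, transformed), pts)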
7e04a741c6ad0ec08e40ab393a703a1878ef784a
3,646,334
from torch import nn


def act_func(act):
    """function that can choose activation function

    Args:
        act: (str) activation function name

    Returns:
        corresponding PyTorch activation function
    """
    return nn.ModuleDict([
        ['relu', nn.ReLU(inplace=True)],
        ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],
        ['selu', nn.SELU(inplace=True)]
    ])[act]
ffd0e6f2ec3ea419c4c3fbb618e4734d59420826
3,646,335
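# A quick usage sketch for act_func above (assumes PyTorch is installed).
# Note the design choice: all three activation modules are constructed on
# every call and the unused ones are discarded.
import torch

activation = act_func('leaky_relu')
print(activation(torch.tensor([-1.0, 2.0])))  # tensor([-0.0100, 2.0000])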
def ajax_get_hashtags():
    """Flask Ajax Get Hashtag Route."""
    f = request.args.get('f', 0, type=int)
    t = request.args.get('t', 0, type=int)
    hashtags_list = get_hashtags()
    try:
        if t == 0 or t > len(hashtags_list):
            return jsonify(dict(hashtags_list[f:]))
        return jsonify(dict(hashtags_list[f:t]))
    except Exception:
        # Flask cannot serialize a bare bool; return an empty JSON object instead.
        return jsonify({})
3c9249a5fefb93d422c6e2c4be56394711bf1d7a
3,646,336
from PyPDF2 import PdfFileReader


def extract_pdf_information(pdf_path):
    """ Print and return pdf information """
    # read binary
    with open(pdf_path, 'rb') as f:
        pdf = PdfFileReader(f)
        information = pdf.getDocumentInfo()
        number_of_pages = pdf.getNumPages()

        txt = f"""
        Information about {pdf_path}:

        Author: {information.author}
        Creator: {information.creator}
        Producer: {information.producer}
        Subject: {information.subject}
        Title: {information.title}
        Number of pages: {number_of_pages}
        """

    print(txt)
    return information
bec3667aba872f8e7bf53da09a9fb1905bcf5eec
3,646,337
def normalize_string(subject: str) -> str:
    """Deprecated function alias."""
    logger.warning("normalize_string is deprecated")
    return string_to_title(subject)
6531a6e7211c61d8439bfa8ddc0e609c35b8b6f3
3,646,338
import inspect def get_default_args(func): """ Return dict for parameter name and default value. Parameters ---------- func : Callable A function to get parameter name and default value. Returns ------- Dict Parameter name and default value. Examples -------- >>> def test_func(a: int, b: str = "c") -> int: ... return a+1 >>> get_default_args(test_func) {'b': 'c'} >>> def test_func2(a: int = 1, b="c") -> int: ... return a+1 >>> get_default_args(test_func2) {'a': 1, 'b': 'c'} """ signature = inspect.signature(func) return { k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty }
dcc75dceae1385868866d668aa021584547190df
3,646,339
def sec_to_time(seconds): """Transform seconds into a formatted time string. Parameters ----------- seconds : int Seconds to be transformed. Returns ----------- time : string A well formatted time string. """ m, s = divmod(seconds, 60) h, m = divmod(m, 60) return "%02d:%02d:%02d" % (h, m, s)
59fcfe2f53d11ea7daac736b59b5eaeb72172dba
3,646,340
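# Usage sketch for sec_to_time above:
print(sec_to_time(3661))    # "01:01:01"
print(sec_to_time(86399))   # "23:59:59"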
def power_oos(dmap_object, Y): """ Performs out-of-sample extension to calculate the values of the diffusion coordinates at each given point using the power-like method. Parameters ---------- dmap_object : DiffusionMap object Diffusion map upon which to perform the out-of-sample extension. Y : array-like, shape (n_query, n_features) Data for which to perform the out-of-sample extension. Returns ------- phi : numpy array, shape (n_query, n_eigenvectors) Transformed value of the given values. """ m = int(Y.shape[0]) k_yx, y_bandwidths = dmap_object.local_kernel.compute(Y, return_bandwidths=True) # Evaluate on ref points yy_right_norm_vec = dmap_object._make_right_norm_vec(k_yx, y_bandwidths)[1] k_yy_diag = dmap_object.local_kernel.kernel_fxn(0, dmap_object.epsilon_fitted) data_full = np.vstack([dmap_object.local_kernel.data, Y]) k_full = sps.hstack([k_yx, sps.eye(m) * k_yy_diag]) right_norm_full = np.hstack([dmap_object.right_norm_vec, yy_right_norm_vec]) weights = dmap_object._compute_weights(data_full) P = dmap_object._left_normalize(dmap_object._right_normalize(k_full, right_norm_full, weights)) L = dmap_object._build_generator(P, dmap_object.epsilon_fitted, y_bandwidths) L_yx = L[:, :-m] L_yy = np.array(L[:, -m:].diagonal()) adj_evals = dmap_object.evals - L_yy.reshape(-1, 1) dot_part = np.array(L_yx.dot(dmap_object.dmap)) return (1. / adj_evals) * dot_part
4de7d75324cd05a7d1ada0e8f6e724ecd551930c
3,646,341
import dlib


def detect_face_landmarks(image, face_rect=None):
    """
    Detect face landmarks. If face_rect is None, the face rectangle is taken
    to be the whole image.

    :param image: the input image
    :param face_rect: where the face is
    """
    if face_rect is None:
        # dlib.rectangle takes (left, top, right, bottom), so pass the image
        # width (shape[1]) and height (shape[0]) in that order
        face_rect = dlib.rectangle(0, 0, image.shape[1], image.shape[0])
    return _detect_face_landmarks(image, face_rect)
70c299ae2ce98409e2359e11fa9def0d35e7554f
3,646,342
from typing import Iterable
from typing import Mapping


def ensure_iterable(obj):
    """Ensure ``obj`` is a sequential iterable that is not a string type.

    1. If ``obj`` is :const:`None`, return an empty :class:`tuple`.
    2. If ``obj`` is an instance of :class:`str`, :class:`bytes`, or
       :class:`Mapping`, or is not :class:`Iterable`, return a list
       containing ``obj``.
    3. Otherwise return ``obj``.

    Parameters
    ----------
    obj : object
        The object to ensure iterability of

    Returns
    -------
    :class:`Sequence`
        Returns either ``obj`` or a wrapper around ``obj``
    """
    if obj is None:
        return tuple()
    if not isinstance(obj, Iterable) or isinstance(obj, (str, bytes)) or isinstance(obj, Mapping):
        return [obj]
    return obj
56c2db3d87c5927b1f2dbb51b64e7be73956d2b8
3,646,343
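# A few quick checks for ensure_iterable above, one per branch:
print(ensure_iterable(None))        # ()
print(ensure_iterable("abc"))       # ['abc']   (strings are wrapped, not iterated)
print(ensure_iterable({"a": 1}))    # [{'a': 1}]
print(ensure_iterable([1, 2, 3]))   # [1, 2, 3] (already an acceptable iterable)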
import numpy as np

import ahrs


def test_dist(**kwargs):
    """
    Test that the Euclidean distance metric agrees with numpy's norm.
    """
    a = np.random.random((2, 3))
    d = ahrs.utils.metrics.euclidean(a[0], a[1])
    result = np.allclose(d, np.linalg.norm(a[0] - a[1]))
    return result
46a9343fda3445fe0f07bfbb41fc321e6572e4a7
3,646,344
from sklearn.decomposition import PCA


def get_pca(coords):
    """
    Parameters
    -----------
    coords : 2D np.array of points

    Returns
    ---------
    new_coords : 2D np.array of points
        keeps the original number of dimensions of the input coords
    variance_ratio : np.array
    """
    pca = PCA(n_components=3)
    new_coords = pca.fit_transform(coords)
    return new_coords, pca.explained_variance_ratio_
a0bce6a7c4b50139502cbdedc6f0f456f21d26b6
3,646,345
import flask


def get_form_info(email):
    """Gets all existing application form info from the database."""
    user_id = get_user_id(email)
    if not user_id:
        return (False, "Invalid user ID. Please contact the organizers.")
    query = """
        SELECT * FROM applications WHERE user_id = %s AND application_year = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id, app_year.year + "0000"])
        application = cursor.fetchone()
    query = """
        SELECT * FROM members WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        member = cursor.fetchone()
    query = """
        SELECT * FROM diet WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        diet = cursor.fetchall()
    query = """
        SELECT * FROM race WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        race = cursor.fetchall()
    validationForm = ValidationForm()
    validationForm.fill(application, member)
    return (FormInfo(application, member, diet, race), validationForm)
f612b83aeb63ff138cc637dc04446ce59f6ecc6b
3,646,346
import logging def getLog(): """simple wrapper around basic logger""" return logging
b51942d2ed02f9ea7faf0a626715ec07e1677c88
3,646,347
from datetime import datetime


def _date(defval, t):
    """
    Supported formats:
        Unix timestamp
        date strings in yyyy-mm-dd format
        date strings in yyyy/mm/dd format
        date strings in yyyymmdd format
    If any of the year, month, or day parts is 0, it is converted to 1.
    """
    if t is None:
        return defval
    if isinstance(t, (int, float)):
        return datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')
    lt = len(t)
    if lt < 8:
        return defval
    if lt == 8:
        format_str = '%Y%m%d'
    else:
        t = t.replace('/', '-')
        format_str = '%Y-%m-%d %H:%M:%S'
        if lt > 19:
            format_str += '.%f'
    try:
        return str(datetime.strptime(t, format_str))
    except (ValueError, TypeError):
        return defval
e8a1121da89d9dc46bdce5d1b8c70ec973909abb
3,646,348
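# Usage sketch for _date above, one call per supported input shape:
print(_date(None, 0))                      # Unix timestamp -> epoch in local time
print(_date(None, '20110102'))             # yyyymmdd
print(_date(None, '2011/01/02 03:04:05'))  # slashes are normalized to dashes
print(_date('fallback', 'nonsense'))       # unparseable input returns defval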
import math def compute_lat_long_distance(point1, point2): """ Compute the distance between two records that have fields 'lat' and 'lon'. See details and reference implementation at http://andrew.hedges.name/experiments/haversine/ :param point1: a record with { 'lat', 'lon' } :param point2: a record with { 'lat', 'lon' } :return: """ lat1 = degree_to_rad(point1['lat']) lat2 = degree_to_rad(point2['lat']) lon1 = degree_to_rad(point1['lon']) lon2 = degree_to_rad(point2['lon']) dlon = lon2 - lon1 dlat = lat2 - lat1 a = math.sin(dlat / 2) * math.sin(dlat / 2) + \ math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) * math.sin(dlon / 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) earth_radius = 3961 # Use 6373 for km d = earth_radius * c # In miles return round(d, 3)
8058df18106636a0bc6c1f7471f912e07e61ae21
3,646,349
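# A worked sketch for compute_lat_long_distance above. The helper
# degree_to_rad is not shown in this snippet; here we assume it is simply
# math.radians. Distances are in miles (earth_radius = 3961).
import math

degree_to_rad = math.radians  # assumed definition of the missing helper
sfo = {'lat': 37.6213, 'lon': -122.3790}
lax = {'lat': 33.9416, 'lon': -118.4085}
print(compute_lat_long_distance(sfo, lax))  # roughly 337 miles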
import logging
from math import ceil


def entropy_analysis(data_df):
    """
    Masked Shannon entropy analysis for sequences

    Parameters
    ----------
    data_df: pandas.DataFrame
        merged Pandas dataframe

    Returns
    -------
    H_list: list
        entropy values for all positions
    null_freq_list: list
        masked percentage for all positions
    """
    seq_list = data_df['sequence'].values.tolist()
    base_set = set([])
    for seq in seq_list:
        base_set.update(set(seq))
    H_list = []
    null_freq_list = []
    STEP = ceil(len(seq_list[0]) / 10)
    for base_idx in range(len(seq_list[0])):
        if base_idx % STEP == 0:
            logging.info('Entropy analysis in progress: {}% completed.'.format(10 * base_idx // STEP))
        H, null_freq = base_entropy_masked(seq_list, base_set, base_idx)
        H_list.append(H)
        null_freq_list.append(null_freq)
    logging.info('Entropy analysis in progress: DONE.')
    return H_list, null_freq_list
8b1d887f2c39b39a833c13780864bd47d0d8d648
3,646,350
import re def get_requirements(filename): """ Helper function to read the list of requirements from a file """ dependency_links = [] with open(filename) as requirements_file: requirements = requirements_file.read().strip('\n').splitlines() requirements = [req for req in requirements if not req.startswith('#')] for i, req in enumerate(requirements): if ':' in req: match_obj = re.match(r"git\+(?:https|ssh|http):.*#egg=(.*)-(.*)", req) assert match_obj, "Cannot make sense of url {}".format(req) requirements[i] = "{req}=={ver}".format(req=match_obj.group(1), ver=match_obj.group(2)) dependency_links.append(req) return requirements, dependency_links
292d45ab8e7f8523734326869bb1dd05c6f395f1
3,646,351
import numpy as np


def nigam_and_jennings_response(acc, dt, periods, xi):
    """
    Implementation of the response spectrum calculation from Nigam and Jennings (1968).

    Ref: Nigam, N. C., Jennings, P. C. (1968) Digital calculation of response spectra from
    strong-motion earthquake records. National Science Foundation.

    :param acc: acceleration in m/s2
    :param periods: response periods of interest
    :param dt: time step of the acceleration time series
    :param xi: critical damping factor
    :return: response displacement, response velocity, response acceleration
    """
    acc = -np.array(acc, dtype=float)
    periods = np.array(periods, dtype=float)
    if periods[0] == 0:
        s = 1
    else:
        s = 0
    w = 6.2831853 / periods[s:]  # angular frequency, 2 * pi / T
    dt = float(dt)
    xi = float(xi)
    # implement: delta_t should be less than period / 20
    a, b = compute_a_and_b(xi, w, dt)

    resp_u = np.zeros([len(periods), len(acc)], dtype=float)
    resp_v = np.zeros([len(periods), len(acc)], dtype=float)
    for i in range(len(acc) - 1):  # possibly speed up using scipy.signal.lfilter
        # x_i+1 = A cross (u, v) + B cross (acc_i, acc_i+1)  # Eq 2.7a
        resp_u[s:, i + 1] = (a[0][0] * resp_u[s:, i] + a[0][1] * resp_v[s:, i]
                             + b[0][0] * acc[i] + b[0][1] * acc[i + 1])
        resp_v[s:, i + 1] = (a[1][0] * resp_u[s:, i] + a[1][1] * resp_v[s:, i]
                             + b[1][0] * acc[i] + b[1][1] * acc[i + 1])

    w2 = w ** 2
    if s:
        sdof_acc = np.zeros_like(resp_u, dtype=float)
        sdof_acc[s:] = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
        sdof_acc[0] = acc
    else:
        sdof_acc = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
    return resp_u, resp_v, sdof_acc
4e9853b660d85d12701bafe9e328bc91499df73a
3,646,352
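# A usage sketch for nigam_and_jennings_response above. compute_a_and_b is a
# helper from the same module and is not reproduced here; the input signal is
# a made-up sine pulse purely for illustration.
import numpy as np

dt = 0.01
t = np.arange(0, 10, dt)
acc = np.sin(2 * np.pi * 1.0 * t)   # 1 Hz harmonic acceleration, m/s2
periods = np.array([0.2, 0.5, 1.0, 2.0])
u, v, a = nigam_and_jennings_response(acc, dt, periods, xi=0.05)
print(np.abs(u).max(axis=1))        # peak response displacement per period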
def binary_hamiltonian(op, nqubits, qubits1, qubits2, weights, device=None):
    """Generates tt-tensor classical Ising model Hamiltonian (two-qubit interaction terms in a single basis).

    Hamiltonian of the form:
        H = sum_i omega_i sigma_ind1(i) sigma_ind2(i)
    where omega_i are the Hamiltonian weights, sigma is the operator specified
    by op, and ind1(i), ind2(i) are the qubit indices given by qubits1 and
    qubits2 at position i.

    Parameters
    ----------
    op : tt-tensor, single-qubit operator to encode MaxCut graph
    nqubits : int, number of qubits (vertices) to encode in MaxCut problem
    qubits1 : List/tensor of ints, qubit indices
    qubits2 : List/tensor of ints, qubit indices
    weights : List/tensor of real floats, graph weights
    device : optional, device on which the Hamiltonian tensors are placed

    Returns
    -------
    Hamiltonian encoding the specified classical Ising model graph.
    """
    H, inds_min, inds_max = [], minimum(qubits1, qubits2), maximum(qubits1, qubits2)
    for i in range(0, len(qubits1)):
        H = tt_matrix_sum(H, _two_qubit_interaction(op, op, inds_min[i], inds_max[i], weights[i], nqubits), device=device)
    return [*H]
50360d50123c44719a8875a59d02e913cd95f2ad
3,646,353
def map_entry(entry, fields):
    """ Retrieve the entry from the given fields and replace it if it should have a
    different name within the database.

    :param entry: is one of the following:
        - invalid field name
        - command (e.g. $eq)
        - valid field with no attribute name
        - valid field with an attribute name to use instead
    """
    field = fields.get(entry)
    if isinstance(field, ListField) and isinstance(field.inner, EmbeddedField):
        fields = field.inner.embedded_document_cls.schema.fields
    elif isinstance(field, EmbeddedField):
        fields = field.embedded_document_cls.schema.fields
    return getattr(field, 'attribute', None) or entry, fields
05d392f3ab387381b0f114db05834d642350d817
3,646,354
def seqlogo_hairpin(N, target='none', ligand='theo', pam=None): """ Randomize the stem linking the aptamer to the sgRNA and the parts of the sgRNA that were the most conserved after being randomized in previous screens. Specifically, I identified these conserved positions by looking at a sequence logo of the relatively few (≈20) clones I sequenced from my previous screen. The theory behind this strategy is that positions with a clear preference for some nucleotides over others are more likely to be important for sensor activity. In this case, the previous screen was ``mhf`` and the sequence logo showed that all three positions in the ruler that were randomized had a preference for a non-native nucleotide. (In fact, the preference was for C in all three cases.) The ``mhf`` screen kept two positions in the ruler fixed, but since these positions were flanked by important-seeming positions on both sides, I decided to randomize the whole ruler this time. I am also randomizing the stem (often called a communication module) that connects the aptamer to the sgRNA. The ``N`` parameter dictates how long this stem should be, in base pairs, not counting any base pairs that are implicitly included with the aptamer. (Note: I realized that including one base pair on the end of the aptamer domain makes simulating the whole construct easier, so all the new aptamers include one base pair like that. But the theophylline aptamer predates this realization, so it doesn't.) Parameters ---------- N: int The length of the communication module, in base pairs. Recommended values are 3 and 4. """ # Make sure the length of the communication module makes sense. if N < 0: raise ValueError('qh: N must be >= 0') # Base this library on the optimized sgRNA described by Dang et al. sgrna = on(pam=pam, target=target) # Randomize the entire ruler. sgrna['ruler'].seq = 'GU' + 'N' * (len(sgrna['ruler']) - 2) # Randomize the communication module. sgrna['hairpin/5'].seq = N * 'N' sgrna['hairpin/3'].seq = N * 'N' # Insert the aptamer above the communication module. sgrna['hairpin/o'].attachment_sites = 0,4 sgrna.attach(aptamer(ligand), 'hairpin/o', 0, 'hairpin/o', 4) return sgrna
a6be46325d80a23e5afa820b042fa4c878370e45
3,646,355
from typing import Dict from typing import Any def azure_firewall_ip_group_list_command(client: AzureFirewallClient, args: Dict[str, Any]) -> CommandResults: """ List IP groups in resource group or subscription. Args: client (AzureFirewallClient): Azure Firewall API client. args (dict): Command arguments from XSOAR. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """ resource = args.get('resource') limit = arg_to_number(args.get('limit') or '50') page = arg_to_number(args.get('page') or '1') validate_pagination_arguments(limit, page) readable_message = get_pagination_readable_message(header='IP Group List:', limit=limit, page=page) start_offset = (page - 1) * limit end_offset = start_offset + limit complete_requests = False total_response = {'value': []} response = client.azure_firewall_ip_group_list_request(resource=resource) while not complete_requests: total_response['value'].extend(response.get('value')) if len(total_response['value']) >= end_offset or not response.get('nextLink'): complete_requests = True else: response = client.azure_firewall_ip_group_list_request(resource=resource, next_link=response.get('nextLink')) return generate_ip_group_command_output(total_response.get('value')[start_offset: end_offset], readable_header=readable_message)
c52108af9903f952adf316b11098f726d7280153
3,646,356