column    type            range
content   stringlengths   35 .. 762k
sha1      stringlengths   40 .. 40
id        int64           0 .. 3.66M
def auto_read(filename):
    """Automatically determine the format of filename and open accordingly"""
    # XXX: this won't work correctly on pipes;
    # it would be better to use file magic
    f = open(filename, 'r')
    firstchar = f.read(1)
    f.close()
    if firstchar == '#':
        return gnucap_read(filename)
    else:
        return spice_read(filename)
0485626e6305aa43ece6b6cf36a924f7526af26c
3,644,356
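# Editor's note: a minimal usage sketch for the auto_read record above (not
# part of the dataset). gnucap_read/spice_read are hypothetical stubs added
# only to show the dispatch on the first character of the file.
def gnucap_read(path):
    return 'gnucap'

def spice_read(path):
    return 'spice'

with open('example.dat', 'w') as fh:  # hypothetical example file
    fh.write('# gnucap-style header\n1 2 3\n')
assert auto_read('example.dat') == 'gnucap'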
import time


def WaitForOperation(client, messages, operation_name,
                     operation_description=None, project=None, timeout=180):
    """Wait for an operation to complete.

    Polls the operation requested approximately every second, showing a
    progress indicator. Returns when the operation has completed.

    Args:
      client: The API client to use.
      messages: The API message to use.
      operation_name: The name of the operation to wait on, as returned by
          operations.list.
      operation_description: A short description of the operation to wait on,
          such as 'create' or 'delete'. Will be displayed to the user.
      project: The name of the project that this operation belongs to.
      timeout: Number of seconds to wait for. Defaults to 3 minutes.

    Returns:
      The operation when it is done.

    Raises:
      HttpException: An http error response was received while executing the
          api request. Will be raised if the operation cannot be found.
      OperationError: The operation finished with error(s).
      Error: The operation exceeded the timeout without completing.
    """
    tick_increment = 1  # every second(s)
    ticks = 0
    message = ('Waiting for {0}[{1}]'.format(
        operation_description + ' ' if operation_description else '',
        operation_name))
    request = messages.DeploymentmanagerOperationsGetRequest(
        project=project, operation=operation_name)
    with progress_tracker.ProgressTracker(message, autotick=False) as ticker:
        while ticks < timeout:
            operation = client.operations.Get(request)
            # Operation status is one of PENDING, RUNNING, DONE
            if operation.status == 'DONE':
                if operation.error:
                    raise exceptions.OperationError(
                        'Error in Operation [{0}]: {1}'.format(
                            operation_name,
                            dm_util.RenderMessageAsYaml(operation.error)))
                else:  # Operation succeeded
                    return operation
            ticks += tick_increment
            ticker.Tick()
            time.sleep(tick_increment)
    # Timeout exceeded
    raise exceptions.Error(
        'Wait for Operation [{0}] exceeded timeout [{1}].'.format(
            operation_name, str(timeout)))
e63b3951dd98762d28050ebff753f78e88cd0231
3,644,357
def get_unstaged_files(gitobj):
    """
    ref: http://gitpython.readthedocs.io/en/stable/tutorial.html#obtaining-diff-information
    """
    diff = []
    diff.extend(gitobj.index.diff(gitobj.head.commit))
    diff.extend(gitobj.index.diff(None))
    return {"changed": diff, "untracked": gitobj.untracked_files}
623a2706bb0d2c428df0f44fe10a473e7d740938
3,644,358
from typing import Optional
from typing import Union
from typing import Tuple


def conv2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="CROSS_CORRELATION",
    compute_mode="DEFAULT",
) -> Tensor:
    """
    2D convolution operation.

    Refer to :class:`~.Conv2d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of
        its spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels
        are divided, so as to perform a ``grouped convolution``. When
        ``groups`` is not 1, ``in_channels`` and ``out_channels`` must be
        divisible by ``groups``, and the shape of weight should be
        `(groups, out_channel // groups, in_channels // groups, height, width)`.
    :type conv_mode: string or :class:`Convolution.Mode`
    :param conv_mode: supports "CROSS_CORRELATION".
        Default: "CROSS_CORRELATION"
    :type compute_mode: string or :class:`Convolution.ComputeMode`
    :param compute_mode: when set to "DEFAULT", no special requirements will
        be placed on the precision of intermediate results. When set to
        "FLOAT32", "Float32" would be used for accumulator and intermediate
        result, but only effective when input and output are of Float16 dtype.
    :return: output tensor.
    """
    assert conv_mode == "CROSS_CORRELATION" or conv_mode.name == "CROSS_CORRELATION"
    assert compute_mode == "DEFAULT" or compute_mode.name == "DEFAULT"

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    Sparse = builtin.Convolution.Sparse
    sparse_type = "DENSE" if groups == 1 else "GROUP"
    op = builtin.Convolution(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        strategy=get_conv_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_type,
    )
    inp, weight = utils.convert_inputs(inp, weight)
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    return output
fff9e2430c21757e3a5a4e1146ead63ad2fb5918
3,644,359
# ``encodings`` is assumed to be an iterable of candidate encoding names;
# the tuple below is a hypothetical stand-in for the project's own list
# (iterating the stdlib ``encodings`` module itself would fail).
encodings = ('utf_8', 'latin_1', 'utf_16')

from os.path import dirname, join, realpath
from re import compile


def find_tex_directives(texfile, ignore_root_loops=False):
    """Build a dictionary of %!TEX directives.

    The main ones we are concerned with are:

        root
            Specifies a root file to run tex on for this subsidiary
        TS-program
            Tells us which latex program to run
        TS-options
            Options to pass to TS-program
        encoding
            The text encoding of the tex file

    Arguments:

        texfile
            The initial tex file which should be searched for tex directives.
            If this file contains a “root” directive, then the file specified
            in this directive will be searched next.

        ignore_root_loops
            Specifies if this function exits with an error status if the tex
            root directives contain a loop.

    Returns: ``{str: str}``

    Examples:

        >>> chdir('Tests/TeX')
        >>> directives = find_tex_directives('input/packages_input1.tex')
        >>> print(directives['root'])  # doctest:+ELLIPSIS
        /.../Tests/TeX/packages.tex
        >>> print(directives['TS-program'])
        xelatex
        >>> find_tex_directives('makeindex.tex')
        {}
        >>> chdir('../..')
    """
    if not texfile:
        return {}
    root_chain = [texfile]
    directive_regex = compile(r'%\s*!T[Ee]X\s+([\w-]+)\s*=\s*(.+)')
    directives = {}
    while True:
        for encoding in encodings:
            try:
                lines = [line for (line_number, line)
                         in enumerate(open(texfile, encoding=encoding))
                         if line_number < 20]
                break
            except UnicodeDecodeError:
                continue
        new_directives = {directive.group(1): directive.group(2).rstrip()
                          for directive
                          in [directive_regex.match(line) for line in lines]
                          if directive}
        directives.update(new_directives)
        if 'root' in new_directives:
            root = directives['root']
            new_tex_file = (root if root.startswith('/')
                            else realpath(join(dirname(texfile), root)))
            directives['root'] = new_tex_file
        else:
            break
        if new_tex_file in root_chain:
            if ignore_root_loops:
                break
            print('''<div id="commandOutput"><div id="preText">
                     <p class="error">There is a loop in your %!TEX root
                     directives.</p></div></div>''')
            exit(EXIT_LOOP_IN_TEX_ROOT)
        else:
            texfile = new_tex_file
            root_chain.append(texfile)
    return directives
df639f11f1609ee5c8a6bca0add8c154a42c481a
3,644,360
def projects():
    """
    Handles the GET & POST requests to '/projects'.
    GET: request to render page
    POST: request to edit project with sent data
    :return: render projects page / Json containing authorisation error /
        manage(data) function call
    """
    if request.method == "GET":
        return render_template('projects.html')
    else:
        if not current_user.is_authenticated or (
                current_user.role != "admin" and current_user.role != "employee"):
            return jsonify(
                {'success': False,
                 "message": "You are not authorized to edit the selected projects"}
            ), 400, {'ContentType': 'application/json'}
        data = request.json
        for project in data["projects"]:
            if current_user.role != "admin" and not employee_authorized_for_project(
                    current_user.name, project):
                return jsonify(
                    {'success': False,
                     "message": "You are not authorized to edit the selected projects"}
                ), 400, {'ContentType': 'application/json'}
        return manage(data)
7a8a1d9c4d50623ad557d9dcaf419c5a3e83f521
3,644,361
def evolve_fqe_givens_sector(wfn: Wavefunction, u: np.ndarray,
                             sector='alpha') -> Wavefunction:
    """Evolve a wavefunction by u generated from a 1-body Hamiltonian.

    Args:
        wfn: FQE Wavefunction on n-orbitals
        u: (n x n) unitary matrix.
        sector: Optional either 'alpha' or 'beta' indicating which sector
            to rotate

    Returns:
        New evolved wfn object.
    """
    if sector == 'alpha':
        sigma = 0
    elif sector == 'beta':
        sigma = 1
    else:
        raise ValueError("Bad sector variable. Either (alpha) or (beta)")

    if not np.isclose(u.shape[0], wfn.norb()):
        raise ValueError(
            "unitary is not specified for the correct number of orbitals")

    rotations, diagonal = givens_decomposition_square(u.copy())
    # Iterate through each layer and time evolve by the appropriate
    # fermion operators
    for layer in rotations:
        for givens in layer:
            i, j, theta, phi = givens
            if not np.isclose(phi, 0):
                op = of.FermionOperator(
                    ((2 * j + sigma, 1), (2 * j + sigma, 0)), coefficient=-phi)
                wfn = wfn.time_evolve(1.0, op)
            if not np.isclose(theta, 0):
                op = of.FermionOperator(
                    ((2 * i + sigma, 1), (2 * j + sigma, 0)),
                    coefficient=-1j * theta) + \
                    of.FermionOperator(
                        ((2 * j + sigma, 1), (2 * i + sigma, 0)),
                        coefficient=1j * theta)
                wfn = wfn.time_evolve(1.0, op)

    # evolve the last diagonal phases
    for idx, final_phase in enumerate(diagonal):
        if not np.isclose(final_phase, 1.0):
            op = of.FermionOperator(
                ((2 * idx + sigma, 1), (2 * idx + sigma, 0)),
                -np.angle(final_phase))
            wfn = wfn.time_evolve(1.0, op)

    return wfn
7f8334d64a1965424c5a1faf166bbf8741c0e1ae
3,644,362
from typing import Iterable


def epoch_folding_search(times, frequencies, nbin=128, segment_size=5000,
                         expocorr=False, gti=None, weights=1, fdots=0):
    """Performs epoch folding at trial frequencies in photon data.

    If no exposure correction is needed and numba is installed, it uses a
    fast algorithm to perform the folding. Otherwise, it runs a *much* slower
    algorithm, which however yields a more precise result.

    The search can be done in segments and the results averaged. Use
    segment_size to control this.

    Parameters
    ----------
    times : array-like
        the event arrival times
    frequencies : array-like
        the trial values for the frequencies

    Other Parameters
    ----------------
    nbin : int
        the number of bins of the folded profiles
    segment_size : float
        the length of the segments to be averaged in the periodogram
    fdots : array-like
        trial values of the first frequency derivative (optional)
    expocorr : bool
        correct for the exposure (Use it if the period is comparable to the
        length of the good time intervals). If True, GTIs have to be
        specified via the ``gti`` keyword
    gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        Good time intervals
    weights : array-like
        weight for each time. This might be, for example, the number of
        counts if the times array contains the time bins of a light curve

    Returns
    -------
    (fgrid, stats) or (fgrid, fdgrid, stats), as follows:

    fgrid : array-like
        frequency grid of the epoch folding periodogram
    fdgrid : array-like
        frequency derivative grid. Only returned if fdots is an array.
    stats : array-like
        the epoch folding statistics corresponding to each frequency bin.
    """
    if expocorr or not HAS_NUMBA or isinstance(weights, Iterable):
        if expocorr and gti is None:
            raise ValueError('To calculate exposure correction, you need to'
                             ' specify the GTIs')

        def stat_fun(t, f, fd=0, **kwargs):
            return profile_stat(fold_events(t, f, fd, **kwargs)[1])

        return _folding_search(stat_fun, times, frequencies,
                               segment_size=segment_size,
                               use_times=True, expocorr=expocorr,
                               weights=weights, gti=gti,
                               nbin=nbin, fdots=fdots)

    return _folding_search(lambda x: profile_stat(_profile_fast(x, nbin=nbin)),
                           times, frequencies, segment_size=segment_size,
                           fdots=fdots)
7eaa1d038a883babcf55f239acf519f5c059b0b2
3,644,364
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors


def boxbin(x, y, xedge, yedge, c=None, figsize=(5, 5), cmap='viridis',
           mincnt=10, vmin=None, vmax=None, edgecolor=None, powernorm=False,
           ax=None, normed=False, method='mean', quantile=None, alpha=1.0,
           cbar=True, unconditional=False, master_count=np.array([])):
    """
    This function will grid data for you and provide the counts if no
    variable c is given, or the median if a variable c is given. In the
    future I will add functionality to do the median, and possibly quantiles.

    x: 1-D array
    y: 1-D array
    xedge: 1-D array for xbins
    yedge: 1-D array for ybins
    c: 1-D array, same len as x and y

    returns
        axis handle
        cbar handle
        C matrix (counts or median values in bin)
    """
    midpoints = np.empty(xedge.shape[0] - 1)
    for i in np.arange(1, xedge.shape[0]):
        midpoints[i - 1] = xedge[i - 1] + (np.abs(xedge[i] - xedge[i - 1])) / 2.

    # note on digitize: bin 0 is outside to the left of the bins,
    # bin -1 is outside to the right
    ind1 = np.digitize(x, bins=xedge)  # inds of x in each bin
    ind2 = np.digitize(y, bins=yedge)  # inds of y in each bin

    # drop points outside range
    outsideleft = np.where(ind1 != 0)
    ind1 = ind1[outsideleft]
    ind2 = ind2[outsideleft]
    if c is not None:
        c = c[outsideleft]
    outsideright = np.where(ind1 != len(xedge))
    ind1 = ind1[outsideright]
    ind2 = ind2[outsideright]
    if c is not None:
        c = c[outsideright]
    outsideleft = np.where(ind2 != 0)
    ind1 = ind1[outsideleft]
    ind2 = ind2[outsideleft]
    if c is not None:
        c = c[outsideleft]
    outsideright = np.where(ind2 != len(yedge))
    ind1 = ind1[outsideright]
    ind2 = ind2[outsideright]
    if c is not None:
        c = c[outsideright]

    if c is None:
        c = np.zeros(len(ind1))
        df = pd.DataFrame({'x': ind1 - 1, 'y': ind2 - 1, 'c': c})
        df2 = df.groupby(["x", "y"]).count()
        df = df2.where(df2.values >= mincnt).dropna()
        C = np.ones([xedge.shape[0] - 1, yedge.shape[0] - 1]) * -9999
        for i, ii in enumerate(df.index.values):
            C[ii[0], ii[1]] = df.c.values[i]
        C = np.ma.masked_where(C == -9999, C)

        if normed:
            n_samples = np.ma.sum(C)
            C = C / n_samples
            C = C * 100
            print('n_samples= {}'.format(n_samples))

        if ax is None:
            fig = plt.figure(figsize=(5, 5))
            ax = plt.gca()

        if powernorm:
            pm = ax.pcolormesh(xedge, yedge, C.transpose(), cmap=cmap,
                               edgecolor=edgecolor,
                               norm=colors.PowerNorm(gamma=0.5),
                               vmin=vmin, vmax=vmax, alpha=alpha)
            if cbar:
                cbar = plt.colorbar(pm, ax=ax)
            else:
                cbar = pm
        else:
            pm = ax.pcolormesh(xedge, yedge, C.transpose(), cmap=cmap,
                               vmin=vmin, vmax=vmax, edgecolor=edgecolor,
                               alpha=alpha)
            if cbar:
                cbar = plt.colorbar(pm, ax=ax)
            else:
                cbar = pm
        return ax, cbar, C

    elif unconditional:
        df = pd.DataFrame({'x': ind1 - 1, 'y': ind2 - 1, 'c': c})
        if method == 'mean':
            df2 = df.groupby(["x", "y"])['c'].sum()
        df3 = df.groupby(["x", "y"]).count()
        df2 = df2.to_frame()
        df2.insert(1, 'Count', df3.values)
        df = df2.where(df2.Count >= mincnt).dropna()
        C = np.ones([xedge.shape[0] - 1, yedge.shape[0] - 1])
        for i, ii in enumerate(df.index.values):
            C[ii[0], ii[1]] = df.c.values[i]
        C = C / master_count.values

        if ax is None:
            fig = plt.figure(figsize=(5, 5))
            ax = plt.gca()

        if powernorm:
            pm = ax.pcolor(xedge, yedge, C.transpose(), cmap=cmap,
                           vmin=vmin, vmax=vmax,
                           norm=colors.PowerNorm(gamma=0.5), alpha=alpha)
            if cbar:
                cbar = plt.colorbar(pm, ax=ax)
        else:
            pm = ax.pcolor(xedge, yedge, C.transpose(), cmap=cmap,
                           vmin=vmin, vmax=vmax, alpha=alpha)
            if cbar:
                cbar = plt.colorbar(pm, ax=ax)

    else:
        df = pd.DataFrame({'x': ind1 - 1, 'y': ind2 - 1, 'c': c})
        if method == 'mean':
            df2 = df.groupby(["x", "y"])['c'].mean()
        elif method == 'std':
            df2 = df.groupby(["x", "y"])['c'].std()
        elif method == 'median':
            df2 = df.groupby(["x", "y"])['c'].median()
        elif method == 'quantile':
            if quantile is None:
                print('No quantile given, defaulting to median')
                quantile = 0.5
            df2 = df.groupby(["x", "y"])['c'].apply(percentile(quantile * 100))
        df3 = df.groupby(["x", "y"]).count()
        df2 = df2.to_frame()
        df2.insert(1, 'Count', df3.values)
        df = df2.where(df2.Count >= mincnt).dropna()
        C = np.ones([xedge.shape[0] - 1, yedge.shape[0] - 1]) * -9999
        for i, ii in enumerate(df.index.values):
            C[ii[0], ii[1]] = df.c.values[i]
        C = np.ma.masked_where(C == -9999, C)

        if ax is None:
            fig = plt.figure(figsize=(5, 5))
            ax = plt.gca()

        if powernorm:
            pm = ax.pcolor(xedge, yedge, C.transpose(), cmap=cmap,
                           vmin=vmin, vmax=vmax,
                           norm=colors.PowerNorm(gamma=0.5), alpha=alpha)
            if cbar:
                cbar = plt.colorbar(pm, ax=ax)
            else:
                cbar = pm
        else:
            pm = ax.pcolor(xedge, yedge, C.transpose(), cmap=cmap,
                           vmin=vmin, vmax=vmax, alpha=alpha)
            if cbar:
                cbar = plt.colorbar(pm, ax=ax)
            else:
                cbar = pm

    return ax, cbar, C
b80a9fdf25d16ecd5e73addae325d1a2348ef900
3,644,367
def _get_elastic_document(
    tasks: list[dict],
    symprec: float,
    fitting_method: str,
) -> ElasticDocument:
    """
    Turn a list of deformation tasks into an elastic document.

    Parameters
    ----------
    tasks : list of dict
        A list of deformation tasks.
    symprec : float
        Symmetry precision for deriving symmetry equivalent deformations.
        If ``symprec=None``, then no symmetry operations will be applied.
    fitting_method : str
        The method used to fit the elastic tensor. See pymatgen for more
        details on the methods themselves. The options are:

        - "finite_difference" (note this is required if fitting a 3rd order
          tensor)
        - "independent"
        - "pseudoinverse"

    Returns
    -------
    ElasticDocument
        An elastic document.
    """
    structure = get(tasks[0], "output.transformations.history.0.input_structure")

    stresses = []
    deformations = []
    uuids = []
    job_dirs = []
    for doc in tasks:
        deformation = get(doc, "output.transformations.history.0.deformation")
        stress = get(doc, "output.output.stress")
        deformations.append(Deformation(deformation))
        stresses.append(Stress(stress))
        uuids.append(doc["uuid"])
        job_dirs.append(doc["output"]["dir_name"])

    return ElasticDocument.from_stresses(
        structure,
        stresses,
        deformations,
        uuids,
        job_dirs,
        fitting_method=fitting_method,
        symprec=symprec,
    )
f01ea537fbd73c6a2da529a4da15e358033ed2a9
3,644,368
from typing import Union
from pathlib import Path
from typing import Counter


def first(filename: Union[str, Path]) -> int:
    """
    Sort the input, prepend with 0 and append with 3 + the max.

    Return: (# of successive differences == 1) * (# of successive differences == 3)
    """
    with open(filename, "rt") as infile:
        jolts = sorted(int(line.strip()) for line in infile)
    jolts = [0] + jolts + [jolts[-1] + 3]
    diffs = Counter(right - left for left, right in zip(jolts[:-1], jolts[1:]))
    return diffs[3] * diffs[1]
18ffe3e97d7256ea61fcf6e436d36bb360d0a285
3,644,369
import numpy as np


def charge_is_valid(charge_profile, capacity=6, max_charge_rate=2.5,
                    time_unit=0.5):
    """
    Function determining if a charge profile is valid (and fully charges
    the battery).
    """
    daily_totals = charge_profile.groupby(charge_profile.index.date).sum()
    daily_maxima = charge_profile.groupby(charge_profile.index.date).max()
    # np.all() returns a numpy bool, so use ``not`` rather than an
    # identity comparison with the builtin False
    if not np.all(np.isclose(capacity / time_unit, daily_totals)):
        return False
    elif not np.all(daily_maxima <= max_charge_rate):
        return False
    else:
        return True
489717fc834b9492ab3add1ddfaa5c55e2f4d8e9
3,644,370
def create_slice_obj(start, end, step):
    """Create slice object"""
    return slice(start, end, step)
88a5c5a9e0d3b714b4316d8744fcdd1a34f347a7
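# Editor's note: a short usage sketch for create_slice_obj above (not part
# of the dataset); the same slice object can be reused across sequences.
s = create_slice_obj(1, 10, 2)
assert list(range(12))[s] == [1, 3, 5, 7, 9]
assert 'abcdefghij'[s] == 'bdfhj'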
3,644,371
import numpy as np


def binary_cross_entropy_error(y, t):
    """Binary cross-entropy error."""
    # y.shape: (N, C, H, W)
    delta = 1e-7  # avoid log(0)
    return -np.mean(t * np.log(y + delta) + (1 - t) * np.log(1 - y + delta))
a0090d4d5e6695ab0c4d988b8f0efbdfcd44984c
3,644,372
def get_abc():
    """
    :return: all the abcs as a list
    """
    return list(abcs.find({}, {'_id': False}))
aa2c39bdc8ec1f31f43ea02701b5022f612b286b
3,644,373
from typing import Optional
from typing import List


def matching_system_code(concept: CodeableConcept, system: str) -> Optional[str]:
    """
    Returns a code from a specified *system* contained within a given
    *concept*. If no code is found for the given *system*, returns None.

    Raises an :class:`AssertionError` if more than one encoding for a
    *system* is found within the given FHIR *concept*.
    """
    system_codes: List[CodeableConcept] = []

    if not concept:
        return None

    system_codes += list(filter(lambda c: matching_system(c, system), concept.coding))

    assert len(system_codes) <= 1, "Multiple encodings found in FHIR concept " + \
        f"«{concept.concept_type}» for system «{system}»."

    if not system_codes:
        return None

    return system_codes[0].code
cd9005ebcfd9ab15e5d27f7f30b8b4ea4b4db7b0
3,644,374
import gym


def get_pybullet(env_name):
    """ Returns pybullet dataset and environment.

    The dataset is provided through d4rl-pybullet. See more details including
    available datasets from its GitHub page.

    .. code-block:: python

        from d3rlpy.datasets import get_pybullet

        dataset, env = get_pybullet('hopper-bullet-mixed-v0')

    References:
        * https://github.com/takuseno/d4rl-pybullet

    Args:
        env_name (str): environment id of d4rl-pybullet dataset.

    Returns:
        tuple: tuple of :class:`d3rlpy.dataset.MDPDataset` and gym environment.
    """
    try:
        env = gym.make(env_name)
        dataset = MDPDataset(**env.get_dataset())
        return dataset, env
    except ImportError:
        raise ImportError(
            'd4rl-pybullet is not installed.\n'
            'pip install git+https://github.com/takuseno/d4rl-pybullet')
79d3e408698ea7454398490a98fd7d653625cbd4
3,644,375
from typing import Any
from typing import List


def reverse(d: List) -> Any:
    """Reverses the provided list in place, but also RETURNS it"""
    # list.reverse() only exists on mutable sequences, so the argument is
    # annotated as a List rather than a generic Iterable
    d.reverse()
    return d
5eff6b170afe6424f113ec4b15f985ee8d306e83
3,644,376
def scalar(typename):
    """
    Returns scalar type from ROS message data type, like "uint8" from
    "uint8[100]". Returns type unchanged if already a scalar.
    """
    return typename[:typename.index("[")] if "[" in typename else typename
729fb68bced11e190b3d32d03bbadd921f191bee
3,644,377
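# Editor's note: usage sketch for scalar() above (not part of the dataset).
assert scalar("uint8[100]") == "uint8"
assert scalar("float64") == "float64"  # scalars pass through unchanged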
def subject(mock_messenger: AsyncMock) -> initiator.FirmwareUpdateInitiator:
    """The test subject."""
    return initiator.FirmwareUpdateInitiator(mock_messenger)
cfab4395d5ffc3de6a33d3eeb2d7ce373f719b06
3,644,378
import numpy as np
from numpy import sqrt


def onetangent(ri, rf, ta_transb, k=0, use_alts=True, center='earth'):
    """Orbit transfer with one tangential burn and one nontangential burn.
    Must be circular or coaxially elliptic. Currently only for circular
    orbits.

    :param ri: altitude (or radius) of initial circular orbit (km)
    :param rf: altitude (or radius) of final circular orbit (km)
    :param ta_transb: true anomaly of transfer orbit at point b (rad)
    :param k: number of revolutions through perigee
    :param use_alts: Boolean for switching between ri,rf=altitude (True)
        and ri,rf=radius to center
    :param center: planetary center of focus; default=earth
    :return vtransa: transfer velocity required at point a (km/s)
    :return vtransb: transfer velocity required at point b (km/s)
    :return fpa_transb: flight path angle for the nontangential transfer (rad)
    :return TOF: time of flight (s)

    in work
    """
    # update constants and parameters
    mu = get_mu(center=center)
    if use_alts and center.lower() == 'earth':
        ri, rf = [r + r_earth for r in [ri, rf]]

    # check location of tangent burn
    Rinv = ri / rf
    if Rinv > 1:
        # tangent burn is at apogee
        e_trans = (Rinv - 1) / (np.cos(ta_transb) + Rinv)
        a_trans = ri / (1 + e_trans)
        E0 = np.pi
    else:
        # tangent burn is at perigee
        e_trans = (Rinv - 1) / (np.cos(ta_transb) - Rinv)
        a_trans = ri / (1 - e_trans)
        E0 = 0.

    # compute initial, final, and transfer velocities at a, b
    vi = sqrt(mu / ri)
    vf = sqrt(mu / rf)
    vtransa = sqrt(2 * mu / ri - mu / a_trans)
    vtransb = sqrt(2 * mu / rf - mu / a_trans)

    # flight path angle of nontangential transfer
    fpa_transb = np.arctan(e_trans * np.sin(ta_transb)
                           / (1 + e_trans * np.cos(ta_transb)))

    # get delta-v's at each point and its total
    dva = vtransa - vi
    dvb = sqrt(vtransb**2 + vf**2 - 2 * vtransb * vf * np.cos(fpa_transb))
    dv_otb = np.abs(dva) + np.abs(dvb)

    # computing eccentric anomaly
    E = np.arccos((e_trans + np.cos(ta_transb))
                  / (1 + e_trans * np.cos(ta_transb)))

    # computing time of flight
    TOF = sqrt(a_trans**3 / mu) * \
        (2 * k * np.pi + (E - e_trans * np.sin(E)) - (E0 - e_trans * np.sin(E0)))

    return vtransa, vtransb, fpa_transb, TOF
12eae51bc3833df94b063597e2444df851a7960c
3,644,379
import numpy as np
import matplotlib.pyplot as plt


def display_matplot(images, title=None, gray=None):
    """[Standard display function used throughout testing to see the output
    of the various transforms. Displays multiple plots at once for
    comparison, always in a square format.]

    Arguments:
        images {[Array]} -- [the array that contains all of the images you
            wish to display]

    Keyword Arguments:
        title {[String]} -- [A title to display on the plot to keep track of
            which image is being shown.] (default: {None})
        gray {[Opencv const]} -- [The colour space you wish to display the
            image in.] (default: {None})

    Returns:
        [matplotlib plot] -- [The created plot]
    """
    # subplot() requires an integer grid size
    n = int(np.ceil(np.sqrt(len(images))))
    index = 1
    plt.set_cmap('gray')
    plt.title(title)
    for image in images:
        plt.subplot(n, n, index)
        plt.imshow(image)
        plt.xticks([]), plt.yticks([])
        index += 1
    plt.waitforbuttonpress(0)
    plt.close()
    return plt
635b6c977d1d71a9d7479e064978c3695115d757
3,644,380
def get_version():
    """
    Returns the PMML version.

    Returns
    -------
    version : String
        The version of the pmml.
    """
    version = '4.4'
    return version
162f6e0ffb4c4741fafe2aa16d6fceed16bae99a
3,644,381
import torch
import torch.nn.functional as F


def customsoftmax(inp, multihotmask):
    """
    Custom Softmax
    """
    soft = F.softmax(inp, dim=1)
    # Mask the softmax, sum the masked probabilities over the class axis
    # (merging the border classes), then take the elementwise max of the
    # summed and unsummed versions
    return torch.log(
        torch.max(soft, (multihotmask * (soft * multihotmask).sum(1, keepdim=True)))
    )
a0db0926aa9ed804bfab54cfaaf7c4a031809aae
3,644,382
from functools import partial
from types import SimpleNamespace

import numpy as np
import scipy.optimize
import scipy.special
import sklearn.metrics as skmetrics


def mandoline(
    D_src: np.ndarray,
    D_tgt: np.ndarray,
    edge_list: np.ndarray,
    sigma: float = None,
):
    """
    Mandoline solver.

    Args:
        D_src: (n_src x d) matrix of (example, slices) for the source distribution.
        D_tgt: (n_tgt x d) matrix of (example, slices) for the target distribution.
        edge_list: list of edge correlations between slices that should be modeled.
        sigma: optional parameter that activates RBF kernel-based KLIEP
            with scale `sigma`.

    Returns: SimpleNamespace that contains
        opt: result of scipy.optimize
        Phi_D_src: source potential matrix used in Mandoline
        Phi_D_tgt: target potential matrix used in Mandoline
        n_src: number of source samples
        n_tgt: number of target samples
        edge_list: the `edge_list` parameter passed as input
    """
    # Copy and binarize the input matrices to -1/1
    D_src, D_tgt = np.copy(D_src), np.copy(D_tgt)
    if np.min(D_src) == 0:
        D_src[D_src == 0] = -1
        D_tgt[D_tgt == 0] = -1

    # Edge list encoding dependencies between gs
    if edge_list is not None:
        edge_list = np.array(edge_list)

    # Create the potential matrices
    Phi_D_tgt, Phi_D_src = Phi(D_tgt, edge_list), Phi(D_src, edge_list)

    # Number of examples
    n_src, n_tgt = Phi_D_src.shape[0], Phi_D_tgt.shape[0]

    def f(x):
        obj = Phi_D_tgt.dot(x).sum() - n_tgt * scipy.special.logsumexp(
            Phi_D_src.dot(x))
        return -obj

    # Set the kernel
    kernel = partial(skmetrics.rbf_kernel, gamma=sigma)

    def llkliep_f(x):
        obj = kernel(
            Phi_D_tgt, x[:, np.newaxis]
        ).sum() - n_tgt * scipy.special.logsumexp(kernel(Phi_D_src, x[:, np.newaxis]))
        return -obj

    # Solve
    if not sigma:
        opt = scipy.optimize.minimize(
            f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
        )
    else:
        opt = scipy.optimize.minimize(
            llkliep_f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
        )

    return SimpleNamespace(
        opt=opt,
        Phi_D_src=Phi_D_src,
        Phi_D_tgt=Phi_D_tgt,
        n_src=n_src,
        n_tgt=n_tgt,
        edge_list=edge_list,
    )
5b7817e1ff252724f61572ac6f103ec963c257dd
3,644,383
def service_transformer_info_get(service):  # noqa: E501
    """Retrieve transformer info

    Provides information about the transformer. # noqa: E501

    :param service: Inxight_Drugs service

    :rtype: TransformerInfo
    """
    return transformer[service].info
a12d4d19efe7bf8a3c185213790366339aad8c9f
3,644,384
def create_app(config=None, app_name=None):
    """Create a Flask app."""
    if app_name is None:
        app_name = DefaultConfig.PROJECT

    app = Flask(app_name,
                instance_path=INSTANCE_FOLDER_PATH,
                instance_relative_config=True)
    configure_app(app, config)
    configure_hook(app)
    configure_blueprints(app)
    configure_extensions(app)
    configure_logging(app)
    configure_template_filters(app)
    configure_error_handlers(app)
    configure_cli(app)

    return app
95f5191fc64f5656156fc69bd9565b0a754e014c
3,644,385
import numpy as np
import pandas as pd


def read_translocations_tumors(gene_A, gene_B,
                               tumor_barcodes,
                               data_location=default_location):
    """
    For a given set of tumor barcodes and a pair of genes, finds with a
    lookup the translocations of this gene pair in the TCGA dataset.

    INPUT:
    - gene_A (str): first gene of translocation
    - gene_B (str): second gene of translocation
    - tumor_barcodes (list): list of tumor barcodes
    - data_location (str, optional): where data is located

    OUTPUT:
    - indicator list with 1 on tumor barcodes with a translocation
    """
    translocated_genes = [gene_A, gene_B]

    # Read data and filter
    df = pd.read_csv(data_location, sep='\t')
    df = df[np.isin(df.Gene_A, translocated_genes)]
    df = df[np.isin(df.Gene_B, translocated_genes)]

    # Common barcode length
    barcode_length = np.unique([len(e) for e in df['sampleId'].values])
    if barcode_length.shape[0] > 1:
        raise ValueError('File does not have a consistent barcode length')
    barcode_length = barcode_length[0]
    print(barcode_length)

    # Map translocated tumors
    translocated_barcodes = df['sampleId'].values.astype(str)
    translocated_barcodes = [e.replace('.', '-') for e in translocated_barcodes]
    print(translocated_barcodes)
    translocated_tumors = np.where(
        np.isin([e[5:5 + barcode_length] for e in tumor_barcodes],
                translocated_barcodes))
    print(translocated_barcodes)

    is_translocated = np.zeros(len(tumor_barcodes))
    is_translocated[translocated_tumors] = 1

    return is_translocated
7ae16c2898272676ab1ab2bccde4ca3958fbb4a0
3,644,386
import numbers


def simplify_if_constant(symbol, keep_domains=False):
    """
    Utility function to simplify an expression tree if it evaluates to a
    constant scalar, vector or matrix.
    """
    if keep_domains is True:
        domain = symbol.domain
        auxiliary_domains = symbol.auxiliary_domains
    else:
        domain = None
        auxiliary_domains = None
    if symbol.is_constant():
        result = symbol.evaluate_ignoring_errors()
        if result is not None:
            if (
                isinstance(result, numbers.Number)
                or (isinstance(result, np.ndarray) and result.ndim == 0)
                or isinstance(result, np.bool_)
            ):
                return pybamm.Scalar(result)
            elif isinstance(result, np.ndarray) or issparse(result):
                if result.ndim == 1 or result.shape[1] == 1:
                    return pybamm.Vector(
                        result, domain=domain, auxiliary_domains=auxiliary_domains
                    )
                else:
                    # Turn matrix of zeros into sparse matrix
                    if isinstance(result, np.ndarray) and np.all(result == 0):
                        result = csr_matrix(result)
                    return pybamm.Matrix(
                        result, domain=domain, auxiliary_domains=auxiliary_domains
                    )
    return symbol
c696ec4a7c81251448c97afe92c32f275284f71e
3,644,387
def is_visible(window):
    """
    Check whether the window is visible or not.
    """
    return lib.is_visible(window)
23f146625dcaa3f473ddec1684b9f222496bae48
3,644,388
from typing import Optional

import copy


def fix_mol(
    mol: Chem.rdchem.Mol,
    n_iter: int = 1,
    remove_singleton: bool = False,
    largest_only: bool = False,
    inplace: bool = False,
) -> Optional[Chem.rdchem.Mol]:
    """Fix error in molecule using a greedy approach.

    Args:
        mol: input molecule to fix
        n_iter: Number of valence fix iterations to apply
        remove_singleton: Whether `adjust_singleton` should be applied
        largest_only: Whether only the largest fragment should be kept
        inplace: Whether to return a copy of the mol or perform in place operation

    Returns:
        Fixed molecule.
    """
    if not inplace:
        mol = copy.copy(mol)

    m = sanitize_mol(mol) or mol  # fall back to mol when the fixer fails
    if m is not None:
        m = remove_dummies(m)
        for _ in range(n_iter):
            m = fix_valence(m)
        if remove_singleton:
            m = adjust_singleton(m)
        if largest_only:
            # m = max(Chem.rdmolops.GetMolFrags(m, asMols=True, sanitizeFrags=False),
            #         key=lambda m: m.GetNumAtoms())
            m = rdMolStandardize.FragmentParent(m, skipStandardize=True)
    return m
6d745d9ec308e577b73243850e6f46c93c5ff24f
3,644,389
from typing import Iterable
from typing import List


def permutation_circuit(swaps: Iterable[List[Swap[_V]]]) -> PermutationCircuit:
    """Produce a circuit description of a list of swaps.

    With a given permutation and permuter you can compute the swaps using the
    permuter function then feed it into this circuit function to obtain a
    circuit description.

    Args:
        swaps: An iterable of swaps to perform.

    Returns:
        A MappingCircuit with the circuit and a mapping of node to qubit in
        the circuit.
    """
    # Construct a circuit with each unique node id becoming a quantum
    # register of size 1.
    dag = DAGCircuit()
    swap_list = list(swaps)

    # Set of unique nodes used in the swaps.
    nodes = {
        swap_node
        for swap_step in swap_list
        for swap_nodes in swap_step
        for swap_node in swap_nodes
    }

    node_qargs = {node: QuantumRegister(1) for node in nodes}
    for qubit in node_qargs.values():
        dag.add_qreg(qubit)

    inputmap = {node: q[0] for node, q in node_qargs.items()}

    # Apply swaps to the circuit.
    for swap_step in swap_list:
        for swap0, swap1 in swap_step:
            dag.apply_operation_back(SwapGate(), [inputmap[swap0], inputmap[swap1]])

    return PermutationCircuit(dag, inputmap)
c45d5fea5974c3bfb7e695ddc366d4948203e1d1
3,644,390
def Add(a, b):
    """
    Adds two numbers, throws on overflow.
    """
    c = a + b
    Require(c >= a)
    return c
cf6c04ed1f5f2f6782e6b91aea739c7e54c1dfe6
3,644,391
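# Editor's note: Require() is not defined in the record above; a minimal,
# assumption-labeled stand-in is an assertion helper like the following.
def Require(condition):
    # Hypothetical helper: abort when a precondition is violated.
    if not condition:
        raise Exception("Require failed: overflow")

assert Add(2, 3) == 5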
from typing import Dict
from typing import Type


def remap_shared_output_descriptions(output_descriptions: Dict[str, str],
                                     outputs: Dict[str, Type]) -> Dict[str, str]:
    """
    Deals with mixed styles of return value descriptions used in docstrings.
    If the docstring contains a single entry of return value description,
    that output description is shared by each output variable.

    :param output_descriptions: Dict of output variable names mapping to output description
    :param outputs: Interface outputs
    :return: Dict of output variable names mapping to shared output description
    """
    # no need to remap
    if len(output_descriptions) != 1:
        return output_descriptions
    _, shared_description = next(iter(output_descriptions.items()))
    return {k: shared_description for k, _ in outputs.items()}
06d589016a747230f88aa3507bd751fd30095222
3,644,392
import numpy as np


def dist_matrix():
    """Fix dist_matrix for the next two tests."""
    dist_matrix = np.array([[0, 4, 5, 6],
                            [4, 0, 7, 8],
                            [5, 7, 0, 9],
                            [6, 8, 9, 0]])
    return dist_matrix
5bc3e5da5a6c76fd91858a697e2db183c74eb03f
3,644,393
def fitarg_rename(fitarg, ren):
    """Rename variable names in ``fitarg`` with a rename function.

    ::

        #simple renaming
        fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
            lambda pname: 'y' if pname=='x' else pname)
        #{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1}

        #prefixing
        fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
            lambda pname: 'prefix_'+pname)
        #{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
    """
    tmp = ren
    if isinstance(ren, str):
        ren = lambda x: tmp + '_' + x
    ret = {}
    prefix = ['limit_', 'fix_', 'error_', ]
    for k, v in fitarg.items():
        vn = k
        pf = ''
        for p in prefix:
            if k.startswith(p):
                vn = k[len(p):]
                pf = p
        newvn = pf + ren(vn)
        ret[newvn] = v
    return ret
151233d0f18eaea564afbc6d600d576407504b35
3,644,394
def flatten(tensor):
    """Flattens a given tensor such that the channel axis is first.
    The shapes are transformed as follows:
       (N, C, D, H, W) -> (C, N * D * H * W)
    """
    C = tensor.size(1)
    # new axis order
    axis_order = (1, 0) + tuple(range(2, tensor.dim()))
    # Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
    transposed = tensor.permute(axis_order)
    # Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
    return transposed.contiguous().view(C, -1)
e7586da0abfdea639b3bb760fe31fca1cc849d1d
3,644,395
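# Editor's note: usage sketch for flatten() above (not part of the dataset),
# assuming PyTorch tensors.
import torch

t = torch.zeros(2, 3, 4, 5, 6)          # (N, C, D, H, W)
assert flatten(t).shape == (3, 240)      # (C, N * D * H * W)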
from collections import defaultdict


def node_to_edge(edges, directed=True):
    """
    From a list of edges, record per node the incoming and outgoing edges.
    """
    outgoing = defaultdict(set)
    incoming = defaultdict(set) if directed else outgoing
    nodes = set()
    for i, edge in enumerate(edges):
        a, b = edge[:2]
        outgoing[a].add(i)
        incoming[b].add(i)
        nodes.add(a)
        nodes.add(b)
    nodes = sorted(nodes)
    if directed:
        return outgoing, incoming, nodes
    return outgoing, nodes
7e3f7bf93bbf19355b3329762a3531504bbc53a4
3,644,397
from typing import Counter

import pandas as pd


def grouping_cumulative(df, col_index, col_column):
    """Compute a histogram statistic over the selected column and in
    addition group these histograms.

    :param DataFrame df: rich table
    :param str col_index: column which will be used as index in the resulting table
    :param str col_column: column used for computing a histogram
    :return DF:

    >>> np.random.seed(0)
    >>> df = pd.DataFrame()
    >>> df['result'] = np.random.randint(0, 2, 50)
    >>> df['user'] = np.array(list('abc'))[np.random.randint(0, 3, 50)]
    >>> grouping_cumulative(df, 'user', 'result').astype(int)  # doctest: +NORMALIZE_WHITESPACE
           0   1
    user
    a     10  12
    b      4   9
    c      6   9
    """
    df_counts = pd.DataFrame()
    for idx, dfg in df[[col_index, col_column]].groupby(col_index):
        counts = dict(Counter(dfg[col_column]))
        counts[col_index] = idx
        df_counts = df_counts.append(counts, ignore_index=True)
    df_counts.set_index(col_index, inplace=True)
    return df_counts
f99b1c2cc4e7bc4e3a3af02414ba82bd057607e9
3,644,398
def _get_matching_stream(smap, itag):
    """ Return the url and signature for a stream matching itag in smap. """
    for x in smap:
        if x['itag'] == itag and x.get("s"):
            return x['url'], x['s']
    raise IOError("Sorry this video is not currently supported by pafy")
dc83fd3207d5ab4e1c85eb719f5f7d023131565e
3,644,399
import functools


def Debounce(threshold=100):
    """
    Simple debouncing decorator for apigpio callbacks.

    Example:

    `@Debounce()
     def my_cb(gpio, level, tick)
        print('gpio cb: {} {} {}'.format(gpio, level, tick))
    `

    The threshold can be given to the decorator as an argument (in millisec).
    This decorator can be used both on functions and object's methods.

    Warning: as the debouncer uses the tick from pigpio, which wraps around
    after approximately 1 hour 12 minutes, you could theoretically miss one
    call if your callback is called twice with that interval.
    """
    threshold *= 1000
    max_tick = 0xFFFFFFFF

    class _decorated(object):

        def __init__(self, pigpio_cb):
            self._fn = pigpio_cb
            self.last = 0
            self.is_method = False

        def __call__(self, *args, **kwargs):
            if self.is_method:
                tick = args[3]
            else:
                tick = args[2]
            if self.last > tick:
                delay = max_tick - self.last + tick
            else:
                delay = tick - self.last
            if delay > threshold:
                self._fn(*args, **kwargs)
                print('call passed by debouncer {} {} {}'
                      .format(tick, self.last, threshold))
                self.last = tick
            else:
                print('call filtered out by debouncer {} {} {}'
                      .format(tick, self.last, threshold))

        def __get__(self, instance, type=None):
            # __get__ is called when an instance of `_decorated` is used as
            # a class attribute, which is the case when decorating a method
            # in a class
            self.is_method = True
            return functools.partial(self, instance)

    return _decorated
156b128ffaa579ead371bff3c4b4f20a2a05646b
3,644,400
def int_to_uuid(number):
    """Convert a positive integer to a UUID: a string of characters from
    `symbols` that is at least 3 letters long."""
    # ``base`` and ``encode_symbols`` are module-level constants defined
    # elsewhere in the source project
    assert isinstance(number, int) and number >= 0
    if number == 0:
        return '000'
    symbol_string = ''
    while number > 0:
        remainder = number % base
        number //= base
        symbol_string = encode_symbols[remainder] + symbol_string
    return symbol_string.rjust(3, '0')
49ce7bfeb4e11c90b2589b8b4003c3135ba78f53
3,644,403
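# Editor's note: a hypothetical definition of the globals used by
# int_to_uuid above (not part of the dataset), plus a quick sanity check.
import string

encode_symbols = string.digits + string.ascii_uppercase  # assumed alphabet
base = len(encode_symbols)

assert int_to_uuid(0) == '000'
assert int_to_uuid(base) == '010'  # one "carry" into the second digit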
def count_digit(n, digit):
    """Return how many times digit appears in n.

    >>> count_digit(55055, 5)
    4
    """
    if n == 0:
        return 0
    else:
        if n % 10 == digit:
            return count_digit(n // 10, digit) + 1
        else:
            return count_digit(n // 10, digit)
29cf3db8cca85e14b3b537f96246803d8176441d
3,644,405
import numpy as np
from numba import prange  # used by the parallel branch


def cal_chisquare(data, f, pepoch, bin_profile, F1, F2, F3, F4, parallel=False):
    """
    Calculate the chi-square distribution for a frequency search at the
    pepoch time.
    """
    chi_square = np.zeros(len(f), dtype=np.float64)

    t0 = pepoch

    if parallel:
        for i in prange(len(f)):
            phi = (data - t0) * f[i] + \
                (1.0 / 2.0) * ((data - t0)**2) * F1 + \
                (1.0 / 6.0) * ((data - t0)**3) * F2 + \
                (1.0 / 24.0) * ((data - t0)**4) * F3 + \
                (1.0 / 120.0) * ((data - t0)**5) * F4
            phi = phi - np.floor(phi)
            # counts = numba_histogram(phi, bin_profile)[0]
            # NOTE: The histogram bins should be given as bin edges, instead
            # of the bin number; for pulses with a narrow peak the chi-square
            # would otherwise be calculated incorrectly.
            counts = np.histogram(phi, np.linspace(0, 1, bin_profile + 1)[:-1])[0]
            expectation = np.mean(counts)
            chi_square[i] = np.sum((counts - expectation)**2 / expectation)
    else:
        for i in range(len(f)):
            phi = (data - t0) * f[i] + \
                (1.0 / 2.0) * ((data - t0)**2) * F1 + \
                (1.0 / 6.0) * ((data - t0)**3) * F2 + \
                (1.0 / 24.0) * ((data - t0)**4) * F3 + \
                (1.0 / 120.0) * ((data - t0)**5) * F4
            phi = phi - np.floor(phi)
            # counts = numba_histogram(phi, bin_profile)[0]
            # NOTE: same bin-edge caveat as above
            counts = np.histogram(phi, np.linspace(0, 1, bin_profile + 1)[:-1])[0]
            expectation = np.mean(counts)
            chi_square[i] = np.sum((counts - expectation)**2 / expectation)

    return chi_square
13dfc33cd975758a3f6c64ff2da4f91409cfdae4
3,644,406
import numpy as np


def rng() -> np.random.Generator:
    """Random number generator."""
    return np.random.default_rng(42)
2c2f88eed71c9429edc25a06890266f5b7e8fc22
3,644,408
def add_values_in_dict(sample_dict, key, list_of_values):
    """Append multiple values to a key in the given dictionary"""
    if key not in sample_dict:
        sample_dict[key] = list()
    sample_dict[key].extend(list_of_values)
    temp_list = sample_dict[key]
    temp_list = list(set(temp_list))  # remove duplicates
    sample_dict[key] = temp_list
    return sample_dict
8c30b50256fd16eb1b9eefae5cc6ab5be58fe85f
3,644,409
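# Editor's note: usage sketch for add_values_in_dict above (not part of the
# dataset); the set-based de-duplication does not preserve insertion order.
d = {}
d = add_values_in_dict(d, 'fruits', ['apple', 'pear'])
d = add_values_in_dict(d, 'fruits', ['apple', 'plum'])
assert sorted(d['fruits']) == ['apple', 'pear', 'plum']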
def parse_length(line, p) -> int:
    """ parse length specifier for note or rest """
    n_len = voices[ivc].meter.dlen  # start with default length
    try:
        if n_len <= 0:
            raise SyntaxError(f"got len<=0 from current voice {line[p]}")
        if line[p].isdigit():  # multiply note length
            fac = parse_uint()
            if not fac:
                fac = 1
            n_len *= fac
        if line[p] == '/':  # divide note length
            while line[p] == '/':
                p += 1
                if line[p].isdigit():
                    fac = parse_uint()
                else:
                    fac = 2
                if n_len % fac:
                    raise SyntaxError(f"Bad length divisor {line[p - 1]}")
                n_len = n_len / fac
    except SyntaxError as se:
        print(f"{se} Cannot proceed without default length. Emergency stop.")
        exit(1)
    return n_len
cee6c83eecbea455a53c3d4ac9a778d7351b66e0
3,644,410
import numpy as np


def get_dual_shapes_and_types(bounds_elided):
    """Get shapes and types of dual vars."""
    dual_shapes = []
    dual_types = []
    layer_sizes = utils.layer_sizes_from_bounds(bounds_elided)
    for it in range(len(layer_sizes)):
        m = layer_sizes[it]
        m = [m] if isinstance(m, int) else list(m)
        if it < len(layer_sizes) - 1:
            n = layer_sizes[it + 1]
            n = [n] if isinstance(n, int) else list(n)
            shapes = {
                'lam': [1] + n,
                'nu': [1] + m,
                'muminus': [1] + n,
                'muplus': [1] + n,
                'nu_quad': [],
                'muminus2': [],
            }
            types = {
                'lam': utils.DualVarTypes.EQUALITY,
                'nu': utils.DualVarTypes.INEQUALITY,
                'muminus': utils.DualVarTypes.INEQUALITY,
                'muplus': utils.DualVarTypes.INEQUALITY,
                'nu_quad': utils.DualVarTypes.INEQUALITY,
                'muminus2': utils.DualVarTypes.INEQUALITY,
            }
            dual_shapes.append(DualVar(**{
                k: np.array(s) for k, s in shapes.items()}))
            dual_types.append(DualVar(**types))
        else:
            shapes = {'nu': [1] + m, 'nu_quad': []}
            types = {'nu': utils.DualVarTypes.INEQUALITY,
                     'nu_quad': utils.DualVarTypes.INEQUALITY}
            dual_shapes.append(DualVarFin(**{
                k: np.array(s) for k, s in shapes.items()}))
            dual_types.append(DualVarFin(**types))

    # Add kappa
    N = sum([np.prod(np.array(i)) for i in layer_sizes])
    dual_shapes.append(np.array([1, N + 1]))
    dual_types.append(utils.DualVarTypes.INEQUALITY)
    return dual_shapes, dual_types
297a305d8ef71d614eae21c3fc5c52ef08b271a3
3,644,411
def linear_search(alist, key):
    """Return index of key in alist. Return -1 if key not present."""
    for i in range(len(alist)):
        if alist[i] == key:
            return i
    return -1
ab4c0517f9103a43509b0ba511c75fe03ea6e043
3,644,412
import numpy as np


def overlap_integral(xi, yi, zi, nxi, nyi, nzi, beta_i,
                     xj, yj, zj, nxj, nyj, nzj, beta_j):
    """
    overlap <i|j> between unnormalized Cartesian GTOs by numerical
    integration on a multicenter Becke grid

    Parameters
    ----------
    xi,yi,zi     : floats
        Cartesian positions of center i
    nxi,nyi,nzi  : int >= 0
        powers of Cartesian primitive GTO i
    beta_i       : float > 0
        exponent of radial part of orbital i
    xj,yj,zj     : floats
        Cartesian positions of center j
    nxj,nyj,nzj  : int >= 0
        powers of Cartesian primitive GTO j
    beta_j       : float > 0
        exponent of radial part of orbital j
    """
    # unnormalized bra and ket Gaussian type orbitals
    def CGTOi(x, y, z):
        dx, dy, dz = x - xi, y - yi, z - zi
        dr2 = dx * dx + dy * dy + dz * dz
        return pow(dx, nxi) * pow(dy, nyi) * pow(dz, nzi) * np.exp(-beta_i * dr2)

    def CGTOj(x, y, z):
        dx, dy, dz = x - xj, y - yj, z - zj
        dr2 = dx * dx + dy * dy + dz * dz
        return pow(dx, nxj) * pow(dy, nyj) * pow(dz, nzj) * np.exp(-beta_j * dr2)

    def integrand(x, y, z):
        return CGTOi(x, y, z) * CGTOj(x, y, z)

    # place a spherical grid on each center: ri, rj
    atoms = [(1, (xi, yi, zi)),
             (1, (xj, yj, zj))]

    # do the integral numerically
    olap = becke.integral(atoms, integrand)

    return olap
500da2037d5e9f880f239788156cc717163b6b0c
3,644,413
def save_project_id(config: Config, project_id: int):
    """Save the project ID in the project data"""
    data_dir = config.project.data_dir
    filename = data_dir / DEFAULT_PROJECTID_FILENAME
    with open(filename, "w") as f:
        return f.write(str(project_id))
9769067d222c8430764a2abd4def67a9ce45e49a
3,644,414
async def session_start():
    """
    session_start: Creates a new database session for external functions
    and returns it.

    Keep in mind that this is only for external functions that require
    multiple transactions, such as adding songs.

    :return: A new database session
    """
    return session_maker()
cb84d30a8a89bdf58c63114fa84558d7567396bd
3,644,415
from pm4py.objects.bpmn.obj import BPMN
from pm4py.objects.bpmn.util.sorting import get_sorted_nodes_edges
from typing import Optional
from typing import Dict
from typing import Any
import tempfile

import graphviz
from graphviz import Digraph


def apply(bpmn_graph: BPMN,
          parameters: Optional[Dict[Any, Any]] = None) -> graphviz.Digraph:
    """
    Visualize a BPMN graph

    Parameters
    -------------
    bpmn_graph
        BPMN graph
    parameters
        Parameters of the visualization, including:
         - Parameters.FORMAT: the format of the visualization
         - Parameters.RANKDIR: the direction of the representation (default: LR)

    Returns
    ------------
    gviz
        Graphviz representation
    """
    if parameters is None:
        parameters = {}

    image_format = exec_utils.get_param_value(Parameters.FORMAT, parameters, "png")
    rankdir = exec_utils.get_param_value(Parameters.RANKDIR, parameters, "LR")
    font_size = exec_utils.get_param_value(Parameters.FONT_SIZE, parameters, 12)
    font_size = str(font_size)
    bgcolor = exec_utils.get_param_value(Parameters.BGCOLOR, parameters, "transparent")

    filename = tempfile.NamedTemporaryFile(suffix='.gv')
    viz = Digraph("", filename=filename.name, engine='dot',
                  graph_attr={'bgcolor': bgcolor})
    viz.graph_attr['rankdir'] = rankdir

    nodes, edges = get_sorted_nodes_edges(bpmn_graph)

    for n in nodes:
        n_id = str(id(n))
        if isinstance(n, BPMN.Task):
            viz.node(n_id, shape="box", label=n.get_name(), fontsize=font_size)
        elif isinstance(n, BPMN.StartEvent):
            viz.node(n_id, label="", shape="circle", style="filled",
                     fillcolor="green", fontsize=font_size)
        elif isinstance(n, BPMN.EndEvent):
            viz.node(n_id, label="", shape="circle", style="filled",
                     fillcolor="orange", fontsize=font_size)
        elif isinstance(n, BPMN.ParallelGateway):
            viz.node(n_id, label="+", shape="diamond", fontsize=font_size)
        elif isinstance(n, BPMN.ExclusiveGateway):
            viz.node(n_id, label="X", shape="diamond", fontsize=font_size)
        elif isinstance(n, BPMN.InclusiveGateway):
            viz.node(n_id, label="O", shape="diamond", fontsize=font_size)
        elif isinstance(n, BPMN.OtherEvent):
            viz.node(n_id, label="", shape="circle", fontsize=font_size)

    for e in edges:
        n_id_1 = str(id(e[0]))
        n_id_2 = str(id(e[1]))
        viz.edge(n_id_1, n_id_2)

    viz.attr(overlap='false')
    viz.format = image_format

    return viz
ca1e25dfe758712125327717e01cba36788b8b38
3,644,416
def _predict_exp(data, paulistring):
    """Compute expectation values of paulistring given bitstring data."""
    expectation_value = 0
    for a in data:
        val = 1
        for i, pauli in enumerate(paulistring):
            idx = a[i]
            if pauli == "I":
                continue
            elif pauli == "X":
                ls = [1, 1, -1, -1]
            elif pauli == "Y":
                ls = [-1, 1, 1, -1]
            elif pauli == "Z":
                ls = [1, -1, 1, -1]
            val *= ls[idx]
        expectation_value += val / len(data)
    return expectation_value
32737920e750780655ba85ae9e57d6e3cd0f194c
3,644,417
import math


def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18, taxable_ubi,
        II_em, II_em_ps, II_prt, II_no_em_nu18,
        c00100, pre_c04600, c04600):
    """
    Computes Adjusted Gross Income (AGI), c00100, and
    compute personal exemption amount, c04600.
    """
    # calculate AGI assuming no foreign earned income exclusion
    c00100 = ymod1 + c02500 - c02900 + taxable_ubi
    # calculate personal exemption amount
    if II_no_em_nu18:  # repeal of personal exemptions for deps. under 18
        pre_c04600 = max(0, XTOT - nu18) * II_em
    else:
        pre_c04600 = XTOT * II_em
    if DSI:
        pre_c04600 = 0.
    # phase-out personal exemption amount
    if exact == 1:  # exact calculation as on tax forms
        line5 = max(0., c00100 - II_em_ps[MARS - 1])
        line6 = math.ceil(line5 / (2500. / sep))
        line7 = II_prt * line6
        c04600 = max(0., pre_c04600 * (1. - line7))
    else:  # smoothed calculation needed for sensible mtr calculation
        dispc_numer = II_prt * (c00100 - II_em_ps[MARS - 1])
        dispc_denom = 2500. / sep
        dispc = min(1., max(0., dispc_numer / dispc_denom))
        c04600 = pre_c04600 * (1. - dispc)
    return (c00100, pre_c04600, c04600)
aed1c311bc6b46b46bfea3e9756cd73933c37ca9
3,644,418
from nltk.tokenize import word_tokenize


def tokenize(headline_list):
    """
    Takes a list of headlines as input and returns a list of lists of tokens.
    """
    tokenized = []
    for headline in headline_list:
        tokens = word_tokenize(headline)
        tokenized.append(tokens)
    return tokenized
e5cf957a72d6d08d95787bf0f2222e525727c54a
3,644,419
def create_en_sentiment_component(nlp: Language, name: str, force: bool) -> Language:
    """
    Allows the English sentiment to be added to a spaCy pipe using
    nlp.add_pipe("asent_en_v1").
    """
    LEXICON.update(E_LEXICON)
    return Asent(
        nlp,
        name=name,
        lexicon=LEXICON,
        intensifiers=INTENSIFIERS,
        negations=NEGATIONS,
        contrastive_conjugations=CONTRASTIVE_CONJ,
        lowercase=True,
        lemmatize=False,
        force=force,
    )
f66ab4d86da2d42adb7c8da95cfdff9517dcc34f
3,644,420
import json


def lambda_handler(event, context):
    """
    Return the shop list.

    Parameters
    ----------
    event : dict
        Parameters passed in from the front end.
    context : dict
        Lambda context contents.

    Returns
    -------
    shop_list : dict
        Shop list information.
    """
    # log the request parameters
    logger.info(event)

    try:
        shop_list = get_shop_list()
    except Exception as e:
        logger.exception('Occur Exception: %s', e)
        return utils.create_error_response('Error')

    body = json.dumps(
        shop_list, default=utils.decimal_to_int, ensure_ascii=False)
    return utils.create_success_response(body)
48eebf18d34e50a98d00bd589b9d8a0712b9f985
3,644,421
import warnings

import numpy as np


def mask_land_ocean(data, land_mask, ocean=False):
    """Mask land or ocean values using a land binary mask.

    Parameters
    ----------
    data: xarray.DataArray
        This input array can only have one of 2, 3 or 4 dimensions. All
        spatial dimensions should coincide with those of the land binary
        mask.
    land_mask: xarray.DataArray
        This array must have the same spatial extent as the input data,
        though it can have different times or levels. It can be binary or
        not, because internally it will make sure of it. Sometimes these
        masks actually contain a range of values from 0 to 1.
    ocean: bool, optional
        Whether to mask land or ocean values. If False (default), ocean
        points are masked; if True, land points are masked.

    Returns
    -------
    xarray.DataArray
        same as input data but with masked values in either land or ocean.
    """  # noqa
    # remove numpy warning regarding nan_policy
    msg = 'Mean of empty slice'
    warnings.filterwarnings('ignore', message=msg)

    # get number of dimensions of both data arrays
    ndim_ds = len(data.dims)
    ndim_lm = len(land_mask.dims)

    # get dimensions of dataset
    if ndim_ds == 2:
        ntim = None
        nlat, mlon = data.shape
    elif ndim_ds == 3:
        ntim, nlat, mlon = data.shape
    elif ndim_ds == 4:
        ntim, nlev, nlat, mlon = data.shape
    else:
        msg = 'only 2, 3 or 4 dimensions allowed for data set'
        raise TypeError(msg)

    # get dimensions of land mask
    if ndim_lm == 2:
        lntim = None
        lnlat, lmlon = land_mask.shape
    elif ndim_lm == 3:
        lntim, lnlat, lmlon = land_mask.shape
    else:
        msg = 'only 2 or 3 dimensions allowed for land mask'
        raise TypeError(msg)

    # make sure dims agree
    if nlat != lnlat or mlon != lmlon:
        msg = 'spatial coordinates do not agree'
        raise ValueError(msg)

    # get a single land mask if many
    if lntim is not None:
        land_mask = land_mask[0]

    # convert mask to binary if not already
    land_mask = binary_mask(land_mask)

    # create mask: 1 (land) = True, 0 (ocean) = False
    mask = land_mask.values == 1

    # tile mask to number of times
    if ndim_ds == 2:
        tmask = mask
    elif ndim_ds == 3:
        tmask = np.tile(mask, (ntim, 1, 1))
    else:
        tmask = np.tile(mask, (ntim, 1, 1, 1))

    # create masked array
    values = np.array(data.values)
    if ocean is True:
        maskval = np.ma.masked_array(values, tmask)
    else:
        maskval = np.ma.masked_array(values, tmask == False)  # noqa E712

    # replace values
    newdata = data.copy()
    newdata.values = maskval

    return newdata
97d32c2720db12e47738a58d2152d7052af095ed
3,644,422
def create_project_type(project_type_params):
    """
    :param project_type_params: The parameters for creating a ProjectType
        instance -- the dict should include the 'type' key, which specifies
        the ProjectType subclass name, and key/value pairs matching
        constructor arguments for that ProjectType subclass.
    :type project_type_params: dict
    :return: The project_type instance
    :rtype: project_type.project_type.ProjectType
    """
    project_type_params = project_type_params.copy()
    project_type_name = project_type_params.pop('type')
    project_type_class = get_project_type_subclass(project_type_name)
    if project_type_class:
        # create object using project_type_params as constructor args
        return project_type_class(**project_type_params)

    # Not yet implemented other project types
    return None
446961674985a2f5a64417d6f1f9bc6b39f7fbe4
3,644,423
from os import getenv


def env_str(env_name: str, default: str) -> str:
    """
    Get the environment variable's value, converted into a string.
    """
    return getenv(env_name, default)
529adfcfe770a39a5997d92792fdd2c857b32a41
3,644,424
def extract_sigma_var_names(filename_nam):
    """
    Parses a 'sigma.nam' file containing the variable names, and outputs a
    list of these names. Some vector components contain a semicolon in their
    name; if so, break the name at the semicolon and keep just the 1st part.
    """
    var_names = []
    with open(filename_nam, 'r') as file:
        for line in file:
            var_name = line.strip()
            # check for semicolon
            if ';' in var_name:
                var_name = var_name.split(';')[0]
            var_names.append(var_name)
    return var_names
930e855d47c4303cac28e9973982392489fb577d
3,644,426
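# Editor's note: usage sketch for extract_sigma_var_names above (not part of
# the dataset), using a hypothetical 'sigma.nam' file written on the fly.
with open('sigma.nam', 'w') as fh:
    fh.write('pressure\nvelocity;x-component\n')
assert extract_sigma_var_names('sigma.nam') == ['pressure', 'velocity']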
import numpy as np


def vectors_to_arrays(vectors):
    """
    Convert 1d vectors (lists, arrays or pandas.Series) to C contiguous 1d
    arrays.

    Arrays must be in C contiguous order for us to pass their memory
    pointers to GMT. If any are not, convert them to C order (which requires
    copying the memory). This usually happens when vectors are columns of a
    2d array or have been sliced.

    If a vector is a list or pandas.Series, get the underlying numpy array.

    Parameters
    ----------
    vectors : list of lists, 1d arrays or pandas.Series
        The vectors that must be converted.

    Returns
    -------
    arrays : list of 1d arrays
        The converted numpy arrays

    Examples
    --------

    >>> import numpy as np
    >>> import pandas as pd
    >>> data = np.array([[1, 2], [3, 4], [5, 6]])
    >>> vectors = [data[:, 0], data[:, 1], pd.Series(data=[-1, -2, -3])]
    >>> all(i.flags.c_contiguous for i in vectors)
    False
    >>> all(isinstance(i, np.ndarray) for i in vectors)
    False
    >>> arrays = vectors_to_arrays(vectors)
    >>> all(i.flags.c_contiguous for i in arrays)
    True
    >>> all(isinstance(i, np.ndarray) for i in arrays)
    True
    >>> data = [[1, 2], (3, 4), range(5, 7)]
    >>> all(isinstance(i, np.ndarray) for i in vectors_to_arrays(data))
    True
    """
    arrays = [as_c_contiguous(np.asarray(i)) for i in vectors]
    return arrays
c9a3878f2d1099ffd985525931f05df1b8631c46
3,644,427
import random
import string


def random_name_gen(size=6):
    """Generate a random python attribute name."""
    return ''.join(
        [random.choice(string.ascii_uppercase)] +
        [random.choice(string.ascii_uppercase + string.digits)
         for i in range(size - 1)]
    ) if size > 0 else ''
67ade3cde47fffc126cbdb11f01ffda2672d021c
3,644,428
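# Editor's note: usage sketch for random_name_gen above (not part of the
# dataset); the first character is always an uppercase letter, so the
# result is a valid Python attribute name.
random.seed(0)
name = random_name_gen(6)
assert len(name) == 6 and name[0] in string.ascii_uppercase
assert random_name_gen(0) == ''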
import numpy as np


def is_identity(u, tol=1e-15):
    """Test if a matrix is identity.

    Args:
        u: np.ndarray
            Matrix to be checked.
        tol: float
            Threshold below which two matrix elements are considered equal.
    """
    # convert once so shape checks also work for plain nested lists
    u = np.asarray(u)
    dims = u.shape
    if dims[0] != dims[1]:
        raise Exception("Input matrix is not square.")
    return np.allclose(u, np.eye(dims[0]), atol=tol)
160f33651b9d79448167423542cd1e2ad0bd3110
3,644,430
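# Editor's note: usage sketch for is_identity above (not part of the dataset).
assert is_identity(np.eye(3))
assert not is_identity(np.ones((2, 2)))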
def get_metrics():
    """
    Collects various system metrics and returns them in a single dictionary.
    """
    metrics = {}
    metrics.update(get_memory_metrics())
    metrics.update(get_cpu_metrics())
    metrics.update(get_disk_metrics())
    return metrics
d4055e0eb23d9babb9882bc3c089af293c638def
3,644,431
import inspect


def create_fun(name: str, obj, options: dict):
    """
    Generate a dictionary that contains the information about a function

    **Parameters**
    > **name:** `str` -- name of the function as returned by `inspect.getmembers`
    > **obj:** `object` -- object of the function as returned by `inspect.getmembers`
    > **options:** `dict` -- extended options

    **Returns**
    > `dict` -- with keys:
    > - *name*, *obj* -- the function name and object as returned by `inspect.getmembers`
    > - *module* -- name of the module
    > - *path* -- path of the module file
    > - *doc* -- docstring of the function
    > - *source* -- source code of the function
    > - *args* -- arguments of the function as an `inspect.signature` object
    """
    ignore_prefix = options.get("ignore_prefix")
    if ignore_prefix is not None and name[:len(ignore_prefix)] == ignore_prefix:
        return None
    fun = {}
    fun["name"] = name if name else 'undefined'
    fun["obj"] = obj
    fun["module"] = inspect.getmodule(obj).__name__
    fun["path"] = inspect.getmodule(obj).__file__
    fun["doc"] = inspect.getdoc(obj) or ""
    fun["source"] = rm_docstring_from_source(inspect.getsource(obj))
    fun["args"] = inspect.signature(obj)
    return fun
f95e6fab1ed0cf6a10574b790e81933c40b924c4
3,644,433
def serial_ss(file_read, forward_rate, file_rateconstant, file_energy,
              matrix, species_list, factor, initial_y, t_final,
              third_body=None, chemkin_data=None, smiles=None, chemkin=True):
    """
    Iteratively solves the system of ODEs for different rate constants
    generated from the data file, in serial.

    Parameters
    ----------
    file_read : str
        path of the 'param_set' file where all the parameter
        combinations are listed
    forward_rate : list
        A list of forward reaction rates for all the reactions
        in the mechanism
    file_rateconstant : str
        path to the file 'complete_rateconstantlist.dat'
    file_energy : str
        path to the file 'free_energy_library.dat'
    matrix : ndarray
        stoichiometric matrix
    species_list : list
        A list of unique species in the mechanism
    factor : float
        conversion factor from given unit of energy to kJ
    initial_y : list
        A list of initial concentrations
    t_final : float
        final time in seconds
    third_body : ndarray
        matrix with third body efficiencies
    chemkin_data : ndarray
        the data from parsed chemkin reaction file
    smiles : dict
        the smiles dictionary generated from species_smiles.dat file
    chemkin : bool
        indicates if chemkin files are read as input files
        default = True

    Returns
    -------
    results : list
        A list of final concentrations of all the species at t_final
        for all the given combinations of parameters listed in the
        'param_set.txt' file
    """
    results = []
    with open(file_read, "r") as read_file:
        for pos, data in enumerate(read_file):
            result = func_solv(data, forward_rate, file_rateconstant,
                               file_energy, matrix, species_list, initial_y,
                               t_final, factor, third_body, pos,
                               chemkin_data, smiles)
            results.append(result)
    return results
f23693826c3507d9a2327b4b492f20469fca963f
3,644,434
def contains_left_button(buttons) -> bool:
    """
    Test if the buttons contains the left mouse button.

    The "buttons" should be values returned by get_click() or get_mouse()

    :param buttons: the buttons to be tested
    :return: if the buttons contains the left mouse button
    """
    return (buttons & QtCore.Qt.LeftButton) > 0
a5cde64ce1d1fa5fd1fe57988f8b60db03fc2dcf
3,644,435
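A small illustration of the bitmask test in contains_left_button; the PyQt5 binding is an assumption here (any binding exposing QtCore should behave the same way):

from PyQt5 import QtCore  # assumption: PyQt5 is the Qt binding in use

both = QtCore.Qt.LeftButton | QtCore.Qt.RightButton
print(contains_left_button(both))                   # True
print(contains_left_button(QtCore.Qt.RightButton))  # False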
def is_bst(t: BST) -> bool:
    """Returns true if t is a valid BST object, false otherwise.

    Invariant: for each node n in t, if n.left exists, then n.left <= n,
    and if n.right exists, then n.right >= n."""
    if not isinstance(t, BST):
        return False
    if t._root and t._root.parent is not None:
        return False
    return all_bst_nodes(t._root) and has_bst_property(t._root)
5180d01f8306f79b6ed4551e7d2bd4046a088ac2
3,644,438
import keras


def are_embedding_layer_positions_ok_for_testing(model):
    """
    Test data can only be generated if all embedding layers
    are positioned directly behind the input nodes.
    """
    def count_embedding_layers(model):
        layers = model.layers
        result = 0
        for layer in layers:
            if isinstance(layer, keras.layers.Embedding):
                result += 1
            layer_type = type(layer).__name__
            if layer_type in ['Model', 'Sequential']:
                result += count_embedding_layers(layer)
        return result

    def count_embedding_layers_at_input_nodes(model):
        result = 0
        for input_layer in get_model_input_layers(model):
            if input_layer._outbound_nodes and isinstance(
                    input_layer._outbound_nodes[0].outbound_layer,
                    keras.layers.Embedding):
                result += 1
        return result

    return count_embedding_layers(model) == \
        count_embedding_layers_at_input_nodes(model)
4317cdc11e0b0acf84fda0c2633400851075e124
3,644,439
from typing import Any


def all_tasks_stopped(tasks_state: Any) -> bool:
    """
    Checks if all tasks are stopped or if any are still running.

    Parameters
    ----------
    tasks_state: Any
        Task state dictionary object

    Returns
    -------
    response: bool
        True if all tasks are stopped.
    """
    for t in tasks_state["tasks"]:
        if t["lastStatus"] in ("PENDING", "RUNNING"):
            return False
    return True
98edffe71052cc114a7dda37a17b3a346ef59ef8
3,644,440
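Because all_tasks_stopped only inspects plain dictionaries, it is easy to exercise without any AWS calls; a minimal sketch assuming the function is in scope:

running = {"tasks": [{"lastStatus": "RUNNING"}, {"lastStatus": "STOPPED"}]}
stopped = {"tasks": [{"lastStatus": "STOPPED"}, {"lastStatus": "STOPPED"}]}

print(all_tasks_stopped(running))  # False: one task is still RUNNING
print(all_tasks_stopped(stopped))  # True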
import PIL.ImageEnhance


def enhance_color(image, factor):
    """Change the strength of colors in an image.

    This function has identical outputs to ``PIL.ImageEnhance.Color``.

    Added in 0.4.0.

    **Supported dtypes**:

        * ``uint8``: yes; fully tested
        * ``uint16``: no
        * ``uint32``: no
        * ``uint64``: no
        * ``int8``: no
        * ``int16``: no
        * ``int32``: no
        * ``int64``: no
        * ``float16``: no
        * ``float32``: no
        * ``float64``: no
        * ``float128``: no
        * ``bool``: no

    Parameters
    ----------
    image : ndarray
        The image to modify.

    factor : number
        Colorfulness of the output image. Values close to ``0.0`` lead
        to grayscale images, values above ``1.0`` increase the strength of
        colors. Sane values are roughly in ``[0.0, 3.0]``.

    Returns
    -------
    ndarray
        Color-modified image.

    """
    return _apply_enhance_func(image, PIL.ImageEnhance.Color, factor)
f6654fc0b4dfebecf221e4a34ec0c894e1c72d1f
3,644,441
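Since _apply_enhance_func is not shown here, a sketch of the equivalent direct Pillow call that enhance_color wraps (the grayscale effect of factor 0.0 follows from the docstring):

import numpy as np
import PIL.Image
import PIL.ImageEnhance

rgb = np.zeros((8, 8, 3), dtype=np.uint8)
rgb[..., 0] = 255  # a pure red tile
img = PIL.Image.fromarray(rgb)
gray = PIL.ImageEnhance.Color(img).enhance(0.0)  # factor 0.0 -> grayscale
print(np.asarray(gray)[0, 0])  # all three channels now (nearly) equal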
import numpy as np


def random_deceleration(most_comfortable_deceleration, lane_pos):
    """
    Return a deceleration based on the given attribute of the vehicle

    :param most_comfortable_deceleration: the given attribute of the vehicle
    :param lane_pos: y
    :return: the deceleration adopted by the human driver
    """
    if lane_pos:
        sigma = 0.3
    else:
        sigma = 0.5
    return np.random.normal(most_comfortable_deceleration, sigma)
c5e4f9ca16285c020b9b7f2376e0b43f198d5173
3,644,442
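A hedged usage sketch, assuming random_deceleration is in scope; the draw is Gaussian, so only the distribution (not any single value) is predictable:

import numpy as np

np.random.seed(42)
samples = [random_deceleration(3.0, lane_pos=1) for _ in range(10000)]
print(round(np.mean(samples), 2))  # ~3.0, with sigma 0.3 for lane_pos=1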
def dataclass_fields(dc):
    """Returns a dataclass's fields dictionary."""
    return {name: getattr(dc, name) for name in dc.__dataclass_fields__}
4b82af3bfbc02f7bbfcf1aecb6f6501ef10d86e1
3,644,443
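A self-contained check of dataclass_fields, assuming it is in scope:

from dataclasses import dataclass

@dataclass
class Point:
    x: int
    y: int

print(dataclass_fields(Point(1, 2)))  # {'x': 1, 'y': 2}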
from mabel import DictSet, Reader
from ...internals.group_by import GroupBy


def SqlReader(sql_statement: str, **kwargs):
    """
    Use basic SQL queries to filter Reader.

    Parameters:
        sql_statement: string
        kwargs: parameters to pass to the Reader

    Note:
        `select` is taken from SQL SELECT
        `dataset` is taken from SQL FROM
        `filters` is taken from SQL WHERE
    """
    # some imports here to remove cyclic imports
    sql = SqlParser(sql_statement)
    get_logger().info(repr(sql))

    actual_select = sql.select_expression
    if sql.select_expression is None:
        actual_select = "*"
    elif sql.select_expression != "*":
        actual_select = sql.select_expression + ", *"

    # reducer is currently unused
    reducer = None
    if sql.select_expression == "COUNT(*)":
        reducer = lambda x: {"*": "*"}

    # FROM clause
    # WHERE clause
    if isinstance(sql.dataset, list):
        # it's a list if it's been parsed into a SQL statement,
        # this is how subqueries are interpreted - the parser
        # doesn't extract a dataset name - it collects parts of
        # a SQL statement which it can then pass to a SqlReader
        # to get back a dataset - which we then use as the
        # dataset for the outer query.
        reader = SqlReader("".join(sql.dataset), **kwargs)
    else:
        reader = Reader(
            select=actual_select,
            dataset=sql.dataset,
            filters=sql.where_expression,
            **kwargs,
        )

    # GROUP BY clause
    if sql.group_by or any(
        [t["type"] == TOKENS.AGGREGATOR
         for t in sql.select_evaluator.tokens]  # type:ignore
    ):
        # convert the clause into something we can pass to GroupBy
        if sql.group_by:
            groups = [
                group.strip()
                for group in sql.group_by.split(",")
                if group.strip() != ""
            ]
        else:
            groups = ["*"]  # we're not really grouping

        aggregations = []
        renames = []
        for t in sql.select_evaluator.tokens:  # type:ignore
            if t["type"] == TOKENS.AGGREGATOR:
                aggregations.append((t["value"], t["parameters"][0]["value"]))
                if t["as"]:
                    t["raw"] = get_function_name(t)
                    renames.append(t)
            elif t["type"] == TOKENS.VARIABLE and t["value"] not in groups:
                raise InvalidSqlError(
                    "Invalid SQL - SELECT clause in a statement with a "
                    "GROUP BY clause must be made of aggregations or items "
                    "from the GROUP BY clause."
                )

        if aggregations:
            grouped = GroupBy(reader, groups).aggregate(aggregations)
        else:
            grouped = GroupBy(reader, groups).groups()

        # there could be 250000 groups, so we're not going to load them
        # into memory
        reader = DictSet(grouped)

    # HAVING clause
    # if we have a HAVING clause, filter the grouped data by it
    if sql.having:
        reader = reader.filter(sql.having)

    # SELECT clause
    renames = {}  # type:ignore
    for t in sql.select_evaluator.tokens:  # type:ignore
        if t["as"]:
            renames[get_function_name(t)] = t["as"]

    def _perform_renames(row):
        for k, v in [(k, v) for k, v in row.items()]:
            if k in renames:
                row[renames[k]] = row.pop(k, row.get(renames[k]))
        return row

    if renames:
        reader = DictSet(map(_perform_renames, reader))

    reader = reader.select(sql.select_evaluator.fields())  # type:ignore

    # distinct now that we have only the columns we're interested in
    if sql.distinct:
        reader = reader.distinct()

    # ORDER BY clause
    if sql.order_by:
        take = 10000  # the Query UI is currently set to 2000
        if sql.limit:
            take = int(sql.limit)
        reader = DictSet(
            reader.sort_and_take(
                column=sql.order_by, take=take,
                descending=sql.order_descending
            )
        )

    # LIMIT clause
    if sql.limit:
        reader = reader.take(sql.limit)

    return reader
0354dd8b4d8cc6913cc1887b96aba6a06613ffe5
3,644,447
def _handle_consent_confirmation(user, is_confirmed):
    """
    Return server response given user consent.

    Args:
        user (fence.models.User): authN'd user
        is_confirmed (str): confirmation param
    """
    if is_confirmed == "yes":
        # user has already given consent, continue flow
        response = server.create_authorization_response(grant_user=user)
    else:
        # user did not give consent
        response = server.create_authorization_response(grant_user=None)
    return response
c4f61ed8465616a4fad912d02e81840eb9d34604
3,644,448
import math

import cv2
import numpy as np


def local_coherence(img, window_s=WSIZ):
    """
    Calculate the coherence according to the methodology described in:

    Bazen, Asker M., and Sabih H. Gerez. "Segmentation of fingerprint
    images." ProRISC 2001 Workshop on Circuits, Systems and Signal
    Processing. Veldhoven, The Netherlands, 2001.
    """
    coherence = []
    rs = window_s
    cs = window_s
    for r in range(4, img.shape[0] - rs, rs):
        for c in range(4, img.shape[1] - cs, cs):
            window = img[r:r + rs, c:c + cs]
            if window.var() != 0:
                # Need variance because of the constraint (gxx + gyy) < 0
                gx = np.uint8(np.absolute(
                    cv2.Sobel(window, cv2.CV_64F, 1, 0, ksize=5))).flatten()
                gy = np.uint8(np.absolute(
                    cv2.Sobel(window, cv2.CV_64F, 0, 1, ksize=5))).flatten()
                gxx = sum([int(x) ** 2 for x in gx])
                gyy = sum([int(y) ** 2 for y in gy])
                gxy = sum([int(x) * int(y) for x, y in zip(gx, gy)])
                assert gxx + gyy != 0
                coherence.append(
                    math.sqrt(math.pow(gxx - gyy, 2) + 4 * math.pow(gxy, 2))
                    / (gxx + gyy))
    return coherence
d360b388d743a3ada1004be8367ed2d105f7857a
3,644,449
def storeIDToWebID(key, storeid):
    """
    Takes a key (int) and storeid (int) and produces a webid (a
    16-character str suitable for including in URLs)
    """
    i = key ^ storeid
    l = list('%0.16x' % (i,))
    for nybbleid in range(0, 8):
        a, b = _swapat(key, nybbleid)
        _swap(l, a, b)
    return ''.join(l)
38d9bffaa98c2191e818edd969d51873bb077094
3,644,450
def _jupyter_server_extension_paths():
    """
    Set up the server extension for collecting metrics
    """
    return [{"module": "jupyter_resource_usage"}]
f59c343dd8bcdb4755c725107b3c83f12978e9ef
3,644,451
import collections


def _make_ordered_node_map(
    pipeline: p_pb2.Pipeline
) -> 'collections.OrderedDict[str, p_pb2.PipelineNode]':
    """Helper function to prepare the Pipeline proto for DAG traversal.

    Args:
        pipeline: The input Pipeline proto. Since we expect this to come
            from the compiler, we assume that it is already topologically
            sorted.

    Returns:
        An OrderedDict that maps node_ids to PipelineNodes.
    """
    node_map = collections.OrderedDict()
    for pipeline_or_node in pipeline.nodes:
        node_id = pipeline_or_node.pipeline_node.node_info.id
        node_map[node_id] = pipeline_or_node.pipeline_node
    return node_map
c0f7af61adf114a3b2211d82de050bf5e1f4e681
3,644,452
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from jax.lax import while_loop


def fmin_b_bfgs(func, x0, args=(), options=None):
    """
    The BFGS algorithm from Algorithm 6.1 of Wright and Nocedal, 'Numerical
    Optimization', 1999, pg. 136-143, with bounded parameters, using the
    active set approach from:

    Byrd, R. H., Lu, P., Nocedal, J., & Zhu, C. (1995). 'A Limited Memory
    Algorithm for Bound Constrained Optimization.' SIAM Journal on
    Scientific Computing, 16(5), 1190–1208. doi:10.1137/0916069

    Notes:
        We utilise boolean arithmetic to avoid jax.cond calls which don't
        work on accelerators. A side effect is that we perform more
        gradient evaluations than scipy's BFGS.

    Args:
        func: callable
            Function of the form f(x) where x is a flat ndarray and returns
            a real scalar. The function should be composed of operations
            with vjp defined. If func is jittable then fmin_bfgs is
            jittable. If func is not jittable, then _nojit should be set
            to True.
        x0: ndarray
            initial variable
        args: tuple, optional
            Extra arguments to pass to func as func(x, *args)
        options: Optional dict of parameters
            maxiter: int
                Maximum number of evaluations
            norm: float
                Order of norm for convergence check. Default inf.
            gtol: float
                Terminates minimization when |grad|_norm < gtol
            ls_maxiter: int
                Maximum number of linesearch iterations
            bounds: 2-tuple of two vectors specifying the lower and upper
                bounds, e.g. (l, u) where l and u have the same size as x0.
                For parameters x_i without constraints the corresponding
                l_i=-jnp.inf and u_i=jnp.inf. Specifying l=None or u=None
                means no constraints on that side.

    Returns:
        BFGSResults
    """
    if options is None:
        options = dict()
    maxiter: Optional[int] = options.get('maxiter', None)
    norm: float = options.get('norm', jnp.inf)
    gtol: float = options.get('gtol', 1e-5)
    ls_maxiter: int = options.get('ls_maxiter', 10)
    bounds: Tuple[jnp.ndarray, jnp.ndarray] = tuple(
        options.get('bounds', (None, None)))

    # BFGSResults and line_search come from the surrounding module.
    state = BFGSResults(converged=False,
                        failed=False,
                        k=0,
                        nfev=0,
                        ngev=0,
                        nhev=0,
                        x_k=x0,
                        f_k=None,
                        g_k=None,
                        H_k=None,
                        status=None,
                        ls_status=jnp.array(0))

    if maxiter is None:
        maxiter = jnp.size(x0) * 200

    d = x0.shape[0]

    l = bounds[0]
    u = bounds[1]
    if l is None:
        l = -jnp.inf * jnp.ones_like(x0)
    if u is None:
        u = jnp.inf * jnp.ones_like(x0)
    l, u = jnp.where(l < u, l, u), jnp.where(l < u, u, l)

    def project(x, l, u):
        return jnp.clip(x, l, u)

    def get_active_set(x, l, u):
        return jnp.where((x == l) | (x == u))

    def func_with_args(x):
        return func(x, *args)

    # NOTE: incomplete in the original source -- defines the objective
    # along the projected-gradient path but never computes the Cauchy
    # point, and is not called below.
    def get_generalised_Cauchy_point(xk, gk, l, u):
        def func(t):
            return func_with_args(project(xk - t * gk, l, u))

    initial_H = jnp.eye(d)
    initial_H = options.get('hess_inv', initial_H)

    value_and_grad = jax.value_and_grad(func_with_args)

    f_0, g_0 = value_and_grad(x0)
    state = state._replace(f_k=f_0, g_k=g_0, H_k=initial_H,
                           nfev=state.nfev + 1, ngev=state.ngev + 1,
                           converged=jnp.linalg.norm(g_0, ord=norm) < gtol)

    def body(state):
        p_k = -(state.H_k @ state.g_k)
        line_search_results = line_search(value_and_grad, state.x_k, p_k,
                                          old_fval=state.f_k, gfk=state.g_k,
                                          maxiter=ls_maxiter)
        state = state._replace(nfev=state.nfev + line_search_results.nfev,
                               ngev=state.ngev + line_search_results.ngev,
                               failed=line_search_results.failed,
                               ls_status=line_search_results.status)
        s_k = line_search_results.a_k * p_k
        x_kp1 = state.x_k + s_k
        f_kp1 = line_search_results.f_k
        g_kp1 = line_search_results.g_k
        y_k = g_kp1 - state.g_k
        rho_k = jnp.reciprocal(y_k @ s_k)

        sy_k = s_k[:, None] * y_k[None, :]
        w = jnp.eye(d) - rho_k * sy_k
        H_kp1 = jnp.where(jnp.isfinite(rho_k),
                          jnp.linalg.multi_dot([w, state.H_k, w.T])
                          + rho_k * s_k[:, None] * s_k[None, :],
                          state.H_k)

        converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol

        state = state._replace(converged=converged,
                               k=state.k + 1,
                               x_k=x_kp1,
                               f_k=f_kp1,
                               g_k=g_kp1,
                               H_k=H_kp1)
        return state

    state = while_loop(
        lambda state: (~state.converged) & (~state.failed) & (state.k < maxiter),
        body,
        state)

    status = jnp.where(
        state.converged, jnp.array(0),  # converged
        jnp.where(state.k == maxiter, jnp.array(1),  # max iters reached
                  jnp.where(state.failed,
                            jnp.array(2) + state.ls_status,  # ls failed (+ reason)
                            jnp.array(-1))))  # undefined
    state = state._replace(status=status)
    return state
0f8ce3e1873b9a5a955b95489ea454e3c9813524
3,644,454
def find_broken_in_text(text, ignore_substrings=None):
    """Find broken links
    """
    links = _find(text, ignore_substrings=ignore_substrings)
    responses = [_check_if_broken(link) for link in links]
    return [res.url for res in responses if res.broken]
43075b6abb1ba8e7fc6f163e1afd1d6f305d99ab
3,644,455
from flask import jsonify


def home():
    """Route for the index page."""
    return jsonify({"message": "welcome to fast_Food_Fast online restaurant"})
cad54560f01361ff6d9fed1d117f8b50eff59b50
3,644,457
import pvlib


def singlediode_voc(effective_irradiance, temp_cell, module_parameters):
    """
    Calculate voc using the singlediode model.

    Parameters
    ----------
    effective_irradiance
    temp_cell
    module_parameters

    Returns
    -------
    v_oc
    """
    photocurrent, saturation_current, resistance_series, resistance_shunt, \
        nNsVth = calcparams_singlediode(effective_irradiance, temp_cell,
                                        module_parameters)

    # out = pvlib.pvsystem.singlediode(photocurrent, saturation_current,
    #                                  resistance_series, resistance_shunt,
    #                                  nNsVth, method='newton')

    v_oc = pvlib.singlediode.bishop88_v_from_i(0, photocurrent,
                                               saturation_current,
                                               resistance_series,
                                               resistance_shunt, nNsVth,
                                               method='newton')
    return v_oc
b47cff93f7ad51ce026fd1e28c6427bcaca0a639
3,644,458
import multiprocessing as mp
from typing import List, Optional

from apysc._file import file_util


def _exec_document_lint_and_script(
        limit_count: Optional[int] = None) -> List[str]:
    """
    Execute each runnable script in the documents and check it with each
    lint.

    Parameters
    ----------
    limit_count : int or None, optional
        Limitation of the script execution count.

    Returns
    -------
    executed_scripts : list of str
        List of executed Python scripts.
    """
    md_file_paths: List[str] = \
        file_util.get_specified_ext_file_paths_recursively(
            extension='.md', dir_path='./docs_src/')
    hashed_vals: List[str]
    md_file_paths, hashed_vals = _slice_md_file_by_hashed_val(
        md_file_paths=md_file_paths)
    script_data_list: List[_ScriptData] = _make_script_data_list(
        md_file_paths=md_file_paths, hashed_vals=hashed_vals,
        limit_count=limit_count)
    workers: int = max(mp.cpu_count() // 2, 1)

    logger.info(msg="Document's code block flake8 checking started...")
    with mp.Pool(workers) as p:
        p.map(func=_check_code_block_with_flake8, iterable=script_data_list)

    logger.info(msg="Document's code block numdoclint checking started...")
    with mp.Pool(workers) as p:
        p.map(
            func=_check_code_block_with_numdoclint,
            iterable=script_data_list)

    logger.info(msg="Document's code block mypy checking started...")
    with mp.Pool(workers) as p:
        p.map(func=_check_code_block_with_mypy, iterable=script_data_list)

    logger.info(msg="Document's scripts execution started...")
    with mp.Pool(workers) as p:
        run_return_data_list: List[_RunReturnData] = p.map(
            func=_run_code_block_script, iterable=script_data_list)
    _move_code_block_outputs()
    _validate_script_return_data(return_data_list=run_return_data_list)
    _save_hashed_val(script_data_list=script_data_list)

    executed_scripts: List[str] = [
        script_data['runnable_script']
        for script_data in script_data_list]
    return executed_scripts
a9406dfc5180c5b0f820740ae99522d8141af22d
3,644,459
from typing import Dict, Union


def _apply_result_filters(
        key_gender_token_counters: Dict[Union[str, int], GenderTokenCounters],
        diff: bool,
        sort: bool,
        limit: int,
        remove_swords: bool) -> KeyGenderTokenResponse:
    """
    A private helper function for applying optional keyword arguments to
    the output of GenderProximityAnalysis methods, allowing the user to
    sort, diff, limit, and remove stopwords from the output. These
    transformations do not mutate the input.

    :param key_gender_token_counters: a dictionary shaped
        Dict[Union[str, int], GenderTokenCounters]
    :param diff: return the difference in token occurrences across Genders.
    :param sort: return an array of the shape Sequence[Tuple[str, int]]
    :param limit: if sort==True, return only n=limit token occurrences.
    :param remove_swords: remove stop words from output.
    :return: a dictionary of the shape Dict[Union[str, int], GenderTokenResponse]

    >>> test_counter_1 = Counter({'foo': 1, 'bar': 2, 'own': 2})
    >>> test_counter_2 = Counter({'foo': 5, 'baz': 2})
    >>> test = {'doc': {'Male': test_counter_1, 'Female': test_counter_2}}
    >>> _apply_result_filters(test, diff=True, sort=False, limit=10, remove_swords=False).get('doc')
    {'Male': Counter({'bar': 2, 'own': 2, 'foo': -4}), 'Female': Counter({'foo': 4, 'baz': 2})}
    >>> _apply_result_filters(test, diff=False, sort=True, limit=10, remove_swords=False).get('doc')
    {'Male': [('bar', 2), ('own', 2), ('foo', 1)], 'Female': [('foo', 5), ('baz', 2)]}
    >>> _apply_result_filters(test, diff=False, sort=False, limit=10, remove_swords=True).get('doc')
    {'Male': Counter({'bar': 2, 'foo': 1}), 'Female': Counter({'foo': 5, 'baz': 2})}
    >>> _apply_result_filters(test, diff=True, sort=True, limit=10, remove_swords=False).get('doc')
    {'Male': [('bar', 2), ('own', 2), ('foo', -4)], 'Female': [('foo', 4), ('baz', 2)]}
    """
    output = {}

    for key, gender_token_counters in key_gender_token_counters.items():
        if remove_swords:
            output[key] = _remove_swords(gender_token_counters)
        else:
            output[key] = gender_token_counters

        if diff:
            output[key] = _diff_gender_token_counters(output[key])

        if sort:
            output[key] = _sort_gender_token_counters(output[key], limit=limit)

    return output
120bb37936293810796ad6e62cee6b3c0bccabe4
3,644,462
from pyramid.httpexceptions import HTTPFound, HTTPNotFound


def blog_delete(request):
    """Delete blog entry by id."""
    blog_id = int(request.params.get('id'))
    entry = BlogRecordService.by_id(blog_id, request)
    if not entry:
        return HTTPNotFound()
    request.dbsession.delete(entry)
    return HTTPFound(location=request.route_url('home'))
4e1b9a19cd3a33743479de69ee1fbc4ffc9f9a42
3,644,463
import aiohttp


async def get_ios_cfw():
    """Gets all apps on ios.cfw.guide

    Returns
    -------
    dict
        "ios, jailbreaks, devices"
    """
    async with aiohttp.ClientSession() as session:
        async with session.get("https://api.appledb.dev/main.json") as resp:
            if resp.status == 200:
                data = await resp.json()
                return data
dfb0dfafef2ef8e27940bc7a154cd4a35f863017
3,644,464
from django.http import HttpResponse


def server_error(errorMsg):
    """
    Shorthand for returning an error message.
    """
    resp = HttpResponse(status=502)
    resp.write("<h3>502 BAD GATEWAY: </h3>")
    resp.write("<p>ERROR: {}</p>".format(errorMsg))
    return resp
cadadfc0a8c0098832ca08080e3602bdaf01ffc4
3,644,465
import re


def protein_variant(variant):
    """
    Return an HGVS_ variant string containing only the protein changes in
    a coding HGVS_ variant string. If all variants are synonymous, returns
    the synonymous variant code. If the variant is wild type, returns the
    wild type variant.

    :param str variant: coding variant string
    :return: protein variant string (or synonymous or wild type)
    :rtype: str
    """
    if len(variant) == 0:
        raise ValueError("Empty variant string.")
    elif variant == WILD_TYPE_VARIANT:
        return WILD_TYPE_VARIANT
    elif variant == SYNONYMOUS_VARIANT:
        return SYNONYMOUS_VARIANT
    else:
        matches = re.findall(r"\((p\.\S*)\)", variant)
        if len(matches) == 0:
            raise ValueError("Invalid coding variant string.")
        # uniqify and remove synonymous
        seen = {"p.=": True}
        unique_matches = list()
        for v in matches:
            if v in seen:
                continue
            else:
                seen[v] = True
                unique_matches.append(v)
        if len(unique_matches) == 0:
            return SYNONYMOUS_VARIANT
        else:
            return ", ".join(unique_matches)
ed9d11759ed5d09f76daa757b9e75d00bfd0f029
3,644,466
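A hedged example for protein_variant, assuming the function and its module constants (WILD_TYPE_VARIANT, SYNONYMOUS_VARIANT) are in scope; the variant string is invented but follows the HGVS pattern the regex expects:

# Two coding changes: one missense, one synonymous ('p.=' is filtered out).
print(protein_variant("c.76A>C (p.Ile26Leu), c.78C>T (p.=)"))
# -> 'p.Ile26Leu'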
from functools import reduce

import numpy as np
import scipy.spatial


def calc_predictability_trace_of_avg_cov(x, k, p, ndim=False):
    """
    The main evaluation criterion of GPFA, i.e., equation (2) from the paper.

    :param x: data array
    :param k: number of neighbors for estimate
    :param p: number of past time steps to consider
    :param ndim: n-dimensional evaluation if True
    :return: estimated variance in the next time step
    """
    def _cov(t):
        successors = neighbors[t] + 1
        successors = successors[successors < N]
        suc_dat = x[successors]
        return np.array(np.cov(suc_dat.T), ndmin=2)

    # pairwise distances of data points
    if x.ndim == 1:
        x = np.array(x, ndmin=2).T
    N, _ = x.shape
    # concatenate_past is a helper from the original module.
    y = concatenate_past(x, p=p)
    tree = scipy.spatial.cKDTree(y)
    neighbors = [tree.query(y[i], k=k + 1)[1] for i in range(y.shape[0])]
    assert len(neighbors) == N
    covariances = map(_cov, range(p - 1, N - 1))
    covariance = reduce(lambda a, b: a + b, covariances) / (N - p)
    if ndim:
        E, _ = np.linalg.eigh(covariance)
        return E
    result = np.trace(covariance)
    assert np.isfinite(result)
    return result
a803847ce8f8791edf44d3ba102137e69836f410
3,644,469
from typing import Dict, Sequence

import networkx as nx


def nx_to_loreleai(graph: nx.Graph,
                   relation_map: Dict[str, Predicate] = None) -> Sequence[Atom]:
    """
    Converts a NetworkX graph into the Loreleai representation.

    To indicate the type of relations and nodes, the function looks for a
    'type' attribute.

    Arguments:
        graph: NetworkX graph
        relation_map: maps from edge types to predicates
    """
    literals = []

    if relation_map is None:
        relation_map = {}

    for (u, v, t) in graph.edges.data('type', default=None):
        literals.append(relation_map[t](u, v))

    return literals
f847c26d0831bf6bbaf16eabe5a32d27118550da
3,644,470
def _kohn_sham_iteration(
        density,
        external_potential,
        grids,
        num_electrons,
        xc_energy_density_fn,
        interaction_fn,
        enforce_reflection_symmetry):
    """One iteration of Kohn-Sham calculation."""
    # NOTE(leeley): Since num_electrons in KohnShamState needs to be
    # specified as a static argument in a jit function, this function
    # cannot directly take KohnShamState as an input argument. The related
    # attributes in KohnShamState are used as input arguments for this
    # helper function.
    if enforce_reflection_symmetry:
        xc_energy_density_fn = _flip_and_average_on_center_fn(
            xc_energy_density_fn)

    hartree_potential = scf.get_hartree_potential(
        density=density,
        grids=grids,
        interaction_fn=interaction_fn)
    xc_potential = scf.get_xc_potential(
        density=density,
        xc_energy_density_fn=xc_energy_density_fn,
        grids=grids)
    ks_potential = hartree_potential + xc_potential + external_potential
    xc_energy_density = xc_energy_density_fn(density)

    # Solve the Kohn-Sham equation.
    density, total_eigen_energies, gap = scf.solve_noninteracting_system(
        external_potential=ks_potential,
        num_electrons=num_electrons,
        grids=grids)

    total_energy = (
        # kinetic energy = total_eigen_energies - external_potential_energy
        total_eigen_energies
        - scf.get_external_potential_energy(
            external_potential=ks_potential,
            density=density,
            grids=grids)
        # Hartree energy
        + scf.get_hartree_energy(
            density=density,
            grids=grids,
            interaction_fn=interaction_fn)
        # xc energy
        + scf.get_xc_energy(
            density=density,
            xc_energy_density_fn=xc_energy_density_fn,
            grids=grids)
        # external energy
        + scf.get_external_potential_energy(
            external_potential=external_potential,
            density=density,
            grids=grids)
    )

    if enforce_reflection_symmetry:
        density = _flip_and_average_on_center(density)

    return (
        density,
        total_energy,
        hartree_potential,
        xc_potential,
        xc_energy_density,
        gap)
81ecffb04d0bc76b31187708c3502acead8653ab
3,644,471
def get_sync_func_driver(physical_mesh):
    """Get the sync function on the driver."""

    def sync_func_driver():
        assert isinstance(physical_mesh, LocalPhysicalDeviceMesh)
        physical_mesh.devices[0].synchronize_all_activity()

    return sync_func_driver
13dee330aa22524c52272c1969f3acba23f4378f
3,644,472
import numpy as np
import pyroms


def get_nc_BGrid_GFDL(grdfile):
    """
    Bgrd = get_nc_BGrid_GFDL(grdfile)

    Load B-Grid grid object for GFDL CM2.1 from netCDF grid file
    """
    nc = pyroms.io.Dataset(grdfile)

    lon_t = nc.variables['geolon_t'][:]
    lat_t = nc.variables['geolat_t'][:]
    lon_uv = nc.variables['geolon_c'][:]
    lat_uv = nc.variables['geolat_c'][:]

    h = nc.variables['ht'][:]
    f = nc.variables['coriolis_param'][:]

    kmt = nc.variables['kmt'][:]
    z_t = nc.variables['st_ocean'][:]
    z_t_edges = nc.variables['st_edges_ocean'][:]

    kmu = nc.variables['kmu'][:]
    z_uv = nc.variables['sw_ocean'][:]
    z_uv_edges = nc.variables['sw_edges_ocean'][:]

    # compute mask at t-point
    M_t, L_t = kmt.shape
    N_t = z_t.shape[0]
    mask_t = np.zeros((N_t, M_t, L_t))
    for j in range(M_t):
        for i in range(L_t):
            try:
                mask_t[0:kmt[j, i], j, i] = 1
            except Exception:
                mask_t[:, j, i] = 0

    # compute mask at uv-point
    # (the original indexed with kmt here, which looks like a copy-paste
    # slip; kmu is the uv-point level count)
    M_uv, L_uv = kmu.shape
    N_uv = z_uv.shape[0]
    mask_uv = np.zeros((N_uv, M_uv, L_uv))
    for j in range(M_uv):
        for i in range(L_uv):
            try:
                mask_uv[0:kmu[j, i], j, i] = 1
            except Exception:
                mask_uv[:, j, i] = 0

    return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv,
                      mask_t, mask_uv, h, z_t, z_t_edges,
                      z_uv, z_uv_edges, f)
0b6f844676da7f357334640eafdb3039127df912
3,644,473
def _getTimeDistORSlocal(fromLocs, toLocs, travelMode, port, speedMPS):
    """
    Generate two dictionaries, one for time, another for distance, using
    ORS-local.

    Parameters
    ----------
    fromLocs: list, Required
        The start node coordinates in the format of [[lat, lon], [lat, lon], ...]
    toLocs: list, Required
        The end node coordinates in the format of [[lat, lon], [lat, lon], ...]
    travelMode: string, Required
        The travel mode for ORS; options are 'fastest', 'pedestrian',
        'cycling', 'truck'
    port: string, Required
        localhost connection port
    speedMPS: float, Required
        A constant speed for calculation

    Returns
    -------
    timeSecs: dictionary
        A dictionary for time from nodes to nodes, unit is in [seconds]
    distMeters: dictionary
        A dictionary for distance from nodes to nodes, unit is in [meters]
    """
    if (fromLocs == toLocs):
        locs = fromLocs.copy()
        [timeSecs, distMeters] = orsLocalGetTimeDistAll2All(locs, travelMode, port)
    elif (len(fromLocs) == 1):
        fromLoc = fromLocs[0]
        [timeSecs, distMeters] = orsLocalGetTimeDistOne2Many(fromLoc, toLocs, travelMode, port)
    elif (len(toLocs) == 1):
        toLoc = toLocs[0]
        [timeSecs, distMeters] = orsLocalGetTimeDistMany2One(fromLocs, toLoc, travelMode, port)
    else:
        timeSecs = {}
        distMeters = {}
        for i in range(len(fromLocs)):
            # NOTE: the original passed an undefined `routeType` here;
            # travelMode appears to be intended.
            [timeRow, distRow] = orsLocalGetTimeDistOne2Many(fromLocs[i], toLocs, travelMode, port)
            for j in range(len(toLocs)):
                distMeters[i, j] = distRow[0, j]
                timeSecs[i, j] = timeRow[0, j]

    if (speedMPS is not None):
        for i in range(len(fromLocs)):
            for j in range(len(toLocs)):
                timeSecs[i, j] = distMeters[i, j] / speedMPS

    return [timeSecs, distMeters]
67feca093769c4cef4f4383cb6eaca4e0f584019
3,644,474
def sequence_plus_one(x_init, iter, dtype=int):
    """
    Mathematical sequence: x_n = x_0 + n

    :param x_init: initial values of the sequence
    :param iter: iteration until the sequence should be evaluated
    :param dtype: data type to cast to (either int or float)
    :return: element at the given iteration and array of the whole sequence
    """
    def iter_function(x_seq, i, x_init):
        return x_seq[0, :] + i

    return sequence(x_init, iter, iter_function, dtype)
ec84cdb2f98147d2d1d967f7a071d3124ccdf54a
3,644,475
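The sequence() driver is not shown, so here is a minimal stand-in just to make the recurrence x_n = x_0 + n concrete (this loop is an assumption for illustration, not the original driver):

import numpy as np

x_init = np.array([[0.0]])  # shape matches the x_seq[0, :] access above
seq = [x_init[0, :]]
for i in range(1, 5):
    seq.append(x_init[0, :] + i)  # x_n = x_0 + n
print(np.array(seq).ravel())      # [0. 1. 2. 3. 4.]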
def _is_test_product_type(product_type):
    """Returns whether the given product type is for test purposes or not."""
    return product_type in (
        apple_product_type.ui_test_bundle,
        apple_product_type.unit_test_bundle,
    )
41847ab87e4a8a0dfc2b2758b70b5f7b5d7b952b
3,644,476
def _synced(method, self, args, kwargs):
    """Underlying synchronized wrapper."""
    with self._lock:
        return method(*args, **kwargs)
54ca3cf69742550bd34ff3d2299a2d84f78577a3
3,644,477
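_synced expects an already-bound method plus the owning instance, so it is typically wrapped in a decorator; a hedged sketch of such a wrapper (the synchronized decorator and Counter class below are assumptions, not the original code):

import threading
from functools import wraps

def synchronized(method):
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        # Bind the method to self, then route through _synced for locking.
        return _synced(method.__get__(self), self, args, kwargs)
    return wrapper

class Counter:
    def __init__(self):
        self._lock = threading.Lock()
        self.n = 0

    @synchronized
    def incr(self):
        self.n += 1

c = Counter()
c.incr()
print(c.n)  # 1, with the increment done under c._lock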