content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def getn_hidden_area(*args):
    """getn_hidden_area(int n) -> hidden_area_t"""
    # SWIG-generated wrapper: forwards directly to the native IDA API binding.
    return _idaapi.getn_hidden_area(*args)
3265d4258ce6717e8ca23bd10754e1b1648d4217
3,640,067
def cdist(X: DNDarray, Y: DNDarray = None, quadratic_expansion: bool = False) -> DNDarray:
    """
    Calculate Euclidian distance between two DNDarrays:

    .. math:: d(x,y) = \\sqrt{(|x-y|^2)}

    Returns 2D DNDarray of size :math: `m \\times n`

    Parameters
    ----------
    X : DNDarray
        2D array of size :math: `m \\times f`
    Y : DNDarray
        2D array of size :math: `n \\times f`
    quadratic_expansion : bool
        Whether to use quadratic expansion for :math:`\\sqrt{(|x-y|^2)}`
        (Might yield speed-up)
    """
    # Pick the metric implementation once, then delegate to the shared driver.
    metric = _euclidian_fast if quadratic_expansion else _euclidian
    return _dist(X, Y, metric)
14a2368ff0717ff04e0477699ff13d20f359ba0d
3,640,068
def popcount_u8(x: np.ndarray) -> int:
    """Return the total bit count of a uint8 array.

    Parameters
    ----------
    x : np.ndarray
        Array of dtype uint8, any shape.

    Returns
    -------
    int
        Sum of the population counts (number of set bits) of all elements.

    Raises
    ------
    ValueError
        If ``x`` is not of dtype uint8.
    """
    if x.dtype != np.uint8:
        raise ValueError("input dtype must be uint8")
    # Vectorized bit count: unpack every byte into its 8 bits and sum them.
    # Replaces the original per-element Python loop over a lookup table,
    # which was O(n) Python-level iterations.
    return int(np.unpackbits(x).sum())
e85c07b3df7dcd993c0f1cc7f9dbecd97e8be317
3,640,069
from scipy import stats

def split_errorRC(tr, t1, t2, q, Emat, maxdt, ddt, dphi):
    """
    Calculates error bars based on a F-test and a given confidence interval q.

    Note
    ----
    This version uses a Fisher transformation for correlation-type misfit.

    Parameters
    ----------
    tr : :class:`~obspy.core.Trace`
        Seismogram
    t1 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        Start time of picking window
    t2 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        End time of picking window
    q : float
        Confidence level
    Emat : :class:`~numpy.ndarray`
        Energy minimization matrix
    maxdt : float
        Maximum delay time searched (sec)
    ddt : float
        Delay-time grid increment (sec)
    dphi : float
        Azimuth grid increment (degrees)

    Returns
    -------
    err_dtt : float
        Error in dt estimate (sec)
    err_phi : float
        Error in phi estimate (degrees)
    err_contour : :class:`~numpy.ndarray`
        Error contour for plotting
    """
    # Search grids: azimuths in radians, delay times in seconds — these must
    # match the grids used to build Emat (same dphi/ddt/maxdt).
    phi = np.arange(-90.0, 90.0, dphi)*np.pi/180.
    dtt = np.arange(0., maxdt, ddt)

    # Copy trace to avoid overriding
    tr_tmp = tr.copy()
    tr_tmp.trim(t1, t2)

    # Get degrees of freedom (computed from the trimmed window elsewhere)
    dof = split_dof(tr_tmp)
    if dof <= 3:
        # F-test below needs dof > n_par; clamp just above 3 and warn.
        dof = 3.01
        print(
            "Degrees of freedom < 3. Fixing to DOF = 3, which may " +
            "result in inaccurate errors")
    n_par = 2

    # Fisher transformation of the minimum misfit value
    vmin = np.arctanh(Emat.min())

    # Error contour in transformed space
    zrr_contour = vmin + (vmin*np.sign(vmin)*n_par/(dof - n_par) *
                          stats.f.ppf(1. - q, n_par, dof - n_par)) *\
        np.sqrt(1./(dof-3))

    # Back transformation to correlation space
    err_contour = np.tanh(zrr_contour)

    # Estimate uncertainty (q confidence interval): indices of all grid
    # cells inside the contour.
    err = np.where(Emat < err_contour)
    # 0.25 converts the full contour extent to a half-width-like estimate;
    # floored at a quarter grid step so the errors are never zero.
    err_phi = max(
        0.25*(phi[max(err[0])] - phi[min(err[0])])*180./np.pi, 0.25*dphi)
    err_dtt = max(0.25*(dtt[max(err[1])] - dtt[min(err[1])]), 0.25*ddt)

    return err_dtt, err_phi, err_contour
3155031382c881a15a8a300d6656cae1fc0fee64
3,640,070
import copy

def filter_parts(settings):
    """
    Remove grouped components and glyphs that have been deleted or split.

    Any glyph whose ``class_name`` starts with ``_split``, ``_group`` or
    ``_delete`` is dropped from both ``settings['glyphs']`` and
    ``settings['training_glyphs']``.  The glyphs removed from
    ``settings['glyphs']`` are returned.
    """
    prefixes = ("_split", "_group", "_delete")

    def _is_part(glyph):
        # str.startswith accepts a tuple: one call instead of three.
        return glyph['class_name'].startswith(prefixes)

    parts = [g for g in settings['glyphs'] if _is_part(g)]
    settings['glyphs'] = [g for g in settings['glyphs'] if not _is_part(g)]

    # Remove from the training glyphs as well (their parts are discarded).
    settings['training_glyphs'] = [
        g for g in settings['training_glyphs'] if not _is_part(g)
    ]
    return parts
f8d6a59eeeb314619fd4c332e2594dee3543ee9c
3,640,071
def kernel_zz(Y, X, Z):
    """
    Kernel zz for second derivative of the potential generated by a sphere
    """
    # Squared distance and distance from the origin.
    r_sq = Y ** 2 + X ** 2 + Z ** 2
    r = np.sqrt(r_sq)
    # (3 z^2 - r^2) / r^5
    return (3 * Z ** 2 - r_sq) / (r_sq * r_sq * r)
14f36fe23531994cd40c74d26b91477d266ca21c
3,640,072
def getAccentedVocal(vocal, acc_type="g"):
    """
    Return the given vowel with a grave ('g', default) or acute ('a') accent.
    """
    # Accent tables built positionally from the plain vowels a/e/i/o/u.
    grave = dict(zip("aeiou", "\u00e0\u00e8\u00ec\u00f2\u00f9"))
    acute = dict(zip("aeiou", "\u00e1\u00e9\u00ed\u00f3\u00fa"))
    table = {'g': grave, 'a': acute}
    return table[acc_type][vocal]
cfec276dac32e6ff092eee4f1fc84b412c5c915c
3,640,073
def env_initialize(env, train_mode=True, brain_idx=0, idx=0, verbose=False):
    """ Setup environment and return info

    Selects brain ``brain_idx``, resets the environment, and reports the
    initial state of agent ``idx`` along with the action/state sizes.

    Returns a tuple (brain, brain_name, state, action_size, state_size).
    """
    # Select the brain to control (defaults to the first one).
    brain_name = env.brain_names[brain_idx]
    brain = env.brains[brain_name]

    # Reset and read the initial observation for agent `idx`.
    env_info = env.reset(train_mode=train_mode)[brain_name]
    state = env_info.vector_observations[idx]

    state_size = len(state)
    action_size = brain.vector_action_space_size

    if verbose:
        # Number of agents in the environment, plus shape diagnostics.
        print(f'Number of agents: {len(env_info.agents)}')
        print(f'Number of actions: {action_size}')
        print(f'States have length: {state_size}')
        print(f'States look like: {state}')

    return (brain, brain_name, state, action_size, state_size)
3c951a77009cca8c876c36965ec33781dd2c08dd
3,640,074
def lorentzianfit(x, y, parent=None, name=None):
    """Compute Lorentzian fit.

    Returns (yfit, params), where yfit is the fitted curve and params are
    the fitting parameters.

    NOTE(review): when the interactive fit dialog is cancelled, ``guifit``
    returns a falsy value and this function implicitly returns ``None`` —
    callers must handle that case.
    """
    dx = np.max(x) - np.min(x)
    dy = np.max(y) - np.min(y)
    # Initial guesses: sigma at 10% of the x-range; amplitude derived from
    # the y-range for that sigma.
    sigma = dx * 0.1
    amp = fit.LorentzianModel.get_amp_from_amplitude(dy, sigma)
    a = FitParam(_("Amplitude"), amp, 0.0, amp * 1.2)
    b = FitParam(_("Base line"), np.min(y), np.min(y) - 0.1 * dy, np.max(y))
    sigma = FitParam(_("Std-dev") + " (σ)", sigma, sigma * 0.2, sigma * 10)
    mu = FitParam(_("Mean") + " (μ)", xpeak(x, y), np.min(x), np.max(x))
    params = [a, sigma, mu, b]

    def fitfunc(x, params):
        return fit.LorentzianModel.func(x, *params)

    values = guifit(
        x, y, fitfunc, params, parent=parent, wintitle=_("Lorentzian fit"), name=name
    )
    if values:
        return fitfunc(x, values), params
cd221c3483ee7f54ac49baaeaf617ef8ec2b7fa7
3,640,075
def tf_quat(T):
    """ Return quaternion from 4x4 homogeneous transform """
    assert T.shape == (4, 4)
    rotation = tf_rot(T)
    return rot2quat(rotation)
7fb2a7b136201ec0e6a92faf2cc030830df46fa5
3,640,076
def solve2(lines):
    """Solve the problem: total the sizes of all parsed answer groups."""
    return sum(len(group) for group in parse_answers2(lines))
5990b61e713733ba855937b8191b8a8a4f503873
3,640,077
def get_contract_type(timestamp: int, due_timestamp: int) -> str:
    """Get the contract_type

    Input the timestamp and due_timestamp. Return which contract_type is.

    Args:
        timestamp: The target timestamp, you want to know.
        due_timestamp: The due timestamp of the contract.

    Returns:
        The contract_type name.

    Raises:
        RuntimeError: An error occurred timestamp gt due_timestamp.
    """
    # One week expressed in milliseconds.
    week_ms = 7 * 24 * 60 * 60 * 1000
    remaining = due_timestamp - timestamp
    if remaining < 0:
        raise RuntimeError("the timestamp more than due_timestamp")
    if remaining < week_ms:
        return CONTRACT_TYPE_THIS_WEEK
    if remaining < 2 * week_ms:
        return CONTRACT_TYPE_NEXT_WEEK
    return CONTRACT_TYPE_QUARTER
3b3a084f786c82a5fc1b2a7a051e9005b3df5f0a
3,640,078
from typing import Any
from typing import Optional
from typing import Union
from typing import Type
from typing import Tuple
from typing import Sequence
from typing import cast

def is_sequence_of(obj: Any,
                   types: Optional[Union[Type[object],
                                         Tuple[Type[object], ...]]] = None,
                   depth: Optional[int] = None,
                   shape: Optional[Sequence[int]] = None
                   ) -> bool:
    """
    Test if object is a sequence of entirely certain class(es).

    Args:
        obj: The object to test.
        types: Allowed type(s). If omitted, we just test the depth/shape.
        depth: Level of nesting, ie if ``depth=2`` we expect a sequence of
            sequences. Default 1 unless ``shape`` is supplied.
        shape: The shape of the sequence, ie its length in each dimension.
            If ``depth`` is omitted, but ``shape`` included, we set
            ``depth = len(shape)``.

    Returns:
        bool: ``True`` if every item in ``obj`` matches ``types``.

    Raises:
        ValueError: if both ``depth`` and ``shape`` are given but disagree.
    """
    # `is_sequence` is defined elsewhere in this module.
    if not is_sequence(obj):
        return False
    if shape is None or shape == ():
        # No shape constraint below this level.
        next_shape: Optional[Tuple[int]] = None
        if depth is None:
            depth = 1
    else:
        if depth is None:
            depth = len(shape)
        elif depth != len(shape):
            raise ValueError('inconsistent depth and shape')
        # The outermost dimension must match the first shape entry...
        if len(obj) != shape[0]:
            return False
        # ...and the remaining dimensions are checked recursively.
        next_shape = cast(Tuple[int], shape[1:])
    for item in obj:
        if depth > 1:
            # Recurse one nesting level down, consuming one shape entry.
            if not is_sequence_of(item, types, depth=depth - 1,
                                  shape=next_shape):
                return False
        elif types is not None and not isinstance(item, types):
            return False
    return True
3762454785563c7787451efad143547f97ae8994
3,640,079
def _parse_tree_height(sent): """ Gets the height of the parse tree for a sentence. """ children = list(sent._.children) if not children: return 0 else: return max(_parse_tree_height(child) for child in children) + 1
d6de5c1078701eeeb370c917478d93e7653d7f4f
3,640,081
def pandas_loss_p_g_i_t(c_m, lgd, ead, new):
    """ Distribution of losses at time t. long format (N_MC, G, K, T)."""
    # 4-D loss cube (paths, group, rating, time) produced elsewhere.
    mat_4D = loss_g_i_t(c_m, lgd, ead, new)
    names = ['paths', 'group_ID', 'credit_rating_rank', 'time_steps']
    # One MultiIndex level per cube axis, so flatten() lines up row-wise
    # with the cartesian product of the axis indices.
    index = pds.MultiIndex.from_product([range(s)for s in mat_4D.shape],
                                        names=names)
    df = pds.DataFrame({'loss_p_g_i_t': mat_4D.flatten()},
                       index=index)['loss_p_g_i_t']
    df = pds.Series.to_frame(df)
    # Surrogate row key plus a hard-coded portfolio label for this pilot run.
    df['loss_p_g_i_t_ID'] = np.arange(len(df))
    df.insert(0, 'portfolio_ID', 'pilot 1 Bank A', allow_duplicates=False)
    return df
65e9db48eab0a40596b205a7304bd225eb5c93d0
3,640,082
def find_available_pacs(pacs, pac_to_unstuck=None, pac_to_super=None, pac_to_normal=None):
    """ Finds the available pacs that are not assigned

    Each ``pac_to_*`` argument, when given, is a mapping keyed by pac id;
    pacs whose id appears in any of them are filtered out.
    """
    available = pacs['mine']
    for assignment in (pac_to_unstuck, pac_to_super, pac_to_normal):
        if assignment is not None:
            available = [p for p in available if p['id'] not in assignment]
    return available
4b6674fd87db2127d5fffa781431ccc9a9ff775a
3,640,084
async def login_for_access_token(
    form_data: OAuth2PasswordRequestForm = Depends(),
):
    """
    Log in to your account using oauth2 authorization. In response we get
    a jwt authorization token which is used for granting access to data.
    """
    # Verify the credentials; `scope` is the permission level granted to
    # this authority (see authenticate_authority).
    is_auth, scope = await authenticate_authority(
        form_data.username, form_data.password
    )
    if not is_auth:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
        )
    access_token_expires = timedelta(
        minutes=security_config.ACCESS_TOKEN_EXPIRE_MINUTES
    )
    access_token = create_access_token(
        data={"sub": form_data.username, "scopes": [scope]},
        expires_time=access_token_expires,
    )
    # OAuth2 password-flow response shape expected by clients.
    return {"access_token": access_token, "token_type": "bearer"}
441326317f0f13275ad33e369efe419a605ac4eb
3,640,085
def get_plain_expressions(s):
    """Return a list of plain, non-nested shell expressions found in the
    shell string s.

    These are shell expressions that do not further contain a nested
    expression and can therefore be resolved independently. For example::

        >>> get_plain_expressions("${_pyname%${_pyname#?}}")
        ['${_pyname#?}']
    """
    # Thin public alias for the private parser.
    return _get_non_nested_expressions(s)
a3b0f6812ffe361e291b28c4273ca7cc975eb1e7
3,640,086
def create_indices(dims):
    """Create lists of 1-based indices: one ``range`` per dimension size."""
    return [range(1, size + 1) for size in dims]
1a83b59eb1ca2b24b9db3c9eec05db7335938cae
3,640,087
def observed_property(property_name, default, cast=None):
    """Create a property stored under ``"_" + property_name``.

    Default must be immutable.  Reads fall back to ``default`` while the
    hidden attribute is unset; setting a value equal to ``default`` deletes
    the hidden attribute instead of storing it.  The setter and deleter are
    wrapped with ``observed`` (defined elsewhere) — presumably to emit
    change notifications; confirm at its definition.

    ``cast`` selects how assigned values are converted:
      * ``None`` (default): cast with ``type(default)``,
      * ``False``: no casting (identity),
      * any callable: used as-is.
    """
    hidden_property_name = "_" + property_name
    # BUG FIX: the original nested `if cast is False:` inside
    # `if cast is None:`, making the False branch unreachable — passing
    # cast=False left `cast` as the bool False and the setter then crashed
    # calling False(value).
    if cast is False:
        cast = lambda x: x
    elif cast is None:
        cast = type(default)

    def getter(self):
        try:
            return getattr(self, hidden_property_name)
        except AttributeError:
            # Unset: report the immutable default.
            return default

    def deleter(self):
        try:
            delattr(self, hidden_property_name)
        except AttributeError:
            pass

    def setter(self, value):
        value = cast(value)
        if value == default:
            # Storing the default is represented by absence of the attribute.
            try:
                delattr(self, hidden_property_name)
            except AttributeError:
                pass
        else:
            setattr(self, hidden_property_name, value)

    return property(getter, observed(setter), observed(deleter))
7358557b221b5d4fa18fbd29cd02b47823cfdfe0
3,640,088
from typing import Callable
from io import StringIO

def query_helper(
    source: S3Ref, query: str, dest: S3Ref = None, transform: Callable = None
) -> StringIO:
    """ query_helper runs the given s3_select query on the given object.

    - The results are saved in an in-memory file (StringIO) and returned.
    - If dest is specified, the file is copied to the provided S3Ref
    - If transform callable is specified, transform is called first with the
      temp file before uploading to the destination s3.
    """
    # S3 Select: the source object is treated as newline-delimited JSON and
    # result records come back as JSON.
    event_stream = s3.select_object_content(
        Bucket=source.bucket,
        Key=source.key,
        ExpressionType="SQL",
        Expression=query,
        InputSerialization={"JSON": {"Type": "LINES"}},
        OutputSerialization={"JSON": {}},
    )
    # Iterate over events in the event stream as they come
    output = StringIO()
    for s3_select_event in event_stream["Payload"]:
        # Only "Records" events carry payload bytes; stats/end events are skipped.
        if "Records" in s3_select_event:
            data = s3_select_event["Records"]["Payload"]
            output.write(data.decode("utf-8"))
    if transform:
        output.seek(0)
        # The transform may return a brand-new file object.
        output = transform(output)
    if dest is not None:
        upload(output, dest)
    output.seek(0)
    return output
3670734c76f615fe6deb3dfed8305cfc1740b124
3,640,089
def indicator_selector(row, indicator, begin, end):
    """Return Tons of biomass loss.

    For indicator 4 the value of the third entry is returned directly;
    otherwise a {year (str): value} dict is built for entries matching
    ``indicator`` whose year falls in [begin, end].
    """
    if indicator == 4:
        return row[2]['value']
    lo, hi = int(begin), int(end)
    return {
        str(entry['year']): entry['value']
        for entry in row
        if entry['indicator_id'] == indicator and lo <= entry['year'] <= hi
    }
329411837633f4e28bea4b2b261b6f4149b92fb1
3,640,090
import math

def xy_from_range_bearing(range: float, bearing: float) -> map_funcs.Point:
    """Given a range in metres and a bearing from the camera this returns
    the x, y position in metres relative to the runway start."""
    # Angle measured from the runway heading, in radians.
    theta = math.radians(bearing - google_earth.RUNWAY_HEADING_DEG)
    return map_funcs.Point(
        CAMERA_POSITION_XY.x + range * math.cos(theta),
        CAMERA_POSITION_XY.y + range * math.sin(theta),
    )
a2575437b52003660d83b241da13f10687fa4241
3,640,091
def flask_get_modules():
    """Return the list of all modules
    ---
    tags:
      - Modules
    responses:
      200:
        description: A list of modules
    """
    # NOTE(review): the YAML after '---' above looks like a Flasgger/Swagger
    # spec read at runtime — do not reformat the docstring without checking.
    db_list = db.session.query(Module).all()
    return jsonify(db_list)
21352458773143f785658488e34f9e486c7f818d
3,640,092
def create_user(username, password):
    """Register a new user unless the username is already taken.

    Raises
    ------
    RuntimeError
        If a user with ``username`` already exists.
    """
    if User.query.filter_by(username=username).first():
        raise RuntimeError(f'{username} ja esta cadastrado')
    # Store only the password hash, never the plaintext.
    user = User(username=username, password=generate_password_hash(password))
    db.session.add(user)
    db.session.commit()
    return user
1a50d31b764cce10d0db78141041deafc15f7c40
3,640,093
import numpy

def _get_mesh_colour_scheme():
    """Returns colour scheme for MESH (maximum estimated size of hail).

    :return: colour_map_object: Instance of
        `matplotlib.colors.ListedColormap`.
    :return: colour_norm_object: Instance of
        `matplotlib.colors.BoundaryNorm`.
    """
    # 13 RGB triplets (0-255), normalised to [0, 1] in one vectorised pass.
    rgb_matrix = numpy.array([
        [152, 152, 152], [152, 203, 254], [0, 152, 254], [0, 45, 254],
        [0, 101, 0], [0, 152, 0], [0, 203, 0], [254, 254, 50],
        [254, 203, 0], [254, 152, 0], [254, 0, 0], [254, 0, 152],
        [152, 50, 203],
    ], dtype=float) / 255
    colour_list = list(rgb_matrix)

    colour_map_object = matplotlib.colors.ListedColormap(colour_list)
    # Values below the first bound render white.
    colour_map_object.set_under(numpy.full(3, 1))

    # Bin edges in millimetres of estimated hail size.
    colour_bounds_mm = numpy.array([
        0.1, 15.9, 22.2, 28.6, 34.9, 41.3, 47.6, 54, 60.3, 65, 70, 75, 80, 85
    ])
    colour_norm_object = matplotlib.colors.BoundaryNorm(
        colour_bounds_mm, colour_map_object.N)

    return colour_map_object, colour_norm_object
4301822297d069a6cc289e72b5bf388ffae01cf4
3,640,094
def _manually_create_user(username, pw):
    """
    Create an *active* user, its server directory, and return its userdata
    dictionary.
    :param username: str
    :param pw: str
    :return: dict
    """
    enc_pass = server._encrypt_password(pw)
    # Create user directory with default structure (use the server function)
    user_dir_state = server.init_user_directory(username)
    single_user_data = user_dir_state
    # Mark active and attach credentials/metadata the server expects.
    single_user_data[server.USER_IS_ACTIVE] = True
    single_user_data[server.PWD] = enc_pass
    single_user_data[server.USER_CREATION_TIME] = server.now_timestamp()
    # A fresh user shares nothing yet.
    single_user_data['shared_with_me'] = {}
    single_user_data['shared_with_others'] = {}
    single_user_data['shared_files'] = {}
    # Register in the global in-memory user table.
    server.userdata[username] = single_user_data
    return single_user_data
21d523ae29121697e63460302d8027499b4d896d
3,640,097
def update_geoscale(df, to_scale):
    """
    Updates df['Location'] based on specified to_scale
    :param df: df, requires Location column
    :param to_scale: str, target geoscale
    :return: df, with 5 digit fips
    """
    # Code for when the "Location" is a FIPS based system.
    if to_scale == 'state':
        def _to_state_fips(code):
            # Keep the 2-digit state prefix, then right-pad with zeros.
            prefix = str(code[0:2])
            if len(prefix) < 5:
                return prefix.ljust(3 + len(prefix), '0')
            return prefix
        df.loc[:, 'Location'] = df['Location'].apply(_to_state_fips)
    elif to_scale == 'national':
        df.loc[:, 'Location'] = US_FIPS
    return df
e62083f176cd749a88b2e73774e70140c6c5b9ac
3,640,098
import json

def translate(text, from_lang="auto", to_lang="zh-CN"):
    """translate text, return the result as json

    NOTE(review): this uses `urllib2`, i.e. Python 2; `urlencode` must be
    imported elsewhere in this module.
    """
    url = 'https://translate.googleapis.com/translate_a/single?'
    params = []
    params.append('client=gtx')
    params.append('sl=' + from_lang)
    params.append('tl=' + to_lang)
    params.append('hl=en-US')
    # The dt/dj flags select which result sections this (undocumented)
    # endpoint returns — presumably translation + dictionary as JSON; verify.
    params.append('dt=t')
    params.append('dt=bd')
    params.append('dj=1')
    params.append('source=input')
    params.append(urlencode({'q': text}))
    url += '&'.join(params)
    request = urllib2.Request(url)
    # Spoof a browser user agent; presumably the endpoint rejects default
    # Python agents — confirm before removing.
    browser = "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"
    request.add_header('User-Agent', browser)
    response = urllib2.urlopen(request)
    return json.loads(response.read().decode('utf8'))
944a5a90f60d8e54c402100e512bbce2bbb407c5
3,640,099
def backproject_to_plane(cam, img_pt, plane): """Back an image point to a specified world plane""" # map to normalized image coordinates npt = np.matrix(npl.solve(cam[0], np.array(list(img_pt)+[1.0]))) M = cam[1].transpose() n = np.matrix(plane[:3]).flatten() d = plane.flat[3] Mt = M * cam[2] Mp = M * npt.transpose() return Mp * (np.dot(n, Mt) - d) / np.dot(n, Mp) - Mt
47ae45103460db5a5447900dda10783c8f92362e
3,640,100
from datetime import datetime

def format_cell(cell, datetime_fmt=None):
    """Format a cell.

    Datetimes are rendered with ``datetime_fmt`` when one is given; every
    other value is returned unchanged.
    """
    if not datetime_fmt:
        return cell
    if isinstance(cell, datetime):
        return cell.strftime(datetime_fmt)
    return cell
8d3fb41bb3d7d3f3b341482e2d050d32092118bf
3,640,101
def optimize(gradients, optim, global_step, summaries, global_norm=None, global_norm_clipped=None, appendix=''): """Modified from sugartensor""" # Add Summary if summaries is None: summaries = ["loss", "learning_rate"] # if "gradient_norm" in summaries: # if global_norm is None: # tf.summary.scalar("global_norm/gradient_norm" + appendix, # clip_ops.global_norm(list(zip(*gradients))[0])) # else: # tf.summary.scalar("global_norm/gradient_norm" + appendix, # global_norm) # if global_norm_clipped is not None: # tf.summary.scalar("global_norm/gradient_norm_clipped" + appendix, # global_norm_clipped) # Add histograms for variables, gradients and gradient norms. for gradient, variable in gradients: if isinstance(gradient, ops.IndexedSlices): grad_values = gradient.values else: grad_values = gradient if grad_values is not None: var_name = variable.name.replace(":", "_") # if "gradients" in summaries: # tf.summary.histogram("gradients/%s" % var_name, grad_values) # if "gradient_norm" in summaries: # tf.summary.scalar("gradient_norm/%s" % var_name, # clip_ops.global_norm([grad_values])) # Gradient Update OP return optim.apply_gradients(gradients, global_step=global_step)
4887d45d5b9eb5a96008daeab5d11c97afed27fd
3,640,102
def _get_desired_asg_capacity(region, stack_name):
    """Retrieve the desired capacity of the autoscaling group for a
    specific cluster.

    :param region: AWS region name.
    :param stack_name: tag *value* identifying the cluster's ASG.
    :return: the ASG's DesiredCapacity.
    """
    asg_conn = boto3.client("autoscaling", region_name=region)
    # Look the ASG up by tag value; assumes at least one ASG carries this
    # tag value — fails with TypeError/IndexError otherwise.
    tags = asg_conn.describe_tags(Filters=[{"Name": "value", "Values": [stack_name]}])
    asg_name = tags.get("Tags")[0].get("ResourceId")
    response = asg_conn.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    return response["AutoScalingGroups"][0]["DesiredCapacity"]
cdcec8493333a001fe3883b0c815da521c571f7a
3,640,103
def _default_geo_type_precision():
    """ default digits after decimal for geo types """
    # Four decimal places of lat/lon is roughly 10 m of resolution —
    # presumably sufficient as a default; confirm with the callers.
    return 4
eef082c8a8b38f4ede7bfb5d631b2679041b650c
3,640,104
import scipy

def load_movietimes(filepath_timestamps, filepath_daq):
    """Load daq and cam time stamps, create muxer.

    :param filepath_timestamps: CSV with per-frame 'sample' and
        'movie_frame' columns.
    :param filepath_daq: HDF5 file with 'systemtime' and 'samplenumber'
        datasets.
    :return: (ss, last_sample, sampling_rate_Hz) where ss is a SampStamp
        muxer, last_sample the final DAQ sample number, and
        sampling_rate_Hz the inferred DAQ rate.
    """
    df = pd.read_csv(filepath_timestamps)
    # DAQ time stamps
    with h5py.File(filepath_daq, 'r') as f:
        daq_stamps = f['systemtime'][:]
        daq_sampleinterval = f['samplenumber'][:]
    # remove trailing zeros - may be left over if recording didn't finish properly
    if 0 in daq_stamps:
        last_valid_idx = np.argmax(daq_stamps == 0)
    else:
        # in case there are no trailing zeros
        last_valid_idx = len(daq_stamps) - 1
    # Cumulative sample counts aligned with the timestamp intervals.
    daq_samplenumber = np.cumsum(daq_sampleinterval)[:last_valid_idx, np.newaxis]
    last_sample = daq_samplenumber[-1, 0]
    # seconds - using mode here to be more robust
    nb_seconds_per_interval, _ = scipy.stats.mode(np.diff(daq_stamps[:last_valid_idx, 0]))
    nb_seconds_per_interval = nb_seconds_per_interval[0]
    nb_samples_per_interval = np.mean(np.diff(daq_samplenumber[:last_valid_idx, 0]))
    # round to 1000s of Hz
    sampling_rate_Hz = np.around(nb_samples_per_interval / nb_seconds_per_interval, -3)
    # Alternative construction kept for reference:
    # ss = SampStamp(sample_times=daq_stamps[:last_valid_idx, 0], frame_times=shutter_times,
    #                sample_numbers=daq_samplenumber[:, 0], auto_monotonize=False)
    ss = SampStamp(sample_times=daq_stamps[:last_valid_idx, 0],
                   sample_numbers=daq_samplenumber[:, 0],
                   frame_samples=df['sample'],
                   frame_numbers=df['movie_frame'],
                   auto_monotonize=False)
    # # different refs:
    # # first sample is 0 seconds
    # s0 = ss.sample_time(0)
    # ss = SampStamp(sample_times=daq_stamps[:, 0] - s0, frame_times=cam_stamps[:, 0] - s0, sample_numbers=daq_samplenumber[:, 0])
    # # first frame is 0 seconds - for no-resample-video-data
    # f0 = ss.frame_time(0)
    # ss = SampStamp(sample_times=daq_stamps[:, 0] - f0, frame_times=cam_stamps[:, 0] - f0, sample_numbers=daq_samplenumber[:, 0])
    return ss, last_sample, sampling_rate_Hz
4d54f7378f3d189a5bea4c14f68d5556958ba4f3
3,640,105
def is_valid_hotkey(hotkey: str) -> bool:
    """Returns True if hotkey string is valid.

    The string carries a 2-character prefix followed by underscore-separated
    (mode, button) pairs; modes are press/click/wheel, wheel buttons are
    up/down and the remaining buttons come from ``Button``.  A hotkey whose
    final pair uses mode "press" is rejected.
    """
    mode_opts = ["press", "click", "wheel"]
    btn_opts = [b.name for b in Button]
    wheel_opts = ["up", "down"]
    # Strip the 2-char prefix, then read alternating mode/button tokens.
    hotkeylist = hotkey[2:].split("_")
    if len(hotkeylist) == 0 or len(hotkeylist) % 2 != 0:
        return False
    for i in range(0, len(hotkeylist), 2):
        mode = hotkeylist[i]
        btn = hotkeylist[i + 1]
        if mode not in mode_opts:
            return False
        if mode == "wheel" and btn not in wheel_opts:
            return False
        elif mode in ("press", "click") and btn not in btn_opts:
            return False
    # BUG FIX: the original compared the single character ``hotkey[-2]`` to
    # "press", which is always False (dead check).  The intent is to reject
    # hotkeys whose last pair's mode is "press".
    if hotkeylist[-2] == "press":
        return False
    return True
2fb47b3f77b4cb3da2b70340b6ed96bd03c0bd14
3,640,106
def find_range_with_sum(values : list[int], target : int) -> tuple[int, int]:
    """Given a list of positive integers, find a range which sums to a
    target value.

    Returns the half-open index pair ``(i, j)`` with
    ``sum(values[i:j]) == target``, or ``(-1, -1)`` if no such range exists.
    """
    i = j = acc = 0
    # Classic sliding window: grow on the right while under target, shrink
    # on the left while over.  BUG FIX: the original loop condition
    # `while j < len(values)` stopped as soon as the right edge reached the
    # end, skipping the remaining left-shrinks (e.g. [1, 2, 3] with
    # target 5 returned (-1, -1) instead of (1, 3)).
    while True:
        if acc == target:
            return i, j
        if acc < target:
            if j == len(values):
                # Cannot grow further while still under target: no match.
                break
            acc += values[j]
            j += 1
        else:
            acc -= values[i]
            i += 1
    return -1, -1
d54f185c98c03f985724a29471ecb1e301c14df5
3,640,107
def output_AR1(outfile, fmri_image, clobber=False):
    """ Create an output file of the AR1 parameter from the OLS pass of
    fmristat.

    Parameters
    ----------
    outfile :
    fmri_image : ``FmriImageList`` or 4D image
        object such that ``object[0]`` has attributes ``coordmap`` and
        ``shape``
    clobber : bool
        if True, overwrite previous output

    Returns
    -------
    regression_output : ``RegressionOutput`` instance
    """
    # Geometry of the output is taken from the first volume.
    first_volume = fmri_image[0]
    outim = ModelOutputImage(outfile,
                             first_volume.coordmap,
                             first_volume.shape,
                             clobber=clobber)
    return outputters.RegressionOutput(outim, outputters.output_AR1)
b805e73992a51045378d5e8f86ccf780d049002b
3,640,108
def feature_bit_number(current):
    """Fuzz bit number field of a feature name table header extension."""
    # UINT8_V: the valid uint8 value constraints, defined elsewhere in this
    # fuzzer module.
    constraints = UINT8_V
    return selector(current, constraints)
4a2103f399765aec9d84c8152922db3801e4a718
3,640,109
def render_page(context, slot, payload):  # pylint: disable=R0201,W0613
    """ Base template slot

    Resolves the template from the request's chapter/module/page query
    parameters and renders it with `payload` as config.  Falls back to an
    empty page when the template cannot be rendered.
    """
    chapter = request.args.get('chapter', '')
    module = request.args.get('module', '')
    page = request.args.get('page', '')
    try:
        if page:
            return render_template(
                f"{chapter.lower()}/{module.lower()}/{page.lower()}.html",
                active_chapter=chapter, config=payload)
        return render_template(
            f"{chapter.lower()}/{module.lower()}.html",
            active_chapter=chapter, config=payload)
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception (best-effort fallback kept).
    except Exception:
        return render_template("common/empty.html",
                               active_chapter=chapter, config=payload)
69e5a837d90084b4c215ff75fb08a47aab1cff97
3,640,110
def add_stripe_customer_if_not_existing(f):
    """ Decorator which creates user as a customer if not already existing
    before making a request to the Stripe API """
    @wraps(f)
    def wrapper(user: DjangoUserProtocol, *args, **kwargs):
        # Ensure the user exists as a Stripe customer, then delegate.
        return f(create_customer(user), *args, **kwargs)
    return wrapper
676e9fad5de545a2627d52942917a49af3c6539d
3,640,111
def noisify_patternnet_asymmetric(y_train, noise, random_state=None):
    """ mistakes in labelling the land cover classes in PatternNet dataset
    cemetery -> christmas_tree_fram
    harbor <--> ferry terminal
    Den.Res --> costal home
    overpass <--> intersection
    park.space --> park.lot
    runway_mark --> park.space
    costal home <--> sparse Res
    swimming pool --> costal home
    """
    nb_classes = 38
    # Row i of P is the label-flip distribution for true class i.
    P = np.eye(nb_classes)
    n = noise
    if n > 0.0:
        # Each pair moves probability `n` from class i to class j,
        # implementing the asymmetric confusions listed in the docstring.
        P[5, 5], P[5, 7] = 1. - n, n
        P[9, 9], P[9, 32] = 1. - n, n
        P[11, 11], P[11, 9] = 1. - n, n
        P[17, 17], P[17, 12] = 1. - n, n
        P[12, 12], P[12, 17] = 1. - n, n
        P[18, 18], P[18, 23] = 1. - n, n
        P[23, 23], P[23, 18] = 1. - n, n
        P[25, 25], P[25, 24] = 1. - n, n
        P[29, 29], P[29, 25] = 1. - n, n
        P[32, 32], P[32, 9] = 1. - n, n
        P[34, 34], P[34, 9] = 1. - n, n
        y_train_noisy = multiclass_noisify(y_train, P=P,
                                           random_state=random_state)
        actual_noise = (y_train_noisy != y_train).mean()
        # NOTE(review): assert is stripped under `python -O`; this sanity
        # check would silently disappear there.
        assert actual_noise > 0.0
        print('Actual noise %.2f' % actual_noise)
        y_train = y_train_noisy
    return y_train
ef37d5c39081cba489076956c0cd948a93ba0387
3,640,114
def group_superset_counts(pred, label):
    """
    Return TP if all label spans appear within pred spans.
    :param pred, label: A group, represented as a dict
    :return: A Counts namedtuple with TP, FP and FN counts
    """
    if pred["label"] != label["label"]:
        return Counts(0, 1, 1)
    for target in label["spans"]:
        covered = any(
            span["start"] <= target["start"] and span["end"] >= target["end"]
            for span in pred["spans"]
        )
        if not covered:
            # One uncovered label span is enough to fail the group.
            return Counts(0, 1, 1)
    return Counts(1, 0, 0)
8fddd5cfdb0050e97ec60d37e4d939b40cf5d891
3,640,115
def other():
    """
    Queries all of the logged in user's Campaigns and plugs them into the
    campaigns template

    NOTE(review): despite the docstring, the body queries all ``Entity``
    rows (no user filter) and renders ``other.html`` — likely a copy-pasted
    docstring; confirm intent.
    """
    entities = db.session.query(Entity)
    entities = [e.to_dict() for e in entities]
    return render_template('other.html', entities=entities)
7c7613f919bf5eecc223cf90715e9bd2ae6eb130
3,640,116
def rel_angle(vec_set1, vec_set2):
    """
    Calculate the relative angle between two vector sets

    Args:
        vec_set1(array[array]): an array of two vectors
        vec_set2(array[array]): second array of two vectors

    Returns:
        Ratio of the angle spanned by vec_set2 to the angle spanned by
        vec_set1, minus 1 (0.0 means both sets span equal angles).
    """
    return vec_angle(vec_set2[0], vec_set2[1]) / vec_angle(vec_set1[0], vec_set1[1]) - 1
af89a10e26968f53200294919d8b72b532aa3522
3,640,117
def check_position_axes(chgcar1: CHGCAR, chgcar2: CHGCAR) -> bool:
    """Check the cell vectors and atom positions are same in two CHGCAR.

    Parameters
    -----------
    chgcar1, chgcar2: vaspy.CHGCAR

    Returns
    -------
    bool
    """
    poscar1 = chgcar1.poscar
    poscar2 = chgcar2.poscar
    # Compare lattice vectors, then fractional positions.
    assert np.allclose(poscar1.cell_vecs,
                       poscar2.cell_vecs), "UnitCells are inconsistent. Abort."
    assert np.allclose(np.array(poscar1.positions),
                       np.array(poscar2.positions)), \
        "Atom positions are inconsistent!!! Abort."
    return True
29eabfd72a664c77d55164953b6819f3eabd72f1
3,640,118
def path_shortest(graph, start):
    """ Pythonic minheap implementation of dijkstra's algorithm """
    # Every node starts unreachable except the source.
    distances = dict.fromkeys(graph, float('infinity'))
    distances[start] = 0
    frontier = [(0, start)]
    while frontier:
        dist_here, node = heap.heappop(frontier)
        for neighbor, weight in graph[node].items():
            candidate = dist_here + weight
            if candidate < distances[neighbor]:
                # Found a shorter route; relax and re-queue.
                distances[neighbor] = candidate
                heap.heappush(frontier, (candidate, neighbor))
    return distances
32fe7df3fb02c3a0c3882f5cc5135417c5193985
3,640,119
import urllib.parse
import urllib.request
import json

def request(url, *args, **kwargs):
    """Requests a single JSON resource from the Wynncraft API.

    :param url: The URL of the resource to fetch, with ``{}`` placeholders
    :type url: :class:`str`
    :param args: Positional arguments to pass to the URL
    :param kwargs: Keyword arguments (:class:`str`) to pass to the URL
    :returns: The returned JSON object as a :class:`dict`
    :rtype: :class:`dict`
    """
    # BUG FIX: `import urllib` alone does not make the `urllib.parse` /
    # `urllib.request` submodules available; import them explicitly.
    quoted_args = (urllib.parse.quote(a) for a in args)
    quoted_kwargs = {k: urllib.parse.quote(v) for k, v in kwargs.items()}
    response = urllib.request.urlopen(url.format(*quoted_args, **quoted_kwargs))
    try:
        # Close the response even if JSON decoding fails (was leaked before).
        return json.load(response)
    finally:
        response.close()
66f23e5a15b44b5c9bc0777c717154749d25987e
3,640,120
def destagger(var, stagger_dim, meta=False):
    """Return the variable on the unstaggered grid.

    This function destaggers the variable by taking the average of the
    values located on either side of the grid box along *stagger_dim*,
    shrinking that dimension by one.

    Args:

        var (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A
            variable on a staggered grid.

        stagger_dim (:obj:`int`): The dimension index to destagger.
            Negative values can be used to choose dimensions referenced
            from the right hand side (-1 is the rightmost dimension).

        meta (:obj:`bool`, optional): Accepted for API compatibility;
            not referenced in this implementation.

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: The
        destaggered variable.
    """
    staggered_size = var.shape[stagger_dim]
    # Two index tuples that are full slices everywhere except the staggered
    # dimension, where they select [0:n-1] and [1:n] respectively.
    lower = [slice(None)] * var.ndim
    upper = list(lower)
    lower[stagger_dim] = slice(0, staggered_size - 1, 1)
    upper[stagger_dim] = slice(1, staggered_size, 1)
    # Average each pair of neighbours along the staggered dimension.
    return .5 * (var[tuple(lower)] + var[tuple(upper)])
89bb08618fa8890001f72a43da06ee8b15b328be
3,640,121
def create_blueprint(request_manager: RequestManager, cache: Cache, dataset_factory: DatasetFactory):
    """ Creates an instance of the blueprint.

    Wires up the ``/metadata`` blueprint: each route submits a memoized,
    potentially long-running computation to the request manager and returns
    a ticketed-state response instead of blocking the request.

    :param request_manager: queues computations and issues state tickets.
    :param cache: memoizes the computation results across requests.
    :param dataset_factory: supplies the prepared data frame to analyse.
    :return: the configured blueprint.
    """
    blueprint = Blueprint('metadata', __name__, url_prefix='/metadata')

    # Memoized so repeated requests reuse the previously computed result.
    @cache.memoize()
    def _get_method_types_per_approach():
        frame = dataset_factory.get_prepared_data_frame()
        return metadata.get_method_type_count_per_approach(frame)

    @cache.memoize()
    def _get_approach_type_counts():
        frame = dataset_factory.get_prepared_data_frame()
        return metadata.get_approach_type_count(frame)

    # Route handlers are referenced only through the decorators below.
    # NOTE(review): the route docstrings look like swagger/flasgger API
    # specs consumed at runtime -- left untouched; confirm before editing.
    # pylint: disable=unused-variable
    @blueprint.route('method/count')
    def get_method_count():
        """ Triggers calculation of number of method types per approach.
        ---
        response:
            200:
                description: The retrieved result will be a JSON object
                    representing the number of different method types per
                    approach.
                application/json:
                    schema:
                        $ref: '#/definitions/RequestResponse'
        """
        ticket = request_manager.submit_ticketed(
            _get_method_types_per_approach)
        return get_state_response(ticket)

    @blueprint.route('approaches/count')
    def get_approach_count():
        """ Computes which approach types are present in the available data
        and how many sessions each of them was used in.
        ---
        response:
            200:
                description: The retrieved result will be a JSON object
                    representing the number of sessions each approach was
                    used in.
                application/json:
                    schema:
                        $ref: '#/definitions/RequestResponse'
        """
        ticket = request_manager.submit_ticketed(_get_approach_type_counts)
        return get_state_response(ticket)

    return blueprint
32693a6286e4ffb15e4820dbd7ad5fdbe6632e95
3,640,123
def sides(function_ast, parameters, function_callback):
    """
    Given an ast, parses both sides of an expression.

    Returns a ``(left, right)`` tuple of the two parsed sub-expressions.

    sides(b != c) => None
    """
    left, right = (
        side(function_ast[key], parameters, function_callback)
        for key in ('leftExpression', 'rightExpression')
    )
    return (left, right)
9ed00100122f821340a0db37e77bfcf786eacdf9
3,640,124
def print_url(host, port, datasets):
    """
    Prints a list of available dataset URLs, if any. Otherwise, prints a
    generic URL.

    :param host: hostname the server is bound to
    :param port: port the server is listening on
    :param datasets: iterable of dataset path strings; may be empty
    """
    def url(path = None):
        # Build a colored, clickable URL; an empty path yields the root URL.
        return colored(
            "blue",
            "http://{host}:{port}/{path}".format(
                host = host,
                port = port,
                path = path if path is not None else ""))
    horizontal_rule = colored("green", "—" * 78)
    print()
    print(horizontal_rule)
    if len(datasets):
        print(" The following datasets should be available in a moment:")
        # Case-insensitive alphabetical listing.
        for path in sorted(datasets, key = str.casefold):
            print(" • %s" % url(path))
    else:
        print(" Open <%s> in your browser." % url())
        print()
        print(" ", colored("yellow", "Warning: No datasets detected."))
    print(horizontal_rule)
    print()
37d58dce1672f60d72936d6e1b9644fdd5ab689f
3,640,125
def get_default_sample_path_random(data_path):
    """Return path to sample with default parameters as suffix

    :param data_path: base path of the data set
    :return: sample path built from the default non-related-docs suffix
    """
    # related_docs=False selects the "random" (non-related) suffix variant.
    extra_suffix = get_default_extra_suffix(related_docs=False)
    return get_default_sample_path(data_path, sample_suffix=extra_suffix)
54220840dc6ef1831859a60058506e7503effcb7
3,640,126
def VectorShadersAddMaterialDesc(builder, materialDesc):
    """This method is deprecated. Please switch to AddMaterialDesc."""
    # Backward-compatibility shim for older generated flatbuffers code;
    # forwards unchanged to the current API.
    return AddMaterialDesc(builder, materialDesc)
0aaec1d3e14536a65c9cb876075d12348176096c
3,640,127
import math
def phase_randomize(D, random_state=0):
    """Randomly shift signal phases

    For each timecourse (from each voxel and each subject), computes its DFT
    and then randomly shifts the phase of each frequency before inverting
    back into the time domain. This yields timecourses with the same power
    spectrum (and thus the same autocorrelation) as the original timecourses,
    but will remove any meaningful temporal relationships between the
    timecourses.

    This procedure is described in:

    Simony E, Honey CJ, Chen J, Lositsky O, Yeshurun Y, Wiesel A, Hasson U
    (2016) Dynamic reconfiguration of the default mode network during
    narrative comprehension. Nat Commun 7.

    Parameters
    ----------
    D : voxel by time by subject ndarray
        fMRI data to be phase randomized

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.

    Returns
    ----------
    ndarray of same shape as D
        phase randomized timecourses
    """
    # fft/ifft/check_random_state/np come from module-level imports
    # (presumably numpy.fft and a sklearn-style validator) -- TODO confirm
    # against the file header.
    random_state = check_random_state(random_state)

    F = fft(D, axis=1)
    if D.shape[1] % 2 == 0:
        # Even length: exclude DC (index 0) and Nyquist (n/2); both must
        # stay real for the inverse transform to be real.
        pos_freq = np.arange(1, D.shape[1] // 2)
        neg_freq = np.arange(D.shape[1] - 1, D.shape[1] // 2, -1)
    else:
        # Odd length: only the DC component is excluded.
        pos_freq = np.arange(1, (D.shape[1] - 1) // 2 + 1)
        neg_freq = np.arange(D.shape[1] - 1, (D.shape[1] - 1) // 2, -1)

    # Independent uniform phase in [0, 2*pi) per voxel/frequency/subject.
    shift = random_state.rand(D.shape[0], len(pos_freq),
                              D.shape[2]) * 2 * math.pi

    # Shift pos and neg frequencies symmetrically, to keep signal real
    F[:, pos_freq, :] *= np.exp(1j * shift)
    F[:, neg_freq, :] *= np.exp(-1j * shift)

    return np.real(ifft(F, axis=1))
d8f3230acdf8b3df98995adaadc92f41497a27ea
3,640,128
def monospaced(text):
    """
    Convert all contiguous whitespace into single space and strip leading and
    trailing spaces.

    Parameters
    ----------
    text : str
        Text to be re-spaced

    Returns
    -------
    str
        Copy of input string with all contiguous white space replaced with
        single space " ".
    """
    # REGEX_SPACE is a module-level compiled pattern; presumably matches runs
    # of whitespace -- confirm against the file header.
    return REGEX_SPACE.sub(' ', text).strip()
51f07908dde10ef67bd70b5eb65e03ee832c3755
3,640,129
def molecule_block(*args, **kwargs):
    """
    Generates the TRIPOS Mol2 block for a given molecule, returned as a string

    All positional and keyword arguments are forwarded to the Molecule
    constructor; the MOLECULE, ATOM and BOND sections are concatenated in
    file order and terminated with a newline.
    """
    mol = Molecule(*args, **kwargs)
    sections = [mol.molecule_block(), mol.atom_block(), mol.bond_block(), '\n']
    return ''.join(sections)
79ebf821e105666fb81396197fa0f218b2cf3e48
3,640,130
def setup_test_env(settings_key='default'):
    """Allows easier integration testing by creating RPC and HTTP clients

    :param settings_key: Desired server to use
    :return: Tuple of RPC client, HTTP client, and thrift module
    """
    # `handler`, `RpcClient`, `HttpClient` and `load_module` are provided by
    # module-level imports; the RPC client wraps the shared service handler.
    return RpcClient(handler), HttpClient(), load_module(settings_key)
827a71692dd2eb9946db34289dcf48d5b5d4415b
3,640,131
from typing import List
from typing import Tuple


def calculateCentroid(
        pointCloud : List[Tuple[float, float, float]]
        ) -> Tuple[float, float, float]:
    """Calculate centroid of point cloud.

    Arguments
    --------------------------------------------------------------------------
    pointCloud (float 3-tuple list) -- list of xyz coordinates.

    Returns
    --------------------------------------------------------------------------
    centroid (float 3-tuple) -- centroid of points in point cloud.
    """
    numPoints = len(pointCloud)
    # Sum each coordinate independently, then divide by the point count.
    total_x = sum(point[0] for point in pointCloud)
    total_y = sum(point[1] for point in pointCloud)
    total_z = sum(point[2] for point in pointCloud)
    return total_x / numPoints, total_y / numPoints, total_z / numPoints
0e8d6d578a0a983fe1e68bff22c5cc613503ee76
3,640,132
def get_num_uniq_users(csv_file, userid_col):
    """
    A Helper function to help get the number of unique users

    :param csv_file: path to CSV file
    :param userid_col: Column for user ID
    :return: count of distinct user IDs in that column
    """
    frame = pd.read_csv(csv_file)
    # np.unique returns the sorted distinct values; its size is the count.
    return np.unique(frame[userid_col]).size
ade25596bb308414c80e1aea87d412bd5a340288
3,640,134
from zooniverse_web.models import Survey, QuestionResponse, Response, QuestionOption
from zooniverse_web.utility.survey import generate_new_survey


def administration(request):
    """Administration actions ((re)train acton predictor for a new survey)

    Parameters
    ----------
    request: POST request

    Returns
    -------
    render: django.shortcuts.render (a page to be rendered)
    """
    message = None
    message_class = None
    if request.method == 'POST':
        next_action = request.POST.get('submit', None)
        if next_action == '(Re)Train Recommender':
            # Most recently created active survey, if any.
            previous_survey = Survey.objects.filter(active=True).order_by('-creation_date').first()
            if not previous_survey:
                survey_created = generate_new_survey()
                message_class = 'success'
                message = 'New survey created on {}!'.format(survey_created.creation_date)
            else:
                # Are there any responses for this survey?
                try:
                    for option in QuestionOption.objects.all():
                        QuestionResponse.objects.filter(
                            response=Response.objects.get(
                                status=Response.FINISHED,
                                survey=previous_survey
                            ),
                            answer=option.option
                        )
                    survey_created = generate_new_survey()
                    message_class = 'success'
                    message = 'New survey created on {}!'.format(survey_created.creation_date.date())
                except (QuestionOption.DoesNotExist, QuestionResponse.DoesNotExist, Response.DoesNotExist):
                    message = 'You do not have enough question responses saved yet for the current survey! ' \
                              'Try again later.'
                    message_class = 'warning'
                # Bug fix: the original bare `except:` also swallowed
                # SystemExit/KeyboardInterrupt; only real errors belong here.
                except Exception:
                    message = 'Something went wrong while generating the survey. Please try again. <br />' \
                              'If the problem keeps on occuring, please contact your system administrator.'
                    message_class = 'danger'
    else:
        message = ''
        message_class = ''
    return render(
        request,
        'administration/administration.html',
        {
            'message': message,
            'message_class': message_class,
        }
    )
7d5a08450c9058f6fd33a13fc4cf6b714bc7e657
3,640,136
from collections import Counter


def checkout(skus):
    """
    Calculate the total amount for the checkout based on the SKUs entered in

    :param skus: string, each char is an item
    :return: int, total amount of the cart, including special offers;
        -1 if any SKU is not a known item in ITEMS
    """
    total = 0
    counter = Counter(skus)
    # go through the offers (biggest first), and calculate the line total,
    # and any free offers...
    for item in counter:
        if item not in ITEMS:
            return -1
        line_total = 0
        free_offer = 0
        qty = counter[item]
        # Largest offers first so they are consumed before smaller ones.
        ordered_offers = sorted(ITEMS[item]['special_offers'],
                                key=lambda k: (k['min_quantity']), reverse=True)
        # does this item have any specials?
        for offer in ordered_offers:
            # how many times does the current offer fit into the remainder?
            number_of_offers = qty // offer['min_quantity']
            if 'price' in offer:
                # how many are left, put in qty for next offer...
                number_of_items_in_offer = number_of_offers * offer['min_quantity']
                qty -= number_of_items_in_offer
                # update the line total
                line_total += number_of_offers * offer['price']
            elif 'other_free' in offer:
                if offer['other_free'] in counter:
                    # make sure we have the min required items
                    if counter[item] >= offer['min_quantity']:
                        other_free = offer['other_free']
                        # NOTE(review): uses the full price of the free item;
                        # if a multi-buy price applied to it this may
                        # over-discount -- confirm intended behaviour.
                        free_offer = number_of_offers * ITEMS[other_free]['price']
        # add any remaining qty as full price to the line_total
        line_total += qty * ITEMS[item]['price']
        # add the line total, and subtract the free offers
        total += line_total
        total -= free_offer
    return total
ad00a9c3e3cd4f34cfd7b5b306d3863decc0751b
3,640,137
def func_tradeg(filename, hdulist=None, whichhdu=None):
    """Return the fits header value TELRA in degrees.

    :param filename: FITS file to open when no HDU list is supplied
    :param hdulist: optionally, an already-open HDU list to reuse (it is
        not closed by this function)
    :param whichhdu: unused; apparently kept for signature compatibility
        with sibling header-accessor functions -- TODO confirm
    :return: TELRA converted to degrees
    """
    hdulist2 = None
    if hdulist is None:
        # We own this handle, so we close it again below.
        hdulist2 = fits.open(filename, 'readonly')
    else:
        hdulist2 = hdulist
    telra = fitsutils.get_hdr_value(hdulist2, 'TELRA')
    if hdulist is None:
        hdulist2.close()
    return spmeta.convert_ra_to_deg(telra)
4e6751d2eb0ac9e6264f768e932cbd42c2fc2c4e
3,640,138
def column_indexes(column_names, row_header):
    """項目位置の取得 (look up column positions in the header row)

    Args:
        column_names (dict): mapping from logical key to header column name
        row_header (list): header row containing the column names

    Returns:
        dict: mapping from logical key to that column's index in row_header
    """
    return {
        key: row_header.index(header_name)
        for key, header_name in column_names.items()
    }
4205e31e91cd64f833abd9ad87a02d91eebc8c61
3,640,139
import logging
import pickle
def dmx_psrs(caplog):
    """Sample pytest fixture.

    Loads the pickled 9-yr DMX pulsar objects named in the module-level
    `psr_names` list from `datadir` and returns them as a list.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # Suppress sub-CRITICAL log output while the pickles load.
    caplog.set_level(logging.CRITICAL)
    psrs = []
    for p in psr_names:
        # NOTE: pickle is acceptable here only because these are trusted
        # test fixtures shipped with the repository.
        with open(datadir+'/{0}_ng9yr_dmx_DE436_epsr.pkl'.format(p), 'rb') as fin:
            psrs.append(pickle.load(fin))
    return psrs
6bbb5df017374f207d7c9338a737212f5c7e5b23
3,640,140
def fit_stats(act_map, param, func=KentFunc):
    """Generate fitting statistics from scipy's curve fitting

    :param act_map: 2-D activation map sampled on the module-level
        (phi_arr, theta_arr) grid -- TODO confirm grid orientation
    :param param: fitted parameter vector unpacked into `func`
    :param func: model function taking an (N, 2) array of (theta, phi) rows
    :return: tuple of (residual map with act_map's shape, R-squared)
    """
    # Build the (theta, phi) sample coordinates matching act_map's layout.
    phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
    Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
    fval = act_map.flatten()
    fpred = func(Xin, *param)  # KentFunc
    res = fval - fpred
    # Explained-variance flavour of R^2: 1 - MSE / variance of the data.
    rsquare = 1 - (res**2).mean() / fval.var()
    return res.reshape(act_map.shape), rsquare
97e4223daf3e140f1a091491f18012840b8c006a
3,640,141
def conv2d_for_hpool_valid_width_wrapper(inputs,filters,strides,padding,**kwargs): """ Wraps tf.layers.conv2d to allow valid convolution across signal width and 'same' convolution across signal height when padding is set to "valid_time" Arguments: inputs (TF Tensor): Tensor input. filters (TF Tensor): Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels] strides (int or tuple/list) : An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding (string): One of `"valid"`, `"same"`, or `"valid_time"` (case-insensitive). kwargs (dictionary): Specifies all other arguments required by tf.layers.conv2d. Passes these directly to function without modification. See Tensorflow documentation for further details. Returns: (TF Tensor): Output of tf.layers.conv2d. 
""" #Collects relvant parameters size=inputs.get_shape() kernel_size = filters.get_shape() filter_height = int(kernel_size[0]) in_height = int(size[1]) #Calculates according to SAME padding formula if (in_height % strides[0] == 0): pad_along_height = max(filter_height - strides[0], 0) else: pad_along_height = max(filter_height - (in_height % strides[0]), 0) pad_top = pad_along_height // 2 pad_bottom = pad_along_height - pad_top #Pads signal if VALID_TIME is selected and padding is necessary #Otherwise, pass inputs through and allow specified convolutioon if pad_along_height == 0 or padding.upper() != 'VALID_TIME': padding = 'VALID' if padding.upper() == 'VALID_TIME' else padding output_tensor = tf.nn.conv2d(inputs,filter=filters, strides=strides,padding=padding, **kwargs) else: #Pads input tensor and moves conv2d to valid padding paddings = tf.constant([[0,0],[pad_top, pad_bottom], [0, 0],[0,0]]) input_padded = tf.pad(inputs,paddings) output_tensor=tf.nn.conv2d(input_padded,filter=filters, strides=strides, padding="VALID", **kwargs) return output_tensor
9b4438c687232245e645ea5714e7ad7899ecd98b
3,640,142
import copy
def resample_cells(tree, params, current_node = 'root', inplace = False):
    """ Runs a new simulation of the cell evolution on a fixed tree

    :param tree: tree whose nodes carry 'cell' and 'time_to_parent'
        attributes (networkx-style API with successors/nodes) -- TODO
        confirm the exact graph type
    :param params: simulation parameters forwarded to sim.evolve_cell
    :param current_node: node whose children are (re)simulated; recursion
        starts from 'root'
    :param inplace: when False, work on a deep copy and leave the caller's
        tree untouched
    :return: the tree with freshly simulated 'cell' attributes
    """
    if not inplace:
        tree = copy.deepcopy(tree)
    for child in tree.successors(current_node):
        # Each child restarts from a copy of the parent's cell with a fresh
        # RNG seed, then evolves along the branch to the child.
        initial_cell = tree.nodes[current_node]['cell'].deepcopy()
        initial_cell.reset_seed()
        tree.nodes[child]['cell'] = sim.evolve_cell(initial_cell, tree.nodes[child]['time_to_parent'], params)
        # Recurse in place: the working copy (if any) was already made above.
        resample_cells(tree, params, current_node = child, inplace = True)
    return tree
10cde9abdf3a6271aa20276c3e193b1c93ca7908
3,640,143
def get_sql_query(table_name:str) -> str:
    """Fetch SQL query file for generation of dim or fact table(s).

    :param table_name: base name of the ``.sql`` file under ``./models/sql/``
    :return: the query file's contents as a string
    :raises FileNotFoundError: if no such query file exists
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(f'./models/sql/{table_name}.sql') as sql_file:
        return sql_file.read()
fc3308eae51b7d10667a50a0f4ee4e295bfea8d0
3,640,144
def _map_args(call_node, function):
    """Maps AST call nodes to the actual function's arguments.

    Args:
        call_node: ast.Call
        function: Callable[..., Any], the actual function matching call_node

    Returns:
        Dict[Text, ast.AST], mapping each of the function's argument names to
        the respective AST node.
    """
    keyword_nodes = {keyword.arg: keyword.value
                     for keyword in call_node.keywords}
    return tf_inspect.getcallargs(function, *call_node.args, **keyword_nodes)
b19befded386e6081be9858c7eb31ffd45c96ef3
3,640,145
def sub_bases( motif ):
    """
    Return all possible specifications of a motif with degenerate bases.

    Each IUPAC degenerate-base symbol in *motif* is replaced by a regex
    character class of the concrete bases it stands for, so the result can
    be used directly as a regular-expression pattern.
    """
    # IUPAC degenerate base -> regex character class.
    subs = {"W":"[AT]", \
            "S":"[CG]", \
            "M":"[AC]", \
            "K":"[GT]", \
            "R":"[AG]", \
            "Y":"[CT]", \
            "B":"[CGT]", \
            "D":"[AGT]", \
            "H":"[ACT]", \
            "V":"[ACG]", \
            "N":"[ACGTN]"}
    # Bug fix: dict.iteritems() is Python 2 only; items() works on both.
    # str.replace is a no-op when the symbol is absent, so no find() guard
    # is needed. Substituted classes contain only A/C/G/T/N and brackets,
    # none of which are degenerate symbols, so order of replacement is safe.
    for symbol, sub in subs.items():
        motif = motif.replace(symbol, sub)
    return motif
10ff2ea1959aba103f1956398afb5f1d8801edd7
3,640,146
import logging


def parse(input_file_path):
    """
    Parse input file

    :param input_file_path: input file path
    :return: Image list
    """
    logging.info("parsing %s", input_file_path)
    vertical_count = 0
    horizontal_count = 0
    images = []
    with open(input_file_path, 'r') as input_file:
        # First line declares how many image records follow.
        declared_count = int(input_file.readline())
        for index, record in enumerate(input_file.readlines()):
            fields = record.rstrip().split(' ')
            orientation = fields[0]
            # Remaining fields (after the tag count) are the tags themselves.
            images.append(Image(index, orientation, set(fields[2:])))
            if orientation == 'V':
                vertical_count += 1
            else:  # H
                horizontal_count += 1
        logging.info('parsing %s done', input_file_path)
        logging.info('%d images found (%d V,%d H)',
                     declared_count, vertical_count, horizontal_count)
    return images
ecd4fd066d1128f385da59965a93e59c038052bd
3,640,147
def char(ctx, number):
    """ Returns the character specified by a number """
    # Coerce via the expression context so non-int inputs are accepted,
    # then map the resulting code point to a one-character string.
    return chr(conversions.to_integer(number, ctx))
5c5254978055f690b6801479b180ff39b31e2248
3,640,148
import re
import logging
async def get_character_name(gear_url, message):
    """
    It is *sometimes* the case that discord users don't update their username
    to be their character name (eg for alts). This method renders the
    gear_url in an HTML session and parses the page to attempt to find the
    character's name.

    This assumes a specific format of the page: player names are nested in an
    h3 element with css class named 'class-[player class]'

    Returns the character's name if successful, otherwise returns the message
    sender's display name in discord.
    """
    # Fallback: the sender's Discord display name.
    name = message.author.display_name
    if not re.match(SIXTY_UPGRADES_REGEX, gear_url):
        return name
    for i in range(MAX_FETCH_CHARACTER_NAME_RETRIES):
        try:
            # Render the page (JS included) and scrape the first class-* h3.
            asession = AsyncHTMLSession()
            webpage = await asession.get(gear_url)
            await webpage.html.arender()
            query_selector = "h3[class^='class-']"
            name = webpage.html.find(query_selector, first=True).text
            break
        except Exception as e:
            # Log and retry; the fallback name is returned if all attempts fail.
            logging.error(e)
        finally:
            # NOTE(review): if AsyncHTMLSession() itself raises, `asession`
            # is unbound here and this close() raises NameError -- confirm
            # whether the constructor can fail in practice.
            await asession.close()
    return name
cdd18e0123f226d2c59d41bbf39e0dfc02188d73
3,640,149
import pandas
def get_treant_df(tags, path='.'):
    """Get treants as a Pandas DataFrame

    Args:
      tags: treant tags to identify the treants
      path: the path to search for treants

    Returns:
      a Pandas DataFrame with the treant name, tags and categories

    >>> from click.testing import CliRunner
    >>> from toolz.curried import do
    >>> with CliRunner().isolated_filesystem() as dir_:
    ...     assert pipe(
    ...         dir_,
    ...         dtr.Treant,
    ...         do(lambda x: x.__setattr__('tags', ['atag'])),
    ...         lambda x: x.uuid[:8],
    ...         lambda x: x == get_treant_df(['atag'], path=dir_).uuid[0]
    ...     )
    """
    # Pipeline: tags -> matching treants -> per-treant data dicts -> DataFrame.
    return pipe(
        tags,
        get_by_tags(path=path),
        lambda x: x.map(get_treant_data),
        pandas.DataFrame,
    )
a5972646e27ffd88d18f1c0d212a2ae081ebe4f1
3,640,150
def gather_keypoints(keypoints_1, keypoints_2, matches):
    """
    Gather matched keypoints in a (n x 4) array, where each row corresponds
    to a pair of matching keypoints' coordinates in the two images as
    (x1, y1, x2, y2).
    """
    rows = []
    for match in matches:
        first_pt = keypoints_1[match.queryIdx].pt
        second_pt = keypoints_2[match.trainIdx].pt
        rows.append([first_pt[0], first_pt[1], second_pt[0], second_pt[1]])
    return np.array(rows)
5abef87c570493b57e81dcddc2732ed541aa6a08
3,640,151
async def stop():
    """ Stop any playing audio. """
    # Sound is a module-level audio controller; stop playback, then report
    # the resulting playback state back to the caller.
    Sound.stop()
    return Sound.get_state()
3c7ea7aae3e8dd7e3b33ddd9beed0ce2182800bc
3,640,152
def is_numeric(X, compress=True):
    """
    Determine whether input is numeric array

    Parameters
    ----------
    X: Numpy array
    compress: Boolean

    Returns
    -------
    V: Numpy Boolean array if compress is False, otherwise Boolean Value
    """
    def can_be_float(val):
        # A value is "numeric" if float() accepts it.
        try:
            float(val)
        except ValueError:
            return False
        return True

    element_wise = np.vectorize(can_be_float, otypes=[bool])(X)
    if compress:
        return np.all(element_wise)
    return element_wise
ad28657f51680cd193671a6a8a8da6a91390dc15
3,640,153
def figure_ellipse_fitting(img, seg, ellipses, centers, crits, fig_size=9):
    """ show figure with result of the ellipse fitting

    :param ndarray img: 2-D grayscale image the ellipses were fitted on
    :param ndarray seg: segmentation; used only for the axis limits
    :param [(int, int, int, int, float)] ellipses: (row, col, height, width,
        angle) parameters per ellipse
    :param [(int, int)] centers: ellipse centres, one per ellipse
    :param [float] crits: fitting criterion per ellipse (shown in the legend)
    :param float fig_size: figure size of the longer image side (inches)
    :return: matplotlib figure

    >>> img = np.random.random((100, 150, 3))
    >>> seg = np.random.randint(0, 2, (100, 150))
    >>> ells = np.random.random((3, 5)) * 25
    >>> centers = np.random.random((3, 2)) * 25
    >>> crits = np.random.random(3)
    >>> fig = figure_ellipse_fitting(img[:, :, 0], seg, ells, centers, crits)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    assert len(ellipses) == len(centers) == len(crits), \
        'number of ellipses (%i) and centers (%i) and criteria (%i) ' \
        'should match' % (len(ellipses), len(centers), len(crits))

    fig, ax = create_figure_by_image(img.shape[:2], fig_size)
    assert img.ndim == 2, \
        'required image dimension is 2 to instead %s' % repr(img.shape)
    ax.imshow(img, cmap=plt.cm.Greys_r)

    for i, params in enumerate(ellipses):
        c1, c2, h, w, phi = params
        # Rasterise the ellipse outline and draw it as coloured points;
        # colours cycle through the module-level COLORS palette.
        rr, cc = ellipse_perimeter(int(c1), int(c2), int(h), int(w), phi)
        ax.plot(cc, rr, '.', color=COLORS[i % len(COLORS)],
                label='#%i with crit=%d' % ((i + 1), int(crits[i])))
    ax.legend(loc='lower right')

    # plt.plot(centers[:, 1], centers[:, 0], 'ow')
    for i in range(len(centers)):
        ax.plot(centers[i, 1], centers[i, 0], 'o', color=COLORS[i % len(COLORS)])
    # Match the axes to the segmentation extent (image row/col convention).
    ax.set_xlim([0, seg.shape[1]])
    ax.set_ylim([seg.shape[0], 0])
    ax.axis('off')
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    return fig
de6b58a01a64c3123f5aad4dfb6935c6c19a041c
3,640,154
def fmt_bytesize(num: float, suffix: str = "B") -> str:
    """Change a number of bytes in a human readable format.

    Args:
        num: number to format
        suffix: (Default value = 'B')

    Returns:
        The value formatted in human readable format (e.g. KiB).
    """
    # Divide down through the binary prefixes until the value fits.
    for prefix in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(num) < 1024.0:
            return f"{num:3.1f} {prefix}{suffix}"
        num /= 1024.0
    # Anything that survives all divisions is in the yobi range.
    return f"{num:.1f} Yi{suffix}"
09b36d229856004b6df108ab1ce4ef0a9c1e6289
3,640,155
def get_kpoint_mesh(structure: Structure, cutoff_length: float, force_odd: bool = True): """Calculate reciprocal-space sampling with real-space cut-off.""" reciprocal_lattice = structure.lattice.reciprocal_lattice_crystallographic # Get reciprocal cell vector magnitudes abc_recip = np.array(reciprocal_lattice.abc) mesh = np.ceil(abc_recip * 2 * cutoff_length).astype(int) if force_odd: mesh += (mesh + 1) % 2 return mesh
0536b5e2c37b7ba98d240fc3099fad93d246f730
3,640,156
def resnet_v1_101(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v1_101', **kwargs):
    """ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
    # Standard ResNet-101 stage layout: 3 + 4 + 23 + 3 bottleneck units.
    blocks = [
        resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
        resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
        resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
        # Final stage uses stride 1 (resnet_v1 handles output_stride control).
        resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
    ]
    return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope, **kwargs)
138289084d48edd9d9c1096bd790b1479d902ec1
3,640,157
def is_package_authorized(package_name):
    """ get user information if it is authorized user in the package config

    Aborts the request with 403 when the package declares admins and the
    current user is neither a package admin nor a core identity admin.

    Returns:
        [JSON string]: [user information session]
    """
    authorized_users = get_package_admins(package_name)
    user_info = get_user_info()
    user_dict = j.data.serializers.json.loads(user_info)
    username = user_dict["username"]
    # if the package doesn't include admins then allow any authenticated user
    if authorized_users and not any([username in authorized_users, username in j.core.identity.me.admins]):
        return abort(403)
    return user_info
59b89ebb9c8579d61a18a194e7f5f4bd41d738b6
3,640,158
def submit_search_query(query_string, query_limit, query_offset, class_resource):
    """ Submit a search query request to the RETS API

    :param query_string: query expression; coerced to str via %-formatting
    :param query_limit: maximum number of records to return
    :param query_offset: record offset to start from
    :param class_resource: RETS class resource exposing ``search``
    :return: the resource's search result
    """
    formatted_query = '%s' % query_string
    return class_resource.search(
        query=formatted_query,
        limit=query_limit,
        offset=query_offset)
f8c30c86f7ff7c33fc96b26b1491ddaa48710fbc
3,640,159
def one_hot_encode(df):
    """
    desc : one hot encodes categorical cols

    args:
        df (pd.DataFrame) : stroke dataframe

    returns:
        df (pd.DataFrame) : stroke dataframe with one_hot_encoded columns
            replacing the original categorical (object-dtype) columns;
            numeric columns are left untouched
    """
    stroke_data = df.copy()

    # Extract the object-dtype (categorical) columns to encode.
    cat_cols = stroke_data.select_dtypes(include=["object"])
    cat_vals = cat_cols.values
    cat_cols_names = cat_cols.columns

    encoder = OneHotEncoder(sparse=False)
    encoded_vals = encoder.fit_transform(cat_vals)
    encoded_names = encoder.get_feature_names(cat_cols_names)
    encoded_cols = pd.DataFrame(encoded_vals, columns=encoded_names,
                                index=cat_cols.index)

    # Replace the original categorical columns with their encoded versions.
    # (Leftover debug print statements removed.)
    stroke_data.drop(columns=cat_cols_names, axis=1, inplace=True)
    stroke_data = pd.concat([stroke_data, encoded_cols], axis=1)
    return stroke_data
6895dfbc4bb57d5e8d9a5552e2ac7fcb94e07434
3,640,160
def assert_increasing(a):
    """Utility function for enforcing ascending values.

    This function's handle can be supplied as :py:kwarg:`post_method` to a
    :py:func:`processed_proprty <pyproprop>` to enforce values within a
    :py:type:`ndarray <numpy>` are in ascending order. This is useful for
    enforcing time guesses to be sequential.
    """
    # None passes straight through; only real arrays are validated.
    if a is None:
        return a
    if not np.all(np.diff(a) >= 0):
        raise ValueError(f"Elements in {a} must be in ascending numerical order.")
    return a
f1ded37b40686cf400da23f567880e73180a78fe
3,640,161
def copy_to_device(device, remote_path, local_path='harddisk:', server=None, protocol='http', vrf=None, timeout=300, compact=False, use_kstack=False, fu=None, http_auth=True, **kwargs):
    """ Copy file from linux server to the device.

        Args:
            device (Device): Device object
            remote_path (str): remote file path on the server
            local_path (str): local file path to copy to on the device (default: harddisk:)
            server (str): hostname or address of the server (default: None)
            protocol(str): file transfer protocol to be used (default: http)
            vrf (str): vrf to use (optional)
            timeout(int): timeout value in seconds, default 300
            compact(bool): compress image option for n9k, defaults False
            fu(obj): FileUtils object to use instead of creating one. Defaults to None.
            use_kstack(bool): Use faster version of copy, defaults False
                              Not supported with a file transfer protocol
                              prompting for a username and password
            http_auth (bool): Use http authentication (default: True)

        Returns:
            None

        If the server is not specified, a HTTP server will be spawned
        on the local system and serve the directory of the file
        specified via remote_path and the copy operation will use http.

        If the device is connected via CLI proxy (unix jump host) and the proxy has
        'socat' installed, the transfer will be done via the proxy automatically.
    """
    # Thin pass-through: all behaviour is delegated to the shared generic
    # implementation; this wrapper only fixes the platform's defaults.
    return generic_copy_to_device(device=device,
                                  remote_path=remote_path,
                                  local_path=local_path,
                                  server=server,
                                  protocol=protocol,
                                  vrf=vrf,
                                  timeout=timeout,
                                  compact=compact,
                                  use_kstack=use_kstack,
                                  fu=fu,
                                  http_auth=http_auth,
                                  **kwargs)
762ef928656473458e0fee8dc47c1a581103ed0e
3,640,162
def decode_replay_header(contents):
    """Decodes and return the replay header from the contents byte string."""
    # VersionedDecoder walks the protocol's type table; the header type id
    # selects which root structure to decode from the byte stream.
    decoder = VersionedDecoder(contents, protocol.typeinfos)
    return decoder.instance(protocol.replay_header_typeid)
1fcee7900a5c0c310e67afe31a154b8310da7089
3,640,164
def _generate_indexed(array: IndexedArray) -> str:
    """Generate an indexed Bash array.

    ``None`` entries are skipped so the generated array is sparse, with each
    remaining element rendered as ``[index]=value``.
    """
    entries = (
        f"[{position}]={_generate_string(element)}"
        for position, element in enumerate(array)
        if element is not None
    )
    return "(" + " ".join(entries) + ")"
2443b3c6be74684360c395995b3d16d4ebecf1d8
3,640,165
from datetime import datetime, timedelta


def up_date(dte, r_quant, str_unit, bln_post_colon):
    """ Adjust a date in the light of a (quantity, unit) tuple,
        taking account of any recent colon

    :param dte: datetime to adjust
    :param r_quant: numeric quantity for the unit
    :param str_unit: unit code -- 'w'eeks / 'd'ays / 'h'ours / 'm'inutes
        (relative offsets); 'Y'/'y' year, 'H' hour-of-day, 'M'
        minute-of-hour, 'a'm / 'p'm (absolute settings); or a month-name
        fragment found in the module-level STR_MONTHS string
    :param bln_post_colon: True when the quantity followed a colon,
        which changes how 'a'/'p' are interpreted
    :return: the adjusted datetime; results in the past are rolled
        forward one year (assume not past)
    """
    if str_unit == 'w':
        dte += timedelta(weeks=r_quant)
    elif str_unit == 'd':
        dte += timedelta(days=r_quant)
    elif str_unit == 'h':
        dte += timedelta(hours=r_quant)
    elif str_unit == 'm':
        dte += timedelta(minutes=r_quant)
    elif str_unit in ('Y', 'y'):
        if r_quant > 500:  # jul 2019 vs jul 17
            r_year = r_quant
        else:
            r_year = datetime.now().year + r_quant
        try:
            dte = datetime.replace(dte, year=int(r_year))
        except ValueError:
            # e.g. Feb 29 in a non-leap target year: clamp to Feb 28.
            dte = datetime.replace(dte, day=28, month=2,
                                   year=int(datetime.now().year + r_quant))
    elif str_unit == 'H':
        dte = datetime.replace(dte, hour=int(r_quant), second=0,
                               microsecond=0)
    elif str_unit == 'M':
        dte = datetime.replace(dte, minute=int(r_quant), second=0,
                               microsecond=0)
    elif str_unit == 'a':
        if not bln_post_colon:
            dte = datetime.replace(dte, hour=int(r_quant), minute=0,
                                   second=0, microsecond=0)
    elif str_unit == 'p':
        if bln_post_colon:
            # adjust by 12 hours if necessary
            if dte.hour < 12:
                dte = datetime.replace(dte, hour=dte.hour + 12)
        else:
            p_quant = r_quant
            if p_quant < 12:
                p_quant += 12
            dte = datetime.replace(dte, hour=int(p_quant), minute=0,
                                   second=0, microsecond=0)
    elif (len(str_unit) >= 3) and (STR_MONTHS.find(str_unit) != -1):
        # Bug fix: '/' yields a float month under Python 3, which
        # datetime.replace rejects; integer division is the intent
        # (identical result for ints under Python 2).
        dte = datetime.replace(dte, month=(STR_MONTHS.index(str_unit) + 3) // 3,
                               day=int(r_quant), second=0, microsecond=0)

    # refers to this year or next year ? (assume not past)
    dte_today = datetime.today().replace(hour=0, minute=0, \
        second=0, microsecond=0)
    if dte < dte_today:
        dte = dte.replace(year=(dte_today.year + 1))
    return dte
684b09e5d37bf0d3445262b886c73188d35425ef
3,640,166
from typing import MutableMapping
def read_options() -> Options:
    """ read command line arguments and options

    Returns:
        option class(Options)

    Raises:
        NotInspectableError: the file or the directory does not exist.
    """
    args: MutableMapping = docopt(__doc__)
    # Validate <path>: convert via get_path, then require that it exists as
    # a file or a directory.
    schema = Schema({
        "<path>": And(Use(get_path),
                      lambda path: path.is_file() or path.is_dir(),
                      error=f"The specified path {args['<path>']}"
                      " does not exist.\n")
    })
    try:
        args = schema.validate(args)
    except SchemaError as e:
        # Re-raise as the project's domain exception, keeping the message.
        raise NotInspectableError(e.args[0])
    return Options(args["<path>"])
25cd3c29f6e206fd97334f7a48d267680a9e553c
3,640,167
def test_generator_single_input_2():
    """
    Feature: Test single str input
    Description: input str
    Expectation: success

    Verifies that GeneratorDataset yields identical single-string samples
    from three source styles: a generator function, a random-access
    (__getitem__) dataset, and a sequential-access (iterator) dataset.
    """
    # Source 1: plain generator yielding 'a'..('a'+63).
    def generator_str():
        for i in range(64):
            yield chr(ord('a') + i)

    # Source 2: random-access dataset (len + __getitem__).
    class RandomAccessDatasetInner:
        def __init__(self):
            self.__data = [i for i in range(64)]

        def __getitem__(self, item):
            return chr(ord('a') + self.__data[item])

        def __len__(self):
            return 64

    # Source 3: sequential-access dataset (iterator protocol).
    class SequentialAccessDataset:
        def __init__(self):
            self.__data = [i for i in range(64)]
            self.__index = 0

        def __next__(self):
            if self.__index >= 64:
                raise StopIteration
            item = chr(ord('a') + self.__data[self.__index])
            self.__index += 1
            return item

        def __iter__(self):
            self.__index = 0
            return self

        def __len__(self):
            return 64

    def assert_generator_single_input_2(data):
        # apply dataset operations
        data1 = ds.GeneratorDataset(data, ["data"], shuffle=False)
        i = 0
        for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
            # each data is a dictionary; strings come back UTF-8 encoded
            s = chr(ord('a') + i)
            golden = np.array(bytes(s, encoding='utf8'))
            np.testing.assert_array_equal(item["data"], golden)
            i = i + 1

    assert_generator_single_input_2(generator_str)
    assert_generator_single_input_2(RandomAccessDatasetInner())
    assert_generator_single_input_2(SequentialAccessDataset())
d251279c0740b52c9d32c2ae572f6dbdf32f36ea
3,640,168
def SLINK(Dataset, d):
    """function to execute the SLINK single-linkage clustering algorithm

    Args:
        Dataset(List) :- list of data points, who are also lists
        d(int) :- dimension of data points (unused in the computation;
            kept for interface compatibility -- TODO confirm)

    Returns:
        res(Iterables) :- list of triples sorted by the second element;
            first element is the index of the point, the other two are the
            pointer representation of the dendrogram: the lowest level at
            which point i is no longer the last point in its cluster, and
            the last point in the cluster which i then joins
        Heights(Iterables) :- list of the second element of res' triples
    """
    n = len(Dataset)
    # A ~ lambda (merge heights), B ~ pi (pointers); `inf` and `metrics`
    # come from module-level imports -- confirm against the file header.
    A = [inf for i in range(n)]
    B = [0 for i in range(n)]
    # initialisation
    A[0] = inf
    B[0] = 0
    for k in range(1, n):
        # Incorporate point k into the pointer representation.
        B[k] = k
        A[k] = inf
        # M[i]: distance from each earlier point i to the new point k.
        M = [0 for i in range(k + 1)]
        for i in range(k):
            M[i] = metrics(Dataset[i], Dataset[k])
        # Update the pointer representation (Sibson's recurrence).
        for i in range(k):
            if(A[i] >= M[i]):
                M[B[i]] = min(M[B[i]], A[i])
                A[i] = M[i]
                B[i] = k
            if(A[i] < M[i]):
                M[B[i]] = min(M[B[i]], M[i])
        # Repair pointers that now bypass their former target.
        for i in range(k):
            if(A[i] >= A[B[i]]):
                B[i] = k
    # Package as (index, height, pointer) triples sorted by merge height.
    res = [(index, i, j) for index, (i, j) in enumerate(zip(A, B))]
    res = sorted(res, key=lambda x: x[1])
    Heights = [triple[1] for triple in res]
    return(res, Heights)
d10a3f8cb3e6d81649bebd4a45f5be79d206f1be
3,640,169
def static_file(path='index.html'):
    """Serve a file from the app's static folder, defaulting to index.html."""
    response = app.send_static_file(path)
    return response
5c3f2d423d029a8e7bb8db5fbe3c557f7a6aa9c3
3,640,170
def lazy_property(function):
    """Decorator turning a method into a lazily-computed, cached property.

    On first access the wrapped *function* is called once and its result is
    stored on the instance under ``_<function name>``; subsequent accesses
    return the cached value without recomputation.
    """
    attribute = '_' + function.__name__
    missing = object()  # sentinel distinguishing "not yet computed"

    @property
    @wraps(function)
    def wrapper(self):
        value = getattr(self, attribute, missing)
        if value is missing:
            value = function(self)
            setattr(self, attribute, value)
        return value

    return wrapper
db1d62eb66a018bc166b67fe9c2e25d671261f77
3,640,171
from pathlib import Path


def basename(fname):
    """
    Split a file name into its path, name without extension, and extension.

    Parameters
    ----------
    fname : str or path-like
        The file name to split.

    Returns
    -------
    tuple
        ``(path, name, ext)`` — parent directory, stem, and suffix
        as ``pathlib`` objects/strings.

    Examples
    --------
    >>> fname = '../test/data/FSI.txt.zip'
    >>> print('{}, {}, {}'.format(*basename(fname)))
    ../test/data, FSI.txt, .zip
    """
    # Path() accepts both strings and existing path objects (it is
    # idempotent for path-like input), so no isinstance check against an
    # externally defined `path_type` is needed.
    fname = Path(fname)
    return fname.parent, fname.stem, fname.suffix
55cd53ec71e4e914493129e40fa216ddcdbe8083
3,640,172
def validate_lockstring(lockstring):
    """
    Validate so lockstring is on a valid form.

    Args:
        lockstring (str): Lockstring to validate.

    Returns:
        is_valid (bool): If the lockstring is valid or not.
        error (str or None): A string describing the error, or None
            if no error was found.

    """
    global _LOCK_HANDLER
    # Lazily create the shared handler on first use.
    handler = _LOCK_HANDLER
    if not handler:
        handler = LockHandler(_ObjDummy())
        _LOCK_HANDLER = handler
    return handler.validate(lockstring)
0feb67597e31667013ab182159c8433ae4a80346
3,640,173
from typing import Iterable


def decode_geohash_collection(geohashes: Iterable[str]):
    """
    Return collection of geohashes decoded into location coordinates.

    Parameters
    ----------
    geohashes: Iterable[str]
        Collection of geohashes to be decoded

    Returns
    -------
    Iterable[Tuple[float, float]]
        Collection of location coordinates in Latitude/Longitude
    """
    # Only the first two decoded components (latitude, longitude) are kept;
    # decode_geo_hash may return additional data beyond the coordinates.
    return [
        (decoded[0], decoded[1])
        for decoded in map(decode_geo_hash, geohashes)
    ]
2e673c852c7ac2775fd29b32243bdc8b1aa83d77
3,640,174
def renormalize_sparse(A: sp.spmatrix) -> sp.spmatrix:
    """Get (D**-0.5) * A * (D ** -0.5), where D is the diagonalized row sum.

    Rows whose sum is zero are left untouched (their scaling factor is set
    to 0 instead of inf), so empty rows/columns stay empty.

    Raises:
        AssertionError: if any row sum is negative (normalization would be
            undefined).
    """
    A = sp.coo_matrix(A)
    A.eliminate_zeros()
    rowsum = np.array(A.sum(1))
    assert np.all(rowsum >= 0)
    # Zero-sum rows produce inf here; suppress the resulting
    # divide-by-zero RuntimeWarning since the infs are zeroed right below.
    with np.errstate(divide='ignore'):
        d_inv_sqrt = np.power(rowsum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return d_mat_inv_sqrt.dot(A).dot(d_mat_inv_sqrt)
33122bcf018dba842f04e044cf9e799860a56042
3,640,175
def _fetch(
    self,
    targets=None,
    jobs=None,
    remote=None,
    all_branches=False,
    show_checksums=False,
    with_deps=False,
    all_tags=False,
    recursive=False,
):
    """Download data items from a cloud and imported repositories

    Args:
        targets: paths/stages to fetch; None fetches everything tracked.
        jobs: number of parallel download jobs.
        remote: name of the remote to pull from.
        all_branches / all_tags: also collect cache used by other
            branches/tags.
        show_checksums: display checksums instead of file names.
        with_deps: include dependencies of the targets.
        recursive: recurse into directories given as targets.

    Returns:
        int: number of successfully downloaded files

    Raises:
        DownloadError: thrown when there are failed downloads, either
            during `cloud.pull` or trying to fetch imported files

        config.NoRemoteError: thrown when downloading only local files and no
            remote is configured
    """
    # Resolve the full set of cache entries needed by the given targets,
    # split into local entries and entries coming from imported repos.
    used = self.used_cache(
        targets,
        all_branches=all_branches,
        all_tags=all_tags,
        with_deps=with_deps,
        force=True,
        remote=remote,
        jobs=jobs,
        recursive=recursive,
    )

    downloaded = 0
    failed = 0

    try:
        downloaded += self.cloud.pull(
            used, jobs, remote=remote, show_checksums=show_checksums
        )
    except NoRemoteError:
        # No remote configured is only fatal when local-cache files were
        # actually requested and there are no external (imported-repo)
        # sources to fall back to — NOTE(review): confirm intended
        # semantics of this condition.
        if not used.external and used["local"]:
            raise
    except DownloadError as exc:
        # Record the partial-failure count but keep going, so external
        # (imported) files are still attempted below.
        failed += exc.amount

    # Fetch files that come from imported repositories, grouped per
    # (repo URL, revision); accumulate successes and failures.
    for (repo_url, repo_rev), files in used.external.items():
        d, f = _fetch_external(self, repo_url, repo_rev, files)
        downloaded += d
        failed += f

    # Surface the aggregated failure count as a single DownloadError.
    if failed:
        raise DownloadError(failed)

    return downloaded
18238bb1c4c5bd0772757013173e26645d5cdf5a
3,640,178