Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, values 0 to 3.66M)
import hashlib

import six

def make_hashkey(seed):
    """Generate a string key by hashing"""
    h = hashlib.md5()
    h.update(six.b(str(seed)))
    return h.hexdigest()
38d088005cb93fc0865933bbb706be171e72503a
3,638,454
import asyncio

async def report(database, year, month, limit):
    """Get a report."""
    matches_query = """
        select count(*) as count from matches
        where extract(year from played)=:year and extract(month from played)=:month
    """
    players_query = """
        select count(distinct players.user_id) as count from matches
        join players on matches.id=players.match_id
        where extract(year from played)=:year and extract(month from played)=:month
    """
    most_matches_query = """
        select players.user_id, players.platform_id, players.user_name, count(matches.id) as count
        from players join matches on players.match_id=matches.id
        where players.user_id != ''
            and extract(year from matches.played)=:year
            and extract(month from matches.played)=:month
        group by players.user_id, players.platform_id, players.user_name
        order by count(matches.id) desc limit :limit
    """
    popular_maps_query = """
        select map_name as name, count(map_name) as count from matches
        where extract(year from played)=:year and extract(month from played)=:month
        group by map_name order by count(map_name) desc
    """
    longest_matches_query = """
        select id from matches
        where extract(year from played)=:year and extract(month from played)=:month
        order by duration desc limit :limit
    """
    total_matches, total_players, most_matches, popular_maps, longest_matches = await asyncio.gather(
        database.fetch_one(matches_query, values={'year': year, 'month': month}),
        database.fetch_one(players_query, values={'year': year, 'month': month}),
        database.fetch_all(most_matches_query, values={'limit': limit, 'year': year, 'month': month}),
        database.fetch_all(popular_maps_query, values={'year': year, 'month': month}),
        database.fetch_all(longest_matches_query, values={'limit': limit, 'year': year, 'month': month}),
    )
    return {
        'year': year,
        'month': month,
        'total_matches': total_matches['count'],
        'total_players': total_players['count'],
        'most_matches': [dict(
            user=dict(id=m['user_id'], platform_id=m['platform_id'], name=m['user_name']),
            rank=i + 1,
            count=m['count']
        ) for i, m in enumerate(most_matches)],
        'popular_maps': compute_map_data(popular_maps)[:limit],
        'longest_match_ids': [m['id'] for m in longest_matches]
    }
91059c5a8bd44536f24a7edbb88ff27b9036b83a
3,638,455
def dy3(vector, g, m1, m2, L1, L2):
    """
    Abbreviations
        M = m1 + m2
        S = sin(y1 - y2)
        C = cos(y1 - y2)
        s1 = sin(y1)
        s2 = sin(y2)

    Equation
              g*[m2 * C * s2 - M * s1] - S*m2*[L1 * y3^2 * C + L2*y4^2]
        y3' = -----------------------------------------------------------
                                L1*[M - m2*C^2]
    """
    y1, y2, y3, y4 = vector
    M, S, C, s1, s2 = abbreviate(m1, m2, y1, y2)
    # Split up the equation, for almost clarity
    num_a = g*(m2*C*s2 - M*s1)
    num_b = S*m2*(L1*C*y3**2 + L2*y4**2)
    den = L1*(M - m2*C**2)
    return (num_a - num_b)/den
b93086cfcbb9d5f32143279ad01972d3f8719a78
3,638,457
from typing import Any

import falcon

def getType(resp: falcon.Response, class_type: str, method: str) -> Any:
    """Return the @type of object allowed for POST/PUT."""
    for supportedOp in get_doc(resp).parsed_classes[class_type]["class"].supportedOperation:
        if supportedOp.method == method:
            # NOTE: Don't use split; if there is more than one substring
            # containing 'vocab:', not everything will be returned.
            return supportedOp.expects.replace("vocab:", "")
d20b77b4f40d266e685ce87f67d8f2fcbcfbe3eb
3,638,458
import pandas as pd

def full_data_numeric():
    """DataFrame with numeric data"""
    data_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
                 'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
                 'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    df = pd.DataFrame(data_dict)
    return df
ebd105f2648475dc7dcd40f51482d18e29486254
3,638,459
def fix(x):
    """
    Replaces spaces with tabs, removes spurious newlines, and lstrip()s each
    line. Makes it really easy to create BED files on the fly for testing
    and checking.
    """
    s = ""
    for i in x.splitlines():
        i = i.lstrip()
        if i.endswith('\t'):
            add_tab = '\t'
        else:
            add_tab = ''
        if len(i) == 0:
            continue
        i = i.split()
        i = '\t'.join(i) + add_tab + '\n'
        s += i
    return s
ecd3a4d7f470feae1b697025c8fbf264d5c6b149
3,638,460
def get_collect_method(collect_method_name):
    """Return the collect method."""
    try:
        collect_method = CollectMethod.get(name=collect_method_name)
    except ValueError:
        raise RuntimeError(f'Collect Method {collect_method_name} not found!')
    return collect_method
b80fcb916d461deea1784386062017291292f218
3,638,461
def TriangleBackwardSub(U, b):
    """C = TriangleBackwardSub(U, b)

    Solve the linear system UC = b, where U is upper triangular.
    """
    C = solve(U, b)
    return C
95c7fb76ad02a5546a79b95f18b51fe385307329
3,638,462
from unittest.mock import patch

def test_binance_query_balances_unknown_asset(function_scope_binance):
    """Test that if a binance balance query returns an unknown asset no
    exception is raised and a warning is generated. Same for unsupported assets."""
    binance = function_scope_binance

    def mock_unknown_asset_return(url):  # pylint: disable=unused-argument
        return MockResponse(200, BINANCE_BALANCES_RESPONSE)

    with patch.object(binance.session, 'get', side_effect=mock_unknown_asset_return):
        # Test that after querying the assets only ETH and BTC are there
        balances, msg = binance.query_balances()

    assert msg == ''
    assert len(balances) == 2
    assert balances[A_BTC]['amount'] == FVal('4723846.89208129')
    assert balances[A_ETH]['amount'] == FVal('4763368.68006011')

    warnings = binance.msg_aggregator.consume_warnings()
    assert len(warnings) == 2
    assert 'unknown binance asset IDONTEXIST' in warnings[0]
    assert 'unsupported binance asset ETF' in warnings[1]
7521fd3039398c3eedccb16e16202687b4c28b2d
3,638,463
def petsc_to_stencil(x, Xh):
    """Convert a PETSc vector to StencilVector or BlockVector format."""
    x = x.array
    u = array_to_stencil(x, Xh)
    return u
6df02bbbfb9e9e386ca03510f2e4d563a6fed1aa
3,638,464
import contextlib
from typing import Optional

def index_internal_txs_task(self) -> Optional[int]:
    """
    Find and process internal txs for monitored addresses.

    :return: Number of addresses processed
    """
    with contextlib.suppress(LockError):
        with only_one_running_task(self):
            logger.info("Start indexing of internal txs")
            number_traces = InternalTxIndexerProvider().start()
            logger.info("Find internal txs task processed %d traces", number_traces)
            if number_traces:
                logger.info("Calling task to process decoded traces")
                process_decoded_internal_txs_task.delay()
            return number_traces
b1a40ec713ff8d302f5c47b2c5d41300c699f3b4
3,638,465
# NOTE: the stdlib `math` has no `pytree_dot`; `math` here is presumably a
# project-local module.
import math

def make_lagrangian(func, equality_constraints):
    """Make a Lagrangian function from an objective function `func` and
    `equality_constraints`.

    Args:
        func (callable): Unary callable with signature `f(x, *args, **kwargs)`
        equality_constraints (callable): Unary callable with signature
            `h(x, *args, **kwargs)`

    Returns:
        tuple: Triple of callables (init_multipliers, lagrangian, get_params)
    """
    def init_multipliers(params, *args, **kwargs):
        h = jax.eval_shape(equality_constraints, params, *args, **kwargs)
        multipliers = tree_util.tree_map(lambda x: np.zeros(x.shape, x.dtype), h)
        return params, multipliers

    def lagrangian(params, multipliers, *args, **kwargs):
        h = equality_constraints(params, *args, **kwargs)
        return -func(params, *args, **kwargs) + math.pytree_dot(multipliers, h)

    def get_params(opt_state):
        return opt_state[0]

    return init_multipliers, lagrangian, get_params
c5795cded21e9cc4a7092eee63b88a4fac3b346a
3,638,466
def ungroup(expr):
    """Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty.
    """
    return TokenConverter(expr).addParseAction(lambda t: t[0])
c007a51e5073d8a3cbcbe52ca32ad84d58f4100a
3,638,468
def test_qnn_legalize():
    """Test directly replacing an operator with a new one"""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
        y = relay.qnn.op.requantize(x,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        y = relay.Function([x], y)
        return y

    @register_qnn_legalize("qnn.requantize", level=100)
    def legalize_qnn_requantize(attrs, inputs, types):
        data = inputs[0]
        data = relay.add(relay.const(0, 'int8'), data)
        y = relay.qnn.op.requantize(data,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
        y = relay.add(relay.const(0, 'int8'), x)
        z = relay.qnn.op.requantize(y,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        z = relay.Function([x], z)
        return z

    a = before()

    # Check that Relay Legalize does not change the graph.
    a = run_opt_pass(a, relay.transform.Legalize())
    b = run_opt_pass(before(), transform.InferType())
    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)

    # Check that QNN Legalize modifies the graph.
    a = run_opt_pass(a, relay.qnn.transform.Legalize())
    b = run_opt_pass(expected(), transform.InferType())
    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
b6f4a930e5c7156e60a5b26583b6e8fc48a6f441
3,638,471
import yaml

def load(data, schema, yamlLoader=yaml.UnsafeLoader):
    """
    Loads the given data and validates it according to the schema provided.

    Data must be either JSON or YAML; it may be a dictionary, a path, or a
    string of JSON. Schema must be JSON; it may be a dictionary, a path, or
    a string of JSON.
    """
    if isJson(data):
        return loadAndValidateJson(data, schema)
    return loadAndValidateYaml(data, schema, yamlLoader=yamlLoader)
e7f29e1b61e60ce1cac5b1b1217f1df645691c17
3,638,472
from typing import List

import numpy as np
from skimage.measure import regionprops

def calculate_slice_rotations(im_stack: np.ndarray, max_rotation: float = 45) -> List[float]:
    """Calculate the rotation angle to align each slice so the object's long
    axis is aligned with the horizontal axis.

    Parameters
    ----------
    im_stack : np.ndarray
        A stack of images. The images should be binary or label images. The
        regions are found and processed with the scikit-image label and
        regionprops functions. The stack should have shape (z, y, x) for z
        images with shape (y, x).
    max_rotation : float
        The maximum allowed rotation between slices in degrees. If this value
        is exceeded, it is assumed that the opposite rotation was found and
        180 is added to the rotation. The default value is 45.

    Returns
    -------
    rotations : List[float]
        The rotation for each slice in degrees.
    """
    # get the rotations of the images
    rotations = []
    rotations_raw = []
    prev_rot = 0
    previous_values = []
    for i, im in enumerate(im_stack):
        previous_values.append(prev_rot)
        rp = regionprops(im.astype(int))
        if len(rp) > 0:
            orientation = rp[0].orientation
            angle_in_degrees = orientation * (180 / np.pi) + 90
        else:
            angle_in_degrees = 0
        rotations_raw.append(angle_in_degrees)
        if i > 0:
            # check if we should flip the rotation
            if abs(prev_rot - angle_in_degrees) > max_rotation:
                angle_in_degrees = -1 * (180 - angle_in_degrees)
        prev_rot = angle_in_degrees
        rotations.append(angle_in_degrees)
    return rotations
42c0fdbdf02e937f449cb3ca137588003c715651
3,638,473
def calc_rest_interval(data):
    """
    SubTool for Investigate: after median_deviation filters through all the
    points, run entropy on the remaining non-rest points. This will filter
    the points that are close but could still be rest.
    """
    lst, rest = median_deviation(data)
    average = median(data)
    st_entropy = entropy(lst)
    array = lst
    result = []
    for instance in lst:
        temp_array = list(array)
        temp_array.remove(instance)
        temp_array.insert(0, average)
        ne_entropy = entropy(temp_array)
        if ne_entropy < (st_entropy - st_entropy/5):
            rest.append(instance)
        else:
            result.append(instance)
    return result, average, rest
7710e0784a5a025d99c8ead9799b1062942e3cdc
3,638,474
def get_objanno(fin_anno, godag, namespace='all'):
    """Get annotation object"""
    fin_full = get_anno_fullname(fin_anno)
    return get_objanno_factory(fin_full, godag=godag, namespace=namespace)
5e071190596ab37943d4001b4f03cf20d6395e06
3,638,475
import pandas as pd

def create_table_descriptives(datasets):
    """Merge dataset descriptives."""
    df = pd.concat(
        [pd.read_json(ds, orient="index") for ds in datasets], axis=0
    )
    df.index.name = "dataset_name"
    return df
7c4554381ffb14572d949c27035411567d69e25d
3,638,476
def get_ngram_universe(sequence, n):
    """
    Computes the universe of possible ngrams given a sequence. Where n is
    equal to the length of the sequence, the resulting number represents
    the sequence universe.

    Example
    -------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_ngram_universe(sequence, 3)
    64
    """
    # if recurrence is possible, the universe is given by k^n (SSA pg 68)
    k = len(set(sequence))
    if k > 10 and n > 10:
        return 'really big'
    return k**n
3dbfe1822fdefb3e683b3f2b36926b4bb066468f
3,638,477
from typing import Iterable, Union

def as_nested_dict(
    obj: Union[DictLike, Iterable[DictLike]],
    dct_class: type = DotDict
) -> Union[DictLike, Iterable[DictLike]]:
    """
    Given an obj formatted as a dictionary, transforms it (and any nested
    dictionaries) into the provided dct_class.

    Args:
        - obj (Any): An object that is formatted as a `dict`
        - dct_class (type): the `dict` class to use (defaults to DotDict)

    Returns:
        - A `dct_class` representation of the object passed in
    """
    if isinstance(obj, (list, tuple, set)):
        return type(obj)([as_nested_dict(d, dct_class) for d in obj])
    # Calling as_nested_dict on `Box` objects pulls out their "private" keys
    # due to our recursion into `__dict__` if it exists. We can special-case
    # Box and just convert it to dict this way, which automatically handles
    # recursion.
    elif isinstance(obj, Box):
        return dict(obj)
    elif isinstance(obj, (dict, DotDict)):
        # DotDicts could have keys that shadow `update` and `items`, so we
        # take care to avoid accessing those keys here
        return dct_class(
            {
                k: as_nested_dict(v, dct_class)
                for k, v in getattr(obj, "__dict__", obj).items()
            }
        )
    return obj
a89261253174ce5b75d61343f0b45d3fe65e12f9
3,638,478
import numpy as np

def twoindices_positive_up_to(n, m):
    """Build 2D integer indices up to (n, m), each axis scanned from 0."""
    if not isinstance(n, int) or n <= 0:
        raise ValueError("%s is not a positive integer" % str(n))
    nbpos_n = n + 1
    nbpos_m = m + 1
    gripos = np.mgrid[:n:nbpos_n * 1j, :m:nbpos_m * 1j]
    indices_pos = np.reshape(gripos.T, (nbpos_n * nbpos_m, 2))
    return indices_pos
63f850703f7598f1a4611c13700aa1921d77dd1a
3,638,479
def ban_user(request, user):
    """Bans a given user."""
    user = User.query.filter_by(username=user).first()
    if user is None:
        raise NotFound()
    next = request.next_url or url_for('admin.bans')
    if user.is_banned:
        request.flash(_(u'The user is already banned.'))
        return redirect(next)
    if user == request.user:
        request.flash(_(u'You cannot ban yourself.'), error=True)
        return redirect(next)
    admin_utils.ban_user(user)
    request.flash(_(u'The user “%s” was successfully banned and notified.')
                  % user.username)
    return redirect(next)
dd8c2a43a3843a6055e9e690d8cffee8cfac2b0e
3,638,481
def lastDate():
    """Return the total revenue of the most recent day."""
    last_row = totalDate().tail(1)
    last_date = last_row.iloc[0]['total'].round(2)
    return last_date
93130bf39dc2a82fa2cae11a6ea11468211f61b6
3,638,482
import json

from django.http import HttpResponse

def multitask_result(request):
    """Multi-task results."""
    task_id = request.GET.get('task_id')
    task_obj = models.Task.objects.get(id=task_id)
    results = list(task_obj.tasklog_set.values(
        'id',
        'status',
        'host_user_bind__host__hostname',
        'host_user_bind__host__ip_addr',
        'result',
    ))
    return HttpResponse(json.dumps(results))
c9c37fe4852a8c04662a5061445c1565400e94a1
3,638,484
from typing import Dict

def process_xpath_list(node, property_manifest: Dict):
    """
    Return a list of values as a result of running a list of XPath
    expressions against an input node.

    :param node: Input node
    :param property_manifest: Manifest snippet of the property
    :return: List of values
    """
    def complement_xpath(current_node, path):
        """Return current node if the XPath value is "." else process the
        XPath normally."""
        if path == ".":
            return [current_node]
        else:
            return current_node.xpath(path)

    if node:
        return [
            process_property_value(child_node, property_manifest)
            for path in property_manifest["xpath"]
            for child_node in complement_xpath(node, path)
        ]
    return []
e52ef3a7ff6b2f74554a69a5fec53125c077f6e5
3,638,485
def collect_username_and_password(db: Session) -> UserCreate:
    """Collect username and password information and validate."""
    username = get_username("Enter your username: ")
    password = get_password("Enter your password: ")
    verify_pass = get_password("Enter your password again: ")
    if password != verify_pass:
        raise Exception("Passwords do not match.")
    user_data = UserCreate(username=username, password=password)
    user = FidesopsUser.get_by(db, field="username", value=user_data.username)
    if user:
        raise Exception(f"User with username '{username}' already exists.")
    return user_data
be1557a4aa24cfb653c5e03f7f3cb340be1a6c1b
3,638,486
def replace_header(input_df):
    """Replace the dataframe's headers with the first row of the sheet."""
    new_header = input_df.iloc[0]
    input_df = input_df[1:]
    input_df.columns = new_header
    return input_df
c8946fc269dd313b80df421af8d0b3fc6c47aed7
3,638,487
def cartToRadiusSq(cartX, cartY):
    """Convert Cartesian coordinates into their corresponding radius squared."""
    return cartX**2 + cartY**2
3fb79d2c056f06c2fbf3efc14e08a36421782dbd
3,638,488
def unique_entity_id(entity):
    """
    :param entity: django model
    :return: unique token combining the model type and id for use in HTML
    """
    return "%s-%s" % (type(entity).__name__, entity.id)
c58daf9a115c9840707ff5e807efadad36a86ce8
3,638,489
def normalize_tuple(value, n, name):
    """Transforms a single int or iterable of ints into an int tuple.

    # Arguments
        value: The value to validate and convert. Could be an int, or any
            iterable of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. `strides` or
            `kernel_size`. This is only used to format error messages.

    # Returns
        A tuple of n integers.

    # Raises
        ValueError: If something else than an int/long or iterable thereof
            was passed.
    """
    if isinstance(value, int):
        return (value,) * n
    else:
        try:
            value_tuple = tuple(value)
        except TypeError:
            raise ValueError('The `{}` argument must be a tuple of {} '
                             'integers. Received: {}'.format(name, n, value))
        if len(value_tuple) != n:
            raise ValueError('The `{}` argument must be a tuple of {} '
                             'integers. Received: {}'.format(name, n, value))
        for single_value in value_tuple:
            try:
                int(single_value)
            except ValueError:
                raise ValueError('The `{}` argument must be a tuple of {} '
                                 'integers. Received: {} including element {} '
                                 'of type {}'.format(name, n, value,
                                                     single_value,
                                                     type(single_value)))
        return value_tuple
cf396bac48b720686bb65ae7ab91b2e4cb22ac0e
3,638,490
def load_user(user_id):
    """
    @login_manager.user_loader passes a user_id to this function; in return
    the function queries the database and returns the user with that id.
    """
    return User.query.get(int(user_id))
2c2a2e7f6f9a5bc7392056bfd16402c9d2e96c22
3,638,491
def replaceall(table, a, b):
    """
    Convenience function to replace all instances of `a` with `b` under all
    fields. See also :func:`convertall`.

    .. versionadded:: 0.5
    """
    return convertall(table, {a: b})
19d6c0fb60c71994de02deafb5ec9c2995aba622
3,638,492
def get_job_metadata(ibs, jobid):
    """
    Web call that returns the metadata of a job.

    CommandLine:
        # Run everything together
        python -m wbia.web.job_engine --exec-get_job_metadata

        # Start job queue in its own process
        python -m wbia.web.job_engine job_engine_tester --bg
        # Start web server in its own process
        ./main.py --web --fg
        # Run foreground process
        python -m wbia.web.job_engine --exec-get_job_metadata:0 --fg

    Example:
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> # xdoctest: +REQUIRES(--slow)
        >>> # xdoctest: +REQUIRES(--job-engine-tests)
        >>> from wbia.web.job_engine import *  # NOQA
        >>> import wbia
        >>> with wbia.opendb_bg_web('testdb1', managed=True) as web_ibs:
        ...     # Test get metadata of a job id that does not exist
        ...     response = web_ibs.send_wbia_request('/api/engine/job/metadata/', jobid='badjob')
    """
    status = ibs.job_manager.jobiface.get_job_metadata(jobid)
    return status
24ba96d6a71f105057a9fc9012de9edb187787d5
3,638,493
import math

import tensorflow as tf

def create_learning_rate_scheduler(max_learn_rate, end_learn_rate,
                                   warmup_proportion, n_epochs):
    """Learning rate scheduler that increases linearly within the warmup
    epochs, then exponentially decays to end_learn_rate.

    Args:
        max_learn_rate: Float. Maximum learning rate.
        end_learn_rate: Float. Scheduler converges to this value.
        warmup_proportion: Float. How many epochs to increase linearly,
            before decaying.
        n_epochs: Float. Maximum number of epochs training will run.

    Returns:
        Keras learning rate scheduler
    """
    def lr_scheduler(epoch):
        warmup_epoch_count = int(warmup_proportion * n_epochs)
        if epoch < warmup_epoch_count:
            res = (max_learn_rate / warmup_epoch_count) * (epoch + 1)
        else:
            res = max_learn_rate * math.exp(
                math.log(end_learn_rate / max_learn_rate)
                * (epoch - warmup_epoch_count + 1)
                / (n_epochs - warmup_epoch_count + 1)
            )
        return float(res)

    learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)
    return learning_rate_scheduler
5c5649e429ad5f138894d30064c24bf23e547f85
3,638,494
def matchyness(section, option):
    """Assign a numerical 'matchyness' value between target and proposed match.

    Parameters:
        section -- target value
        option -- proposed match
    """
    if section != option:
        return _hc.NEQ
    if isinstance(section, rt.flask_placeholder):
        if isinstance(option, rt.flask_placeholder):
            return _hc.PP  # Placeholder - placeholder
        else:
            return _hc.PS  # Placeholder - string
    else:
        if option.value == section.value:
            return _hc.SS  # String - string
        elif isinstance(option, rt.flask_placeholder):
            return _hc.SP  # String - placeholder
        else:
            return _hc.NEQ
c8e3773a8afe190181fd7552460852a27b2534d3
3,638,495
def log_sum_exp_elem(*a):
    """
    :param a: elements
    :return: (a[0].exp() + a[1].exp() + ...).log()
    """
    bias = max(a).detach()
    ans = bias + sum([(ai - bias).exp() for ai in a]).log()
    return ans
a87871a7c8af9d2c6c8db683ba63124319d09a0d
3,638,496
def car_portrayal(agent):
    """Visualises the cars for the Mesa webserver.

    :return: Dictionary containing the settings of an agent
    """
    if agent is None:
        return

    portrayal = {}

    # update portrayal characteristics for each CarAgent object
    if isinstance(agent, CarAgent):
        if agent.is_from_traffic_light:
            portrayal["Shape"] = "rect"
            portrayal["w"], portrayal["h"] = .7, .7
        else:
            portrayal["Shape"] = "circle"
            portrayal["r"] = .9
        portrayal["Layer"] = 0
        portrayal["Filled"] = "true"
        # change the agent's color to match its velocity
        portrayal["Color"] = colour_spectrum[agent.velocity - 1]
    return portrayal
68c0bffb02299f2b03abf6ee2dc590375ad8e2a5
3,638,497
import re
from collections import OrderedDict

import numpy as np

def _load_spc_format_type_a(filepath: str):
    """Load A(w,k) in the spc format type a.

    Args:
        filepath (str): output filename

    Returns:
        np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray:
            kcrt, Awk, kdist, energy, kpath
    """
    with open(filepath) as f:
        lines = f.readlines()
    lines = [line.strip() for line in lines]
    lines_iter = iter(lines)

    line = next(lines_iter)
    s = line.strip().split()
    internal_format_type = s[-1]
    if internal_format_type == "(a)":
        # header line (emin, emax, ne, nhighsymkp); values currently unused
        line = next(lines_iter)
        line = next(lines_iter)
        line = line.strip()
        kstr_list = re.split(" +", line)
        kpath = OrderedDict()
        kcrt = []
        for kstr in kstr_list[1:]:
            s = kstr.split()
            idx = int(s[0])
            idx -= 1  # index conversion from Fortran to Python
            name = " ".join(s[1:]).replace("'", "")
            kpath[idx] = name
            kcrt.append(idx)
        kcrt = np.array(kcrt)
    elif internal_format_type == "(a1)":
        # short format without kpoints
        line = next(lines_iter)
        line = line.strip()
        kstr_list = line.split()
        kpath = OrderedDict()
        kcrt = []
        for kstr in kstr_list[2:]:
            s = kstr.split()
            idx = int(s[0])
            idx -= 1  # index conversion from Fortran to Python
            kpath[idx] = None
            kcrt.append(idx)
        kcrt = np.array(kcrt)

    while True:
        line = next(lines_iter)
        if line.startswith("### end of header"):
            break
    line = next(lines_iter)

    Awk, kdist, energy = _load_spc_format_type_a_Awk(lines_iter)
    return kcrt, Awk, kdist, energy, kpath
0a5c3f316875495e37502dd426e6eee7dd76ee53
3,638,498
def variable_to_json(var):
    """Converts a Variable object to a dict/json struct."""
    o = {}
    o['x'] = var.x
    o['y'] = var.y
    o['name'] = var.name
    return o
86497a7915e4825e6e2cbcfb110c9bc4c229efed
3,638,499
import math

def getDewPoint(temp, humidity):
    """
    A utility function to get the temperature to which an amount of air
    must be cooled in order for water vapor to condense into water.

    This is only valid for:
      1) temperatures between 0C and 60C,
      2) relative humidity between 1% and 100%, and
      3) dew points between 0C and 50C.

    @param temp: temperature in degrees Celsius
    @param humidity: percentage relative humidity
    """
    if not 0 < temp < 60:
        raise InvalidDewPoint("Temperature out of range.")
    if not 1 < humidity < 100:
        raise InvalidDewPoint("Humidity is out of range.")
    a = 17.271
    b = 237.7

    def gamma(temp, humidity):
        return (a * temp) / (b + temp) + math.log(humidity / 100.0)

    dewPoint = (b * gamma(temp, humidity)) / (a - gamma(temp, humidity))
    if dewPoint < 0:
        raise InvalidDewPoint("Computed dew point is too low.")
    if dewPoint > 50:
        raise InvalidDewPoint("Computed dew point is too high.")
    return dewPoint
0e67eef5a90d9e55f85906d57e6c2eb347044897
3,638,502
import json

def get_result_handler(rc_value, sa_file=None):
    """Returns a dict of result handler config. Backwards compatible for
    JSON input.

    rc_value (str): Result config argument specified.
    sa_file (str): SA path argument specified.
    """
    try:
        result_handler = json.loads(rc_value)
    except json.decoder.JSONDecodeError:
        config = rc_value.split(".", 1)
        if len(config) == 2:
            result_handler = {
                "type": "BigQuery",
                "project_id": config[0],
                "table_id": config[1],
            }
        else:
            raise ValueError(f"Unable to parse result handler config: `{rc_value}`")
    if sa_file:
        result_handler["google_service_account_key_path"] = sa_file
    return result_handler
83c6aa6e0cacdc64422553050072af5d8ea46bf6
3,638,503
def speedup_experiment_ts(args, model_iter_fn, model, example_inputs):
    """
    Measure baseline performance (without using TorchDynamo) of TorchScript
    and optimize_for_inference.

    Writes to ./baseline_ts.csv
    """
    return baselines(
        [
            ("eager", model),
            ("ts", try_script(model, example_inputs)),
            (
                "ofi",
                backends.ofi(try_script(model, example_inputs), example_inputs),
            ),
            # ("nnc", backends.nnc(try_script(model, example_inputs), example_inputs)),
            # ("nvfuser", backends.nvfuser(try_script(model, example_inputs), example_inputs)),
        ],
        model_iter_fn,
        example_inputs,
        args,
    )
0936d5e24759ae5e04027f8e68e467caa24d5ccb
3,638,504
from typing import Tuple

import numpy as np
import numpy.typing as npt

def my_polyhedron_to_label(
    rays: Rays_Base, dists: ArrayLike, points: ArrayLike, shape: Tuple[int, ...]
) -> npt.NDArray[np.int_]:
    """Convenience function to pass 1-d arrays to polyhedron_to_label."""
    return polyhedron_to_label(  # type: ignore [no-any-return]
        np.expand_dims(  # type: ignore [no-untyped-call]
            np.clip(dists, 1e-3, None), axis=0
        ),
        np.expand_dims(points, axis=0),  # type: ignore [no-untyped-call]
        rays,
        shape,
        verbose=False,
    )
f967a963fcb47c964895da182a48568a2a8a8ee2
3,638,505
from typing import Optional

def get_incident_comment(incident_comment_id: Optional[str] = None,
                         incident_id: Optional[str] = None,
                         operational_insights_resource_provider: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         workspace_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIncidentCommentResult:
    """
    Represents an incident comment.
    API Version: 2019-01-01-preview.

    :param str incident_comment_id: Incident comment ID
    :param str incident_id: Incident ID
    :param str operational_insights_resource_provider: The namespace of the workspaces resource provider - Microsoft.OperationalInsights.
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    __args__ = dict()
    __args__['incidentCommentId'] = incident_comment_id
    __args__['incidentId'] = incident_id
    __args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:securityinsights:getIncidentComment', __args__, opts=opts, typ=GetIncidentCommentResult).value

    return AwaitableGetIncidentCommentResult(
        author=__ret__.author,
        created_time_utc=__ret__.created_time_utc,
        etag=__ret__.etag,
        id=__ret__.id,
        last_modified_time_utc=__ret__.last_modified_time_utc,
        message=__ret__.message,
        name=__ret__.name,
        type=__ret__.type)
c0fa6ec1bb7bcccc379455454296bc6a5814946f
3,638,507
def getOrElseUpdate(dictionary, key, opr):
    """If the given key is already in the dictionary, returns the associated
    value. Otherwise computes the value with opr, updates the dictionary and
    returns it. A None dictionary is ignored.

    >>> d = dict()
    >>> getOrElseUpdate(d, 1, lambda _: _ + 1)
    2
    >>> print(d)
    {1: 2}

    @type dictionary: dictionary of A => B
    @param dictionary: the dictionary
    @type key: A
    @param key: the key
    @type opr: function of A => B
    @param opr: the function to compute a new value from keys
    @rtype: B
    @return: the value associated with the key
    """
    if dictionary is None:
        return opr(key)
    else:
        if key not in dictionary:
            dictionary[key] = opr(key)
        return dictionary[key]
95454d7ca34d6ae243fda4e70338cf3d7584b827
3,638,508
from operator import add, mul

def gs_norm(f, g, q):
    """
    Compute the squared Gram-Schmidt norm of the NTRU matrix generated by
    f, g. This matrix is [[g, -f], [G, -F]]. This algorithm is equivalent
    to line 9 of algorithm 5 (NTRUGen).
    """
    sqnorm_fg = sqnorm([f, g])
    ffgg = add(mul(f, adj(f)), mul(g, adj(g)))
    Ft = div(adj(g), ffgg)
    Gt = div(adj(f), ffgg)
    sqnorm_FG = (q ** 2) * sqnorm([Ft, Gt])
    return max(sqnorm_fg, sqnorm_FG)
da30e1bac41cba3a6c051ba0159234aac5e6e3cc
3,638,509
def find_anomalous_scatterers(*args, **kwds):
    """
    Wrapper for the corresponding method in phaser.substructure, if phaser
    is available and configured.
    """
    if not libtbx.env.has_module("phaser"):
        if "log" in kwds:
            print("Phaser not available", file=kwds["log"])
        return None
    # import here so the module loads even when phaser is absent
    from phaser import substructure
    return substructure.find_anomalous_scatterers(*args, **kwds)
0c88f0df336802fa798ac26966485b28105a6238
3,638,510
def OpChr(ea, n):
    """
    @param ea: linear address
    @param n: number of operand
        - 0 - the first operand
        - 1 - the second, third and all other operands
        - -1 - all operands
    """
    return idaapi.op_chr(ea, n)
39c2716ed7344fccd85edda2d27b7a7f305cb14b
3,638,511
def check_access(func):
    """Check whether the user is in the policy owners group."""
    def inner(*args, **kwargs):
        keycloak = get_keycloak()
        if 'policy_id' in kwargs:
            current_user = kwargs['user']
            group_name = f'policy-{kwargs["policy_id"]}-owners'
            group_list = keycloak.user_group_list(current_user)
            groups = {group['name']: group for group in group_list}
            if group_name in groups.keys():
                # User has access to delete/edit the policy
                return func(*args, **kwargs)
            else:
                # User does not have access to delete/edit the policy
                return problem(403, 'Forbidden', 'You do not own this policy')
        else:
            return func(*args, **kwargs)
    return inner
6655af97f11ae04587904f1aaf2a2225ace5b64d
3,638,512
import pandas as pd

def score_ranking(score_dict):
    """
    Grouped ranking implemented with pandas.

    :param score_dict: dict, e.g.
        {'591_sum_test_0601': 13.1, '591_b_tpg7': 13.1, '591_tdw_ltpg6': 14.14}
    :return: DataFrame
        pd.DataFrame([['591_sum_test_0601', 13.10, 2.0, 0.6667],
                      ['591_b_tpg7', 13.10, 2.0, 0.6667],
                      ['591_tdw_ltpg6', 14.14, 3.0, 1.0]],
                     columns=['dataset_id', 'score', 'ranking', 'ranking_perct'])
    """
    sorted_list = sorted(score_dict.items(), key=lambda item: item[1])
    dataset_id_list = []
    score_list = []
    for each_dataset in sorted_list:
        dataset_id_list.append(each_dataset[0])
        score_list.append(each_dataset[1])
    score_dict = {"dataset_id": dataset_id_list, "score": score_list}
    df = pd.DataFrame(data=score_dict)
    df["ranking"] = df["score"].rank(method="max")
    df["ranking_perct"] = df["ranking"] / len(df)
    return df
d799576afe382c13124c703351b69b8bcb7393b2
3,638,513
def dock_widget(widget, label="DockWindow", area="right", floating=False):
    """Dock the given widget properly for both Maya 2016 and 2017+."""
    # convert widget to Qt if needed
    if not issubclass(widget.__class__, QObject):
        widget = utils.to_qwidget(widget)

    # make sure our widget has a (unique) name
    name = widget.objectName()
    if not name:
        name, num = label + "_mainWindow", 1
        while cmds.control(name, exists=True):
            name = label + "_mainWindow" + str(num)
            num += 1
        widget.setObjectName(name)

    # if `floating` is True, return with `widget.show()`
    if floating is True:
        if not widget.windowTitle():
            widget.setWindowTitle(label)
        widget.show()
        return widget

    # make sure the workspaceControl doesn't exist yet
    control = name + "_WorkspaceControl"
    if cmds.control(control, exists=True):
        cmds.deleteUI(control)

    # create workspaceControl (only works with Maya 2017+)
    flags = {"dockToControl": ["ToolBox", "right"]}
    if area == "right":
        # If the ChannelBox is not visible, fall back on the AttributeEditor.
        _control = "ChannelBoxLayerEditor"
        if not cmds.workspaceControl(_control, query=True, visible=True):
            _control = "AttributeEditor"
        flags = {"tabToControl": [_control, -1]}
    control = cmds.workspaceControl(control)
    cmds.workspaceControl(control, edit=True, label=label, r=True, **flags)

    # Convert workspace to Qt and add the widget into its layout.
    workspace = utils.to_qwidget(control)
    layout = workspace.layout()
    layout.addWidget(widget)
    return widget
80ef6bde493585e0010a497dfb179600aae04e9e
3,638,514
import numpy as np

def compute_benjamin_feir_index(bandwidth, steepness, water_depth, peak_wavenumber):
    """Compute the Benjamin-Feir index (BFI) from bandwidth and steepness estimates.

    Reference:
        Serio, Marina, et al. "On the Computation of the Benjamin-Feir Index."
        Nuovo Cimento Della Societa Italiana Di Fisica C, vol. 28, Nov. 2005,
        pp. 893-903. doi:10.1393/ncc/i2005-10134-1.
    """
    kd = peak_wavenumber * water_depth

    # side-step numerical issues
    if kd > 100:
        nu = alpha = beta = 1
    else:
        nu = 1 + 2 * kd / np.sinh(2 * kd)
        alpha = -nu ** 2 + 2 + 8 * kd ** 2 * np.cosh(2 * kd) / np.sinh(2 * kd) ** 2
        beta = (
            (np.cosh(4 * kd) + 8 - 2 * np.tanh(kd) ** 2) / (8 * np.sinh(kd) ** 4)
            - (2 * np.cosh(kd) ** 2 + 0.5 * nu) ** 2
            / (np.sinh(2 * kd) ** 2 * (kd / np.tanh(kd) - (nu / 2) ** 2))
        )

    return steepness / bandwidth * nu * np.sqrt(np.maximum(beta / alpha, 0))
2b3ef715a85a6dab837a36f86c3eeeaed05f8345
3,638,515
def plaintext_property_map(name: str) -> Mapper:
    """
    Arguments
    ---------
    name : str
        Name of the property.

    Returns
    -------
    Mapper
        Property map.

    See Also
    --------
    property_map
    """
    return property_map(
        name,
        python_to_api=plaintext_to_notion,
        api_to_python=notion_to_plaintext,
        markdown=False,
    )
9b909de0eba2d8f55375896bb2acbbb53c6d759f
3,638,517
import tensorflow as tf

def pooling_layer(net_input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)):
    """TensorFlow pooling layer.

    :param net_input: Input tensor
    :param ksize: kernel size of pooling
    :param strides: stride of pooling
    :return: Tensor after pooling
    """
    return tf.nn.max_pool(net_input, ksize=ksize, strides=strides, padding='VALID')
4de6b7bdb5860cfa235975f799204522e77b9299
3,638,518
from collections import OrderedDict

def set_standard_attrs(da):
    """Add standard attributes to an xarray DataArray."""
    da.coords["lat"].attrs = OrderedDict(
        [
            ("standard_name", "latitude"),
            ("units", "degrees_north"),
            ("axis", "Y"),
            ("long_name", "latitude"),
            ("out_name", "lat"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "90.0"),
            ("valid_min", "-90.0"),
        ]
    )
    da.coords["lon"].attrs = OrderedDict(
        [
            ("standard_name", "longitude"),
            ("units", "degrees_east"),
            ("axis", "X"),
            ("long_name", "longitude"),
            ("out_name", "lon"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "180.0"),
            ("valid_min", "-180.0"),
        ]
    )
    da.coords["depth_coord"].attrs = OrderedDict(
        [
            ("standard_name", "depth"),
            ("units", "m"),
            ("axis", "Z"),
            ("long_name", "ocean depth coordinate"),
            ("out_name", "lev"),
            ("positive", "down"),
            ("stored_direction", "increasing"),
            ("valid_max", "12000.0"),
            ("valid_min", "0.0"),
        ]
    )
    da.coords["time"].attrs = OrderedDict(
        [
            ("standard_name", "time"),
            ("axis", "T"),
            ("long_name", "time"),
            ("out_name", "time"),
            ("stored_direction", "increasing"),
        ]
    )
    da.coords["time"].encoding["units"] = "days since '1900-01-01'"
    return da
21f83552466127928c9a30e9354e91c3031225aa
3,638,519
def isnotebook():
    """
    Utility function to detect if the code being run is within a jupyter
    notebook. Useful to change progress indicators, for example.

    Returns
    -------
    isnotebook : bool
        True if the function is being called inside a notebook,
        False otherwise.
    """
    try:
        shell = get_ipython().__class__.__name__
        if shell == "ZMQInteractiveShell":
            return True   # Jupyter notebook or qtconsole
        elif shell == "TerminalInteractiveShell":
            return False  # Terminal running IPython
        else:
            return False  # Other type (?)
    except NameError:
        return False
71e0a77c4bbf3afe16723b01ee5a8d08cf3b98a3
3,638,521
from typing import Dict, List, Optional, Tuple

def get_poagraph(dagmaf: DAGMaf.DAGMaf,
                 fasta_provider: missings.FastaProvider,
                 metadata: Optional[msa.MetadataCSV]) -> \
        Tuple[List[graph.Node], Dict[msa.SequenceID, graph.Sequence]]:
    """Gets poagraph from given dagmaf using fasta_provider and metadata.

    Args:
        dagmaf: DagMaf that will be converted to Poagraph.
        fasta_provider: Provider of symbols missing in DagMaf.
        metadata: MetadataCSV.

    Returns:
        Tuple of poagraph elements.
    """
    sequences_in_dagmaf = _get_sequences_ids(dagmaf)
    build_state = _BuildState(initial_nodes=[],
                              initial_sequences=_init_sequences(sequences_in_dagmaf, metadata),
                              initial_edges=_init_free_edges(sequences_in_dagmaf),
                              seqs_info=_get_seqs_info(dagmaf, sequences_in_dagmaf),
                              initial_column_id=graph.ColumnID(-1),
                              fasta_provider=fasta_provider)
    _complement_starting_nodes(build_state)

    for i, mafnode in enumerate(dagmaf.dagmaf_nodes):
        _process_block(build_state, mafnode)

    return build_state.nodes, build_state.sequences
cdc62d444cd22a8ff4c1b99382ffcc35a0ab33a6
3,638,522
def const_bool(value):
    """Create an expression representing the given boolean value.

    If value is not a boolean, it is converted to a boolean. So, for
    instance, const_bool(1) is equivalent to const_bool(True).
    """
    return ['constant', 'bool', ['{0}'.format(1 if value else 0)]]
d11d01f94b8ad20d393a39a28dbfd18cc8fa217e
3,638,523
import struct

def long_to_bytes(n, blocksize=0):
    """Convert an integer to a byte string.

    In Python 3.2+, use the native method instead::

        >>> n.to_bytes(blocksize, 'big')

    For instance::

        >>> n = 80
        >>> n.to_bytes(2, 'big')
        b'\x00P'

    If the optional :data:`blocksize` is provided and greater than zero,
    the byte string is padded with binary zeros (on the front) so that
    the total length of the output is a multiple of blocksize.

    If :data:`blocksize` is zero or not provided, the byte string will
    be of minimal length.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    pack = struct.pack
    while n > 0:
        s = pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s
1157a466ce9754c12e01f7512e879cc28a2a4b23
3,638,524
def peek(library, session, address, width):
    """Read an 8, 16, 32 or 64-bit value from the specified address.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value.
    :param width: Number of bits to read.
    :return: Data read from bus.
    :rtype: bytes
    """
    if width == 8:
        return peek_8(library, session, address)
    elif width == 16:
        return peek_16(library, session, address)
    elif width == 32:
        return peek_32(library, session, address)
    elif width == 64:
        return peek_64(library, session, address)
    raise ValueError('%s is not a valid size. Valid values are 8, 16, 32 or 64' % width)
6203a516f5a67daa67ec0f37c0e3a8818515f2de
3,638,526
import numpy as np

def mtf_from_psf(psf, dx=None):
    """Compute the MTF from a given PSF.

    Parameters
    ----------
    psf : `prysm.RichData` or `numpy.ndarray`
        object with a data property holding 2D data containing the psf,
        or the array itself
    dx : `float`
        sample spacing of the data

    Returns
    -------
    RichData
        container holding the MTF, ready for plotting or slicing.
    """
    data, df = transform_psf(psf, dx)
    cy, cx = (int(np.ceil(s / 2)) for s in data.shape)
    dat = abs(data)
    dat /= dat[cy, cx]
    return RichData(data=dat, dx=df, wavelength=None)
fb009d3068c67447d2f10c3448e91b258a0d7ca3
3,638,528
def check_intersection(vertical_line: Line, other_line: Line) -> bool:
    """
    Check for intersection between two line segments.

    :param vertical_line: The first line segment. Guaranteed to be vertical.
    :param other_line: The second line segment.
    :return: Whether or not they intersect.
    """
    intersection = get_intersection_point(vertical_line, other_line)
    return bool(intersection)
7e9279ea5976b99c9edb36ae5c59bcc69d22aa59
3,638,529
from krun.platform import detect_platform
from krun.scheduler import ManifestManager

def get_session_info(config):
    """Gets information about the session (for --info).

    Overwrites any existing manifest file. Separated from print_session_info
    for ease of testing.
    """
    platform = detect_platform(None, config)
    manifest = ManifestManager(config, platform, new_file=True)
    return {
        "n_proc_execs": manifest.total_num_execs,
        "n_in_proc_iters": manifest.get_total_in_proc_iters(config),
        "skipped_keys": manifest.skipped_keys,
        "non_skipped_keys": manifest.non_skipped_keys,
    }
25729c3838fc7b600600dd74da44a3be9fd7b46d
3,638,530
import numpy as np

def rotate(x, y, a):
    """Rotate vector (x, y) by an angle a."""
    return x * np.cos(a) + y * np.sin(a), -x * np.sin(a) + y * np.cos(a)
2858539f3de5c15072657af5f39231f8e7867b6b
3,638,531
def filt_all(list_, func):
    """Like filter, but reverses the arguments and returns a list."""
    return [i for i in list_ if func(i)]
72010b483cab3ae95d49b55ca6a70b0838b0a34d
3,638,532
def auth_user_logout(payload,
                     override_authdb_path=None,
                     raiseonfail=False,
                     config=None):
    """Logs out a user.

    Deletes the session token from the session store. On the next request
    (redirect from POST /auth/logout to GET /), the frontend will issue a
    new one. The frontend MUST unset the cookie as well.

    Parameters
    ----------
    payload : dict
        The payload dict should contain the following keys:

        - session_token: str
        - user_id: int

        In addition to these items received from an authnzerver client, the
        payload must also include the following keys (usually added in by a
        wrapping function):

        - reqid: int or str
        - pii_salt: str

    override_authdb_path : str or None
        The SQLAlchemy database URL to use if not using the default auth DB.

    raiseonfail : bool
        If True, and something goes wrong, this will raise an Exception
        instead of returning normally with a failure condition.

    config : SimpleNamespace object or None
        An object containing systemwide config variables as attributes. This
        is useful when the wrapping function needs to pass in some settings
        directly from environment variables.

    Returns
    -------
    dict
        Returns a dict containing the result of the logout request.
    """
    for key in ('reqid', 'pii_salt'):
        if key not in payload:
            LOGGER.error(
                "Missing %s in payload dict. Can't process this request." % key
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'user_id': None,
                'messages': ["Invalid user logout request."],
            }

    for key in ('session_token', 'user_id'):
        if key not in payload:
            LOGGER.error(
                '[%s] Invalid user logout request, missing %s.'
                % (payload['reqid'], key)
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'messages': ["Invalid user logout request. "
                             "No %s provided." % key],
            }

    # check if the session token exists
    session = auth_session_exists(
        {'session_token': payload['session_token'],
         'reqid': payload['reqid'],
         'pii_salt': payload['pii_salt']},
        override_authdb_path=override_authdb_path,
        raiseonfail=raiseonfail)

    if session['success']:

        # check the user ID
        if payload['user_id'] == session['session_info']['user_id']:

            deleted = auth_session_delete(
                {'session_token': payload['session_token'],
                 'reqid': payload['reqid'],
                 'pii_salt': payload['pii_salt']},
                override_authdb_path=override_authdb_path,
                raiseonfail=raiseonfail
            )

            if deleted['success']:
                LOGGER.info(
                    "[%s] User logout request successful for "
                    "session_token: %s, user_id: %s."
                    % (payload['reqid'],
                       pii_hash(payload['session_token'],
                                payload['pii_salt']),
                       pii_hash(payload['user_id'],
                                payload['pii_salt']))
                )
                return {
                    'success': True,
                    'user_id': session['session_info']['user_id'],
                    'messages': ["Logout successful."]
                }
            else:
                LOGGER.error(
                    "[%s] User logout request failed for "
                    "session_token: %s, user_id: %s. "
                    "Could not delete the session."
                    % (payload['reqid'],
                       pii_hash(payload['session_token'],
                                payload['pii_salt']),
                       pii_hash(payload['user_id'],
                                payload['pii_salt']))
                )
                return {
                    'success': False,
                    'failure_reason': "delete session failed",
                    'user_id': payload['user_id'],
                    'messages': ["Logout failed. Invalid "
                                 "session_token for user_id."]
                }

        else:
            LOGGER.error(
                "[%s] User logout request failed for "
                "session_token: %s, user_id: %s. "
                "Invalid user_id provided for "
                "corresponding session token."
                % (payload['reqid'],
                   pii_hash(payload['session_token'],
                            payload['pii_salt']),
                   pii_hash(payload['user_id'],
                            payload['pii_salt']))
            )
            return {
                'success': False,
                'failure_reason': "user does not exist",
                'user_id': payload['user_id'],
                'messages': [
                    "Logout failed. Invalid session_token for user_id."
                ]
            }

    else:
        LOGGER.error(
            "[%s] User logout request failed for "
            "session_token: %s, user_id: %s. "
            "Session token does not exist."
            % (payload['reqid'],
               pii_hash(payload['session_token'],
                        payload['pii_salt']),
               pii_hash(payload['user_id'],
                        payload['pii_salt']))
        )
        return {
            'success': False,
            'failure_reason': "session does not exist",
            'user_id': payload['user_id'],
            'messages': ["Logout failed. Invalid "
                         "session_token for user_id."]
        }
1f468a53f82a58f8c5c3f5397d6f026276a93f05
3,638,533
def rx_observer(on_next: NextHandler,
                on_error: ErrorHandler = default_error,
                on_completed: CompleteHandler = default_on_completed) -> Observer:
    """Return an observer.

    The underlying implementation uses a named tuple.

    Args:
        on_next (NextHandler): on_next handler which processes items
        on_error (ErrorHandler): on_error handler (defaults to default_error,
            which raises the exception)
        on_completed (CompleteHandler): on_completed handler (defaults to a noop)

    Returns:
        (Observer): an Observer
    """
    return ObserverDefinition(on_next=on_next, on_error=on_error, on_completed=on_completed)
2ebfd3c6b4e5ed854fdc89e76ac006fddd20ad0b
3,638,534
def _rav_setval_(self, value):
    """Assign the value for the variable

    >>> var = ...
    >>> var.value = 10
    """
    value = float(value)
    self.setVal(value)
    return self.getVal()
80ad7ddec68d5c97f72ed63dd6ba4a1101de99cb
3,638,535
import anndata
import pandas as pd
import scipy.io

def import_matrix_as_anndata(matrix_path, barcodes_path, genes_path):
    """Import a matrix as an AnnData object.

    :param matrix_path: path to the matrix ec file
    :type matrix_path: str
    :param barcodes_path: path to the barcodes txt file
    :type barcodes_path: str
    :param genes_path: path to the genes txt file
    :type genes_path: str

    :return: a new AnnData object
    :rtype: anndata.AnnData
    """
    df_barcodes = pd.read_csv(
        barcodes_path, index_col=0, header=None, names=['barcode']
    )
    df_genes = pd.read_csv(
        genes_path, header=None, index_col=0, names=['gene_id'], sep='\t'
    )
    return anndata.AnnData(
        X=scipy.io.mmread(matrix_path).tocsr(),
        obs=df_barcodes,
        var=df_genes
    )
83f5ccdaa945f26451ab2834c832e0e1ea58ce89
3,638,536
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

def get_representations(dataset, pretrained_model, alphabet, batch_size=128):
    """Returns: N x 1280 numpy array"""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pretrained_model = pretrained_model.to(device)
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=False, collate_fn=dataset.collate_fn
    )
    batch_converter = alphabet.get_batch_converter()

    sequence_representations = []
    progress_bar = tqdm(dataloader, ascii=True)
    for i, (tokens, labels, seqs) in enumerate(progress_bar):
        esm_batch = list(zip(labels, seqs))
        batch_labels, batch_strs, batch_tokens = batch_converter(esm_batch)
        batch_tokens = batch_tokens.to(device)
        with torch.no_grad():
            results = pretrained_model(
                batch_tokens, repr_layers=[33], return_contacts=True
            )
        token_representations = results["representations"][33]
        outputs = token_representations[:, 0]  # get the <cls> token
        sequence_representations.append(outputs.cpu().numpy())
    return np.vstack(sequence_representations)
7a199156810b787ae7fb8ea059ebe69b6de70250
3,638,537
import flask

def rem_hap_cands():
    """JSON endpoint to set a sample's (or set of samples') haplotype
    candidate designation to false."""
    form = flask.request.form
    samples = form['samples']
    return mds.remove_hap_cands(samples)
ca22c2af4b6079f3b03accb3b414d553da75e1e3
3,638,538
def UndistortImage(image, image_size,
                   image_rotation=None, image_center=None,
                   out_xs=None, out_ys=None,
                   direction='fwd', regenerate_grids=True,
                   **kwargs):
    """Remember the recipe for fixing gwyddion image orientation:
    `image0 = image0.T[:, ::-1]`"""
    global grids
    if out_xs is None:
        out_xs = default_out_xs
    if out_ys is None:
        out_ys = default_out_ys
    if grids is None or regenerate_grids:
        s = source_pts[direction]
        d = destination_pts[direction]
        grids = numrec.AffineGridsFromFeaturePoints(d, [s], xs=out_xs, ys=out_ys)
    in_Xs, in_Ys = getXYGrids(image.shape, image_size,
                              rotation=image_rotation, center=image_center)
    undistorted = numrec.InterpolateImageToAffineGrid(image,
                                                      grid_pts=grids['grid_pts'][0],
                                                      image_xgrid=in_Xs,
                                                      image_ygrid=in_Ys,
                                                      **kwargs)
    return AWA(undistorted, axes=[out_xs, out_ys])
12cc1e1e428b8a860b0b29a6b81169cb6c1dc73d
3,638,541
def quartic_oscillator(grids, k=1.):
    """Potential of a quantum quartic oscillator.

    Args:
        grids: numpy array of grid points for evaluating the 1d potential.
            (num_grids,)
        k: strength constant for the potential.

    Returns:
        vp: Potential on the grid. (num_grids,)
    """
    vp = 0.5 * k * grids ** 4
    return vp
c4a386816cd85e24080d62365d2bcd25b6735d5f
3,638,542
import numpy as np
from scipy import sparse

def compute_row_similarities(A):
    """
    Compute pairwise similarities between the rows of a binary sparse matrix.

    Parameters
    ----------
    A: scipy csr_matrix, shape (rows, cols)
        Binary matrix.

    Returns
    -------
    sim: numpy array, shape (rows, rows)
        Pairwise row similarities.
    """
    # normalize A along the row axis
    # 1) compute the per-row norm
    norm = np.sqrt(A.sum(axis=1))  # A is binary: sum of 1^2 == sum of 1
    norm = sparse.csr_matrix(norm)  # save as sparse
    # 2) build a left-multiplying norm matrix
    # (https://stackoverflow.com/questions/16043299/substitute-for-numpy-broadcasting-using-scipy-sparse-csc-matrix)
    # summary: sparse arrays don't broadcast, so something like
    # np.where(norm[:, na]==0., 0., A/norm[:, na]) wouldn't work;
    # we need the left-multiplying trick to achieve the same effect
    data = 1. / norm.data
    indices = np.where(np.diff(norm.indptr) != 0)[0]
    indptr = norm.indptr
    rows = A.shape[0]
    left_norm = sparse.csr_matrix((data, indices, indptr), shape=(rows, rows))
    # 3) compute the row-wise normalized version of A
    A_norm = left_norm.dot(A)
    # compute pairwise row similarities
    sim = A_norm.dot(A_norm.T)
    return sim
96ab44ec15f94bf666da248100a98f282119caf1
3,638,543
import numpy as np

def sha9(R, S):
    """Shape functions for a 9-noded quad element.

    Parameters
    ----------
    R : float
        Natural coordinate for a point within the element.
    S : float
        Natural coordinate for a point within the element.

    Returns
    -------
    N : Numpy array
        Array of interpolation functions.

    Examples
    --------
    At the element center (0, 0) only the center node contributes:

    >>> N = sha9(0, 0)
    >>> float(N[0, 16])
    1.0

    At the corner (1, 1) only node 2 contributes:

    >>> N = sha9(1, 1)
    >>> float(N[0, 4])
    1.0
    """
    N = np.zeros((2, 18))
    SN = np.zeros(9)
    ONE = 1.0
    QUART = 0.25
    HALF = 0.5
    RP = ONE + R
    RM = ONE - R
    RMS = ONE - R*R
    SP = ONE + S
    SM = ONE - S
    SMS = ONE - S*S

    SN[8] = RMS*SMS
    SN[7] = HALF*SMS*RM - HALF*SN[8]
    SN[6] = HALF*RMS*SP - HALF*SN[8]
    SN[5] = HALF*SMS*RP - HALF*SN[8]
    SN[4] = HALF*RMS*SM - HALF*SN[8]
    SN[0] = QUART*RM*SM - HALF*SN[7] - HALF*SN[4] - QUART*SN[8]
    SN[1] = QUART*RP*SM - HALF*SN[5] - HALF*SN[4] - QUART*SN[8]
    SN[2] = QUART*RP*SP - HALF*SN[5] - HALF*SN[6] - QUART*SN[8]
    SN[3] = QUART*RM*SP - HALF*SN[7] - HALF*SN[6] - QUART*SN[8]

    N[0, ::2] = SN
    N[1, 1::2] = SN

    return N
ba34cde6b5673853d34b9e074e2fbc05dc845aa5
3,638,544
import numpy as np

def padding(seq, size, mode):
    """
    Parameters
    ----------
    seq: np.ndarray
        The sequence to be padded.
    size: int
        Target length.
    mode: str
        Select padding mode among {"zero", "repeat"}.

    Returns
    -------
    seq: np.ndarray
    """
    if mode == "zero":
        seq = np.array(trimmer(seq, size, filler=0))
    elif mode == "repeat":
        seq = np.array(repeat_padding(seq, size))
    return seq
3a0a070f784a355ead8439ff63f09918fa401014
3,638,545
import tensorflow as tf

def get_dense_span_ends_from_starts(dense_span_starts, dense_span_ends):
    """For every mention start position, find the corresponding end position."""
    seq_len = tf.shape(dense_span_starts)[0]
    start_pos = tf.cast(tf.where(tf.equal(dense_span_starts, 1)), tf.int32)
    end_pos = tf.cast(
        tf.squeeze(tf.where(tf.equal(dense_span_ends, 1)), 1), tf.int32)
    dense_span_ends_from_starts = tf.zeros(seq_len, dtype=tf.int32)
    dense_span_ends_from_starts = tf.tensor_scatter_nd_add(
        dense_span_ends_from_starts, start_pos, end_pos)
    return dense_span_ends_from_starts
d825ed109b6055ca84adf46f6e5fd91cb5dd513a
3,638,546
def bb_to_plt_plot(x, y, w, h):
    """
    Converts a bounding box to parameters for a plt.plot([..], [..])
    call for actual plotting with pyplot.
    """
    X = [x, x, x + w, x + w, x]
    Y = [y, y + h, y + h, y, y]
    return X, Y
10ea3d381969b7d30defdfdbbac0a8d58d06d4d4
3,638,547
def handler404(request, *args):
    """
    Renders the 404 page.

    :param request: the request object used
    :type request: HttpRequest
    """
    return render(request, '404.html', status=404)
2ae6e036bb56b46ee16a4c0bec4182ba999f14ed
3,638,548
import numpy as np

def merge_dimensions(z, axis, sizes):
    """Merge dimensions of a tensor into one dimension. This operation is
    the opposite of :func:`split_dimension`.

    Args:
        z (tensor): Tensor to merge.
        axis (int): Axis to merge into.
        sizes (iterable[int]): Sizes of dimensions to merge.

    Returns:
        tensor: Reshaped version of `z`.
    """
    shape = B.shape(z)
    # The indexing below will only be correct for positive `axis`, so resolve
    # the index.
    axis = resolve_axis(z, axis)
    return B.reshape(
        z,
        *shape[: axis - len(sizes) + 1],
        np.prod(sizes),
        *shape[axis + 1:],
    )
5ef62cd90ebf5bd9276f334a65a7a9075f5d3710
3,638,549
import collections
import re

import tensorflow as tf

def get_assignment_map_replaced(init_ckpt, name_replacement_dict={},
                                list_vars=None):
    """
    name_replacement_dict = { old_name_str_chunk: new_name_str_chunk }
    """
    if list_vars is None:
        list_vars = tf.global_variables()

    name_to_variable = collections.OrderedDict()
    for var in list_vars:
        name = var.name
        m = re.match("^(.*):\\d+$", name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var

    ckpt_vars = tf.train.list_variables(init_ckpt)

    assignment_map = collections.OrderedDict()
    for x in ckpt_vars:
        (name, var) = (x[0], x[1])

        for k, v in name_replacement_dict.items():
            if k in name:
                name_new = name.replace(k, v)
                break
        else:
            continue

        if name_new not in name_to_variable:
            continue

        assignment_map[name] = name_new
        print("name_old: %s" % name)
        print("name_new: %s" % name_new)

    return assignment_map
fd7df6630f84bde9caf747540c05729b8898ffa0
3,638,550
def RULE110():
    """RULE 110 cellular automaton node.

    .. code::

        000 : 0
        001 : 1
        010 : 1
        011 : 1
        100 : 0
        101 : 1
        110 : 1
        111 : 0
    """
    return BooleanNode.from_output_list(outputs=[0, 1, 1, 1, 0, 1, 1, 0],
                                        name="RULE 110")
3c79a7b6c25f031fdeac4a86f2afc770ad71ea23
3,638,551
import jieba

def search_cut(sentence):
    """
    HMM-based segmentation.

    :param sentence:
    :return:
    """
    return jieba.lcut_for_search(sentence)
7ee0f7eb1a16cd24920b98e38387b2c9b576990f
3,638,552
from collections import Counter

def count_items(column_list: list):
    """
    Count the types (values) and the number of items of each type in a
    given list.

    args:
        column_list (list): List of data with different types of values

    return:
        Returns two values: a list of types (list) and the total count of
        items of each type (list)
    """
    counter = Counter(column_list)
    item_types = list(counter.keys())
    count_items = list(counter.values())
    return item_types, count_items
06cf25aed4d0de17fa8fb11303c9284355669cf5
3,638,553
import cloudpickle

def py_call(obj, inputs=(), direct_args=()):
    """Create a task that calls Python code.

    Example:
        >>> def hello(x): return b"Hello " + x.read()
        >>> a = tasks.const("Loom")
        >>> b = tasks.py_call(hello, (a,))
        >>> client.submit(b)
        b'Hello Loom'
    """
    task = Task()
    task.task_type = PY_CALL
    task.inputs = (obj,) + tuple(inputs)
    task.config = cloudpickle.dumps(tuple(direct_args))
    task.resource_request = cpu1
    return task
f89a5876fcf9b4c192f2b7c6d1362bf5a97e399c
3,638,554
def to_graph(grid):
    """
    Build adjacency list representation of graph

    Land cells in grid are connected if they are vertically or
    horizontally adjacent
    """
    adj_list = {}
    n_rows = len(grid)
    n_cols = len(grid[0])
    land_val = "1"
    for i in range(n_rows):
        for j in range(n_cols):
            if grid[i][j] == land_val:
                adj_list[(i, j)] = []
                if i > 0 and grid[i-1][j] == land_val:
                    adj_list[(i, j)].append((i-1, j))
                if i < n_rows - 1 and grid[i+1][j] == land_val:
                    adj_list[(i, j)].append((i+1, j))
                if j > 0 and grid[i][j-1] == land_val:
                    adj_list[(i, j)].append((i, j-1))
                if j < n_cols - 1 and grid[i][j+1] == land_val:
                    adj_list[(i, j)].append((i, j+1))
    return adj_list
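# For example, a 2x3 grid with an L-shaped island:
grid = [
    ["1", "1", "0"],
    ["0", "1", "0"],
]
print(to_graph(grid))
# {(0, 0): [(0, 1)], (0, 1): [(1, 1), (0, 0)], (1, 1): [(0, 1)]}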
ebdd0406b123a636a9d380391ef4c13220e2dabd
3,638,555
import panflute as pf  # assumed: `pf` is the panflute pandoc-filter library


def validate_doc(doc):
    """
    Check whether the given document is a valid dictionary, that is,
    whether it contains a single definition list.
    """
    return len(doc.content) == 1 and \
        isinstance(doc.content[0], pf.DefinitionList)
c60799ebbdaa7ec2e3a7e6607853ff021a40ed17
3,638,556
def V_bandpass(V, R_S, C, L, R_L, f):
    """
    Filter output voltage: the input voltage minus the current
    times the source impedance.
    """
    # `Z_bandpass` (defined elsewhere) gives the impedance of the
    # band-pass network at frequency f.
    # current in circuit
    I = V / (R_S + Z_bandpass(C, L, R_L, f))
    # voltage across circuit
    V_out = V - I * R_S
    return V_out
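# Runnable sketch with an assumed series-RLC form for Z_bandpass (this form
# is an assumption for illustration, not taken from the source):
import math

def Z_bandpass(C, L, R_L, f):
    w = 2 * math.pi * f
    return complex(R_L, w * L - 1 / (w * C))

# At resonance f0 = 1 / (2*pi*sqrt(L*C)) the reactances cancel, so the
# output is set purely by the R_S/R_L divider: |V_out| = 0.5 V here.
L_, C_, R_S_, R_L_ = 1e-3, 1e-6, 50.0, 50.0
f0 = 1 / (2 * math.pi * math.sqrt(L_ * C_))
print(abs(V_bandpass(1.0, R_S_, C_, L_, R_L_, f0)))  # ~0.5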
c21c54e7065a32531dca417eb7e50ea63db820d8
3,638,559
from admiral.celery import celery as celery_app


def celery():
    """Celery app test fixture."""
    # Alias the import so this fixture does not shadow, and return, itself.
    return celery_app
69f672e1c6a568e14a4ad9f5df723b454a346b03
3,638,561
from django.core.cache import cache  # assumed: the Django cache backend


def perm_cache(func):
    """
    Cache the result of a permission check, keyed by user plus
    request parameters.
    """
    def _deco(self, request, view):
        # Only cache permissions for read-only (GET) requests.
        if request.method != "GET":
            return func(self, request, view)
        user = request.user.username
        kwargs = "_".join("{}:{}".format(_k, _w)
                          for _k, _w in list(view.kwargs.items()))
        cache_name = "{}__{}__{}".format(user, view.action, kwargs)
        perm = cache.get(cache_name)
        if perm is None:
            perm = func(self, request, view)
            cache.set(cache_name, perm, 60)
        return perm
    return _deco
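# Usage sketch on a DRF-style permission class (the class and permission
# string below are illustrative, not from the source):
from rest_framework.permissions import BasePermission

class ProjectPermission(BasePermission):
    @perm_cache
    def has_permission(self, request, view):
        # The (possibly expensive) check is now cached for 60 seconds
        # per user/action/kwargs combination on GET requests.
        return request.user.has_perm("app.view_project")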
4ca53057b12efb15dddb422b3aaaddd11898f4bd
3,638,562
def ast_walker(handler):
    """
    A generic AST walker decorator.

    Decorates either a function or a class (if dispatching based on node type
    is required). ``handler`` will be wrapped in a
    :py:class:`~peval.Dispatcher` instance; see :py:class:`~peval.Dispatcher`
    for the details of the required class structure.

    Returns a callable with the signature::

        def walker(state, node, ctx=None)

    :param state: a dictionary with the state which will be passed to every
        handler call. It will be converted into a
        :class:`~peval.tools.immutableadict` object at the start of the
        traversal. Handlers can update it by returning a modified version.
    :param node: an ``ast.AST`` object to traverse.
    :param ctx: a dictionary with the global context which will be passed to
        every handler call. It will be converted into a
        :class:`~peval.tools.immutableadict` object at the start of the
        traversal.
    :returns: a tuple ``(state, new_node)``, where ``state`` is the same
        object which was passed as the corresponding parameter. Does not
        mutate ``node``.

    ``handler`` will be invoked for every node during the AST traversal
    (depth-first, pre-order). The ``handler`` function, if it is a function,
    or its static methods, if it is a class, must have the signature::

        def handler([state, node, ctx, prepend, visit_after, visiting_after,
            skip_fields, walk_field,] **kwds)

    The names of the arguments must be exactly as written here, but their
    order is not significant (they will be passed as keywords). If ``handler``
    is a class, the default handler is a "pass-through" function that does
    not change the node or the state.

    :param state: the (supposedly immutable) state object passed during the
        initial call.
    :param node: the current node
    :param ctx: the (supposedly immutable) dictionary with the global context
        passed during the initial call. In addition to normal dictionary
        methods, its values can be alternatively accessed as attributes
        (e.g. either ``ctx['value']`` or ``ctx.value``).
    :param prepend: a function ``prepend(lst)`` which, when called, prepends
        the list of ``ast.AST`` objects to whatever is returned by the
        handler of the closest statement block that includes the current
        node. These nodes are not traversed automatically.
    :param visit_after: a function of no arguments, which, when called,
        schedules to call the handler again on this node when all of its
        fields are traversed (providing that after calling it, the handler
        returns an ``ast.AST`` object and not a list or ``None``). During the
        second call this parameter is set to ``None``.
    :param visiting_after: set to ``False`` during the normal (pre-order)
        visit, and to ``True`` during the visit caused by ``visit_after()``.
    :param skip_fields: a function of no arguments, which, when called,
        orders the walker not to traverse this node's fields.
    :param walk_field: a function
        ``walk_field(state, value, block_context=False) -> (new_state, new_value)``,
        which traverses the given field value. If the value contains a list
        of statements, ``block_context`` must be set to ``True``, so that
        ``prepend`` could work correctly.
    :returns: must return a tuple ``(new_state, new_node)``, where
        ``new_node`` is one of:

        * ``None``, in which case the corresponding node will be removed from
          the parent list or the parent node field.
        * The passed ``node`` (unchanged). By default, its fields will be
          traversed (unless ``skip_fields()`` is called).
        * A new ``ast.AST`` object, which will replace the passed ``node`` in
          the AST. By default, its fields will not be traversed, and the
          handler must do it manually if needed (by calling
          ``walk_field()``).
        * If the current node is an element of a list, a list of ``ast.AST``
          objects can be returned, which will be spliced in place of the
          node. Same as in the previous case, these new nodes will not be
          automatically traversed.
    """
    return _Walker(handler, transform=True, inspect=True)
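# Usage sketch built only from the contract above (the handler renames
# every ``ast.Name`` matching ctx.src; names and values are illustrative):
import ast

@ast_walker
def rename(state, node, ctx, **kwds):
    if isinstance(node, ast.Name) and node.id == ctx.src:
        return state, ast.Name(id=ctx.dest, ctx=node.ctx)
    return state, node

tree = ast.parse("y = x + x")
state, new_tree = rename({}, tree, ctx=dict(src="x", dest="z"))
print(ast.dump(new_tree))  # the two ``x`` loads are now ``z``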
978e6718d81663914017af89cf41101ca68dd2bb
3,638,563
def html_escape(text):
    """Produce entities within text."""
    # `html_escape_table` is a module-level dict mapping characters to
    # their HTML entities; unmapped characters pass through unchanged.
    L = []
    for c in text:
        L.append(html_escape_table.get(c, c))
    return "".join(L)
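# Self-contained sketch with a minimal escape table (the real module-level
# table may cover more characters):
html_escape_table = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
    "'": "&#39;",
}

print(html_escape('<a href="x">Q&A</a>'))
# &lt;a href=&quot;x&quot;&gt;Q&amp;A&lt;/a&gt;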
de73c127de8b6338c5db5c9ba7d1f5ebbd6d23a9
3,638,564
def qs_without_parameter(arg1, arg2):
    """
    Removes an argument from the GET URL.

    Use: {{ request|qs_without_parameter:'page' }}

    Args:
        arg1: the GET parameters (a mapping of name to value list)
        arg2: parameter to remove
    """
    parameters = {}
    for key, value in arg1.items():
        if parameters.get(key, None) is None and arg2 != key:
            try:
                parameters[key] = value[0]
            except IndexError:
                parameters[key] = value
    return "&".join([k + "=" + v for k, v in parameters.items()])
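# For instance, with parameters parsed via urllib.parse.parse_qs (one
# plausible shape for arg1, a mapping of name to value list):
from urllib.parse import parse_qs

params = parse_qs("page=2&sort=name&filter=active")
print(qs_without_parameter(params, "page"))
# sort=name&filter=active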
649931de5490621c92513877b21cb8cfce8d66ff
3,638,565
def find_power_graph(I, J, w_intersect=10, w_difference=1):
    """Takes a graph with edges I,J, and returns a power graph with routing
    edges Ir,Jr and power edges Ip,Jp. Note that this treats the graph as
    undirected, and will internally convert edges to be undirected if not
    already."""
    # `cpp` is the package's compiled extension module (SWIG bindings,
    # per the routing_swig call below).
    n = int(max(max(I), max(J)) + 1)
    Ir, Jr, Ip, Jp = cpp.routing_swig(n, I, J, w_intersect, w_difference)
    return Ir, Jr, Ip, Jp
9e682eebd9664863d80689f0aa718f30e3ad611a
3,638,566
import string


def getcomments(pyObject):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    # `findsource`, `ismodule` and `indentsize` come from the surrounding
    # (Python 2-era) inspect-style module; the `string` function calls are
    # likewise legacy Python 2 idioms, kept as-is.
    try:
        lines, lnum = findsource(pyObject)
    except (IOError, TypeError):
        return None

    if ismodule(pyObject):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!':
            start = 1
        while start < len(lines) and string.strip(lines[start]) in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')

    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
           indentsize(lines[end]) == indent:
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0:
                        break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
f58421f176b42ecb2e1e883f48deb31025b13559
3,638,567
import io
import time

import requests
from PIL import Image  # assumed: `Image` is PIL's Image module


def crack_captcha(headers):
    """
    Crack a captcha: the complete demo pipeline.

    :return: the recognized captcha string
    """
    # The helpers below (get_clear_bin_image, get_crop_imgs, get_feature,
    # convert_feature_to_vector, svm_load_model, svm_predict, data_root)
    # are defined elsewhere in the project.
    currentTime = str(int(time.time()) * 1000)
    # Request a captcha image from the target URL.
    rand_captcha_url = 'http://59.49.77.231:81/getcode.asp?t=' + currentTime
    res = requests.get(rand_captcha_url, stream=True, headers=headers)
    f = io.BytesIO()
    for chunk in res.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            f.write(chunk)
            f.flush()
    # Load the fetched captcha image from memory.
    img = Image.open(f)
    # Denoise and binarize the image.
    bin_clear_img = get_clear_bin_image(img)
    # Split the image into single characters, kept in memory; e.g. a 4-digit
    # captcha yields 4 child images.
    child_img_list = get_crop_imgs(bin_clear_img)

    # Load the SVM model for prediction.
    svm_model_name = 'svm_model_file'
    model_path = data_root + '/svm_train/' + svm_model_name
    model = svm_load_model(model_path)

    img_ocr_name = ''
    for child_img in child_img_list:
        # Reduce each character image to a feature list.
        img_feature_list = get_feature(child_img)
        yt = [0]  # test-data label placeholder
        # Convert the features into a single standardized SVM input vector.
        xt = convert_feature_to_vector(img_feature_list)
        p_label, p_acc, p_val = svm_predict(yt, xt, model)
        img_ocr_name += ('%d' % p_label[0])  # append the recognized digit

    # Optionally save the recognized image for inspection:
    # uuid_tag = str(uuid.uuid1())  # random uuid string (simple to implement)
    # img_save_folder = data_root + '/crack_img_res'
    # img.save(img_save_folder + '/' + img_ocr_name + '__' + uuid_tag + '.png')
    # # e.g. __0067__77b10a28f73311e68abef0def1a6bbc8.png
    f.close()
    return img_ocr_name
538843289a64dde1229f7df0a260632fbbd557b6
3,638,568
from itertools import chain

import numpy as np

from clawpack.pyclaw.util import check_diff, gen_variants

from . import sill


def test_2d_sill():
    """test_2d_sill

    Tests against expected classic solution of shallow water equations
    over a sill."""

    def verify_expected(expected):
        def sill_verify(claw):
            q0 = claw.frames[0].state.get_q_global()
            qfinal = claw.frames[claw.num_output_times].state.get_q_global()
            if q0 is not None and qfinal is not None:
                dx, dy = claw.solution.domain.grid.delta
                total_mass = dx * dy * np.linalg.norm(
                    qfinal[0, :, :].reshape(-1), 1)
                return check_diff(expected, total_mass, reltol=1e-3)
            else:
                return
        return sill_verify

    classic_tests = gen_variants(sill.setup, verify_expected(3.7439),
                                 kernel_languages=["Fortran"],
                                 solver_type='classic', outdir=None)
    for test in chain(classic_tests):
        yield test
5d37c3ad21d842c3f03d1b464609f94ee86e1496
3,638,569
from PIL import ImageDraw  # assumed: PIL's drawing module, per ImageDraw.Draw below

# `Interval`, `TextBlock`, `Rectangle`, the DEFAULT_* constants, and the
# `_draw_*`/`_create_*`/`_calculate_*` helpers are defined elsewhere in
# the module.


def draw_box(
    canvas,
    layout,
    box_width=None,
    box_alpha=0,
    color_map=None,
    show_element_id=False,
    show_element_type=False,
    id_font_size=None,
    id_font_path=None,
    id_text_color=None,
    id_text_background_color=None,
    id_text_background_alpha=1,
):
    """Draw the layout region on the input canvas (image).

    Args:
        canvas (:obj:`~np.ndarray` or :obj:`~PIL.Image.Image`):
            The canvas to draw the layout boxes on.
        layout (:obj:`Layout` or :obj:`list`):
            The layout of the canvas to show.
        box_width (:obj:`int`, optional):
            Set to change the width of the drawn layout box boundary.
            Defaults to None, when the boundary is automatically calculated
            as :const:`DEFAULT_BOX_WIDTH_RATIO` * the maximum of
            (height, width) of the canvas.
        box_alpha (:obj:`float`, optional):
            A float in the range [0, 1]. Set to change the alpha of the
            drawn layout box. Defaults to 0: the layout box will be fully
            transparent.
        color_map (dict, optional):
            A map from `block.type` to colors, e.g., `{1: 'red'}`. You can
            set it to `{}` to use only :const:`DEFAULT_OUTLINE_COLOR` for
            the outlines. Defaults to None, when a color palette is
            automatically created based on the input layout.
        show_element_id (bool, optional):
            Whether to display `block.id` in the top-left corner of the
            block. Defaults to False.
        show_element_type (bool, optional):
            Whether to display `block.type` in the top-left corner of the
            block. Defaults to False.
        id_font_size (int, optional):
            Set to change the font size used for drawing `block.id`.
            Defaults to None, when the size is set to
            :const:`DEFAULT_FONT_SIZE`.
        id_font_path (:obj:`str`, optional):
            Set to change the font used for drawing `block.id`. Defaults to
            None, when the :const:`DEFAULT_FONT_OBJECT` is used.
        id_text_color (:obj:`str`, optional):
            Set to change the text color used for drawing `block.id`.
            Defaults to None, when the color is set to
            :const:`DEFAULT_TEXT_COLOR`.
        id_text_background_color (:obj:`str`, optional):
            Set to change the text region background used for drawing
            `block.id`. Defaults to None, when the color is set to
            :const:`DEFAULT_TEXT_BACKGROUND`.
        id_text_background_alpha (:obj:`float`, optional):
            A float in the range [0, 1]. Set to change the alpha of the
            drawn text. Defaults to 1: the text box will be solid.

    Returns:
        :obj:`PIL.Image.Image`: An Image object containing the `layout`
        drawn upon the input `canvas`.
    """
    assert 0 <= box_alpha <= 1, ValueError(
        f"The box_alpha value {box_alpha} is not within range [0,1]."
    )
    assert 0 <= id_text_background_alpha <= 1, ValueError(
        f"The id_text_background_alpha value {id_text_background_alpha} is not within range [0,1]."
    )

    draw = ImageDraw.Draw(canvas, mode="RGBA")

    id_text_background_color = id_text_background_color or DEFAULT_TEXT_BACKGROUND
    id_text_color = id_text_color or DEFAULT_TEXT_COLOR

    if box_width is None:
        box_width = _calculate_default_box_width(canvas)

    if show_element_id or show_element_type:
        font_obj = _create_font_object(id_font_size, id_font_path)

    if color_map is None:
        all_types = set([b.type for b in layout if hasattr(b, "type")])
        color_map = _create_color_palette(all_types)

    for idx, ele in enumerate(layout):

        if isinstance(ele, Interval):
            ele = ele.put_on_canvas(canvas)

        outline_color = (
            DEFAULT_OUTLINE_COLOR
            if not isinstance(ele, TextBlock)
            else color_map.get(ele.type, DEFAULT_OUTLINE_COLOR)
        )

        _draw_box_outline_on_handler(draw, ele, outline_color, box_width)
        _draw_transparent_box_on_handler(draw, ele, outline_color, box_alpha)

        if show_element_id or show_element_type:
            text = ""
            if show_element_id:
                ele_id = ele.id or idx
                text += str(ele_id)
            if show_element_type:
                text = str(ele.type) if not text else text + ": " + str(ele.type)

            start_x, start_y = ele.coordinates[:2]
            text_w, text_h = font_obj.getsize(text)

            text_box_object = Rectangle(
                start_x, start_y, start_x + text_w, start_y + text_h
            )
            # Add a small background for the text
            _draw_transparent_box_on_handler(
                draw,
                text_box_object,
                id_text_background_color,
                id_text_background_alpha,
            )

            # Draw the ids
            draw.text(
                (start_x, start_y),
                text,
                fill=id_text_color,
                font=font_obj,
            )

    return canvas
9d8ca19a35e91c6e8670aed05c2e61b2c89958c5
3,638,570
from django.shortcuts import render


def login(request):
    """Home view, displays the login mechanism."""
    return render(request, 'duck/login.html')
5d4474d4ce7bb8f7327e1a005fe9e485d8784ec7
3,638,571