content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
import numpy as np

def map_vocabulary(docs, vocabulary):
    """Maps sentences and labels to vectors based on a vocabulary."""
    mapped = np.array([[vocabulary[word] for word in doc] for doc in docs])
    return mapped
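# Hedged usage sketch (not part of the original snippet): the vocabulary is
# assumed to be a plain dict mapping tokens to integer ids, and all docs are
# assumed to have equal length so np.array yields a 2-D integer matrix.
vocabulary = {"the": 0, "cat": 1, "sat": 2, "mat": 3}
docs = [["the", "cat", "sat"], ["the", "mat", "sat"]]
print(map_vocabulary(docs, vocabulary))  # [[0 1 2] [0 3 2]]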
b5b39aeac6306709a4b4ac10a29d40a2006d57ff
3,638,331
def mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
    """MobileNet V3 Large (Minimalistic) 1.0"""
    # NOTE: for training, set drop_rate=0.2
    model = _gen_mobilenet_v3('mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
    return model
717a67b1ab7cb0ad7a6c8d40ea4b0b29108eff94
3,638,332
def get_identity(user, identity_uuid):
    """Given the (request) user and an identity uuid, return None or an active Identity."""
    try:
        identity_list = get_identity_list(user)
        if not identity_list:
            raise CoreIdentity.DoesNotExist(
                "No identities found for user %s" % user.username)
        identity = identity_list.get(uuid=identity_uuid)
        return identity
    except CoreIdentity.DoesNotExist:
        logger.warn("Identity %s DoesNotExist" % identity_uuid)
        return None
800e47d8782fc5e71e97192f76713032eade9441
3,638,333
def same_strange_looking_function(param1, callback_fn):
    """
    This function is documented, but the function is identical to
    some_strange_looking_function and should result in the same hash.
    """
    tail = param1[-1]
    # return the callback value from the tail of param, whatever that is
    return callback_fn(tail)
438becf6803e6b25a200a34e18eb648aaa4b6fbb
3,638,334
def __extractFunction(text, jsDoc, classConstructor):
    """
    Extracts a function depending on its pattern:
        'function declaration':
            function <name>(<parameters>) { <realization> }[;]
        'named function expression':
            <variable> = function <name>(<parameters>) { <realization> }[;]
        'unnamed function expression':
            <variable> = function(<parameters>) { <realization> }[;]
        'alias function':
            <variable> = <name>(<parameters>)[;]

    @param {string} text.
    @param {jsCodeParser.jsDoc.JsDoc} jsDoc.
    @param {(jsCodeParser.elements.Class|jsCodeParser.elements.Method)} classConstructor.
    @return {(jsCodeParser.elements.Class|jsCodeParser.elements.Method)} Element.
    """
    parameters = extractTextBetweenTokens(text, '(')
    if not parameters:
        return None
    end = text.find(parameters) + len(parameters)
    realization = text[end:].strip()
    if realization[0] == '{':
        realization = extractTextBetweenTokens(realization, '{')
        end = text.find(realization) + len(realization)
    if end < len(text) and text[end] == ';':
        end += 1
    code = text[:end].strip()
    return classConstructor(code, jsDoc)
992604ccd1e56da6706cf2e4ec2955c2c9ecfa7e
3,638,336
def vocabulary_size(tokens):
    """Returns the vocabulary size, defined as the number of distinct
    alphabetic tokens as determined by the Python str.isalpha method.
    This is a case-sensitive count. `tokens` is a list of token strings."""
    vocab_list = set(token for token in tokens if token.isalpha())
    return len(vocab_list)
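# Hedged usage sketch (not part of the original snippet): punctuation and
# duplicate tokens are excluded, and case matters.
print(vocabulary_size(["The", "the", "cat", "cat", "!"]))  # 3 -> {"The", "the", "cat"}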
5e26e1be98a3e82737277458758f0fd65a64fe8f
3,638,337
from typing import Any, Dict, Optional, Tuple


def max_iteration_for_analysis(query: Dict[str, Any],
                               db: cosem_db.MongoCosemDB,
                               check_evals_complete: bool = False,
                               conv_it: Optional[Tuple[int, int]] = None) -> Tuple[int, bool]:
    """
    Find the first iteration that meets the convergence criterion like
    `convergence_iteration`, but return a minimum iteration of 700k if the
    convergence criterion is met at an earlier iteration. To avoid
    re-computation if `convergence_iteration` has explicitly been called
    before, the previous output can be passed in explicitly.

    Args:
        query: Dictionary specifying which set of configurations to consider
            for the maximum iteration. This will typically contain keys for
            setups, label and crop.
        db: Database containing the evaluation results.
        conv_it: Output of `convergence_iteration` if already known. Otherwise
            None, and `convergence_iteration` will be called.
        check_evals_complete: Whether to first check whether the considered
            evaluations are consistent across the queries (i.e. same for all
            crops/labels/raw_datasets within one setup, at least to 500k, and
            if above threshold by 500k, at least to 700k). Should generally be
            set to True unless this has already been checked.

    Returns:
        The max iteration and a validity flag. The flag is False if none of
        the results produce above-threshold segmentations, if the convergence
        condition isn't met anywhere, or if results aren't evaluated to at
        least 700k iterations.

    Raises:
        ValueError if no evaluations are found for the given query.
    """
    if conv_it is None:
        it, valid = convergence_iteration(query, db, check_evals_complete=check_evals_complete)
    else:
        it, valid = conv_it
    if valid != 2:
        it = max(it, 700000)
    return it, bool(valid)
b5d0bebd2af634ac72f8bc318276d0f7c03114f2
3,638,338
import numpy as np

def getMatirces(Dynamics, Cost):
    """
    This function takes the dynamics class as input and outputs the required
    matrices and cvxpy variables to turn the covariance steering problem into
    a finite-dimensional optimization problem.
    """
    Alist = Dynamics.Alist
    Blist = Dynamics.Blist
    Dlist = Dynamics.Dlist
    zlist = Dynamics.zlist
    sigmaWlist = Dynamics.sigmaWlist
    Rulist = Cost.Rulist
    Rvlist = Cost.Rvlist
    N = len(Alist)  # Problem horizon
    nx, nu, nv = Alist[0].shape[1], Blist[0].shape[1], Dlist[0].shape[1]
    # Set constant matrices:
    Gamma = []
    for i in range(N + 1):
        Gamma.append(Phi_func(Alist, i, 0))
    Gamma = np.vstack(Gamma)
    block_Hu, block_Hv, block_Hw = [], [], []
    for i in range(N + 1):
        row_Hu, row_Hv, row_Hw = [], [], []
        for j in range(N):
            if j < i:
                row_Hu.append(Phi_func(Alist, i, j) @ Blist[j])
                row_Hv.append(Phi_func(Alist, i, j) @ Dlist[j])
                row_Hw.append(Phi_func(Alist, i, j))
            else:
                row_Hu.append(np.zeros((nx, nu)))
                row_Hv.append(np.zeros((nx, nv)))
                row_Hw.append(np.zeros((nx, nx)))
        block_Hu.append(np.hstack(row_Hu))
        block_Hv.append(np.hstack(row_Hv))
        block_Hw.append(np.hstack(row_Hw))
    Hu, Hv, Hw = np.vstack(block_Hu), np.vstack(block_Hv), np.vstack(block_Hw)
    Z = np.vstack(zlist)
    Wbig = np.zeros((nx * N, nx * N))
    for k in range(N):
        Wbig[k * nx:(k + 1) * nx, k * nx:(k + 1) * nx] = sigmaWlist[k]
    Rubig = np.zeros((nu * N, nu * N))
    Rvbig = np.zeros((nv * N, nv * N))
    for k in range(N):
        Rubig[k * nu:(k + 1) * nu, k * nu:(k + 1) * nu] = Rulist[k]
        Rvbig[k * nv:(k + 1) * nv, k * nv:(k + 1) * nv] = Rvlist[k]
    return Gamma, Hu, Hv, Hw, Z, Wbig, Rubig, Rvbig
50de11ba3f3d1528f7ff577861613b96f8e35254
3,638,339
def get_transit_boundary_indices(time, transit_size):
    """
    Determines transit boundaries from the sorted times of a transit cut-out.

    :param time: (1D np.array) sorted times of the transit cut-out
    :param transit_size: (float) size of the transit crop window in days
    :returns tuple:
        [0] list of transit start indices (int)
        [1] list of sequence lengths (int) of each transit
    """
    sequence_lengths = []
    transit_start_indices = [0]
    for i, t in enumerate(time):
        if t - time[transit_start_indices[-1]] > transit_size:
            sequence_lengths.append(i - transit_start_indices[-1])
            transit_start_indices.append(i)
    # last length is from last transit start till the end of the array
    sequence_lengths.append(len(time) - transit_start_indices[-1])
    return transit_start_indices, sequence_lengths
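# Hedged usage sketch (not part of the original snippet): two transits,
# 0.2 days wide, separated by a large gap.
import numpy as np
t = np.array([0.0, 0.05, 0.1, 5.0, 5.05, 5.1])
starts, lengths = get_transit_boundary_indices(t, transit_size=0.2)
print(starts, lengths)  # [0, 3] [3, 3]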
cd3775d72690eb4539e0434b0ac7f715d14374a6
3,638,340
import pandas as pd

def decode_gbe_string(s):
    """This helper function turns gbe output strings into dataframes."""
    columns, df = s.replace('","', ';').replace('"', '').split('\n')
    df = pd.DataFrame([column.split(',') for column in df.split(';')][:-1]).transpose().ffill().iloc[:-1]
    df.columns = [c.replace('tr_', '') for c in columns.split(',')[:-1]]
    return df
0a2d262b2653f736ef8ae7c7ed4b969faf80e9bf
3,638,342
import re

def get_scihub_namespaces(xml):
    """Take an xml string and return a dict of namespace prefixes to namespaces mapping."""
    nss = {}
    matches = re.findall(r'\s+xmlns:?(\w*?)\s*=\s*[\'"](.*?)[\'"]', xml.decode('utf-8'))
    for match in matches:
        prefix, ns = match[0], match[1]
        if prefix == '':
            prefix = '_default'
        nss[prefix] = ns
    return nss
b1d5a32d7583a655c59fa5175bdd133899bf6223
3,638,343
def valid_verify_email(form, email):
    """Returns True if the email field matches its verification field and no
    account already exists for that address."""
    try:
        if form.email.data != form.email_verify.data:
            raise ValidationError('Email address is not the same')
        if models.Account.pull_by_email(form.email.data) is not None:
            print('Account already exists')
            raise ValidationError('An account already exists for that email address')
    except Exception as e:
        raise ValidationError('Email is wrong, check it again: ' + str(e))
    return True
16073bb559e06759632323289f49e127bb9f8cb1
3,638,344
def _computePolyVal(poly, value):
    """
    Evaluates a polynomial at a specific value using Horner's method.

    :param poly: a list of polynomial coefficients (first item = highest
        degree to last item = constant term).
    :param value: number used to evaluate poly
    :return: a number, the evaluation of poly at value
    """
    # equivalent to numpy.polyval(poly, value)
    acc = 0
    for c in poly:
        acc = acc * value + c
    return acc
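# Hedged usage sketch (not part of the original snippet): evaluate
# p(x) = 2x^2 + 3x + 1 at x = 4 via Horner's rule:
# ((2)*4 + 3)*4 + 1 = 11*4 + 1 = 45.
print(_computePolyVal([2, 3, 1], 4))  # 45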
0377ba0757439409824b89b207485a99f804cb41
3,638,345
from io import StringIO

def fix_e26(source):
    """Format block comments."""
    if '#' not in source:
        # Optimization.
        return source
    string_line_numbers = multiline_string_lines(source, include_docstrings=True)
    fixed_lines = []
    sio = StringIO(source)
    for (line_number, line) in enumerate(sio.readlines(), start=1):
        if (line.lstrip().startswith('#') and
                line_number not in string_line_numbers):
            indentation = _get_indentation(line)
            line = line.lstrip()
            # Normalize beginning if not a shebang.
            if len(line) > 1:
                # Leave multiple spaces like '#    ' alone.
                if line.count('#') > 1 or line[1].isalnum():
                    line = '# ' + line.lstrip('# \t')
            fixed_lines.append(indentation + line)
        else:
            fixed_lines.append(line)
    return ''.join(fixed_lines)
ec569e442c2244421afa94cc8316478c55377220
3,638,346
from math import inf

def graph_distance(tree, node1, node2=None):
    """
    Return the shortest distance from node1 to node2, or just update every
    node's .distance attribute with its shortest distance to node1.
    """
    for node in tree.nodes():
        node.distance = inf
        node.back = None  # node backwards towards node1
    fringe = Queue([node1])  # Queue is assumed to be a FIFO helper with pop/push
    while fringe:
        node = fringe.pop()
        # print(f"looking at '{node}'")
        previous_distance = node.back.distance if node.back else -1
        node.distance = previous_distance + 1
        if node == node2:
            break
        for neighbor in node.neighbors():
            if neighbor.distance > node.distance:
                fringe.push(neighbor)
                neighbor.back = node
    if node2:
        return node2.distance
0764d2a687933631d592e1b6d40ceec8d629036c
3,638,347
def trunicos(b):
    """Return a unit-distance embedding of the truncated icosahedron graph."""
    p0 = star_radius(5) * root(1, 20, 1)
    p1 = p0 + root(1, 20, 1)
    p2 = mpc(b, 0.5)
    p3 = cu(p2, p1)
    p4 = cu(p3, p1 * root(1, 5, -1))
    p5 = cu(p4, p2 * root(1, 5, -1))
    return (symmetrise((p0, p1, p2, p3, p4, p5), "D5"),
            [abs(p5 - root(1, 5, -1) * conj(p5)) - 1])
018112497882a6f0a572cf2c1c222cdf36ca95e9
3,638,348
import torch

def histogram2d(
    x1: torch.Tensor,
    x2: torch.Tensor,
    bins: torch.Tensor,
    bandwidth: torch.Tensor,
    epsilon: float = 1e-10,
) -> torch.Tensor:
    """Function that estimates the 2d histogram of the input tensor.

    The calculation uses kernel density estimation, which requires a bandwidth
    (smoothing) parameter.

    Args:
        x1 (torch.Tensor): Input tensor to compute the histogram with shape :math:`(B, D1)`.
        x2 (torch.Tensor): Input tensor to compute the histogram with shape :math:`(B, D2)`.
        bins (torch.Tensor): The number of bins to use for the histogram :math:`(N_{bins})`.
        bandwidth (torch.Tensor): Gaussian smoothing factor with shape [1].
        epsilon (float): A scalar, for numerical stability. Default: 1e-10.

    Returns:
        torch.Tensor: Computed histogram of shape :math:`(B, N_{bins}, N_{bins})`.

    Examples:
        >>> x1 = torch.rand(2, 32)
        >>> x2 = torch.rand(2, 32)
        >>> bins = torch.linspace(0, 255, 128)
        >>> hist = histogram2d(x1, x2, bins, bandwidth=torch.tensor(0.9))
        >>> hist.shape
        torch.Size([2, 128, 128])
    """
    pdf1, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)
    pdf2, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)
    pdf = joint_pdf(kernel_values1, kernel_values2)
    return pdf
5e360f1e9350a29664e3beb1d0cc6ba3024647b9
3,638,349
import json

from django.http import HttpResponse

def webhooks_v2(request):
    """
    Handles all known webhooks from stripe, and calls signals.
    Plug in as you need.
    """
    if request.method != "POST":
        return HttpResponse("Invalid Request.", status=400)
    event_json = json.loads(request.body)
    event_key = event_json['type'].replace('.', '_')
    if event_key in WEBHOOK_MAP:
        WEBHOOK_MAP[event_key].send(sender=None, full_json=event_json)
    return HttpResponse(status=200)
afa86e189c417a147ae05fa46e89d985207c403b
3,638,350
from itertools import islice

def nth(iterable, n, default=None):
    """
    Returns the nth item or a default value.

    :param iterable: The iterable to retrieve the item from
    :param n: index of the item to retrieve. Must be >= 0
    :param default: the value to return if the index isn't valid
    :return: the nth item, or the default value if n isn't a valid index
    """
    return next(islice(iterable, n, None), default)
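# Hedged usage sketch (not part of the original snippet): works on any
# iterable, including generators, and falls back to the default when the
# index is out of range.
print(nth([10, 20, 30], 1))          # 20
print(nth(range(3), 7, default=-1))  # -1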
9f0eb8a31d8b4499d8538f6aefc9dba8231b27e0
3,638,351
import types

def _dict_items(typingctx, d):
    """Get dictionary iterator for .items()"""
    # `types` is assumed here to be a numba-style typing module that
    # provides DictItemsIterableType, not the stdlib `types` module.
    resty = types.DictItemsIterableType(d)
    sig = resty(d)
    codegen = _iterator_codegen(resty)
    return sig, codegen
6435320c6ba490b85c3ef4c065f55cef0d7d2c8e
3,638,352
def odd_desc(count):
    """
    Replace ___ with a single call to range to return a list of descending odd
    numbers ending with 1. E.g. if count = 2, return a list of 2 odds: [3, 1].
    See the test below if it is not clear.
    """
    return list(reversed(range(1, count * 2, 2)))
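# Hedged usage sketch (not part of the original snippet): range(1, 2n, 2)
# yields the first n odd numbers ascending; reversed() flips them.
print(odd_desc(4))  # [7, 5, 3, 1]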
2f90095c5b25f8ac33f3bb86d3f46e67932bc78a
3,638,353
import numpy as np
import pandas as pd

def retrieval_score(test_ratings: pd.DataFrame, recommender,
                    remove_known_pos: bool = False, metric: str = 'mrr') -> float:
    """Mean Average Precision / Mean Reciprocal Rank of first relevant item @ N."""
    N = recommender.N
    user_scores = []
    relevant_items = get_relevant_items(test_ratings)
    for user in recommender.users:
        if user in relevant_items.keys():
            predicted_items = recommender.get_recommendations(user, remove_known_pos)
            predicted_items = [item for item, _ in predicted_items]
            if metric == 'map':
                true_positives = np.intersect1d(relevant_items[user], predicted_items)
                score = len(true_positives) / N
            elif metric == 'mrr':
                score = np.mean([reciprocal_rank(item, predicted_items)
                                 for item in relevant_items[user]])
            else:
                raise ValueError(f"Unknown value {metric} for argument `metric`")
            user_scores.append(score)
    return np.mean(user_scores)
c7167eef0195496ea460dcbe63926028c430433e
3,638,354
import tensorflow as tf

def test_dump_load_keras_model_with_dict(tmpdir, save_and_load):
    """Test whether tensorflow ser/de-ser works for models returning dictionaries."""
    class DummyModel(tf.keras.Model):
        def __init__(self):
            super().__init__()

        def _random_method(self):
            pass

        def call(self, in_):
            out = {}
            out["b1"], out["b2"] = in_, in_
            return out

    in_ = tf.ones((1, 3))
    model = DummyModel()
    # this line is very important or tensorflow cannot trace the graph of the module
    model(in_)
    loaded = save_and_load(model, str(tmpdir))
    out = loaded(in_)
    assert set(out) == {"b1", "b2"}
    assert tf.is_tensor(out["b1"])
    assert tf.is_tensor(out["b2"])
    assert not hasattr(loaded, "_random_method")
5fcaf73e5a0b138a04091573782a2c03f4459f15
3,638,355
def stemmer_middle_high_german(text_l, rem_umlauts=True, exceptions=exc_dict):
    """
    text_l: text in string format
    rem_umlauts: choose whether to remove umlauts from string
    exceptions: hard-coded dictionary for the cases the algorithm fails
    """
    # Normalize text
    text_l = normalize_middle_high_german(
        text_l, to_lower_all=False, to_lower_beginning=True
    )
    # Tokenize text
    word_tokenizer = WordTokenizer("middle_high_german")
    text_l = word_tokenizer.tokenize(text_l)
    text = []
    for word in text_l:
        try:
            text.append(exceptions[word])  # test if word is in the exception dictionary
        except KeyError:
            if word[0].isupper():
                # MHG only uses upper case for locations, people, etc., so any
                # word that starts with a capital letter while not being at
                # the start of a sentence will automatically be excluded.
                text.append(word)
            elif word in MHG_STOPS:
                text.append(word)  # Filter stop words
            else:
                text.append(stem_helper(word, rem_umlaut=rem_umlauts))
    return text
608ec49ad36ee5ae7ad41fe4eab5d9f7c65eb609
3,638,356
def test_queue_trials(start_connected_emptyhead_cluster):
    """Tests explicit oversubscription for autoscaling.

    Tune oversubscribes a trial when `queue_trials=True`, but does not block
    other trials from running.
    """
    cluster = start_connected_emptyhead_cluster
    runner = TrialRunner()

    def create_trial(cpu, gpu=0):
        kwargs = {
            "resources": Resources(cpu=cpu, gpu=gpu),
            "stopping_criterion": {
                "training_iteration": 3
            }
        }
        return Trial("__fake", **kwargs)

    runner.add_trial(create_trial(cpu=1))
    with pytest.raises(TuneError):
        runner.step()  # run 1
    del runner

    executor = RayTrialExecutor(queue_trials=True)
    runner = TrialRunner(trial_executor=executor)
    cluster.add_node(num_cpus=2)
    cluster.wait_for_nodes()

    cpu_only = create_trial(cpu=1)
    runner.add_trial(cpu_only)
    runner.step()  # add cpu_only trial

    gpu_trial = create_trial(cpu=1, gpu=1)
    runner.add_trial(gpu_trial)
    runner.step()  # queue gpu_trial

    # This tests that the cpu_only trial should bypass the queued trial.
    for i in range(3):
        runner.step()
    assert cpu_only.status == Trial.TERMINATED
    assert gpu_trial.status == Trial.RUNNING

    # Scale up
    cluster.add_node(num_cpus=1, num_gpus=1)
    cluster.wait_for_nodes()

    for i in range(3):
        runner.step()
    assert gpu_trial.status == Trial.TERMINATED
fed9fe1458db15f871ccd4afff942c0d022a9b8a
3,638,357
import numpy as np

def get_bboxes(outputs, proposals, num_proposals, num_classes,
               im_shape, im_scale, max_per_image=100, thresh=0.001, nms_thresh=0.4):
    """
    Returns bounding boxes for detected objects, organized by class.

    Transforms the proposals from the region proposal network to bounding box
    predictions using the bounding box regressions from the classification
    network:
    (1) Apply bounding box regressions to the region proposals.
    (2) For each class, take proposed boxes where the corresponding objectness
        score is greater than THRESH.
    (3) Apply non-maximum suppression across classes using NMS_THRESH.
    (4) Limit the maximum number of detections over all classes to MAX_PER_IMAGE.

    Arguments:
        outputs (list of tensors): Faster-RCNN model outputs
        proposals (Tensor): Proposed boxes from the model's proposalLayer
        num_proposals (int): Number of proposals
        num_classes (int): Number of classes
        im_shape (tuple): Shape of image
        im_scale (float): Scaling factor of image
        max_per_image (int): Maximum number of allowed detections per image.
            Default is 100. None indicates no enforced maximum.
        thresh (float): Threshold for objectness score. Default is 0.001.
        nms_thresh (float): Threshold for non-maximum suppression. Default is 0.4.

    Returns:
        detections (list): List of bounding box detections, organized by class.
            Each element contains a numpy array of bounding boxes for detected
            objects of that class.
    """
    detections = [[] for _ in range(num_classes)]
    proposals = proposals.get()[:num_proposals, :]  # remove padded proposals
    boxes = proposals[:, 1:5] / im_scale  # scale back to real image space

    # obtain bounding box corrections from the frcn layers
    scores = outputs[2][0].get()[:, :num_proposals].T
    bbox_deltas = outputs[2][1].get()[:, :num_proposals].T

    # apply bounding box corrections to the region proposals
    pred_boxes = bbox_transform_inv(boxes, bbox_deltas)
    pred_boxes = clip_boxes(pred_boxes, im_shape)

    # Skip the background class, start processing from class 1
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > thresh)[0]

        # obtain class-specific boxes and scores
        cls_scores = scores[inds, j]
        cls_boxes = pred_boxes[inds, j * 4:(j + 1) * 4]
        cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)

        # apply non-max suppression
        keep = nms(cls_dets, nms_thresh)
        cls_dets = cls_dets[keep, :]

        # store results
        detections[j] = cls_dets

    # Limit to max_per_image detections *over all classes*
    if max_per_image is not None:
        # obtain flattened list of all image scores
        image_scores = np.hstack([detections[j][:, -1] for j in range(1, num_classes)])
        if len(image_scores) > max_per_image:
            # compute threshold needed to keep the top max_per_image
            image_thresh = np.sort(image_scores)[-max_per_image]
            # apply threshold
            for j in range(1, num_classes):
                keep = np.where(detections[j][:, -1] >= image_thresh)[0]
                detections[j] = detections[j][keep, :]
    return detections
09e5eb94f35672e77980c89e71fcb9ed6b460ab4
3,638,358
def air_transport_per_year_by_country(country):
    """Returns the number of passengers carried per year for the given country."""
    # Use a bound parameter instead of string formatting to avoid SQL injection.
    cur = get_db().execute(
        'SELECT Year, Value FROM Indicators '
        'WHERE CountryCode = ? AND IndicatorCode = "IS.AIR.PSGR"', (country,))
    air_transport = cur.fetchall()
    cur.close()
    return jsonify(air_transport)
4ca85c537c5bc7ccda332af977f1252b14672235
3,638,359
def outside_range(number, min_range, max_range):
    """Returns True if `number` lies outside the closed interval [`min_range`, `max_range`]."""
    return number < min_range or number > max_range
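# Hedged usage sketch (not part of the original snippet): the boundaries
# themselves count as inside the range.
print(outside_range(5, 1, 10))   # False
print(outside_range(10, 1, 10))  # False (boundary is inside)
print(outside_range(11, 1, 10))  # True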
dc3889fbabb74db38b8558537413ebc5bc613d05
3,638,360
import re

def is_string_constant(node):
    """Checks whether the :code:`node` is a string constant."""
    return is_leaf(node) and re.match(r'^"[^"]*"$', node) is not None
5a62c513bc856571e62c40b9d14bdefb67be4c79
3,638,361
from typing import List

def is_list_type(t) -> bool:
    """Return True if ``t`` is a ``List`` python type."""
    # print(t, getattr(t, '__origin__', None) is list)
    return t == list or is_pa_type(t, pa.types.is_list) or (
        hasattr(t, '__origin__') and t.__origin__ in (list, List)
    ) or (
        isinstance(t, dict) and is_list_type(t.get('type'))
    )
7da1ea98dccc4341a6db7a3e13e9f9bd278bd984
3,638,362
import datetime

def get_measure_of_money_supply():
    """
    Fetch China money supply data from Sina.

    Returns:
        The retrieved data table. Data starts from 1978-01.

    Examples:
        .. code-block:: python

            >>> from finance_datareader_py.sina import get_measure_of_money_supply
            >>> df = get_measure_of_money_supply()
            >>> print(df.iloc[0][df.columns[0]])
            >>> print(df.index[-1])
            >>> print(df.columns)

            1776196.11
            1978.8
            Index(['货币和准货币(广义货币M2)(亿元)', '货币和准货币(广义货币M2)同比增长(%)',
                   '货币(狭义货币M1)(亿元)', '货币(狭义货币M1)同比增长(%)',
                   '流通中现金(M0)(亿元)', '流通中现金(M0)同比增长(%)',
                   '活期存款(亿元)', '活期存款同比增长(%)', '准货币(亿元)', '准货币同比增长(%)',
                   '定期存款(亿元)', '定期存款同比增长(%)', '储蓄存款(亿元)', '储蓄存款同比增长(%)',
                   '其他存款(亿元)', '其他存款同比增长(%)'],
                  dtype='object')
    """
    num = (datetime.date.today().year + 1 - 1978) * 12
    return _get_mac_price(num=num, event=1, cate='fininfo', index='统计时间')
304cf05be6a226e7da46ec16e36a6632f02848c5
3,638,363
def make_inverter_path(wire, inverted):
    """Create site pip path through an inverter."""
    if inverted:
        return [('site_pip', '{}INV'.format(wire), '{}_B'.format(wire)),
                ('inverter', '{}INV'.format(wire))]
    else:
        return [('site_pip', '{}INV'.format(wire), wire)]
066c4bbad0f65fec587b12fc7a2947246401b877
3,638,365
import numpy as np

def constant(t, length):
    """
    ezgal.sfhs.constant( ages, length )

    Burst of constant star formation from t=0 to t=length.
    """
    if isinstance(t, np.ndarray):
        sfr = np.zeros(t.size)
        m = t <= length
        if m.sum():
            sfr[m] = 1.0
        return sfr
    else:
        return 0.0 if t > length else 1.0
bfbc32042512465c7fecc50d976b369ac8e2c9fe
3,638,367
def model_setup_fn(attrs):
    """Generate the setup function for models."""
    model = load_model(attrs['type'], attrs['data'])

    def func(self):
        self.model = model
        self.type = attrs['type']
        self.data = attrs['data']
        self.network_type = attrs['network_type']
        self.dto = attrs.get('dto')
        self.catbuffer = attrs.get('catbuffer')
        self.extras = attrs.get('extras', {})

    return func
4f0ffa9e1de3f60edef847faf319f3c5a4bef28d
3,638,368
import os

def _mkdir(space, dirname, mode=0o777, recursive=False, w_ctx=None):
    """mkdir - Makes directory"""
    mode = 0x7FFFFFFF & mode
    if not _valid_fname(dirname):
        space.ec.warn("mkdir() expects parameter 1 to "
                      "be a valid path, string given")
        return space.w_False
    if not is_in_basedir(space, 'mkdir', rpath.realpath(dirname)):
        return space.w_False
    try:
        if not os.path.isdir(dirname):
            if recursive:
                _recursive_mkdir(dirname, mode)
            else:
                os.mkdir(dirname, mode)
            return space.w_True
        else:
            space.ec.warn("mkdir(): No such file or directory")
            return space.w_False
    except OSError as e:
        space.ec.warn("mkdir(): %s" % os.strerror(e.errno))
        return space.w_False
    except TypeError:
        return space.w_False
c16b5e0100c50e300fcf9268383f20b1cb5c11b5
3,638,369
import decimal

def prepare_fixed_decimal(data, schema):
    """Converts decimal.Decimal to a fixed-length bytes array."""
    if not isinstance(data, decimal.Decimal):
        return data
    scale = schema.get('scale', 0)
    size = schema['size']

    # based on https://github.com/apache/avro/pull/82/
    sign, digits, exp = data.as_tuple()

    if -exp > scale:
        raise ValueError(
            'Scale provided in schema does not match the decimal')
    delta = exp + scale
    if delta > 0:
        digits = digits + (0,) * delta

    unscaled_datum = 0
    for digit in digits:
        unscaled_datum = (unscaled_datum * 10) + digit

    bits_req = unscaled_datum.bit_length() + 1

    size_in_bits = size * 8
    offset_bits = size_in_bits - bits_req

    mask = 2 ** size_in_bits - 1
    bit = 1
    for i in range(bits_req):
        mask ^= bit
        bit <<= 1

    if bits_req < 8:
        bytes_req = 1
    else:
        bytes_req = bits_req // 8
        if bits_req % 8 != 0:
            bytes_req += 1

    tmp = MemoryIO()

    if sign:
        unscaled_datum = (1 << bits_req) - unscaled_datum
        unscaled_datum = mask | unscaled_datum
        for index in range(size - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))
    else:
        for i in range(offset_bits // 8):
            tmp.write(mk_bits(0))
        for index in range(bytes_req - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))

    return tmp.getvalue()
5dc5ae8355842e175e1fa83394a63b37c04bdade
3,638,370
from typing import Any

def device_traits() -> dict[str, Any]:
    """Fixture that sets default traits used for devices."""
    return {"sdm.devices.traits.Info": {"customName": "My Sensor"}}
1ccaeac4a716706915654d24270c24dac0210977
3,638,371
import numpy as np

def calculate_equivalent_diameter(areas):
    """Calculate the equivalent diameters of a list or numpy array of areas.

    :param areas: List or numpy array of areas.
    :return: List of equivalent diameters.
    """
    areas = np.asarray(areas)
    diameters = np.sqrt(4 * areas / np.pi)
    return diameters.tolist()
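# Hedged usage sketch (not part of the original snippet): inverts
# A = pi * d**2 / 4, so a circle of area pi has equivalent diameter 2.
import numpy as np
print(calculate_equivalent_diameter([np.pi, 4 * np.pi]))  # [2.0, 4.0]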
a353883cf148819d9f298167e73acd60b89720e5
3,638,373
import sympy as sp

def truncation_error(stencil: list, deriv: int, interval: str = DEFAULT_INTERVAL):
    """
    Derive the leading-order error term of the finite difference equation
    based on the given stencil.

    Args:
        stencil (list of int): relative point numbers used for discretization.
        deriv (int): order of derivative.
        interval (str, optional): an interval symbol like `dx`.
            Defaults to DEFAULT_INTERVAL.

    Returns:
        sympy Expr: the leading-order error term.

    Examples:
        >>> from dictos import finite_difference as fd
        >>> fd.truncation_error([-1, 0, 1], deriv=1)
        -f^(3)*h**2/6
        >>> fd.truncation_error([-1, 0, 1], deriv=2)
        -f^(4)*h**2/12
        >>> fd.truncation_error([-2, -1, 0, 1, 2], deriv=1)
        f^(5)*h**4/30
        >>> fd.truncation_error([-2, -1, 0, 1, 2], deriv=2)
        f^(6)*h**4/90
    """
    coef = coefficients(stencil, deriv)
    # derive finite difference coefficients based on the given stencil

    x_set = create_coordinate_symbols(stencil, interval=interval)
    # create set of coordinate symbols from the stencil:
    # [-2, -1, 0, 1, 2] -> [-2*h, -h, 0, h, 2*h]

    num_term = len(x_set) + deriv
    f_ts = [taylor_series(x, num_term) for x in x_set]
    # calculate Taylor series around points in x_set.

    fd_eq = dot_product(coef, f_ts)
    # calculate weighted sum of Taylor series.
    # for instance, the 2nd-order 3-point central finite difference
    # for the 1st derivative is
    # fd_eq [= f(h)/2 - f(-h)/2] = f^(1)*h + f^(3)*h**3/6 + ...

    h = sp.symbols(interval)
    return sp.expand(
        sp.simplify(
            derivative_symbol(DEFAULT_DIFFERENTIAND, deriv)
            - sp.nsimplify(fd_eq / h ** deriv, rational=True, tolerance=1e-10)
        )
    ).as_leading_term(h)
    # extract the leading-order error term.
    # A finite difference formulation with error term is, for instance,
    # f^(1) = (f(h) - f(-h))/(2*h) - f^(3)*h**3/6 - ...
    # to extract error terms, reformulate fd_eq as
    # f^(1) - fd_eq/h**1 = -f^(3)*h**3/6 - ...
e3b8d312d551ed88ead3690b285659d56865e6e0
3,638,374
def cmd_renderurl(cfg, command, argv):
    """Renders a single url of your blog to stdout."""
    parser = build_parser('%prog renderurl [options] <url> [<url>...]')
    parser.add_option('--headers',
                      action='store_true', dest='headers', default=False,
                      help='Option that causes headers to be displayed '
                           'when rendering a single url.')
    (options, args) = parser.parse_args(argv)

    if not args:
        parser.print_help()
        return 0

    for url in args:
        p = build_douglas(cfg)
        base_url = cfg['base_url']
        if url.startswith(base_url):
            url = url[len(base_url):]
        p.run_render_one(url, options.headers)

    return 0
2073c71c459357c0b6a9661596cad34196fd6c24
3,638,376
from license_expression import Licensing

def combine_expressions(expressions, relation='AND', licensing=Licensing()):
    """
    Return a combined license expression string with relation, given a list of
    license expression strings.

    For example:
    >>> a = 'mit'
    >>> b = 'gpl'
    >>> combine_expressions([a, b])
    'mit AND gpl'
    >>> assert 'mit' == combine_expressions([a])
    >>> combine_expressions([])
    >>> combine_expressions(None)
    >>> combine_expressions(('gpl', 'mit', 'apache',))
    'gpl AND mit AND apache'
    """
    if not expressions:
        return
    if not isinstance(expressions, (list, tuple)):
        raise TypeError(
            'expressions should be a list or tuple and not: {}'.format(
                type(expressions)))
    # Remove duplicate elements in the expressions list, preserving order
    expressions = list(dict((x, True) for x in expressions).keys())
    if len(expressions) == 1:
        return expressions[0]
    expressions = [licensing.parse(le, simple=True) for le in expressions]
    if relation == 'OR':
        return str(licensing.OR(*expressions))
    else:
        return str(licensing.AND(*expressions))
8955522546a8b803caf0b1c6a3c6e8752cb35a19
3,638,377
import sqlite3

def get_prof_details(prof_id):
    """Returns the details of the professor in the same order as the DB."""
    cursor = sqlite3.connect('./db.sqlite3').cursor()
    # The parameter must be a one-element tuple: (prof_id,) not (prof_id)
    cursor.execute("SELECT * FROM professor WHERE prof_id = ?;", (prof_id,))
    return cursor.fetchone()
668652474009abdda36d3e97fb5d30074f0a2755
3,638,379
def available_help(mod, ending="_command"):
    """Returns the dochelp from all functions in this module that have _command at the end."""
    help_text = []
    for key in mod.__dict__:
        if key.endswith(ending):
            name = key.split(ending)[0]
            help_text.append(name + ":\n" + mod.__dict__[key].__doc__)
    return help_text
9afa1525c016aa74dd4b3eb91851890da3590524
3,638,382
from functools import reduce
import operator

def __s_polynomial(g, h):
    """
    Computes the S-polynomial of g, h.

    The S-polynomial is a polynomial built explicitly so that the leading
    terms cancel when combining g and h linearly.
    """
    deg_g = __multidegree(g)
    deg_h = __multidegree(h)
    max_deg = map(max, zip(deg_g, deg_h))
    R = g.parent()
    # Builds a polynomial with the variables raised to max_deg, in order
    vars = map(R, R.variable_names())
    x_pow_max_deg = reduce(operator.mul,
                           [x ** d for (d, x) in zip(max_deg, vars)], R(1))
    quo_g, _ = x_pow_max_deg.quo_rem(g.lt())
    quo_h, _ = x_pow_max_deg.quo_rem(h.lt())
    return quo_g * g - quo_h * h
49aa5b5b1dbebde1309aaa9fd2cb5947a010709f
3,638,383
def generate_map_chunk(size_x: int, size_y: int, biome_type: str,
                       x_offset: int = 0, y_offset: int = 0):
    """
    Function responsible for generating a map chunk in a specified or random
    biome type. A map chunk is a rectangular part of a map; the generated
    array is a nested list representing a 2d-array, where fields are integers
    indicating the elevation of a certain point. For generating the map chunk
    I use the OpenSimplex noise generator, which is a deterministic coherent
    (gradient) noise generator. The chunk is randomised by choosing a random
    seed for the generator object initialisation.

    Args:
        size_x (int): horizontal size of chunk in map pixels
        size_y (int): vertical size of chunk in map pixels
        biome_type (str): string indicating which biome type to use
        x_offset (int): integer indicating horizontal offset used in generating Simplex noise
        y_offset (int): integer indicating vertical offset used in generating Simplex noise

    Returns:
        map_array (:obj:`list` of :obj:`list` of :obj:`int`): list of lists
            containing the elevation number for specified coordinates
    """
    map_array = []
    for _ in range(size_x):
        map_array_part = []
        for _ in range(size_y):
            map_array_part.append(127)
        map_array.append(map_array_part)
    noise_maker = OpenSimplex(randint(-10000, 10000))
    for x in range(size_x):
        for y in range(size_y):
            for octave in range(OCTAVES):
                if map_array[x][y] > LEVELS.water or octave < 1:
                    map_array[x][y] = int_median_cutter(
                        0, 255,
                        map_array[x][y] + OCTAVE_AMPLITUDE[octave] *
                        noise_maker.noise2d((x + x_offset) / OCTAVE_WAVELENGTH[octave],
                                            (y + y_offset) / OCTAVE_WAVELENGTH[octave]))
    if biome_type == 'random':
        biome_type = ['ocean_islands', 'ocean', 'high_mountains', 'default'][randint(0, 3)]
    if biome_type == 'ocean_islands':
        for x in range(size_x):
            for y in range(size_y):
                map_array[x][y] = max(map_array[x][y] - 100, 20)
    elif biome_type == 'ocean':
        for x in range(size_x):
            for y in range(size_y):
                map_array[x][y] = max(int(map_array[x][y] * 0.3125), 20)
    elif biome_type == 'high_mountains':
        for x in range(size_x):
            for y in range(size_y):
                map_array[x][y] = min(map_array[x][y] + 100 + 10 *
                                      noise_maker.noise2d(x / OCTAVE_WAVELENGTH[1],
                                                          y / OCTAVE_WAVELENGTH[1]), 250)
    return map_array
42863b7058bfce23b1123c14db562483254bdc21
3,638,384
def test_process_cycle(zs2_file_name, verbose=True):
    """This is a test to check if util output changed in an incompatible manner.

    A zs2 file is read, converted to XML, and back-converted to a raw
    datastream."""
    if verbose:
        print('Decoding %s...' % zs2_file_name)
    data_stream = _parser.load(zs2_file_name)
    input_fingerprint = fingerprint(data_stream)
    if verbose:
        print('  Data fingerprint %s' % input_fingerprint)
    xml_data = data_stream_to_xml(data_stream)
    if verbose:
        print('  Length of XML: %.0f kB' % (len(xml_data) / 1024.))
    if verbose:
        print('Encoding XML to zs2...')
    enc_data_stream = xml_to_data_stream(xml_data)
    output_fingerprint = fingerprint(enc_data_stream)
    if verbose:
        print('  Data fingerprint: %s' % output_fingerprint)
    if input_fingerprint != output_fingerprint:
        raise ValueError('Decode/Encode cycle of %s is unsuccessful.' % zs2_file_name)
    return input_fingerprint == output_fingerprint
6417362a9bdaa4086865f0b8fc510dda186534f7
3,638,386
import numpy as np

def get_dev_risk(weight, error):
    """
    :param weight: shape [N, 1], the importance weight for N source samples
        in the validation set
    :param error: shape [N, 1], the error value for each source sample in the
        validation set (typically 0 for correct classification and 1 for
        wrong classification)
    """
    N, d = weight.shape
    _N, _d = error.shape
    assert N == _N and d == _d, "dimension mismatch!"
    weighted_error = weight * error
    cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1]
    var_w = np.var(weight, ddof=1)
    eta = -cov / var_w
    return np.mean(weighted_error) + eta * np.mean(weight) - eta
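# Hedged usage sketch (not part of the original snippet): this is a
# control-variate style estimate of the weighted risk; with importance
# weights that average to 1, eta shifts the estimate to reduce variance.
import numpy as np
rng = np.random.default_rng(0)
w = rng.uniform(0.5, 1.5, size=(100, 1))
e = rng.integers(0, 2, size=(100, 1)).astype(float)
print(get_dev_risk(w, e))  # a scalar risk estimate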
7278a8827dd48c341d9f294a3fed3a8b2e3c71ae
3,638,387
import torch

def skewness_fn(x, dim=1):
    """Calculates skewness of data "x" along dimension "dim"."""
    std, mean = torch.std_mean(x, dim)
    n = torch.Tensor([x.shape[dim]]).to(x.device)
    eps = 1e-6  # for stability
    sample_bias_adjustment = torch.sqrt(n * (n - 1)) / (n - 2)
    skewness = sample_bias_adjustment * (
        (torch.sum((x.T - mean.unsqueeze(dim).T).T.pow(3), dim) / n)
        / std.pow(3).clamp(min=eps)
    )
    return skewness
ae0bdea16c1461a2e407ed57279557bc8c7f56de
3,638,388
import random

def encrypt(message):
    """Self-developed encryption method that uses base conversion."""
    base = random.randint(3, 9)
    number_list = []
    for i in message:
        number_list.append(keys.index(i) + 1)
    converted_number_list = []
    for i in number_list:
        converted_number_list.append(convert(i, base))
    encryption_list = []
    for number in converted_number_list:
        cur = []
        for digit in str(number):
            cur.append(chars[int(digit)])
        encryption_list.append(cur)
    string_encryption_list = []
    for i in encryption_list:
        string_encryption_list.append(''.join([str(x) for x in i]))
    converted_base_number = convert(123, base)
    encrypted_base_list = []
    for i in str(converted_base_number):
        if i == '0':
            encrypted_base_list.append('?')
        elif i == '1':
            encrypted_base_list.append('{')
        elif i == '2':
            encrypted_base_list.append('[')
        elif i == '3':
            encrypted_base_list.append('/')
        elif i == '4':
            encrypted_base_list.append('$')
        elif i == '6':
            encrypted_base_list.append('@')
        elif i == '7':
            encrypted_base_list.append('>')
        # NOTE: digits '5' and '8' have no mapping here and are silently dropped
    return insert(list('|'.join(string_encryption_list)), encrypted_base_list)
967d45341fb8a5ec87f946ba6fc0a603f491485e
3,638,389
def get_signature_algorithm(algorithm_type_string):
    """Convert a string into a key_type (TFTF_SIGNATURE_TYPE_xxx).

    Returns a numeric key_type, or raises an exception if invalid.
    """
    try:
        return TFTF_SIGNATURE_ALGORITHMS[algorithm_type_string]
    except KeyError:
        raise ValueError("Unknown algorithm type: '{0:s}'".
                         format(algorithm_type_string))
41ca226dc7e6c1c0f8d5b8592803d6555630902c
3,638,390
import numpy as np
import pandas as pd

def corrgroups60(display=False):
    """A simulated dataset with tight correlations among distinct groups of features."""
    # save the global random state, then use a constant seed
    old_state = np.random.get_state()
    np.random.seed(0)

    # generate dataset with known correlation
    N = 1000
    M = 60

    # set one coefficient from each group of 3 to 1
    beta = np.zeros(M)
    beta[0:30:3] = 1

    # build a correlation matrix with groups of 3 tightly correlated features
    x = np.ones(M)
    mu = np.zeros(M)
    C = np.eye(M)
    for i in range(0, 30, 3):
        C[i, i + 1] = C[i + 1, i] = 0.99
        C[i, i + 2] = C[i + 2, i] = 0.99
        C[i + 1, i + 2] = C[i + 2, i + 1] = 0.99

    f = lambda X: np.matmul(X, beta)

    # Make sure the sample correlation is a perfect match
    X_start = np.random.randn(N, M)
    X_centered = X_start - X_start.mean(0)
    Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0]
    W = np.linalg.cholesky(np.linalg.inv(Sigma)).T
    X_white = np.matmul(X_centered, W.T)
    # ensure this decorrelates the data
    assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6

    # create the final data
    X_final = np.matmul(X_white, np.linalg.cholesky(C).T)
    X = X_final
    y = f(X) + np.random.randn(N) * 1e-2

    # restore the previous numpy random state
    np.random.set_state(old_state)

    return pd.DataFrame(X), y
5a80116890ff262a164f48421871107c4cdaf8a6
3,638,392
import numpy as np

def alpha_nu_gao08(profile, **kwargs):
    """Log-normal distribution of alpha about the alpha--peak height relation from Gao+2008."""
    z = kwargs["z"]
    alpha = kwargs["alpha"]
    # scatter in dex
    if "sigma_alpha" in kwargs:
        sigma_alpha = kwargs["sigma_alpha"]
    else:
        # take scatter from Dutton & Maccio 2014
        sigma_alpha = 0.16 + 0.03 * z
    try:
        M = profile.MDelta(z, "vir")
        nu = peakHeight(M, z)
    except Exception:
        # can't find peak height, reject model
        return -np.inf
    alpha_model = 0.155 + 0.0095 * nu**2
    return lnlike_gauss(np.log10(alpha_model), np.log10(alpha), sigma_alpha)
393fdc6c87d4bf61fc367e7f9033bac24b9d6cea
3,638,393
import base64

def get_feed_entries(helper, name, stats):
    """Pulls the indicators from the minemeld feed."""
    feed_url = helper.get_arg('feed_url')
    feed_creds = helper.get_arg('credentials')
    feed_headers = {}

    # If auth is specified, add it as a header.
    if feed_creds is not None:
        auth = '{0}:{1}'.format(feed_creds['username'], feed_creds['password'])
        auth = base64.b64encode(auth.encode()).decode()
        feed_headers['Authorization'] = 'Basic {0}'.format(auth)

    # Pull events as json.
    resp = helper.send_http_request(
        url=feed_url,
        method='GET',
        parameters={'v': 'json', 'tr': 1},
        headers=feed_headers)
    # Raise exceptions on problems.
    resp.raise_for_status()
    feed_entries = resp.json()

    # Return the normalized events to be saved to the kv store.
    return normalized(name, feed_entries)
e881eebaaa9c31bc8d0abdd8b8f4aaeb9efcffe6
3,638,394
def get_skeleton_definition(character):
    """
    Returns skeleton definition of the given character.

    :param character: str, HIK character name
    :return: dict
    """
    hik_bones = dict()
    hik_count = maya.cmds.hikGetNodeCount()
    for i in range(hik_count):
        bone = get_skeleton_node(character, i)
        if not bone:
            continue
        hik_name = maya.cmds.GetHIKNodeName(i)
        hik_bones[hik_name] = {'bone': bone, 'hik_id': i}
    return hik_bones
f76d4613f3a8adec649ea689d049ccff2966783c
3,638,395
import cantera as ct

def get_f_a_st(
        fuel="C3H8",
        oxidizer="O2:1 N2:3.76",
        mech="gri30.cti"
):
    """
    Calculate the stoichiometric fuel/air ratio of an undiluted mixture using
    Cantera. Calculates using only x_fuel to allow for a compound oxidizer
    (e.g. air).

    Parameters
    ----------
    fuel : str
    oxidizer : str
    mech : str
        mechanism file to use

    Returns
    -------
    float
        stoichiometric fuel/air ratio
    """
    if oxidizer.lower() == "air":
        oxidizer = "O2:1 N2:3.76"

    gas = ct.Solution(mech)
    gas.set_equivalence_ratio(1, fuel, oxidizer)
    x_fuel = gas.mole_fraction_dict()[fuel]
    return x_fuel / (1 - x_fuel)
ecd711d8a1d5499e47ccbedebfb5641aec7c7a8b
3,638,396
def get_parser_args(args=None):
    """
    Transform args (``None``, ``str``, ``list``, ``dict``) to
    parser-compatible (list of strings) args.

    Parameters
    ----------
    args : string, list, dict, default=None
        Arguments. If dict, '--' are added in front and there should not be
        positional arguments.

    Returns
    -------
    args : None, list of strings.
        Parser arguments.

    Notes
    -----
    All non-strings are converted to strings with :func:`str`.
    """
    if isinstance(args, str):
        return args.split()
    if isinstance(args, list):
        return list(map(str, args))
    if isinstance(args, dict):
        toret = []
        for key in args:
            toret += ['--%s' % key]
            if isinstance(args[key], list):
                toret += [str(arg) for arg in args[key]]
            else:
                val = str(args[key])
                if val:
                    toret += [val]
        return toret
    return args
41b607a6ebf12526efcd38469192b398419327bf
3,638,397
def parse_time_to_min(time):
    """Convert a duration string to minutes (as a float).

    Example
    -------
    >>> parse_time_to_min("2m 30s")
    2.5
    """
    if " " in time:
        return sum([parse_time_to_min(t) for t in time.split(" ")])
    time = time.strip()
    for unit, value in time_units.items():
        if time.endswith(unit):
            number = float(time.replace(unit, ""))
            return number * value / time_units["m"]
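# Hedged usage sketch (not part of the original snippet): `time_units` is
# assumed to map unit suffixes to durations in a common base, e.g. seconds.
time_units = {"h": 3600, "m": 60, "s": 1}
print(parse_time_to_min("2m 30s"))  # 2.5
print(parse_time_to_min("1h"))      # 60.0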
6bf9656694ba4787bf9fd3e7c269d9c84e3ed143
3,638,398
def relate_stream_island(stream_layer, island_layer):
    """
    Return the streams inside or delimiting islands. The topology is defined
    by DE-9IM matrices.

    :param stream_layer: the layer of the river network
    :stream_layer type: QgisVectorLayer object (lines)
    :param island_layer: the layer of the islands
    :island_layer type: QgisVectorLayer object (polygons)
    :return: list of lists of all the streams that make up the islands
    :rtype: list of lists of QgisFeatures objects
    """
    # Get the features of the stream and island layers
    streams_list = list(stream_layer.dataProvider().getFeatures())
    islands_list = list(island_layer.dataProvider().getFeatures())
    # Initialise output list
    streams_in_island_list = []
    for island in islands_list:
        # Initialise list of output list
        island_list = []
        # Get the AbstractGeometry object for the current island
        current_island_abstract_geom = island.geometry().constGet()
        for stream in streams_list:
            # Get the AbstractGeometry object for the current stream
            current_stream_abstract_geom = stream.geometry().constGet()
            # Create QgsGeometryEngine object
            engine = QgsGeometry.createGeometryEngine(current_stream_abstract_geom)
            # Prepare the geometry, so that subsequent calls to spatial
            # relation methods are much faster
            engine.prepareGeometry()
            # Test if the current stream fits one of the DE-9IM matrices
            if (engine.relatePattern(current_island_abstract_geom, 'F1FF0F212')
                    or engine.relatePattern(current_island_abstract_geom, '1FF00F212')
                    or engine.relatePattern(current_island_abstract_geom, '1FF0FF212')
                    or engine.relatePattern(current_island_abstract_geom, '1FFF0F212')):
                # If so, then the current stream is appended to the output list
                island_list.append(stream)
        streams_in_island_list.append(island_list)
    return streams_in_island_list
1d6c90349808f6364cc8b1461b09a0c31df6d9d3
3,638,399
def stringify_array(v, maxDepth=None, maxItems=-1, maxStrlen=-1):
    """
    Convert a value (possibly nested) to a string representation.

    Parameters:
        v : the data to convert
        maxDepth (int|None): if > 0, then ellipsise structures deeper than this
        maxItems (int|-1): if > 0, then ellipsise lists longer than this or
            dicts with more than this many items
        maxStrlen (int|-1): if > 0, then ellipsise strings longer than this

    Returns:
        tuple(depth:int, str): the depth (explored) of the structure and the
        string representation of the data
    """
    return _stringify_array(v, maxDepth=maxDepth, maxItems=maxItems, maxStrlen=maxStrlen)
17bf5008c7a263c102f0fa03fdcc708c0fcc9a0f
3,638,400
import pickle

def rpickle(picke_file, state=None):
    """Restore the saved state of the gps files treated."""
    logger.warning('Running rpickle ...')
    results = []
    if picke_file.isfile():
        with open(picke_file, 'rb') as read_pickle:
            results += pickle.load(read_pickle)
    # print(results)
    return results
a3f0cc46d6992032d008053e679ec75c64805141
3,638,401
from unittest import mock

def should_print(test_function):
    """should_print is a helper for testing code that uses print.

    For example, if you had a function like this:

    ```python
    def hello(name):
        print('Hello,', name)
    ```

    You might want to test that it prints "Hello, Nate" if you give it the
    name "Nate". To do that, you could write the following test.

    ```python
    @should_print
    def test_hello_nate(output):
        hello("Nate")
        assert output == "Hello, Nate"
    ```

    There are a couple pieces of this:

    - Put `@should_print` directly above the test function.
    - Add an `output` parameter to the test function.
    - Assert against `output`
    """
    return mock.patch("sys.stdout", new_callable=FakeStringIO)(test_function)
16a1f675d3dced411fe5a6ffdc566db61ca7890f
3,638,403
def produce_segmentation(indices: list[list[int]], wav_name: str) -> list[dict]:
    """Produces the segmentation yaml content from the indices of the probabilistic_dac.

    Args:
        indices (list[list[int]]): output of the probabilistic_dac function
        wav_name (str): the name of the wav file (with the .wav suffix)

    Returns:
        list[dict]: the content of the segmentation yaml
    """
    talk_segments = []
    for ind in indices:
        size = len(ind) / TARGET_SAMPLE_RATE
        if size < NOISE_THRESHOLD:
            continue
        start = ind[0] / TARGET_SAMPLE_RATE
        talk_segments.append(
            {
                "duration": round(size, 6),
                "offset": round(start, 6),
                "rW": 0,
                "uW": 0,
                "speaker_id": "NA",
                "wav": wav_name,
            }
        )
    return talk_segments
cd8267e90f5e69589325a4e261d3f8136b36cc53
3,638,405
from django.contrib.contenttypes.models import ContentType

def trac_get_tracs_for_object(obj, user=None, trac_type=None):
    """Returns tracs for a specific object."""
    content_type = ContentType.objects.get_for_model(type(obj))
    qs = Trac.objects.filter(content_type=content_type, object_id=obj.pk)
    if user:
        qs = qs.filter(user=user)
    if trac_type:
        qs = qs.filter(trac_type=trac_type)
    return qs
9617fc5e417e40fb27bfe90b2f87434902cdb70b
3,638,406
from urllib.parse import urlparse

def size_from_ftp(ftp, url):
    """Get size of a file on an FTP server.

    Parameters
    ----------
    ftp : FTP
        An open ftplib FTP session.
    url : str
        File URL.

    Returns
    -------
    int
        Size in bytes.
    """
    url = urlparse(url)
    return ftp.size(url.path)
50d21fa95669a9863b32de3a67eda78de713fe7c
3,638,407
def set_name_line(hole_lines, name):
    """Define the label of each line of the hole.

    Parameters
    ----------
    hole_lines : list
        a list of line objects of the slot
    name : str
        the name to give to the lines

    Returns
    -------
    hole_lines : list
        List of line objects with labels
    """
    for ii in range(len(hole_lines)):
        hole_lines[ii].label = name + "_" + str(ii)
    return hole_lines
a57667f269dac62d39fa127b2a4bcd438a8a989b
3,638,408
import numpy as np
import torch

def dist_to_boxes(points, boxes):
    """
    Calculates combined distance for each point to all boxes.

    :param points: (N, 3)
    :param boxes: (M, 7) [x, y, z, h, w, l, ry]
    :return: distances_array: (M, N) torch.Tensor of per-box point distances
    """
    distances_array = torch.Tensor([])
    box_corners = kitti_utils.boxes3d_to_corners3d(boxes)
    for box in box_corners:
        minX = min(box[:, 0])
        minY = min(box[:, 1])
        minZ = min(box[:, 2])
        maxX = max(box[:, 0])
        maxY = max(box[:, 1])
        maxZ = max(box[:, 2])
        centroid = np.array([(maxX + minX) / 2, (maxY + minY) / 2, (maxZ + minZ) / 2])
        dists_to_curr_box = dist_to_box_centroid(
            torch.from_numpy(points), torch.from_numpy(centroid)).reshape(1, len(points))
        distances_array = torch.cat((distances_array.float(), dists_to_curr_box.float()), 0)
    return distances_array
b3305ec8a4c8d5e0d5cf520e9e22d2c5377fe1de
3,638,409
from numpy import array
from PIL import Image

def blackwhite2D(data, xsize=None, ysize=None, show=1):
    """blackwhite2D(data, xsize=None, ysize=None, show=1) - display list or
    array data as a black-and-white image; defaults to a popup window of
    (300x300) pixels."""
    if isinstance(data, list):
        data = array(data)
    w, h = data.shape[1], data.shape[0]
    d = preprocess(data)
    im = Image.new('L', (w, h))
    for j in range(h):
        for i in range(w):
            ij = i + j * w
            im.putpixel((i, j), d[j][i])
    if show:
        if xsize is None:
            xsize = 300
        if ysize is None:
            ysize = 300
        resizeImage(im, xsize, ysize)
    return im
78a76fab9f3eb989697b695c8d7b82c877f8dc9a
3,638,411
def contains_digit(s):
    """Return True if the string contains at least one digit character."""
    isdigit = str.isdigit
    return any(map(isdigit, s))
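# Hedged usage sketch (not part of the original snippet): any() short-circuits
# on the first digit found.
print(contains_digit("file_v2.txt"))  # True
print(contains_digit("readme"))       # False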
941bcee8b6fbca6a60a8845f88a3b5765e3711bb
3,638,412
import numpy as np

def to_signed(dtype):
    """
    Return a dtype that can hold the data of the passed dtype but is signed.
    Raise ValueError if no such dtype exists.

    Parameters
    ----------
    dtype : `numpy.dtype`
        dtype whose values the new dtype needs to be able to represent.

    Returns
    -------
    `numpy.dtype`
    """
    if dtype.kind == "u":
        if dtype.itemsize == 8:
            raise ValueError("Cannot losslessly convert uint64 to int.")
        dtype = "int{:d}".format(min(dtype.itemsize * 2 * 8, 64))
    return np.dtype(dtype)
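# Hedged usage sketch (not part of the original snippet): unsigned dtypes are
# widened so the full value range still fits; signed dtypes pass through.
import numpy as np
print(to_signed(np.dtype("uint8")))   # int16
print(to_signed(np.dtype("uint32")))  # int64
print(to_signed(np.dtype("int16")))   # int16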
7be15d324eef6f9686a5866a92ad365a67949424
3,638,413
def listen_for_wakeword():
    """Continuously detect the appearance of the wakeword in the audio stream.

    Higher priority than the listen() function.

    Returns:
        (bool): True if the wakeword was detected, False otherwise.
    """
    gotWakeWord = core.listen_for_wakeword()
    return gotWakeWord
49f600ed303fb9bea11cb9247653c66272fc5491
3,638,414
from scipy.stats import kurtosis as scipy_kurtosis

def kurtosis(x, y):
    """
    Calculate the kurtosis of the probability distribution of the forecast
    error, given an observation vector and a forecast vector. Both vectors
    must have the same length, so pairs of elements with the same index are
    compared.

    Description:
        Kurtosis is a measure of the magnitude of the peak of the
        distribution, or, conversely, how fat-tailed the distribution is, and
        is the fourth standardized moment. The difference between the
        kurtosis of a sample distribution and that of the normal distribution
        is known as the excess kurtosis. In the subsequent analysis, the term
        kurtosis will be treated synonymously with excess kurtosis. A
        distribution with a positive kurtosis value is known as leptokurtic,
        which indicates a peaked distribution, whereas a negative kurtosis
        indicates a flat data distribution, known as platykurtic. The
        pronounced peaks of the leptokurtic distribution represent a large
        number of very small forecast errors.

    :param x: vector of observations
    :param y: vector of forecasts
    :returns: Kurtosis
    """
    # the aliased import avoids shadowing by this function's own name
    return scipy_kurtosis(x - y)
b4242f58db8a48dbe9bec03ec641ae78858c28f7
3,638,416
def preprocess_text(sentence):
    """Handle some weird edge cases in parsing, like 'i' needing to be
    capitalized to be correctly identified as a pronoun."""
    cleaned = []
    words = sentence.split(' ')
    for w in words:
        if w == 'i':
            w = 'I'
        if w == "i'm":
            w = "I'm"
        cleaned.append(w)
    return ' '.join(cleaned)
4e1d69eaf0adc1ede6bc67563e499602e320e76b
3,638,417
def csr_scale_rows(*args):
    """
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, npy_bool_wrapper [] Ax, npy_bool_wrapper const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, signed char [] Ax, signed char const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, unsigned char [] Ax, unsigned char const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, short [] Ax, short const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, unsigned short [] Ax, unsigned short const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, int [] Ax, int const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, unsigned int [] Ax, unsigned int const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, long long [] Ax, long long const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, unsigned long long [] Ax, unsigned long long const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, float [] Ax, float const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, double [] Ax, double const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, long double [] Ax, long double const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, npy_cfloat_wrapper [] Ax, npy_cfloat_wrapper const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, npy_cdouble_wrapper [] Ax, npy_cdouble_wrapper const [] Xx)
    csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj, npy_clongdouble_wrapper [] Ax, npy_clongdouble_wrapper const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, npy_bool_wrapper [] Ax, npy_bool_wrapper const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, signed char [] Ax, signed char const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned char [] Ax, unsigned char const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, short [] Ax, short const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned short [] Ax, unsigned short const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, int [] Ax, int const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned int [] Ax, unsigned int const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, long long [] Ax, long long const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned long long [] Ax, unsigned long long const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, float [] Ax, float const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, double [] Ax, double const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, long double [] Ax, long double const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, npy_cfloat_wrapper [] Ax, npy_cfloat_wrapper const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, npy_cdouble_wrapper [] Ax, npy_cdouble_wrapper const [] Xx)
    csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj, npy_clongdouble_wrapper [] Ax, npy_clongdouble_wrapper const [] Xx)
    """
    return _csr.csr_scale_rows(*args)
887f6c51d297649232d6fd297380c551dbb47008
3,638,420
def complexity_hjorth(signal):
    """**Hjorth's Complexity and Parameters**

    Hjorth Parameters are indicators of statistical properties initially
    introduced by Hjorth (1970) to describe the general characteristics of an
    EEG trace in a few quantitative terms, but which can be applied to any
    time series. The parameters are activity, mobility, and complexity.
    NeuroKit returns complexity directly in the output tuple, but the other
    parameters can be found in the dictionary.

    * The **activity** parameter is simply the variance of the signal, which
      corresponds to the mean power of a signal (if its mean is 0).

      .. math:: Activity = \\sigma_{signal}^2

    * The **complexity** parameter gives an estimate of the bandwidth of the
      signal, which indicates the similarity of the shape of the signal to a
      pure sine wave (for which the value converges to 1). In other words, it
      is a measure of the "excessive details" with reference to the "softest"
      possible curve shape. The complexity parameter is defined as the ratio
      of the mobility of the first derivative of the signal to the mobility
      of the signal.

      .. math:: Complexity = \\sigma_{d} / \\sigma_{signal}

    * The **mobility** parameter represents the mean frequency or the
      proportion of standard deviation of the power spectrum. This is defined
      as the square root of the variance of the first derivative of the
      signal divided by the variance of the signal.

      .. math:: Mobility = \\frac{\\sigma_{dd} / \\sigma_{d}}{Complexity}

    :math:`d` and :math:`dd` represent the first and second derivatives of
    the signal, respectively.

    Hjorth (1970) illustrated the parameters as follows:

    .. figure:: ../img/hjorth1970.png
       :alt: Figure from Hjorth (1970).
       :target: http://dx.doi.org/10.1016/0013-4694(70)90143-4

    See Also
    --------
    .fractal_petrosian

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    hjorth : float
        Hjorth's Complexity.
    info : dict
        A dictionary containing the additional Hjorth parameters, such as
        ``"Mobility"`` and ``"Activity"``.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a signal with a duration of 2s
      signal = nk.signal_simulate(duration=2, frequency=5)

      # Compute Hjorth's Complexity
      complexity, info = nk.complexity_hjorth(signal)
      complexity
      info

    References
    ----------
    * Hjorth, B (1970) EEG Analysis Based on Time Domain Properties.
      Electroencephalography and Clinical Neurophysiology, 29, 306-310.
      http://dx.doi.org/10.1016/0013-4694(70)90143-4

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Calculate derivatives
    dx = np.diff(signal)
    ddx = np.diff(dx)

    # Calculate variance and its derivatives
    x_var = np.var(signal)  # = activity
    dx_var = np.var(dx)
    ddx_var = np.var(ddx)

    # Mobility and complexity
    mobility = np.sqrt(dx_var / x_var)
    complexity = np.sqrt(ddx_var / dx_var) / mobility
    return complexity, {"Mobility": mobility, "Activity": x_var}
af5b5fb8925055da4cf48facadd1bed257e40f76
3,638,422
import pandas


def load_gecko():
    """
    Target variable is column "A375 Percent rank".
    """
    data_nonessential = pandas.read_excel(
        settings.pj(settings.offtarget_data_dir,
                    'GeCKOv2_Non_essentials_Achilles_A375_complete.xls'))  # (4697, 31)
    data_all_A375 = pandas.read_csv(
        settings.pj(settings.offtarget_data_dir,
                    'GeckoAvanaSameUnits/GeCKOv2_DMSO_lentiGuide_A375.txt'),
        sep="\t")  # (121964, 25)
    guides = data_nonessential['sgRNA Sequence'].values
    # Keep the rows of the A375 screen whose guides target non-essential genes
    data = data_all_A375[data_all_A375["sgRNA Sequence"].isin(guides)]
    # missing_guides = set(guides).difference(set(data_all_A375["sgRNA Sequence"].values))
    # tmp = set(data_all_A375["sgRNA Sequence"].values).difference(set(guides))
    return data
31c2db07261fb1b242f4c52808c3b7e6312b1e54
3,638,423
def get_sample_eclat(name):
    """Read a tweet sample from a sample file and return it in a format
    eclat can process.
    """
    # `cols` is expected to be a module-level list of labels for the binary
    # features stored in columns 4-11 of the sample file.
    sampleFile = open(name)
    X = []
    Y = []
    line = sampleFile.readline()
    while line != '':
        row = line.split()
        Y.append(int(row[0]))
        x = []
        if int(row[3]) < 50:
            x.append('#followers: 0-49')
        elif int(row[3]) < 100:
            x.append('#followers: 50-99')
        elif int(row[3]) < 500:
            x.append('#followers: 100-499')
        elif int(row[3]) < 1000:
            x.append('#followers: 500-999')
        elif int(row[3]) < 5000:
            x.append('#followers: 1000-4999')
        elif int(row[3]) < 10000:
            x.append('#followers: 5000-9999')
        else:
            x.append('#followers: 10000+')
        for i in range(4, 12):
            if int(row[i]):
                x.append(cols[i - 3])
        if int(row[12]) == 0:
            x.append('Sentiment: Negative')
        elif int(row[12]) == 1:
            x.append('Sentiment: Neutral')
        else:
            x.append('Sentiment: Positive')
        x.append('Topic: ' + row[13])
        X.append(x)
        # Skip the next eight lines of the record before reading the next sample
        for _ in range(8):
            sampleFile.readline()
        line = sampleFile.readline()
    return X, Y
dd5daa2cd19b087c4b59379b8d3b2c2ea9ec27de
3,638,424
from datetime import datetime


def submission_storage_path(instance, filename):
    """Build the storage path for a submitted solution file."""
    string = '/'.join(['submissions',
                       instance.submission_user.user_nick,
                       str(instance.submission_question.question_level),
                       str(instance.submission_question.question_level_id)])
    string += '/' + datetime.now().strftime("%I:%M%p-%m-%d-%Y")
    string += filename
    return string
587785869da8906234bb572e9d635a892dc3270b
3,638,425
def distance_to_center(n):
    """Return Manhattan distance to center of spiral of length <n>."""
    dist = distances_to_center()
    for _ in range(n - 1):
        next(dist)
    return next(dist)
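The helper above consumes a `distances_to_center` generator defined elsewhere. As a hedged, self-contained alternative, assuming the spiral is the usual square (Ulam) spiral with cell 1 at the center, the same Manhattan distance can be computed in closed form from the ring index; `spiral_distance` is a hypothetical name, not the original helper:

import math

def spiral_distance(n):
    # Ring k ends at (2k + 1)**2; the four side midpoints of ring k sit
    # exactly k steps from the center.
    if n == 1:
        return 0
    k = math.ceil((math.sqrt(n) - 1) / 2)
    ring_max = (2 * k + 1) ** 2
    midpoint_offsets = [abs(n - (ring_max - (2 * j + 1) * k)) for j in range(4)]
    return k + min(midpoint_offsets)

assert spiral_distance(12) == 3 and spiral_distance(23) == 2 and spiral_distance(1024) == 31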
1301d0370a3f3dca72fb003073522376fd0790c0
3,638,426
from typing import Any, List, Mapping, Optional
import inspect


async def _assert_preconditions_async(
        preconditions: List[List[Contract]],
        resolved_kwargs: Mapping[str, Any]) -> Optional[BaseException]:
    """Assert that the preconditions of an async function hold."""
    exception = None  # type: Optional[BaseException]

    # Assert the preconditions in groups. This is necessary to implement "require else" logic
    # when a class weakens the preconditions of its base class.
    for group in preconditions:
        exception = None
        for contract in group:
            assert exception is None, "No exception as long as pre-condition group is satisfiable."
            condition_kwargs = select_condition_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)
            if inspect.iscoroutinefunction(contract.condition):
                check = await contract.condition(**condition_kwargs)
            else:
                check_or_coroutine = contract.condition(**condition_kwargs)
                if inspect.iscoroutine(check_or_coroutine):
                    check = await check_or_coroutine
                else:
                    check = check_or_coroutine
            if not_check(check=check, contract=contract):
                exception = _create_violation_error(contract=contract, resolved_kwargs=resolved_kwargs)
                break

        # The group of preconditions was satisfied, no need to check the other groups.
        if exception is None:
            break

    return exception
d89c355ed56e350a619e1d7324c8341bb74f827c
3,638,428
import re


def moveGeneratorFromStrList(betaStringList, string_mode=True):
    """
    Generate the final output of a move sequence as a list of dictionaries.

    Input: ['F5-LH', 'F5-RH', 'E8-LH', 'H10-RH', 'E13-LH', 'I14-RH', 'E15-LH', 'G18-RH']
    Length of the list: how many moves in this climb to the target hold. Target holds run
    from the third-order hold to the last hold.

    Each dictionary carries all the information needed to evaluate the grade or analyze the
    style of a move for a human. This is a basic building block of the route.

    TargetHoldString: "A10" for example
    TargetHoldHand: "RH" for example
    TargetHoldScore: the difficulty of holding the target hold with the given hand
    RemainingHoldString: "A10" for example
    RemainingHoldHand: hand that stays on the wall during the move
    RemainingHoldScore: difficulty score of the remaining hold
    MovingHoldString: "A10" for example
    MovingHoldHand: hand that travels during the move
    MovingHoldScore: difficulty score of the moving hold
    dxdyMtoT: vector target - moving hold; its physical meaning is the real hand travel
        distance during the move
    dxdyRtoT: vector target - remaining hold; its physical meaning is the distance between
        the two hands after the move is finished
    FootPlacement: [0, 0, 0, 0, 1, 1, 0] means there are holds in regions 5 and 6
    MoveSuccessRate: estimate of how easy this move is

    If string_mode is False, hold positions are reported as coordinate tuples and hands as
    0 (LH) / 1 (RH) instead of strings.
    """
    # From the list of strings to the hand sequence and operator sequence
    handSequence = []
    handOperatorSequence = []
    xSequence = []
    ySequence = []
    for hold in betaStringList:
        characterAndNum = [re.findall(r'(\w+?)(\d+)', hold.split("-")[0])[0]]
        handOp = hold.split("-")[1]
        alphabateList = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]
        handOperatorSequence.append(handOp)
        xSequence.append(alphabateList.index(characterAndNum[0][0]))
        ySequence.append(int(characterAndNum[0][1]) - 1)

    outputDictionaryList = []
    numOfMoves = len(handOperatorSequence) - 2  # from the third hold to the end hold (no final match)

    # Loop over holds from the third one to the finish hold (rank 3 to end). In each move,
    # this is the hold defined as the target hold.
    for rank in range(2, len(handOperatorSequence)):
        moveDictionary = {}

        # Define the target hold
        targetHoldHand = handOperatorSequence[rank]
        coordinateOfTarget = (xSequence[rank], ySequence[rank])
        if string_mode == False:
            moveDictionary["TargetHoldString"] = coordinateOfTarget
            if targetHoldHand == "LH":
                moveDictionary["TargetHoldHand"] = 0  # LH -> 0
            else:
                moveDictionary["TargetHoldHand"] = 1  # RH -> 1
        else:
            moveDictionary["TargetHoldString"] = coordinateToString(coordinateOfTarget)
            moveDictionary["TargetHoldHand"] = targetHoldHand
        # Difficulty scores could be loaded from an Excel file of L/R hand difficulties
        moveDictionary["TargetHoldScore"] = holdScoreUseCordAndOp(coordinateOfTarget, targetHoldHand)

        # Define the remaining hold: the last hold gripped with the opposite hand before the target
        listBeforeTargetHold = handOperatorSequence[0:rank]
        remainingHoldHand = oppositehand(targetHoldHand)
        order = int(''.join(listBeforeTargetHold).rindex(remainingHoldHand) / 2)
        coordinateOfRemaining = (xSequence[order], ySequence[order])
        if string_mode == False:
            moveDictionary["RemainingHoldString"] = coordinateOfRemaining
            moveDictionary["RemainingHoldHand"] = 1 - moveDictionary["TargetHoldHand"]
        else:
            moveDictionary["RemainingHoldString"] = coordinateToString(coordinateOfRemaining)
            moveDictionary["RemainingHoldHand"] = remainingHoldHand
        moveDictionary["RemainingHoldScore"] = holdScoreUseCordAndOp(coordinateOfRemaining, remainingHoldHand)
        moveDictionary["dxdyRtoT"] = (coordinateOfTarget[0] - coordinateOfRemaining[0],
                                      coordinateOfTarget[1] - coordinateOfRemaining[1])

        # Define the moving hold: the last hold gripped with the same hand before the target
        movingHoldHand = targetHoldHand
        order = int(''.join(listBeforeTargetHold).rindex(movingHoldHand) / 2)
        coordinateOfMoving = (xSequence[order], ySequence[order])
        if string_mode == False:
            moveDictionary["MovingHoldString"] = coordinateOfMoving
            moveDictionary["MovingHoldHand"] = moveDictionary["TargetHoldHand"]
        else:
            moveDictionary["MovingHoldString"] = coordinateToString(coordinateOfMoving)
            moveDictionary["MovingHoldHand"] = movingHoldHand
        moveDictionary["MovingHoldScore"] = holdScoreUseCordAndOp(coordinateOfMoving, movingHoldHand)
        moveDictionary["dxdyMtoT"] = (coordinateOfTarget[0] - coordinateOfMoving[0],
                                      coordinateOfTarget[1] - coordinateOfMoving[1])

        # Define the foot regions relative to the remaining hold
        x0, y0 = int(coordinateOfRemaining[0]), int(coordinateOfRemaining[1])
        region0 = [(x, y) for x in range(x0 - 4, x0 - 1) for y in range(y0 - 3, y0 - 1)]
        region1 = [(x, y) for x in range(x0 - 1, x0 + 2) for y in range(y0 - 3, y0 - 1)]
        region2 = [(x, y) for x in range(x0 + 2, x0 + 5) for y in range(y0 - 3, y0 - 1)]
        region3 = [(x, y) for x in range(x0 - 5, x0 - 1) for y in range(y0 - 6, y0 - 3)]
        region4 = [(x, y) for x in range(x0 - 1, x0 + 2) for y in range(y0 - 6, y0 - 3)]
        region5 = [(x, y) for x in range(x0 + 2, x0 + 6) for y in range(y0 - 6, y0 - 3)]
        region6 = [(x, y) for x in range(x0 - 2, x0 + 3) for y in range(y0 - 9, y0 - 6)]

        # Check whether there are foot holds in each region
        footholdList = [0] * 7
        regionList = [region0, region1, region2, region3, region4, region5, region6]
        for holdx, holdy in zip(xSequence, ySequence):
            for i in range(7):
                if (holdx, holdy) in regionList[i]:
                    footholdList[i] = 1

        # Deal with additional foot holds: if the lowest hold of a region falls below row 0,
        # the additional foot holds at the bottom of the board can be used
        # (each region's first element is its lowest hold)
        if region1[0][1] < 0:
            footholdList[0] = 1
            footholdList[1] = 1
            footholdList[2] = 1
        elif region4[0][1] < 0:
            footholdList[3] = 1
            footholdList[4] = 1
            footholdList[5] = 1
        elif region6[0][1] < 0:
            footholdList[6] = 1
        moveDictionary["FootPlacement"] = footholdList

        # Add the single-move success rate
        if coordinateOfMoving == coordinateOfRemaining:  # if the move starts from a matched position
            pass  # may need special consideration when matching hands
        if targetHoldHand == "RH":
            scoreFromDistance = makeGaussian(coordinateOfTarget, 3, coordinateOfRemaining, "LH")
        if targetHoldHand == "LH":
            scoreFromDistance = makeGaussian(coordinateOfTarget, 3, coordinateOfRemaining, "RH")
        scoreFromfoot = 1
        if sum(footholdList) < 1:
            scoreFromfoot = 0.5
        moveSuccessRate = (moveDictionary["RemainingHoldScore"] * moveDictionary["TargetHoldScore"]
                           * scoreFromDistance * scoreFromfoot)
        moveDictionary["MoveSuccessRate"] = moveSuccessRate

        # Finished filling in all components of a move
        outputDictionaryList.append(moveDictionary)

    return outputDictionaryList
c2905fffd9d1873c79239199027697e5c6162731
3,638,429
from datetime import datetime


def generateVtBar(row):
    """Generate a candlestick (K-line) bar."""
    bar = VtBarData()

    symbol, exchange = row['symbol'].split('.')
    bar.symbol = symbol
    bar.exchange = exchangeMapReverse[exchange]
    if bar.exchange in ['SSE', 'SZSE']:
        bar.vtSymbol = '.'.join([bar.symbol, bar.exchange])
    else:
        bar.vtSymbol = bar.symbol

    bar.open = row['open']
    bar.high = row['high']
    bar.low = row['low']
    bar.close = row['close']
    bar.volume = row['volume']
    bar.date = str(row['date'])
    bar.time = str(row['time']).rjust(6, '0')

    # Shift the bar's timestamp back by one minute
    hour = bar.time[0:2]
    minute = bar.time[2:4]
    sec = bar.time[4:6]
    if minute == "00":
        minute = "59"
        h = int(hour)
        if h == 0:
            h = 24
        hour = str(h - 1).rjust(2, '0')
    else:
        minute = str(int(minute) - 1).rjust(2, '0')
    bar.time = hour + minute + sec

    bar.datetime = datetime.strptime(' '.join([bar.date, bar.time]), '%Y%m%d %H%M%S')
    return bar
5beecf78f932c8e1bf76c680157ecd29fbdf9567
3,638,430
import sqlite3


def index_with_links():
    """Handle the POST request submitted by the link-shortening form."""
    db = sqlite3.connect('link_shortner.db')
    c = db.cursor()
    link = request.forms.get('link')
    generated_id = gen_id()
    # row = db.execute('SELECT * from links where link_id=?', generate_id).fetchone()
    c.execute("INSERT INTO links values (?, ?)", (generated_id, link))
    db.commit()
    db.close()
    shortened = app.config.get('info.hostname', 'localhost:8080') + '/' + generated_id
    return dict(short_link=shortened, csrf_tag=csrf.csrf_tag())
38e4ee6e63bacbc55a40533759c06b836a050e56
3,638,431
import numpy as np


def divide_blend(img_x: np.ndarray, img_y: np.ndarray) -> np.ndarray:
    """
    Blend image x and y in 'divide' mode.

    :param img_x: input grayscale image on top
    :param img_y: input grayscale image at bottom
    :return: blended grayscale image as uint8
    """
    result = np.zeros_like(img_x, np.float_)
    height, width = img_x.shape
    for i in range(height):
        for j in range(width):
            if img_x[i, j] == 0:
                color = img_y[i, j] and 255 or 0  # 255 wherever the bottom pixel is non-zero
            elif img_x[i, j] == 255:
                color = img_y[i, j]
            elif img_x[i, j] == img_y[i, j]:
                color = 255
            else:
                color = (img_y[i, j] / img_x[i, j]) * 255
            result[i, j] = color
    return result.astype(np.uint8)
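A hedged usage sketch; the file names are hypothetical, and both inputs must be single-channel images of the same shape (OpenCV is assumed only for I/O):

import cv2

top = cv2.imread('top.png', cv2.IMREAD_GRAYSCALE)        # hypothetical file
bottom = cv2.imread('bottom.png', cv2.IMREAD_GRAYSCALE)  # hypothetical file
blended = divide_blend(top, bottom)
cv2.imwrite('blended.png', blended)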
27207b209c871a794162ee5b2932344a185668e7
3,638,433
import numpy as np
import tensornetwork as tn


def init_wavefunction(n_sites, bond_dim, **kwargs):
    """
    A function that initializes the coefficients of a wavefunction for L sites (from 0 to L-1)
    and arranges them in a tensor of dimension n_0 x n_1 x ... x n_L for L sites. SVD is
    applied to this tensor iteratively to obtain the matrix product state.

    Parameters
    ----------
    n_sites : int
        Number of sites.
    bond_dim : int
        Bond dimension.

    kwargs
    ----------
    conserve_n : boolean
        True for conservation of number of particles.
    num_e : int
        Number of electrons.

    Returns
    -------
    mps : tensornetwork
        Matrix Product State.
    """
    # `block` is assumed to be a module-level helper returning a random tensor
    # of the given shape.
    # t1 = time.time()
    mps = [tn.Node(block(2, bond_dim), axis_names=["n_0", "i_0"])] + \
          [tn.Node(block(2, bond_dim, bond_dim),
                   axis_names=["n_{}".format(l), "i_{}".format(l - 1), "i_{}".format(l)])
           for l in range(1, n_sites - 1)] + \
          [tn.Node(block(2, bond_dim),
                   axis_names=["n_{}".format(n_sites - 1), "i_{}".format(n_sites - 2)])]

    # Right-canonicalize
    for i in range(n_sites - 1, 0, -1):
        if i == n_sites - 1:
            redges = [mps[i]["n_{}".format(i)]]
        else:
            redges = [mps[i]["i_{}".format(i)], mps[i]["n_{}".format(i)]]
        ledges = [mps[i]["i_{}".format(i - 1)]]
        u, s, v, _ = tn.split_node_full_svd(
            mps[i], left_edges=ledges, right_edges=redges,
            left_edge_name="d_{}".format(i - 1), right_edge_name="i_{}".format(i - 1),
            # max_singular_values=bond_dim)
            max_truncation_err=1e-5)
        if i == n_sites - 1:
            reord_edges = [v["n_{}".format(i)], v["i_{}".format(i - 1)]]
        else:
            reord_edges = [v["n_{}".format(i)], v["i_{}".format(i - 1)], v["i_{}".format(i)]]
        v.reorder_edges(reord_edges)
        if i == 1:
            mps[i - 1].tensor = tn.ncon([mps[i - 1].tensor, u.tensor, s.tensor],
                                        [(-1, 'k'), ('k', 'l'), ('l', -2)])
        else:
            mps[i - 1].tensor = tn.ncon([mps[i - 1].tensor, u.tensor, s.tensor],
                                        [(-1, -2, 'k'), ('k', 'l'), ('l', -3)])
        mps[i].tensor = v.tensor

    # Connect edges to build the MPS
    connected_edges = []
    conn = mps[0]["i_0"] ^ mps[1]["i_0"]
    connected_edges.append(conn)
    for k in range(1, n_sites - 1):
        conn = mps[k]["i_{}".format(k)] ^ mps[k + 1]["i_{}".format(k)]
        connected_edges.append(conn)

    # Normalize
    mod = np.linalg.norm(mps[0].tensor)
    mps[0].tensor /= mod

    # t2 = time.time()
    # print("MPS CONSTRUCTION TIME=", t2 - t1)
    return mps
8f1a4d456945d9a345f560ee3d87dadbf353e7d3
3,638,435
def num_channels_to_num_groups(num_channels):
    """Returns number of groups to use in a GroupNorm layer with a given number of channels.

    Note that these choices are hyperparameters.

    Args:
        num_channels (int): Number of channels.
    """
    if num_channels < 8:
        return 1
    if num_channels < 32:
        return 2
    if num_channels < 64:
        return 4
    if num_channels < 128:
        return 8
    if num_channels < 256:
        return 16
    else:
        return 32
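For instance, with PyTorch (an assumption; the helper itself is framework-agnostic) the returned value plugs directly into a GroupNorm layer:

import torch.nn as nn

channels = 96
norm = nn.GroupNorm(num_groups=num_channels_to_num_groups(channels),  # -> 8
                    num_channels=channels)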
e2095fba2b1b9cdada72d354ddcd781d99e4aa48
3,638,436
from flask import jsonify  # `jsonify` is assumed to come from Flask


def response_message(status, message, status_code):
    """Build a JSON response message with the given HTTP status code."""
    return jsonify({
        "status": status,
        "message": message
    }), status_code
e9dd25f237f264835d507af01a71ef9c826bf28d
3,638,437
def glDrawBuffers(baseOperation, n=None, bufs=None):
    """glDrawBuffers( bufs ) -> bufs

    Wrapper will calculate n from dims of bufs if only one
    argument is provided...
    """
    if bufs is None:
        bufs = n
        n = None
    bufs = arrays.GLenumArray.asArray(bufs)
    if n is None:
        n = arrays.GLenumArray.arraySize(bufs)
    return baseOperation(n, bufs)
ef5a83ea633138d4cb18d8d2d20736d8c1942bc0
3,638,438
def compare_rendered(obj1, obj2):
    """
    Return True/False depending on whether the normalized rendered
    versions of two folium map objects are equal.
    """
    return normalize(obj1) == normalize(obj2)
b7debf048ea41b882003283b6e3b94d257f0e0fa
3,638,439
async def _get_device_client_adapter(settings_object):
    """Get a device client adapter for the given settings object."""
    if not settings_object.device_id and not settings_object.id_scope:
        return None
    adapter = adapters.create_adapter(settings_object.adapter_address, "device_client")
    adapter.device_id = settings_object.device_id
    return adapter
411b52a4e916d55b46933afbfa4e8513243b4397
3,638,440
def is_reserved(word):
    """
    Determines if word is reserved

    :param word: String representing the variable
    :return: True if word is reserved and False otherwise
    """
    lorw = ['define', 'define-struct']
    return word in lorw
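For example:

assert is_reserved('define') is True
assert is_reserved('definition') is False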
0b0e3706bcafe36fc52e6384617223078a141fb2
3,638,441
def verify_figure_hash(name, figure=None):
    """
    Verifies whether a figure has the same hash as the named hash in the
    current hash library. If the hash library does not contain the specified
    name, the hash is added to the library.

    Parameters
    ----------
    name : string
        The identifier for the hash in the hash library
    figure : matplotlib.figure.Figure
        If None is specified, the current figure is used (as determined by
        matplotlib.pyplot.gcf())

    Returns
    -------
    out : bool
        False if the figure's hash does not match the named hash, otherwise True
    """
    if name not in hash_library:
        hash_library[name] = hash_figure(figure)
        return True
    return hash_library[name] == hash_figure(figure)
09ee240c9efbeddd4a0f33401d80b918175a579e
3,638,442
def x_span_contains_y(x_spans, y_spans):
    """
    Return whether all elements of y_spans are contained by some elements of x_spans.

    :param x_spans: list of (start, end) index pairs
    :param y_spans: list of (start, end) index pairs
    """
    for i, j in y_spans:
        match_found = False
        for m, n in x_spans:
            if i >= m and j <= n:
                match_found = True
                break
        # If this particular y_span found a match, keep looking.
        if match_found:
            continue
        # If we find an element that doesn't have a match, return False.
        else:
            return False
    # If we have reached the end of both loops, then all elements match.
    return True
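A short usage example; spans are inclusive (start, end) pairs:

x_spans = [(0, 10), (20, 30)]
assert x_span_contains_y(x_spans, [(2, 5), (20, 25)]) is True
assert x_span_contains_y(x_spans, [(5, 15)]) is False  # crosses the gap between the x spans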
c366a5a5543e2fe9f6325cd3d31eccffb921693c
3,638,443
from json import dumps  # `dumps` is assumed to be json.dumps, used to serialize the response header
from time import time


def log(fn):
    """
    Logging decorator for the REST method calls.
    Gets all important information about the request and response, takes the
    time to complete the call and writes it to the logs.
    """
    def wrapped(self, *args):
        # `ctx`, `logger` and `extractLogData` are module-level names in the original source
        try:
            start = time()
            ret = fn(self, *args)
            duration = time() - start
            logData = extractLogData(ctx)
            logData['duration'] = duration
            logData['httpCode'] = ctx.status
            logData['responseHeader'] = dumps(ctx.headers)
            logger.info('', extra=logData)
            return ret
        except Exception:
            duration = time() - start
            logData = extractLogData(ctx)
            logData['duration'] = duration
            logData['httpCode'] = ctx.status
            logData['responseHeader'] = dumps(ctx.headers)
            if ctx.status[0] == '2':
                logger.info('', extra=logData)
            else:
                logger.error('', extra=logData)
            raise
    return wrapped
8efcfcf043c220565092971749a12876a55641dc
3,638,445
def deal_line(text_str1, text_str2, para_bound=None):
    """Merge lines and split paragraphs."""
    global result_text
    text_str2 = text_str2.strip()
    len_text_str2 = len(text_str2)
    if len_text_str2 > 3 and len(set(text_str2)) == 1:
        # Handle separator lines such as *****
        st = list(set(text_str2))[0]
        # new_file.write(' ' + st * 24 + '\n')
        result_text += HEAD_SPACE + st * 24 + '\n'
        return ""
    if len_text_str2 > 3 and str(text_str2[0:3]) == str(text_str2[-3:]):
        # Handle separator lines such as ***Text***
        # new_file.write(' ' + text_str1 + '\n')
        # new_file.write(' ' + text_str2 + '\n')
        result_text += HEAD_SPACE + text_str1 + '\n'
        result_text += HEAD_SPACE + text_str2 + '\n'
        return ""
    else:
        if isparagraph_break(text_str1):
            # new_file.write(' ' + text_str1 + '\n')
            result_text += HEAD_SPACE + text_str1 + '\n'
            text_str1 = text_str2
        else:
            text_str1 += text_str2
    if para_bound:
        return split_paragraph(text_str1, para_bound)
    else:
        return text_str1
b984cefd842071fed3359ac36f8bae46e916e956
3,638,446
import cv2
import numpy as np


def resized_image(image: np.ndarray, max_size: int) -> np.ndarray:
    """Resize image so that its longer side does not exceed max_size."""
    h, w = image.shape[:2]
    size = max(w, h)
    if 0 < max_size < size:
        dsize = w * max_size // size, h * max_size // size
        return cv2.resize(image, dsize=dsize, interpolation=cv2.INTER_AREA)
    else:
        return image
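A quick usage sketch on a synthetic image (a stand-in for a real photo); note that cv2.resize takes (width, height), so a 480x640 image capped at 320 comes back as 240x320:

import numpy as np

img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
small = resized_image(img, max_size=320)
print(small.shape)  # (240, 320, 3)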
a32f0639b8b59cef8817861d123b5c304b7c243c
3,638,447
import pandas as pd


def load_folder_list(args, ndict):
    """
    Args:
        ndict: mapping of "name_run" -> path
    """
    l = []
    for p in ndict:
        print("loading %s" % p)
        l.append(load_pickle_to_dataframe(args, p))
    d = pd.concat(l)
    d = d.sort_values("name_run")
    print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    print("Datasets:")
    print("=========")
    for n, name in zip(range(len(d.columns)), d.columns):
        print(f"{n} -> {name}")
    print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    print()
    return d
bd434fd93b3cb06a18d40edc48f8119442e7f0ff
3,638,448
def charge_initial():
    """
    Not currently in use. A parking spot id gets passed in, carries over, and
    is passed into the Stripe charge view.
    """
    spot_id = int(request.args.get('id'))
    spot = AddressEntry.query.get(spot_id)
    return render_template('users/charge_initial.html',
                           key=stripe_keys['publishable_key'],
                           price=spot.price)
f971b5c69954ce2026d2c4b08d6877c9f7da6067
3,638,449
import csv


def read_csv_from_file(file):
    """
    Reads the CSV data from the open file handle and returns a list of dicts.

    Assumes the CSV data includes a header row and uses that header row as
    fieldnames in the dict. The following fields are required and are
    case-sensitive:

    - ``artist``
    - ``song``
    - ``submitter``
    - ``seed``

    Other fields are ultimately preserved untouched in the output CSV.

    If the CSV doesn't have a header row, uses the following hardcoded list:

    - ``order``
    - ``seed``
    - ``submitter``
    - ``year``
    - ``song``
    - ``artist``
    - ``link``

    If a tab character is present in the first row, assumes the data is
    tab-delimited, otherwise assumes comma-delimited.

    :returns: All parsed data from the already-opened CSV file given, as a
        list of dicts as generated by `csv.DictReader`
    """
    data = list(file)
    delimiter = "\t" if "\t" in data[0] else ","

    # Look for a header row
    reader = csv.reader([data[0]], delimiter=delimiter)
    row = next(reader)
    for col in row:
        try:
            int(col)
            # Found an integer, no headers present
            headers = ["order", "seed", "submitter", "year", "song", "artist", "link"]
            break
        except ValueError:
            pass
    else:
        # Unable to find an integer here, must be a header row.
        # Pop the header row off the data list and create a new reader just to
        # parse that row.
        data.pop(0)
        headers = row
    return list(csv.DictReader(data, fieldnames=headers, delimiter=delimiter))
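A short usage example with an in-memory file; headerless, tab-delimited input triggers the hardcoded field names (all values come back as strings):

import io

raw = "1\t4\talice\t1999\tSong A\tArtist A\thttp://example.com/a\n"
rows = read_csv_from_file(io.StringIO(raw))
print(rows[0]["seed"], rows[0]["song"])  # 4 Song A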
89cfce0be6270076230051a6e852d1add3f4dcaf
3,638,450
def identify_denonavr_receivers():
    """
    Identify DenonAVR using SSDP and SCPD queries.

    Returns a list of dictionaries which includes all discovered Denon AVR
    devices with keys "host", "modelName", "friendlyName", "presentationURL".
    """
    # Send SSDP broadcast message to find devices
    devices = send_ssdp_broadcast()

    # Check which responding device is a DenonAVR device and prepare output
    receivers = []
    for device in devices:
        try:
            receiver = evaluate_scpd_xml(device["URL"])
        except ConnectionError:
            continue
        if receiver:
            receivers.append(receiver)
    return receivers
712cba308d150ec179a390c27ae6931595cdffa9
3,638,452
def get_index_settings(index):
    """Returns ES settings for this index"""
    return (get_es().indices.get_settings(index=index)
            .get(index, {}).get('settings', {}))
6d5d13bc30fdf8db666206bb07c3310394f3ff44
3,638,453