content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import yaml


def loadConfig(filename):
    """Load and parse a .yaml configuration file.

    Args:
        filename (str): Path to the system configuration file.

    Returns:
        dict: The 'bdsSnmpAdapter' section of the configuration.

    Raises:
        error.BdsError: If the file cannot be opened or parsed, or the
            'bdsSnmpAdapter' key is missing.
    """
    try:
        with open(filename) as stream:
            # safe_load avoids arbitrary Python object construction from the
            # YAML stream; yaml.load without an explicit Loader is unsafe and
            # is rejected by PyYAML >= 6.
            config = yaml.safe_load(stream)
        return config['bdsSnmpAdapter']
    except Exception as exc:
        raise error.BdsError(
            'Failed to read configuration file %s: %s' % (filename, exc))
099d1892bf6f6798a77dcc59067afa59af770745
3,644,005
def scale_t50(t50_val=1.0, zval=1.0):
    """Convert a t50 value from lookback time in Gyr at a given redshift to
    a fraction of the age of the universe (in cosmic time).

    Parameters
    ----------
    t50_val : float
        t50 as a lookback time in Gyr.
    zval : float
        Redshift at which the age of the universe is evaluated.

    Returns
    -------
    float
        t50 expressed as a fraction of the age of the universe.
    """
    # cosmo.age(z) gives the age of the universe at redshift z in Gyr
    # (module-level cosmology object; presumably astropy-style — confirm).
    universe_age = cosmo.age(zval).value
    return 1 - t50_val / universe_age
43d7fa07a59c4b66c7db7caca3c138800ca8db4e
3,644,006
def get_car_changing_properties(car):
    """Extract the car properties that change during a trip.

    :param car: car info in the original system's JSON-dict format
    :return: dict with keys mapped to the common electric2go format
    """
    result = {}
    for mapped_key, original_key in KEYS['changing'].items():
        result[mapped_key] = car.get(original_key, None)

    # Derived fields that cannot be expressed as a simple key mapping.
    result['address'] = ', '.join(car['address'])

    rental_price = car['rentalPrice']
    result['price_offer'] = rental_price['isOfferDrivePriceActive']
    result['price_offer_details'] = rental_price.get('offerDrivePrice', {})

    return result
540dbd0b6d08cc08a950946dda018c3296d8c51d
3,644,007
def get_metadata(record):
    """Call DNZ's API and return the metadata for the given record.

    The record's 'hash' is copied onto the returned metadata dict. If the
    response lacks a 'record' key (typically a missing/invalid API key),
    the process exits with status 1.
    """
    record_id = record['id']
    request_url = '{base}{id}.json?api_key={key}'.format(
        base=DNZ_URL, id=record_id, key=DNZ_KEY)
    try:
        metadata = get(request_url).json()['record']
        metadata['hash'] = record['hash']
    except KeyError:
        print('You forgot the DNZ Key – Again!')
        exit(1)
    return metadata
522e2aed2f7d71bcf9d397036c764c90c67b6184
3,644,008
def _expand_one_dict(cfg, shared):
    """expand a piece of config

    Parameters
    ----------
    cfg : dict
        Configuration
    shared : dict
        A dict of shared objects

    Returns
    -------
    dict, list
        Expanded configuration
    """
    # If a default config key is configured, wrap any dict that is not
    # already a single recognised config key under that default key.
    if shared['default_config_key'] is not None:
        if not (len(cfg) == 1 and list(cfg.keys())[0] in shared['config_keys']):
            cfg = {shared['default_config_key']: cfg}
    # Multi-key dicts cannot be expanded further; return a shallow copy.
    if not len(cfg) == 1:
        return cfg.copy()
    key, val = list(cfg.items())[0]
    # Keys that are not config keys, or have no registered expansion
    # function, get the key-wide default treatment instead.
    if key not in shared['config_keys']:
        cfg = _apply_default_for_all_keys(cfg, shared)
        return cfg.copy()
    if key not in shared['expand_func_map']:
        cfg = _apply_default_for_all_keys(cfg, shared)
        return cfg.copy()
    # Delegate to the key-specific expansion function. Some expanders take
    # the shared dict, some only the value; fall back on TypeError.
    expand_func = shared['expand_func_map'][key]
    try:
        return expand_func(val, shared)
    except TypeError:
        return expand_func(val)
d8d00bfede1bdca504f3d643836947363d8914ac
3,644,009
import six


def _api_decrypt():
    """
    Return the response dictionary from the KMS decrypt API call.

    Raises:
        salt.exceptions.SaltConfigurationError: if KMS rejects the configured
            data key as invalid ciphertext (chained to the original
            botocore ClientError).
    """
    kms = _kms()
    data_key = _cfg_data_key()
    try:
        return kms.decrypt(CiphertextBlob=data_key)
    except botocore.exceptions.ClientError as orig_exc:
        error_code = orig_exc.response.get("Error", {}).get("Code", "")
        # Only translate the "bad data key" case; anything else (permissions,
        # throttling, ...) propagates unchanged.
        if error_code != "InvalidCiphertextException":
            raise
        err_msg = "aws_kms:data_key is not a valid KMS data key"
        config_error = salt.exceptions.SaltConfigurationError(err_msg)
        # six.raise_from keeps Python 2 compatibility for exception chaining.
        six.raise_from(config_error, orig_exc)
b0b01e9a71dfaf594dd9526072b77f2a5c6363c1
3,644,010
def hide_panel(panel_name, base_url=DEFAULT_BASE_URL):
    """Hide a panel in the Cytoscape UI; other panels expand into the space.

    Args:
        panel_name (str): Name of the panel. Several aliases are accepted:
            (WEST == control panel, control, c), (SOUTH == table panel, table, ta),
            (SOUTH_WEST == tool panel, tool, to), (EAST == results panel, results, r)
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is
            http://127.0.0.1:1234 and the latest version of the CyREST API
            supported by this version of py4cytoscape.

    Returns:
        str: ''

    Raises:
        CyError: if panel name is not recognized
        requests.exceptions.RequestException: if can't connect to Cytoscape
            or Cytoscape returns an error

    Examples:
        >>> hide_panel('control panel')
        ''
        >>> hide_panel('WEST')
        ''
    """
    # Normalise aliases to the canonical panel name first.
    canonical_name = _check_panel_name(panel_name)
    request_body = [{'name': canonical_name, 'state': 'HIDE'}]
    return commands.cyrest_put('ui/panels', body=request_body,
                               base_url=base_url, require_json=False)
5e8ead9f8ca51d4629c4c4dbd605ad2257cfa147
3,644,011
def user_tickets(raffle_prize, user):
    """Return the tickets *user* has allocated to *raffle_prize*."""
    allocated = raffle_prize.allocated_tickets(user)
    return allocated
a29c578713664018f639088539f2404fc7a63171
3,644,012
def init_container(self, **kwargs):
    """Initialise a container from a dictionary of inputs.

    Each keyword is assigned as an attribute on ``self``. If direct
    assignment fails and the existing attribute is a list while the
    incoming value is a numpy array, the array is converted with
    ``tolist()`` first.

    Args:
        **kwargs: attribute name/value pairs to set on this container.

    Returns:
        The container itself, allowing call chaining.
    """
    # .items() — the previous .iteritems() is Python 2 only.
    for key, value in kwargs.items():
        try:
            setattr(self, key, value)
        except Exception:
            # Deal with the array -> list issue
            if isinstance(getattr(self, key), list) and isinstance(value, ndarray):
                setattr(self, key, value.tolist())
            else:
                # Previously any other failure was swallowed silently;
                # re-raise so bad assignments are not hidden.
                raise
    return self
888f5fc1cfc2b7718b8712360f86b5fd51fd25d2
3,644,013
def optimize(nn_last_layer, correct_label, learning_rate, num_classes): """ Build the TensorFLow loss and optimizer operations. :param nn_last_layer: TF Tensor of the last layer in the neural network :param correct_label: TF Placeholder for the correct label image :param learning_rate: TF Placeholder for the learning rate :param num_classes: Number of classes to classify :return: Tuple of (logits, train_op, cross_entropy_loss) """ # Reshape 4D tensors to 2D, each row represents a pixel, each column a class logits = tf.reshape(nn_last_layer, (-1, num_classes), name="fcn_logits") correct_label_reshaped = tf.reshape(correct_label, (-1, num_classes)) # Calculate distance from actual labels using cross entropy cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label_reshaped[:]) # Take mean for total loss loss_op = tf.reduce_mean(cross_entropy, name="fcn_loss") # The model implements this operation to find the weights/parameters that would yield correct pixel labels train_op = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_op, name="fcn_train_op") return logits, train_op, loss_op
0c1f50c3148a87206fff9473f2ecd78793aef630
3,644,014
def setIamPolicy(asset_id, policy):
    """Sets ACL info for an asset.

    Args:
      asset_id: The asset to set the ACL policy on.
      policy: The new Policy to apply to the asset. This replaces
          the current Policy.

    Returns:
      The new ACL, as an IAM Policy.
    """
    # The Cloud API identifies assets by full resource name, so the
    # user-facing asset ID is translated first; prettyPrint=False trims
    # the response payload.
    return _execute_cloud_call(
        _get_cloud_api_resource().projects().assets().setIamPolicy(
            resource=_cloud_api_utils.convert_asset_id_to_asset_name(asset_id),
            body={'policy': policy},
            prettyPrint=False))
2501565aee420cd3b66eaf204ecd756d51e30b4f
3,644,015
def get_corners(n):
    """Return the four corner numbers of layer ``n`` of a square spiral,
    in descending order.

    Layer n ends at (2n+1)^2; the other corners are spaced 2n apart
    walking backwards around the ring.

    Args:
        n (int): Layer index (layer 0 is the single centre cell).

    Returns:
        list[int]: [end, end - 2n, end - 4n, end - 6n].
    """
    # Was: end = end = (2*n + 1) * (2*n + 1) — duplicated assignment removed.
    end = (2 * n + 1) * (2 * n + 1)
    return [end - m * n for m in range(0, 8, 2)]
8d78135f13675d01fc2b6736b7c1fb1e7cf3e5f5
3,644,016
def plot_single_hist(histvals, edges, legend=None, **kwds):
    """ Bokeh-based plotting of a single histogram with legend and tooltips.

    **Parameters**\n
    histvals: 1D array
        Histogram counts (e.g. vertical axis).
    edges: 1D array
        Histogram edge values (e.g. horizontal axis).
    legend: str
        Text for the plot legend.
    **kwds:
        Keyword arguments for 'bokeh.plotting.figure().quad()'.

    **Return**\n
    p: object
        An instance of 'bokeh.plotting.figure()' as a plot handle.
    """
    # Default tooltip shows the cursor position; callers may override it
    # via the 'tooltip' keyword (popped so it is not forwarded to quad()).
    ttp = kwds.pop('tooltip', [('(x, y)', '($x, $y)')])
    p = pbk.figure(background_fill_color='white', tooltips=ttp)
    # One quad per bin: edges[:-1] / edges[1:] are the left/right borders.
    p.quad(top=histvals, bottom=0, left=edges[:-1], right=edges[1:],
           line_color='white', alpha=0.8, legend=legend, **kwds)
    p.y_range.start = 0
    p.legend.location = 'top_right'
    p.grid.grid_line_color = 'lightgrey'
    return p
24a91ed6e3653dde35a27bba26530f47ec11bcd2
3,644,017
import torch


def resnet50(alpha, beta, **kwargs):
    """Constructs a ResNet-50 based model.

    Loads a pretrained 2D checkpoint from model_dirs['resnet50'] and adapts
    its conv weights to this model's 5D (3D-conv) layout before loading
    with strict=False.

    Args:
        alpha: split factor applied to conv2 output channels.
        beta: split factor applied to conv2 input channels.
        **kwargs: forwarded to the ResNet constructor.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], alpha, beta, **kwargs)
    checkpoint = torch.load(model_dirs['resnet50'])
    layer_name = list(checkpoint.keys())
    for ln in layer_name:
        # Insert a singleton depth dimension into every conv / downsample
        # weight so 2D kernels become (out, in, 1, kh, kw).
        if 'conv' in ln or 'downsample.0.weight' in ln:
            checkpoint[ln] = checkpoint[ln].unsqueeze(2)
        # conv2 weights are additionally trimmed according to alpha/beta
        # (keep the first (alpha-1)/alpha of outputs and 1/beta of inputs).
        if 'conv2' in ln:
            n_out, n_in, _, _, _ = checkpoint[ln].size()
            checkpoint[ln] = checkpoint[ln][:n_out // alpha * (alpha - 1), :n_in // beta, :, :, :]
    # strict=False: the adapted checkpoint does not cover every parameter.
    model.load_state_dict(checkpoint, strict=False)
    return model
165f0bfd357af96004edcc5d73224d8efcb98943
3,644,018
from datetime import datetime


def datetime_to_timestamp(dt, epoch=datetime(1970, 1, 1)):
    """Convert a python datetime object to a Unix timestamp.

    This is a non-timezone-aware function.

    :param dt: datetime to convert to timestamp
    :param epoch: datetime, optional specification of start of epoch
        [default: 1/1/1970]
    :return: timestamp in seconds (float, sub-second precision preserved)
    """
    td = dt - epoch
    # The previous version added td.microseconds (an integer count of
    # microseconds) directly to whole seconds, inflating the result by up
    # to ~1e6 for any dt with a fractional-second component.
    return td.total_seconds()
2fbd5b3d6a56bc04066f7aaa8d4bef7c87a42632
3,644,019
def connectivity_dict_builder(edge_list, as_edges=False):
    """Build a connectivity dictionary: for every vertex (node), the set of
    nodes (or edges) it is connected to.

    Args:
        edge_list (list): connectivity description, e.g.
            [('E7', 'N3', 'N6'), ('E2', 'N9', 'N4'), ...]
        as_edges (bool): if True, record the connecting edges instead of
            the neighbouring nodes.

    Returns:
        (dict): node -> set of connected nodes (or edges), e.g.
            {'N3': {'N6', 'N11', 'N7'}, 'N9': {'N4'}, ...}
    """
    connectivity = {}
    for edge, first, second in edge_list:
        connectivity.setdefault(first, set()).add(edge if as_edges else second)
        connectivity.setdefault(second, set()).add(edge if as_edges else first)
    return connectivity
58f24c6465fa1aaccca92df4d06662b0ce1e1e77
3,644,020
def get_confusion_matrix(*, labels, logits, batch_mask):
    """Computes the confusion matrix that is necessary for global mIoU.

    Args:
        labels: ground-truth labels, either integer class ids or one-hot
            (same rank as logits); -1 marks excluded pixels.
        logits: per-element class scores; the last axis is the class axis.
        batch_mask: validity mask; excluded entries contribute weight 0.

    Returns:
        Confusion matrix with a leading dummy batch dimension (assuming
        model_utils.confusion_matrix returns [num_classes, num_classes] —
        confirm against that helper).
    """
    if labels.ndim == logits.ndim:  # One-hot targets.
        y_true = jnp.argmax(labels, axis=-1)
    else:
        y_true = labels
    # Set excluded pixels (label -1) to zero, because the confusion matrix
    # computation cannot deal with negative labels. They will be ignored due to
    # the batch_mask anyway:
    y_true = jnp.maximum(y_true, 0)
    y_pred = jnp.argmax(logits, axis=-1)
    # Prepare sample weights for confusion matrix:
    weights = batch_mask.astype(jnp.float32)
    # Normalize weights by number of samples to avoid having very large numbers in
    # the confusion matrix, which could lead to imprecise results (note that we
    # should not normalize by sum(weights) because that might differ between
    # devices/hosts):
    weights = weights / weights.size
    confusion_matrix = model_utils.confusion_matrix(
        y_true=y_true,
        y_pred=y_pred,
        num_classes=logits.shape[-1],
        weights=weights)
    confusion_matrix = confusion_matrix[jnp.newaxis, ...]  # Dummy batch dim.
    return confusion_matrix
664f08446ea25000c77a78b133fc749fbb919376
3,644,021
import socket


def init_socket():
    """Create and return a fresh, unconnected socket (module defaults:
    AF_INET / SOCK_STREAM)."""
    fresh_socket = socket.socket()
    return fresh_socket
429d790f3007a357d4a14d57066d890f14f42178
3,644,022
def semitone_frequencies(fmin, fmax, fref=A4): """ Returns frequencies separated by semitones. Parameters ---------- fmin : float Minimum frequency [Hz]. fmax : float Maximum frequency [Hz]. fref : float, optional Tuning frequency of A4 [Hz]. Returns ------- semitone_frequencies : numpy array Semitone frequencies [Hz]. """ # return MIDI frequencies return log_frequencies(12, fmin, fmax, fref)
b4a29dcb0ae53f2876d01f4a084d577219db1e47
3,644,023
from typing import Mapping
from typing import Any
from typing import Sequence


def dict_get_value(dict: Mapping, name: str) -> Any:
    """Gets data from a dictionary using a dotted accessor-string

    :param dict: source dictionary
    :param name: dotted value name
    :raises InvalidParamError: if any chunk of the dotted path cannot be
        found in the current container.

    NOTE(review): the parameter shadows the builtin ``dict``; renaming it
    would break keyword callers, so it is kept as-is.
    NOTE(review): Sequence values pass the isinstance gate, but lists have
    no ``.get`` and ``chunk in current_data`` tests sequence *values*, not
    indices — confirm whether sequence traversal is actually intended.
    """
    current_data = dict
    for chunk in name.split('.'):
        if not isinstance(current_data, (Mapping, Sequence)):
            raise InvalidParamError('Could not find item "{}"'.format(name))
        if chunk not in current_data:
            raise InvalidParamError('Could not find item "{}"'.format(name))
        # Membership was just checked, so the {} default is never used for
        # Mappings.
        current_data = current_data.get(chunk, {})
    return current_data
c77c4fbfd8677fc53510a1dfe565e3496d57f8ef
3,644,024
def get_files_from_split(split):
    """Get filenames for real and fake samples.

    Parameters
    ----------
    split : pandas.DataFrame
        DataFrame whose columns 0 and 1 contain the paired identifiers.

    Returns
    -------
    tuple(list, list)
        (real filenames, fake filenames), where fake names are the two
        identifiers joined with '_' in both orders.
    """
    first = split[0].astype(str)
    second = split[1].astype(str)

    forward = first.str.cat(second, sep="_")
    backward = second.str.cat(first, sep="_")

    files_real = pd.concat([first, second]).to_list()
    files_fake = pd.concat([forward, backward]).to_list()
    return files_real, files_fake
951c8e73952017db2d29b6b1e4944ddf832516e3
3,644,025
def dedupBiblioReferences(doc):
    """
    SpecRef has checks in its database preventing multiple references from
    having the same URL. Shepherd, while it doesn't have an explicit check
    for this, should also generally have unique URLs. But these aren't
    uniqued against each other. So, if you explicitly biblio-link to a
    SpecRef spec, and autolink to a Shepherd spec, you might get two
    distinct biblio entries with the exact same URL. This code checks for
    this, and deletes Shepherd biblio if there's a SpecRef biblio with the
    same URL. It then adjusts doc.externalRefsUsed to point to the SpecRef
    biblio.
    """
    def isShepherdRef(ref):
        return isinstance(ref, SpecBasedBiblioEntry)
    # Bucket every reference by URL along two axes:
    # normative vs informative, and Shepherd vs SpecRef.
    normSpecRefRefs = {}
    normShepherdRefs = {}
    informSpecRefRefs = {}
    informShepherdRefs = {}
    for ref in doc.normativeRefs.values():
        if isShepherdRef(ref):
            normShepherdRefs[ref.url] = ref
        else:
            normSpecRefRefs[ref.url] = ref
    for ref in doc.informativeRefs.values():
        if isShepherdRef(ref):
            informShepherdRefs[ref.url] = ref
        else:
            informSpecRefRefs[ref.url] = ref
    normSpecRefUrls = set(normSpecRefRefs.keys())
    normShepherdUrls = set(normShepherdRefs.keys())
    informSpecRefUrls = set(informSpecRefRefs.keys())
    informShepherdUrls = set(informShepherdRefs.keys())
    specRefUrls = normSpecRefUrls | informSpecRefUrls
    shepherdUrls = normShepherdUrls | informShepherdUrls
    dupedUrls = shepherdUrls & specRefUrls
    if not dupedUrls:
        # Nothing shared between the two sources; no dedup needed.
        return
    # If an informative duped URL is SpecRef,
    # and a normative Shepherd version also exists,
    # mark it for "upgrading", so the SpecRef becomes normative.
    upgradeUrls = dupedUrls & informSpecRefUrls & normShepherdUrls
    upgradeRefs = {}
    popInformatives = []
    for key, ref in doc.informativeRefs.items():
        if ref.url in upgradeUrls and not isShepherdRef(ref):
            upgradeRefs[ref.url] = ref
            popInformatives.append(key)
    for key in popInformatives:
        doc.informativeRefs.pop(key)
    for key, ref in doc.normativeRefs.items():
        if ref.url in upgradeUrls:
            doc.normativeRefs[key] = upgradeRefs[ref.url]
    # Recompute the URL sets: upgraded refs moved from the
    # informative-SpecRef bucket into the normative-SpecRef bucket.
    for url in upgradeUrls:
        normShepherdUrls.discard(url)
        informSpecRefUrls.discard(url)
        normSpecRefUrls.add(url)
    shepherdUrls = normShepherdUrls | informShepherdUrls
    specRefUrls = normSpecRefUrls | informSpecRefUrls
    dupedUrls = shepherdUrls & specRefUrls
    # Remove all the Shepherd refs that are left in duped
    poppedKeys = defaultdict(dict)
    for key, ref in list(doc.informativeRefs.items()):
        if ref.url in dupedUrls:
            if isShepherdRef(ref):
                doc.informativeRefs.pop(key)
                poppedKeys[ref.url]["shepherd"] = key
            else:
                poppedKeys[ref.url]["specref"] = key
    for key, ref in list(doc.normativeRefs.items()):
        if ref.url in dupedUrls:
            if isShepherdRef(ref):
                doc.normativeRefs.pop(key)
                poppedKeys[ref.url]["shepherd"] = key
            else:
                poppedKeys[ref.url]["specref"] = key
    # For every key that was popped,
    # swap out the "externalRefsUsed" for that key
    for keys in poppedKeys.values():
        if "shepherd" not in keys or "specref" not in keys:
            continue
        if keys["shepherd"] in doc.externalRefsUsed:
            for k, v in list(doc.externalRefsUsed[keys["shepherd"]].items()):
                doc.externalRefsUsed[keys["specref"]][k] = v
            del doc.externalRefsUsed[keys["shepherd"]]
4fbbb6eb85b1136c5addc5421ff9be083cc3429d
3,644,026
def check_percent(mask_arr, row, col, sz, percent):
    """Check whether a square window of the mask is sufficiently covered.

    :param mask_arr: mask array; its max value is treated as the "on"
        value when normalising.
    :param row: top row index of the window
    :param col: left column index of the window
    :param sz: window side length
    :param percent: required coverage fraction
    :return: True if the normalised covered area fraction exceeds
        ``percent``, else False.
    """
    on_value = mask_arr.max()
    window = mask_arr[row:row + sz, col:col + sz]
    covered = np.sum(window) / on_value
    return bool(covered / (sz ** 2) > percent)
0d84e511d6895145dc4a7f8f150ae907a4884f90
3,644,027
def find_center_pc(proj1, proj2, tol=0.5, rotc_guess=None):
    """
    Find rotation axis location by finding the offset between the first
    projection and a mirrored projection 180 degrees apart using
    phase correlation in Fourier space.
    The ``register_translation`` function uses cross-correlation in Fourier
    space, optionally employing an upsampled matrix-multiplication DFT to
    achieve arbitrary subpixel precision. :cite:`Guizar:08`.

    Parameters
    ----------
    proj1 : ndarray
        2D projection data.

    proj2 : ndarray
        2D projection data.

    tol : scalar, optional
        Subpixel accuracy

    rotc_guess : float, optional
        Initual guess value for the rotation center

    Returns
    -------
    float
        Rotation axis location.
    """
    # Shift both projections so the guessed center sits at the image center
    # (no-op when rotc_guess is None).
    imgshift = 0.0 if rotc_guess is None else rotc_guess - (proj1.shape[1]-1.0)/2.0
    proj1 = ndimage.shift(proj1, [0, -imgshift], mode='constant', cval=0)
    proj2 = ndimage.shift(proj2, [0, -imgshift], mode='constant', cval=0)
    # create reflection of second projection
    proj2 = np.fliplr(proj2)
    # Determine shift between images using scikit-image pcm; smaller tol
    # means a larger upsampling factor, i.e. finer subpixel precision.
    shift = register_translation(proj1, proj2, upsample_factor=1.0/tol)
    # Compute center of rotation as the center of first image and the
    # registered translation with the second image
    center = (proj1.shape[1] + shift[0][1] - 1.0)/2.0
    return center + imgshift
4bc9a25bb6bd041d9d5cb8ae46bfd91dfa7c97ff
3,644,028
def emce_comparison(nus, n_reps=100):
    """Simulation comparing ECME algorithm with M-estimates.

    We compare the estimates obtained by the ECME algorithm against two
    Huber M-estimates with tuning parameters 1 and 4.

    Args:
        nus, iter: Iterator of values for the degrees of freedom.
        n_reps, int (default 100): Number of times experiment is repeated.

    Return:
        Results of the simulation recording average percentage errors.

    NOTE(review): name looks like a typo for "ecme_comparison"; kept
    because callers reference it.
    """
    models = ['ecme', 'huber1', 'huber4']
    # errors[model][coefficient] accumulates one mean error per nu.
    errors = {model: {'a': [], 'b': []} for model in models}
    for nu in nus:
        tmp_errors = {model: {'a': [], 'b': []} for model in models}
        for _ in range(n_reps):
            # Random ground truth: intercept a, slope b, noise variance.
            a = 10*np.random.randn()
            b = 10*np.random.randn()
            sigma2 = 2*np.random.rand()
            df = simulation.simulate_data(100, b, a, nu, sigma2)
            y, X = from_dataframe(df)
            model = ECME(y, X, compare=True, use_sigma2=True)
            model.fit()
            # slope: absolute percentage errors per estimator
            tmp_errors['ecme']['b'].append(np.abs((model.B[0]-b)/b))
            tmp_errors['huber1']['b'].append(np.abs((model.B_huber_1[0]-b)/b))
            tmp_errors['huber4']['b'].append(np.abs((model.B_huber_4[0]-b)/b))
            # intercept
            tmp_errors['ecme']['a'].append(abs((model.B[1] - a)/a))
            tmp_errors['huber1']['a'].append(np.abs((model.B_huber_1[1]-a)/a))
            tmp_errors['huber4']['a'].append(np.abs((model.B_huber_4[1]-a)/a))
        # compute average errors over the n_reps repetitions for this nu
        for name in errors:
            for coeff in errors[name]:
                errors[name][coeff].append(np.mean(tmp_errors[name][coeff]))
    return errors
1d79ae528d8bffb694e1718f037d880f46d8c597
3,644,029
def seconds(seconds_since_epoch: int) -> date:
    """Convert a seconds offset from epoch to a date.

    Args:
        seconds_since_epoch (int): The second offset from epoch.

    Returns:
        date: The date the offset represents.
    """
    offset = timedelta(seconds=seconds_since_epoch)
    return EPOCH + offset
1dd1559e3f971922bad3d618ff4db8b1e0012c42
3,644,031
def check_presence(user):
    """
    Gets user presence information from Slack ("active" or "away")

    :param user: The identifier of the specified user
    :return: True if user is currently active, False if user is away,
        None if no Slack token is configured or the API call fails
    """
    if not settings.SLACK_TOKEN:
        return None
    client = WebClient(token=settings.SLACK_TOKEN)
    try:
        response = client.users_getPresence(user=user)
        # NOTE(review): assert statements are stripped under `python -O`;
        # consider explicit checks if these invariants matter in production.
        assert response['ok'] is True
        if response['presence'] == 'active':
            return True
        else:
            return False
    except SlackApiError as e:
        assert e.response['ok'] is False
        return None
acdeae9b80613edcfbfb05ea594260d1f99473ff
3,644,032
def adjust_position_to_boundaries(positions, bounds, tolerance=DEFAULT_TOLERANCE):
    """
    Update boid positions when crossing a boundary (toroid boundary condition).

    :param positions: (N, 2) array of (x, y) positions, modified in place
    :param bounds: (xmin, xmax, ymin, ymax) boundaries
    :param tolerance: optional tolerance for being on boundary. by default
        set to DEFAULT_TOLERANCE (in constants.py)
    :return: the adjusted positions array
    """
    # np.where requires either one or three arguments; the previous
    # two-argument form (np.where(cond, value)) raised at runtime.
    # The three-argument form keeps unaffected coordinates unchanged.
    # NOTE(review): the wrap offsets bounds[1]/bounds[3] equal the domain
    # widths only when xmin == ymin == 0 — confirm against the callers.
    positions[:, 0] = np.where(positions[:, 0] < (bounds[0] - tolerance),
                               positions[:, 0] + bounds[1], positions[:, 0])
    positions[:, 0] = np.where(positions[:, 0] > (bounds[1] - tolerance),
                               positions[:, 0] - bounds[1], positions[:, 0])
    positions[:, 1] = np.where(positions[:, 1] < (bounds[2] - tolerance),
                               positions[:, 1] + bounds[3], positions[:, 1])
    positions[:, 1] = np.where(positions[:, 1] > (bounds[3] + tolerance),
                               positions[:, 1] - bounds[3], positions[:, 1])
    return positions
3354a0e19d085e0e02595866deac7a035b364e58
3,644,034
def residual_mlp_layer(x_flat, intermediate_size, initializer_range=0.02, hidden_dropout_prob=0.1):
    """
    Transformer-style MLP block with a residual connection:
    layer_norm(x + dropout(dense(gelu(dense(layer_norm(x)))))).

    :param x_flat: The attention output. It should be [batch_size*seq_length, dim]
    :param intermediate_size: the hidden projection. By default this is the
        input_dim * 4.

        in the original GPT we would return layer_norm(x_norm + h1) rather
        than layer_norm(x + h1)
    :param initializer_range: stddev for the kernel initializer.
    :param hidden_dropout_prob: dropout rate applied to the output projection.
    :return: [batch_size*seq_length, dim] tensor.
    """
    batch_size_seq_length, hidden_size = get_shape_list(x_flat, expected_rank=2)
    x_norm = layer_norm(x_flat, name='mlp_ln0')
    intermediate_output = tf.layers.dense(
        x_norm,
        intermediate_size,
        activation=gelu,
        kernel_initializer=create_initializer(initializer_range),
        name='intermediate',
    )
    output_for_residual = tf.layers.dense(
        intermediate_output,
        hidden_size,
        name='output',
        kernel_initializer=create_initializer(initializer_range))
    output_for_residual = dropout(output_for_residual, hidden_dropout_prob)
    # Residual uses the *un-normalised* input (see note in docstring).
    layer_output = layer_norm(x_flat + output_for_residual, name='mlp_ln1')
    return layer_output
03e04c074080b54c4a8bc71a0fbef9e6e025f71f
3,644,035
def _delete_project_repo(repo_name):
    """Delete the specified CodeCommit repo from AWS.

    :param repo_name: name of the repository to delete
    :return: the raw AWS API response dict
    """
    codecommit = boto3.client('codecommit')
    return codecommit.delete_repository(repositoryName=repo_name)
8410302fc419cbe9c13b9f73ef6af63f588ede76
3,644,036
def score_items(X, U, mu, scoremethod='lowhigh', missingmethod='none', feature_weights=[]): """score_items(X, U, scoremethod, missingmethod, feature_weights) Calculate the score (reconstruction error) for every item in X, with respect to the SVD model in U and mean mu for uninteresting items. 'scoremethod' indicates which residual values count towards the interestingness score of each item: - 'low': negative residuals - 'high': positive residuals - 'lowhigh': both 'missingmethod' indicates how to handle missing (NaN) values: - 'zero': set missing values to zero - 'ignore': ignore missing values following Brand (2002) - 'none': assert nothing is missing (NaN). Die horribly if not true. 'feature_weights' influence how much each feature contributes to the score. Return an array of item reconstruction scores and their reprojections. """ # Use U to model and then reconstruct the data in X. # 1. Project all data in X into space defined by U, # then reconstruct it. if missingmethod.lower() != 'ignore': # All missing values should have been replaced with 0, # or non-existent. # 1a. Subtract the mean and project onto U proj = np.dot(U.T, (X - mu)) # 1b. Reconstruct by projecting back up and adding mean reproj = np.dot(U, proj) + mu # 1c. Compute the residual #print 'X:', X.T #print 'reproj:', reproj.T err = X - reproj #print 'err:', err.T #raw_input() else: # Missing method must be 'ignore' (Brand 2002) (err, reproj) = compute_error_with_missing(X, U, mu) # 2. Compute reconstruction error if scoremethod == 'low': # Blank out all errors > 0 err[err>0] = 0 elif scoremethod == 'high': # Blank out all errors < 0 err[err<0] = 0 else: # default, count everything pass # Weight features if requested if feature_weights != []: for i in range(len(feature_weights)): err[i,:] = err[i,:] * feature_weights[i] if missingmethod.lower() == 'ignore': # Only tally error for observed features. 
# This means that items with missing values are not penalized # for those features, which is probably the best we can do. scores = np.nansum(np.array(np.power(err, 2)), axis=0) else: scores = np.sum(np.array(np.power(err, 2)), axis=0) #print 'scores:', scores #print 'reproj:', reproj #raw_input() return (scores, reproj)
9355665670ff7b3a49d0abeacc9cfbaab8d586b1
3,644,037
def get_output_specs(output):
    """ Get the OpenAPI specifications of a SED output

    Args:
        output (:obj:`Output`): output

    Returns:
        :obj:`dict` with schema `SedOutput`

    Raises:
        BadRequestException: if ``output`` is not a Report, Plot2D or Plot3D.
    """
    if isinstance(output, Report):
        specs = {
            '_type': 'SedReport',
            'id': output.id,
            'dataSets': list(map(get_data_set_specs, output.data_sets)),
        }
        if output.name:
            specs['name'] = output.name
    elif isinstance(output, Plot2D):
        specs = {
            '_type': 'SedPlot2D',
            'id': output.id,
            'curves': list(map(get_curve_specs, output.curves)),
            'xScale': None,
            'yScale': None,
        }
        if output.name:
            specs['name'] = output.name
        # A plot-level axis scale is only kept if every curve agrees; any
        # mismatch resets it to None, which then falls back to linear below.
        if output.curves:
            x_scale = output.curves[0].x_scale
            y_scale = output.curves[0].y_scale
        else:
            x_scale = None
            y_scale = None
        for curve in output.curves:
            if curve.x_scale != x_scale:
                x_scale = None
            if curve.y_scale != y_scale:
                y_scale = None
        specs['xScale'] = (x_scale or AxisScale.linear).value
        specs['yScale'] = (y_scale or AxisScale.linear).value
    elif isinstance(output, Plot3D):
        specs = {
            '_type': 'SedPlot3D',
            'id': output.id,
            'surfaces': list(map(get_surface_specs, output.surfaces)),
            'xScale': None,
            'yScale': None,
            'zScale': None,
        }
        if output.name:
            specs['name'] = output.name
        # Same agreement rule as Plot2D, extended to the z axis.
        if output.surfaces:
            x_scale = output.surfaces[0].x_scale
            y_scale = output.surfaces[0].y_scale
            z_scale = output.surfaces[0].z_scale
        else:
            x_scale = None
            y_scale = None
            z_scale = None
        for surface in output.surfaces:
            if surface.x_scale != x_scale:
                x_scale = None
            if surface.y_scale != y_scale:
                y_scale = None
            if surface.z_scale != z_scale:
                z_scale = None
        specs['xScale'] = (x_scale or AxisScale.linear).value
        specs['yScale'] = (y_scale or AxisScale.linear).value
        specs['zScale'] = (z_scale or AxisScale.linear).value
    else:
        raise BadRequestException(
            title='Outputs of type `{}` are not supported.'.format(output.__class__.__name__),
            instance=NotImplementedError(),
        )
    return specs
26617aa635fd97408e9d27e2972bcd9d7bd4340a
3,644,038
def logggnfw_exact(x, x0, y0, m1, m2, alpha):
    """Exact form of a gNFW-potential-inspired log profile.

    NOTE: an overflow warning is easily raised by somewhat large values of
    m1, m2, and the base (1 + e^alpha).
    """
    base = 1. + np.exp(alpha)
    shifted = x - x0
    # log_base( (b^x)^m1 * (1 + b^x)^(m2 - m1) ), via natural logs.
    numerator = np.log(
        (base ** shifted) ** m1 * (1 + base ** shifted) ** (m2 - m1)
    )
    return numerator / np.log(base) + y0 + (m1 - m2) / np.log2(base)
f6b1c5511b2bfe337402b2342484d1b642329f00
3,644,039
def is_lepton(pdgid):
    """Does this PDG ID correspond to a lepton?"""
    if _extra_bits(pdgid) > 0:
        return False
    # Lepton fundamental IDs occupy the 11..18 range; _fundamental_id is a
    # pure lookup, so a single call suffices.
    return 11 <= _fundamental_id(pdgid) <= 18
086d7cebee19cfb7a91d4fc09417f168c53942de
3,644,041
def complex_fields_container(real_field, imaginary_field, server=None):
    """Create a fields container holding one real and one imaginary field
    for a single time set.

    Parameters
    ----------
    real_field : Field
        Real :class:`ansys.dpf.core.Field` entity to add to the fields container.
    imaginary_field : Field
        Imaginary :class:`ansys.dpf.core.Field` entity to add to the fields container.
    server : ansys.dpf.core.server, optional
        Server with the channel connected to the remote or local instance.
        The default is ``None``, in which case an attempt is made to use the
        global server.

    Returns
    -------
    fields_container : FieldsContainer
        Fields container with two fields (real and imaginary).
    """
    container = FieldsContainer(server=server)
    container.labels = ["complex"]
    # Label value 0 marks the real part, 1 the imaginary part.
    for label_value, field in ((0, real_field), (1, imaginary_field)):
        container.add_field({"complex": label_value}, field)
    return container
f20cee35cff2d86801446faca4e60777c3fab429
3,644,043
def get_time_slots(s: pd.Series, time_interval: str = 'daily'):
    """Convert a series of timestamps to period "time slots".

    :param s: series of timestamp-like values
    :param time_interval: one of 'hourly', 'daily', 'weekly', 'monthly',
        'quarterly', 'yearly' (case-insensitive)
    :raises ValueError: if ``time_interval`` is not one of the above
    :return: series of pd.Period values at the requested frequency
    """
    valid_intervals = (
        'hourly', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly')
    if time_interval.lower() not in valid_intervals:
        raise ValueError
    # The pandas period alias is simply the first letter, upper-cased
    # ('daily' -> 'D', 'weekly' -> 'W', ...).
    freq = time_interval[0].upper()
    return pd.to_datetime(s).dt.to_period(freq)
f67c076fc3f946e4b41df9d6d79dac6f19634ea5
3,644,044
def build_optimising_metaclass(
    builtins=None, builtin_only=False, stoplist=(), constant_fold=True,
    verbose=False
):
    """Return a automatically optimising metaclass for use as __metaclass__.

    All parameters are captured in a closure and forwarded verbatim to
    ``optimise_all`` for each class created with the returned metaclass.

    NOTE(review): ``__metaclass__`` is the Python 2 hook; under Python 3
    use ``class C(metaclass=...)`` instead.
    """
    class _OptimisingMetaclass(type):
        def __init__(cls, name, bases, dict):
            super(_OptimisingMetaclass, cls).__init__(name, bases, dict)
            # Optimise the freshly created class in place.
            optimise_all(
                cls, builtins, builtin_only, stoplist, constant_fold, verbose
            )
    return _OptimisingMetaclass
678454e3c45b0f4ccbaef77427776485ddb07815
3,644,045
def get_ensembl_id(hgnc_id):
    """Return the Ensembl ID corresponding to the given HGNC ID.

    Parameters
    ----------
    hgnc_id : str
        The HGNC ID to be converted. Note that the HGNC ID is a number
        that is passed as a string. It is not the same as the HGNC
        gene symbol.

    Returns
    -------
    ensembl_id : str
        The Ensembl ID corresponding to the given HGNC ID, or ``None``
        if the ID is not present in the module-level ``ensembl_ids``
        mapping (``dict.get`` semantics).
    """
    return ensembl_ids.get(hgnc_id)
d815259b553c022f5400b34e5ae5f9ddaff6193e
3,644,046
import torch


def predict(model, dataloader):
    """Run inference over a dataloader.

    Args:
        model: torch module producing at least two output columns; column 1
            is treated as the positive-class logit.
        dataloader: iterable of (inputs, labels) batches.

    Returns:
        Tuple of numpy arrays: (true labels, predicted probabilities).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    labels = []
    probs = []
    # no_grad: avoids tracking gradients during inference; without it the
    # final .numpy() raises ("Can't call numpy() on Tensor that requires
    # grad") and activations are needlessly retained.
    with torch.no_grad():
        for inputs, label in dataloader:
            inputs = inputs.to(device)
            label = label.to(device)
            labels.append(label)
            outputs = model(inputs)
            # Probability of the positive class (column 1).
            probs.append(torch.sigmoid(outputs[:, 1]))

    labels = torch.cat(labels).cpu().numpy()
    probs = torch.cat(probs).cpu().numpy()
    return labels, probs
1e4b6e1f72127174a8bdbc693665ace8cbe8e4af
3,644,047
import re


def _ProcessMemoryAccess(instruction, operands):
    """Make sure that memory access is valid and return precondition required.

    (only makes sense for 64-bit instructions)

    Args:
        instruction: Instruction tuple
        operands: list of instruction operands as strings, for example
            ['%eax', '(%r15,%rbx,1)']
    Returns:
        Condition object representing precondition required for memory access
        (if it's present among operands) to be valid.
    Raises:
        SandboxingError if memory access is invalid.
    """
    precondition = Condition()
    for op in operands:
        m = re.match(_MemoryRE() + r'$', op)
        if m is not None:
            # Segment-prefixed accesses are presumed already excluded by the
            # caller's grammar — confirm against _MemoryRE.
            assert m.group('memory_segment') is None
            base = m.group('memory_base')
            index = m.group('memory_index')
            allowed_bases = ['%r15', '%rbp', '%rsp', '%rip']
            if base not in allowed_bases:
                raise SandboxingError(
                    'memory access only is allowed with base from %s' % allowed_bases,
                    instruction)
            if index is not None:
                if index == '%riz':
                    # %riz is the AT&T pseudo "zero index": always safe.
                    pass
                elif index in REGS64:
                    if index in ['%r15', '%rsp', '%rbp']:
                        raise SandboxingError(
                            '%s can\'t be used as index in memory access' % index,
                            instruction)
                    else:
                        # At most one restricted-index memory operand is
                        # expected per instruction.
                        assert precondition == Condition()
                        precondition = Condition(restricted=index)
                else:
                    raise SandboxingError(
                        'unrecognized register is used for memory access as index',
                        instruction)
    return precondition
922489dca706ba5c88132f9676c7b99bfc966947
3,644,048
def minimizeMeshDimensions(obj, direction, step, epsilon):
    """
    Iteratively rotate towards an orientation that minimises the mesh
    dimensions of *obj*, halving the step and reversing direction on each
    overshoot until the step drops below *epsilon*.

    Args:
        obj: object being oriented (passed through to compareOrientation).
        direction: +1/-1 search direction (sign multiplier for the step).
        step: current angular step size.
        epsilon: recursion stops once step/2 <= epsilon.

    Returns:
        Accumulated rotation (sum of all signed steps taken).

    NOTE(review): compareOrientation's (before, after) semantics are not
    visible here; the description above is inferred and should be confirmed.
    """
    stepsum = 0
    # Keep stepping while the orientation keeps improving; stop on the
    # first step that makes things worse (before < after).
    while True:
        before, after = compareOrientation(obj, direction * step)
        if before < after:
            # bpy.ops.transform.rotate(value=-1.0*direction*step, axis=(0, 0, 1))
            # bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
            break
        else:
            stepsum += direction * step
    # Overshot: halve the step and search back in the opposite direction.
    step = step / 2
    if step > epsilon:
        print(stepsum)
        stepsum += minimizeMeshDimensions(obj, -direction, step, epsilon)
    return stepsum
ba1f7e2cf66e6665042307b9fe50c7728d68157d
3,644,049
from importlib import import_module


def gimme_dj(mystery_val: int, secret_val: int) -> str:
    """Play that funky music."""
    # If youre worried about what this is doing, and NEED TO KNOW. Check this gist:
    # https://gist.github.com/SalomonSmeke/2dfef1f714851ae8c6933c71dad701ba
    # its nothing evil. just an inside joke for my good buddy Brian.
    # NOTE(review): deliberately obfuscated — module and attribute names are
    # decoded from integer offsets at runtime, and the module-level global
    # ``B`` is read. Left byte-for-byte intact on purpose.
    hey: str = getattr(
        import_module("".join(chr(c + secret_val) for c in [29, 28, 46, 32, -15, -17])),
        "".join(
            chr(c - (mystery_val % secret_val))
            for c in [106, 107, 105, 117, 106, 107, 104, 127, 122, 107, 121]
        ),
    )(B)
    brian: str = getattr(
        hey, "".join(chr(c - (503 - mystery_val)) for c in [183, 184, 182, 194, 183, 184])
    )("".join(chr(c) for c in [117, 116, 102, 45, 56]))
    return brian
e14680d5a73e3ea3a3651bbeccd8af18a07a5907
3,644,050
def pluecker_from_verts(A,B): """ See Hartley & Zisserman (2003) p. 70 """ if len(A)==3: A = A[0], A[1], A[2], 1.0 if len(B)==3: B = B[0], B[1], B[2], 1.0 A=nx.reshape(A,(4,1)) B=nx.reshape(B,(4,1)) L = nx.dot(A,nx.transpose(B)) - nx.dot(B,nx.transpose(A)) return Lmatrix2Lcoords(L)
7af9f779e1c00ffeee035bc76a8333d36e2ed5be
3,644,051
def MAP_score(source_id, target_labels, prediction): """ Function to compute the Mean Average Precision score of a given ranking. Args: source_id (array): Array containing the source_id of our given queries. target_labels (array): Array containing the target labels of our query-document testset. prediction (array): Array containing the confidences of our predicitons. Returns: MAP (integer): MAP score of our ranking. """ # create a target dataframe with the id of query sentences, target_labels and the predicted confidence result = pd.DataFrame() result['source_id'] = source_id result['Translation'] = target_labels result['probabilities'] = [x[1] for x in prediction] # rank by the source_id and get the ranking for each of the queries for all the documents result['rank'] = result.groupby('source_id')['probabilities'].rank(method='average', ascending=False) # create a new dataframe with only the right translations to get their rankings ranks = result[result['Translation'] == 1].reset_index() # compute the MAP score by first summing all inverses and dividing by the amount of queries sum_inverse = 0 for i in range(0, len(ranks)): sum_inverse += 1 / ranks['rank'][i] MAP = 1 / len(ranks) * sum_inverse return MAP
ad279df4b28bceff52af98d6f7e71f34b564db55
3,644,052
def get_model_config(model): """Returns hyper-parameters for given mode""" if model == 'maml': return 0.1, 0.5, 5 if model == 'fomaml': return 0.1, 0.5, 100 return 0.1, 0.1, 100
dcdfb3c00026a172b22611ad3203a7c32d8e59d7
3,644,053
def find_longest_substring(s: str, k: int) -> str: """ Speed: ~O(N) Memory: ~O(1) :param s: :param k: :return: """ # longest substring (found) lss = "" # current longest substring c_lss = "" # current list of characters for the current longest substring c_c = [] i = 0 for i, c in enumerate(s): # current character is in list of characters of the current substring ? if c in c_c: # if yes, increase/update current substring c_lss += c else: # else # Can we add the new character in the current substring ? if len(c_c) < k: # if yes: increase/updating the current substring c_lss += c else: # else => compare the current result (substring) & start a new substring research # compare the current substring with the longest substring found as far # Current substring is larger ? if len(c_lss) > len(lss): # if yes: update the longest substring lss = c_lss # in any case => start a new substring research # first element is: the last character of the previous current substring c_c = [c_lss[-1]] c_lss = c_lss[-1] + c # Early exit: at this moment, can we found a larger substring ? if (len(s) - i + len(c_lss)) <= len(lss): break # add the new character in list of current character for substring c_c += [c] # perform a last comparaison for current substring if len(c_lss) > len(lss): lss = c_lss # print(len(s) - i - 1) return lss
78936d140ea1e54945c6b4dd849b38f0c5604a36
3,644,055
def _fixTool2(scModel,gopLoader): """ :param scModel: :param gopLoader: :return: @type scModel: ImageProjectModel """ def replace_tool(tool): return 'jtui' if 'MaskGenUI' in tool else tool modifier_tools = scModel.getGraph().getDataItem('modifier_tools') if modifier_tools is not None: scModel.getGraph().setDataItem('modifier_tools', [replace_tool(x) for x in modifier_tools]) creator_tool= scModel.getGraph().getDataItem('creator_tool') scModel.getGraph().setDataItem('creator_tool', replace_tool(creator_tool))
3eb3bf8a47514a28c2e699a2eeefb084f9f7923b
3,644,056
from io import StringIO def mol_view(request): """Function to view a 2D depiction of a molecule -> as PNG""" my_choice = request.GET['choice'].split("_")[0] try: mol = Chem.MolFromSmiles(str(InternalIDLink.objects.filter(internal_id=my_choice)[0].mol_id.smiles)) except IndexError: mol = Chem.MolFromSmiles(str(Molecule.objects.get(pk=my_choice).smiles)) image = Draw.MolToImage(mol) output = StringIO.StringIO() image.save(output, format="PNG") contents = output.getvalue() return HttpResponse(contents)
91f202b34fe63c8b89e1250bb54222120410f9c2
3,644,057
def rotation_matrix_about(axis, theta): """Return the rotation matrix associated with counterclockwise rotation about the given axis by theta radians. Taken from: https://stackoverflow.com/a/6802723 """ if np.shape(axis) != (3,): raise ValueError("Shape of `axis` must be (3,)!") scalar = True if np.ndim(theta) > 1: raise ValueError("Only 0 or 1 dimensional values for `theta` are supported!") elif np.ndim(theta) == 1: theta = np.atleast_2d(theta).T scalar = False axis = np.asarray(axis) axis = axis / np.sqrt(np.dot(axis, axis)) a = np.cos(theta / 2.0).squeeze() # b, c, d = - axis * np.sin(theta / 2.0) temp = - axis * np.sin(theta / 2.0) if not scalar: temp = temp.T b, c, d = temp aa, bb, cc, dd = a * a, b * b, c * c, d * d bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d rot = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) if not scalar: rot = rot.T return rot
f65fdc6e40ad7712521fbb6db662827401f82aca
3,644,058
def zc_rules(): """catch issues with zero copy streaming""" return ( case("SSTableReader"), rule( capture( r"Could not recreate or deserialize existing bloom filter, continuing with a pass-through bloom filter but this will significantly impact reads performance" ), update( event_product="zcs", event_category="streaming", event_type="bloom_filter", ), ), )
e4847d95b0565d5cb9213cfca9e8e3f28657041c
3,644,059
import re def name_convert_to_camel(name: str) -> str: """下划线转驼峰""" contents = re.findall('_[a-z]+', name) for content in set(contents): name = name.replace(content, content[1:].title()) return name
109a1035a3efa98861b6a419206823b1114268e2
3,644,060
def triangle_as_polynomial(nodes, degree): """Convert ``nodes`` into a SymPy polynomial array :math:`B(s, t)`. Args: nodes (numpy.ndarray): Nodes defining a B |eacute| zier triangle. degree (int): The degree of the triangle. This is assumed to correctly correspond to the number of ``nodes``. Returns: Tuple[sympy.Symbol, sympy.Symbol, sympy.Matrix]: Triple of * The symbol ``s`` used in the polynomial * The symbol ``t`` used in the polynomial * The triangle :math:`B(s, t)`. """ # NOTE: We import SymPy at runtime to avoid the import-time cost for users # that don't want to do symbolic computation. The ``sympy`` import is # a tad expensive. import sympy # pylint: disable=import-outside-toplevel nodes_sym = to_symbolic(nodes) s, t = sympy.symbols("s, t") b_polynomial = nodes_sym * triangle_weights(degree, s, t) b_polynomial.simplify() factored = [value.factor() for value in b_polynomial] return s, t, sympy.Matrix(factored).reshape(*b_polynomial.shape)
20c0bc7021673ac375018a387926ae25bdfda2e5
3,644,061
import decimal def as_decimal(dct): """Decodes the Decimal datatype.""" if '__Decimal__' in dct: return decimal.Decimal(dct['__Decimal__']) return dct
d25b3ff73d7559a9018666d5f2cd189e6503a268
3,644,062
def input_layer(features, feature_columns, weight_collections=None, trainable=True, cols_to_vars=None, cols_to_output_tensors=None): """Returns a dense `Tensor` as input layer based on given `feature_columns`. Generally a single example in training data is described with FeatureColumns. At the first layer of the model, this column oriented data should be converted to a single `Tensor`. Example: ```python price = numeric_column('price') keywords_embedded = embedding_column( categorical_column_with_hash_bucket("keywords", 10K), dimensions=16) columns = [price, keywords_embedded, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) for units in [128, 64, 32]: dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu) prediction = tf.layers.dense(dense_tensor, 1) ``` Args: features: A mapping from key to tensors. `_FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depends on corresponding `_FeatureColumn`. feature_columns: An iterable containing the FeatureColumns to use as inputs to your model. All items should be instances of classes derived from `_DenseColumn` such as `numeric_column`, `embedding_column`, `bucketized_column`, `indicator_column`. If you have categorical features, you can wrap them with an `embedding_column` or `indicator_column`. weight_collections: A list of collection names to which the Variable will be added. Note that variables will also be added to collections `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). cols_to_vars: If not `None`, must be a dictionary that will be filled with a mapping from `_FeatureColumn` to list of `Variable`s. 
For example, after the call, we might have cols_to_vars = {_EmbeddingColumn( categorical_column=_HashedCategoricalColumn( key='sparse_feature', hash_bucket_size=5, dtype=tf.string), dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10), <tf.Variable 'some_variable:1' shape=(5, 10)]} If a column creates no variables, its value will be an empty list. cols_to_output_tensors: If not `None`, must be a dictionary that will be filled with a mapping from '_FeatureColumn' to the associated output `Tensor`s. Returns: A `Tensor` which represents input layer of a model. Its shape is (batch_size, first_layer_dimension) and its dtype is `float32`. first_layer_dimension is determined based on given `feature_columns`. Raises: ValueError: if an item in `feature_columns` is not a `_DenseColumn`. """ return _internal_input_layer( features, feature_columns, weight_collections=weight_collections, trainable=trainable, cols_to_vars=cols_to_vars, cols_to_output_tensors=cols_to_output_tensors)
89fda325ec1afb98a772d7238386471bf4484141
3,644,063
def log_sum_exp(x): """Utility function for computing log_sum_exp while determining This will be used to determine unaveraged confidence loss across all examples in a batch. Args: x (Variable(tensor)): conf_preds from conf layers """ log_reduce_sum = P.ReduceSum() log = P.Log() exp = P.Exp() x_max = max(x.data) return log(log_reduce_sum(exp(x - x_max), 1)) + x_max
72a39a81fa3959e73c096732a86e843b5330e27d
3,644,064
def prepareRepoCharts(url, name, auths): """ NOTE: currently not support git """ charts_info, charts_info_hash = _prepareHelmRepoPath(url, name, auths) return charts_info, charts_info_hash
7d2a6af1cae019020cd0921155fcdc749585d32c
3,644,066
def num_ini_spaces(s): """Return the number of initial spaces in a string. Note that tabs are counted as a single space. For now, we do *not* support mixing of tabs and spaces in the user's input. Parameters ---------- s : string Returns ------- n : int """ ini_spaces = ini_spaces_re.match(s) if ini_spaces: return ini_spaces.end() else: return 0
9870aa42020b56f765f0ed74f73edda21b1786b1
3,644,067
def make_filename_template(schema, **kwargs): """Create codeblocks containing example filename patterns for a given datatype. Parameters ---------- schema : dict The schema object, which is a dictionary with nested dictionaries and lists stored within it. kwargs : dict Keyword arguments used to filter the schema. Example kwargs that may be used include: "suffixes", "datatypes", "extensions". Returns ------- codeblock : str A multiline string containing the filename templates for file types in the schema, after filtering. """ schema = filter_schema(schema, **kwargs) entity_order = schema["rules"]["entities"] paragraph = "" # Parent folders paragraph += "{}-<{}>/\n\t[{}-<{}>/]\n".format( schema["objects"]["entities"]["subject"]["entity"], schema["objects"]["entities"]["subject"]["format"], schema["objects"]["entities"]["session"]["entity"], schema["objects"]["entities"]["session"]["format"], ) for datatype in schema["rules"]["datatypes"].keys(): paragraph += "\t\t{}/\n".format(datatype) # Unique filename patterns for group in schema["rules"]["datatypes"][datatype]: string = "\t\t\t" for ent in entity_order: ent_format = "{}-<{}>".format( schema["objects"]["entities"][ent]["entity"], schema["objects"]["entities"][ent].get("format", "label") ) if ent in group["entities"]: if group["entities"][ent] == "required": if len(string.strip()): string += "_" + ent_format else: # Only the first entity doesn't need an underscore string += ent_format else: if len(string.strip()): string += "[_" + ent_format + "]" else: # Only the first entity doesn't need an underscore string += "[" + ent_format + "]" # In cases of large numbers of suffixes, # we use the "suffix" variable and expect a table later in the spec if len(group["suffixes"]) > 5: suffix = "_<suffix>" string += suffix strings = [string] else: strings = [ string + "_" + suffix for suffix in group["suffixes"] ] # Add extensions full_strings = [] extensions = group["extensions"] extensions = [ ext if ext != "*" else 
".<extension>" for ext in extensions ] extensions = utils.combine_extensions(extensions) if len(extensions) > 5: # Combine exts when there are many, but keep JSON separate if ".json" in extensions: extensions = [".<extension>", ".json"] else: extensions = [".<extension>"] for extension in extensions: for string in strings: new_string = string + extension full_strings.append(new_string) full_strings = sorted(full_strings) if full_strings: paragraph += "\n".join(full_strings) + "\n" paragraph = paragraph.rstrip() codeblock = "Template:\n```Text\n" + paragraph + "\n```" codeblock = codeblock.expandtabs(4) return codeblock
bb1d8eb776d8e248ca7fb67167594639a02c92cb
3,644,068
def getLanguageLevel() -> dict: """ Takes the user input and returns the found documents as dictionary. :text: String :language: String :return: Dictionary """ text: str = request.params.get('text') language: str = request.params.get('language') # check API Key if str(request.params.get('key')) != API_KEY: response.status = 401 return { "error": "API-KEY is wrong or missing. See https://github.com/elaisasearch/categorizer/blob/master/README.md for more information." } if language == "en": return { "result": categorizeText(text) } # other languages will follow in the future else: return { "error": "'{}' currently isn't supported. Please use 'en' for English as language. Thank you.".format(language) }
967b4244f7406c82715bdfb112cd82c652c9c68e
3,644,070
import oci.exceptions def list_networks(**kwargs): """Lists all networks of the given compartment Args: **kwargs: Additional options Keyword Args: public_subnet (bool): Whether only public or private subnets should be considered compartment_id (str): OCID of the parent compartment. config (object): An OCI config object or None. return_formatted (bool): If set to true, a list object is returned. check_privileges (bool): Checks if the user has privileges for the subnet Returns: a network object """ public_subnet = kwargs.get("public_subnet") compartment_id = kwargs.get("compartment_id") config = kwargs.get("config") return_formatted = kwargs.get("return_formatted", True) check_privileges = kwargs.get("check_privileges", False) # Get the active config and compartment try: config = configuration.get_current_config(config=config) compartment_id = configuration.get_current_compartment_id( compartment_id=compartment_id, config=config) # Create VirtualNetworkClient virtual_network = core.get_oci_virtual_network_client( config=config) # List the virtual networks vcns = virtual_network.list_vcns( compartment_id=compartment_id).data # Filter out all sub-nets that are not conforming to the # public_subnet options if public_subnet is not None: # Loop over VCNs to see if access is granted good_vcns = [] for vcn in vcns: try: if network_has_subnet( network=vcn, compartment_id=compartment_id, config=config, public_subnet=public_subnet, check_privileges=check_privileges): good_vcns.append(vcn) except oci.exceptions.ServiceError as e: pass vcns = good_vcns if return_formatted: return format_network_listing(vcns) else: return oci.util.to_dict(vcns) except ValueError as e: print(f"ERROR: {str(e)}") return
32a816b595d45102a393be8a548f48414509f865
3,644,071
def ed_affine_to_extended(pt): """Map (x, y) to (x : y : x*y : 1).""" new_curve = EllipticCurve(pt.curve, ED_EXT_HOM_PROJ, Edwards_ExtProj_Arithm) return new_curve((pt.x, pt.y, pt.x * pt.y, new_curve.field(1)))
ee949c7c0487fb580d79764e3f0c10d2a2080943
3,644,072
import joblib def do_setup(experiment_folder, path_to_additional_args): """ Setup Shell Scripts for Experiment """ additional_args = joblib.load(path_to_additional_args) # Setup Data logger.info("Setting Up Data") data_args = setup_train_test_data(experiment_folder, **additional_args) # Setup logger.info("Saving Experiment Options per ID") sampler_args = additional_args['sampler_args'] arg_list = dict_product(sampler_args, data_args) options_df = setup_options(experiment_folder, arg_list) return options_df
9489b5abab6335de4c5909d718b5ccb3bcc0f3c7
3,644,074
import requests def getorgadmins(apikey, orgid, suppressprint=False): """ Args: apikey: User's Meraki API Key orgid: OrganizationId for operation to be performed against suppressprint: Returns: """ __hasorgaccess(apikey, orgid) calltype = 'Organization' geturl = '{0}/organizations/{1}/admins'.format(str(base_url), str(orgid)) headers = { 'x-cisco-meraki-api-key': format(str(apikey)), 'Content-Type': 'application/json' } dashboard = requests.get(geturl, headers=headers) # # Call return handler function to parse Dashboard response # result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint) return result
640ca97bf7213b2b0e24190b7f1b6658c53332b6
3,644,075
def calc_recall(TP, FN): """ Calculate recall from TP and FN """ if TP + FN != 0: recall = TP / (TP + FN) else: recall = 0 return recall
8f3513e11f8adad111eee32740c271aad31fbe28
3,644,076
def lookup_last_report_execution(job_type, work_ids=None): """Lookup in the database when the report/job chunk last executed This is the expected table schema from the database (id and timestamp columns are omitted), --------------------------------------------------- | work_id | history | --------------------------------------------------- | 1000 | {"report_A": 2019-01-11 11:22:33, "report_B": 2020-01-12 02:03:44} | | 2000 | {"report_A": 2012-01-11 12:23:33} | --------------------------------------------------- The work_id parameter is expected to be work ids. The reason for naming the parameter work_ids is to support future changes. Args: job_type (str): The name of the job to check execution time for work_ids (list): Specific work ids to check execution time for Returns: last_exec_min (int or None): Largest number of minutes since the last execution for any of the work ids. None if never executed Examples: Looking up the greatest time since work id 1000 executed report_B should be 2 minutes >>> str(datetime.utcnow()) 2020-01-12 02:05:44 >>> lookup_last_report_execution("report_B", [1000]) 2 Looking up the greatest time since work id 1234 executed report_B should be None, as it was never executed >>> print(lookup_last_report_execution("report_B", [1234])) None """ # Create string ready for SQL work_ids_string = ", ".join([str(c) for c in work_ids]) # Query database # This returns a single number that is the latest execution for any of # the work_ids in minutes or a single row containing 99999999 sql = f""" SELECT MAX(IFNULL(MINUTES_SINCE_LAST_EXEC, 99999999)) AS last_exec FROM ( -- Calculate the time since last execution SELECT TIMESTAMPDIFF( MINUTE, STR_TO_DATE( JSON_UNQUOTE( JSON_EXTRACT( history, '$."{job_type}"') ), "%Y-%m-%d %H:%i:%s"), CURRENT_TIMESTAMP() ) AS MINUTES_SINCE_LAST_EXEC FROM StauLatestExecution WHERE workId IN ({work_ids_string}) ) as subq """ with Stau() as queue: rtn = queue._exec(sql, {}) return rtn.get("last_exec", None)
bcc7715d416820dcc9f065b952e0a751255c9929
3,644,077
def get_course_goal_options(): """ Returns the valid options for goal keys, mapped to their translated strings, as defined by theCourseGoal model. """ return {goal_key: goal_text for goal_key, goal_text in GOAL_KEY_CHOICES}
6f8fc2bd812a216abcff6a82107cf28bfc2fcbf4
3,644,078
def to_dataframe(y): """ If the input is not a dataframe, convert it to a dataframe :param y: The target variable :return: A dataframe """ if not isinstance(y, pd.DataFrame): return pd.DataFrame(y) return y
1fc302b1acb264bce5778c9c2349100f799da397
3,644,079
def url_equal(first, second, ignore_scheme=False, ignore_netloc=False, ignore_path=False, ignore_params=False, ignore_query=False, ignore_fragment=False): """ Compare two URLs and return True if they are equal, some parts of the URLs can be ignored :param first: URL :param second: URL :param ignore_scheme: ignore the scheme :param ignore_netloc: ignore the netloc :param ignore_path: ignore the path :param ignore_params: ignore the params :param ignore_query: ignore the query string :param ignore_fragment: ignore the fragment :return: result of comparison """ # <scheme>://<netloc>/<path>;<params>?<query>#<fragment> firstp = urlparse(first) secondp = urlparse(second) return ( (firstp.scheme == secondp.scheme or ignore_scheme) and (firstp.netloc == secondp.netloc or ignore_netloc) and (firstp.path == secondp.path or ignore_path) and (firstp.params == secondp.params or ignore_params) and (firstp.query == secondp.query or ignore_query) and (firstp.fragment == secondp.fragment or ignore_fragment) )
caea2185db83c5938f48e8d2de432c5e74540014
3,644,080
def test_struct(n: cython.int, x: cython.double) -> MyStruct2: """ >>> test_struct(389, 1.64493) (389, 1.64493) >>> d = test_struct.__annotations__ >>> sorted(d) ['n', 'return', 'x'] """ assert cython.typeof(n) == 'int', cython.typeof(n) if is_compiled: assert cython.typeof(x) == 'double', cython.typeof(x) # C double else: assert cython.typeof(x) == 'float', cython.typeof(x) # Python float a = cython.declare(MyStruct2) a[0] = MyStruct(is_integral=True, data=MyUnion(n=n)) a[1] = MyStruct(is_integral=False, data={'x': x}) return a[0].data.n, a[1].data.x
1bf5e97719d80c8327c44bfea66f7ef26b3f7400
3,644,081
def is_oasis_db(): """ Is this likely an OASIS database? Look at the table names to see if we have the more specific ones. Return "yes", "no", or "empty" """ expect = ['qtvariations', 'users', 'examqtemplates', 'marklog', 'qtattach', 'questions', 'guesses', 'exams', 'qtemplates'] tables = public_tables() if len(tables) == 0: return "empty" if set(expect).issubset(tables): return "yes" return "no"
330da79c63afe4905c9469e54d61d5de6a8fa575
3,644,084
def make_segment(segment, discontinuity=False): """Create a playlist response for a segment.""" response = [] if discontinuity: response.append("#EXT-X-DISCONTINUITY") response.extend(["#EXTINF:10.0000,", f"./segment/{segment}.m4s"]), return "\n".join(response)
8419b100409934f902c751734c396bc72d8a6917
3,644,085
def seq_aggregate_with_reducer(x, y): """ Sequencing function that works with the dataframe created by get_normal_frame :param x: :param y: :return: """ res = [] for i in range(0, len(x)): res.append((x[i][0], x[i][1], get_aggregation_func_by_name(x[i][0])(x[i][2], y[i][2]))) return tuple(res)
6faed81fd925656c2984e9d78df3b88e98fcb035
3,644,086
from typing import Any def from_dicts(key: str, *dicts, default: Any = None): """ Returns value of key in first matchning dict. If not matching dict, default value is returned. Return: Any """ for d in dicts: if key in d: return d[key] return default
508febc48fd22d3a23dc0500b0aa3824c99fdbc3
3,644,087
def time_in_words(h, m):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/the-time-in-words/problem

    Render the time given as numerals in words: minute 0 uses "o' clock",
    minutes 1..30 use "past" the current hour, minutes 31..59 use "to"
    the next hour, with special wording for 1, 15, 30 and 45 minutes.

    Args:
        h (int): hour of the day
        m (int): minutes after the hour

    Returns:
        str: string representation of the time
    """
    words = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten",
             "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen",
             "eighteen", "nineteen", "twenty", "twenty one", "twenty two", "twenty three",
             "twenty four", "twenty five", "twenty six", "twenty seven", "twenty eight",
             "twenty nine"]
    # Guard-clause chain over the special minute values.
    if m == 0:
        return "{0} o' clock".format(words[h - 1])
    if m == 1:
        # singular "minute"
        return "{0} minute past {1}".format(words[m - 1], words[h - 1])
    if m == 15:
        return "quarter past {0}".format(words[h - 1])
    if m == 30:
        return "half past {0}".format(words[h - 1])
    if m == 45:
        return "quarter to {0}".format(words[h])
    if m < 30:
        return "{0} minutes past {1}".format(words[m - 1], words[h - 1])
    # Minutes past the half hour count down to the next hour.
    return "{0} minutes to {1}".format(words[59 - m], words[h])
85f2247f01df36ef499105a9940be63eee189100
3,644,088
def majorityElement(nums): """超过三分之一的数,最多不超过两个数""" num1, num2 = -1, -1 count1, count2 = 0, 0 for i in range(len(nums)): curNum = nums[i] if curNum == num1: count1 += 1 elif curNum == num2: count2 += 1 elif count1 == 0: num1 = curNum count1 = 1 elif count2 == 0: num2 = curNum count2 = 1 else: count1 -= 1 count2 -= 2 count1, count2 = 0, 0 for n in nums: if n == num1: count1 += 1 elif n == num2: count2 += 1 print("num1: {}, count1: {}; num2: {}, count2: {}".format(num1, count1, num2, count2)) numLens = len(nums) ret = [] if count1 > numLens//3: ret.append(num1) if count2 > numLens//3: ret.append(num2) return ret
ef71fa445c3bc16bbaf79a1ab4e9548125e71b7b
3,644,089
def calcDensHeight(T,p,z): """ Calculate the density scale height H_rho Parameters ---------- T: vector (float) temperature (K) p: vector (float) of len(T) pressure (pa) z: vector (float) of len(T height (m) Returns ------- Hbar: vector (float) of len(T) density scale height (m) """ dz=np.diff(z) TLayer=(T[1:] + T[0:-1])/2. dTdz=np.diff(T)/np.diff(z) oneOverH=g/(Rd*TLayer) + (1/TLayer*dTdz) Zthick=z[-1] - z[0] oneOverHbar=np.sum(oneOverH*dz)/Zthick Hbar = 1/oneOverHbar return Hbar
c45d47d4f3dffe0e1706f979a9a6eb5028c7b775
3,644,090
import re def extract_push_target(push_target: str): """ Extract push target from the url configured Workspace is optional """ if not push_target: raise ValueError("Cannot extract push-target if push-target is not set.") match_pattern = re.compile( r"(?P<http_scheme>https|http):\/\/(?P<askanna_host>[\w\.\-\:]+)\/(?P<workspace_suuid>[\w-]+){0,1}\/{0,1}project\/(?P<project_suuid>[\w-]+)\/{0,1}" # noqa: E501 ) matches = match_pattern.match(push_target) matches_dict = matches.groupdict() return matches_dict
3fe11ac218c0cfc7c6211cfe76fd11bd248c4588
3,644,093
def dish_gain(radius, freq): """ Dish radar gain. Inputs: - radius [float]: Dish radius (m) - freq [float]: Transmit frequency (Hz) Outputs: - g: Gain """ return 4*pi**2*radius**2/wavelen(freq)**2
a20d963f9acc839a811aefaa942aaeaedce0689c
3,644,095
def center_img(img, size=None, fill_value=255): """ center img in a square background """ h, w = img.shape[:2] if size is None: size = max(h, w) shape = (size, size) + img.shape[2:] background = np.full(shape, fill_value, np.uint8) center_x = (size - w) // 2 center_y = (size - h) // 2 background[center_y:center_y + h, center_x:center_x + w] = img return background
838d6185230fbb8184925a31e0f3334dc4bda627
3,644,097
def concat_files(*files): """ Concat some files together. Returns out and err to keep parity with shell commands. Args: *files: src1, src2, ..., srcN, dst. Returns: out: string err: string """ out = '' err = '' dst_name = files[-1] sources = [files[f] for f in range(len(files)) if f < len(files) - 1] with open(dst_name, 'w') as dst: for f in sources: with open(f, 'r') as src: for line in src: dst.write(line) return out, err
101c37e5b3955c153c8c2210e7575a62341c768a
3,644,098
def getElementTypeToolTip(t): """Wrapper to prevent loading qtgui when this module is imported""" if t == PoolControllerView.ControllerModule: return "Controller module" elif t == PoolControllerView.ControllerClass: return "Controller class"
6862b10bc940daec1c13ef97fafbf525c2683e9e
3,644,102
def parse_dates(array): """Parse the valid dates in an array of strings. """ parsed_dates = [] for elem in array: elem = parse_date(elem) if elem is not None: parsed_dates.append(elem) return parsed_dates
1ec89f084cdd68709a37ea05356ceeb1a21f98bd
3,644,103
def app_factory(global_config, **local_config): """ 定义一个 app 的 factory 方法,以便在运行时绑定具体的 app,而不是在配置文件中就绑定。 :param global_config: :param local_config: :return: """ return MyApp()
c4c29963f88253c272319bc2369d4801df284fbf
3,644,104
import pytz def str_to_datetime(dt_str): """ Converts a string to a UTC datetime object. @rtype: datetime """ try: return dt.datetime.strptime( dt_str, DATE_STR_FORMAT).replace(tzinfo=pytz.utc) except ValueError: # If dt_str did not match our format return None
a9ac073c11b13dca011cca46860080cdc638dcbe
3,644,105
def quantize(img): """Quantize the output of model. :param img: the input image :type img: ndarray :return: the image after quantize :rtype: ndarray """ pixel_range = 255 return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
49abd32d8b2cf54c955e16765602bbff77a2a1b9
3,644,106
def is_normalized(M, x, eps): """Return True if (a Fuchsian) matrix M is normalized, that is all the eigenvalues of it's residues in x lie in [-1/2, 1/2) range (in limit eps->0). Return False otherwise. Examples: >>> x, e = var("x epsilon") >>> is_normalized(matrix([[(1+e)/3/x, 0], [0, e/x]]), x, e) True """ points = singularities(M, x) for x0, p in points.items(): M0 = matrix_residue(M, x, x0) for ev in M0.eigenvalues(): ev = limit_fixed(ev, eps, 0) if not (Rational((-1, 2)) <= ev and ev < Rational((1, 2))): return False return True
01715cd58cad25a805ffd260b78641701483ad86
3,644,107
def _get_dashboard_link(course_key): """ Construct a URL to the external analytics dashboard """ analytics_dashboard_url = f'{settings.ANALYTICS_DASHBOARD_URL}/courses/{str(course_key)}' link = HTML("<a href=\"{0}\" rel=\"noopener\" target=\"_blank\">{1}</a>").format( analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME ) return link
fa9fb656ff4e7cf70c3512755351a46302cec71b
3,644,108
def figure1_control(data1, cols):
    """
    Creates a data set to plot figure 1, Panel B, D, F.

    For each outcome column, residualizes the outcome on a constant after
    absorbing ``uazY`` fixed effects (within-group demeaning), then averages
    the residuals within each ``dzagr01`` bin and fits separate linear
    predictions on each side of the zero cutoff.

    Args:
    - data1 (pd.DataFrame): the original data set (mutated: gains
      ``{col}_res`` columns and ``uazY`` becomes categorical)
    - cols (list): a list of column names ["agus", "bct", "bcg"]

    Returns:
    - df_fig1_contr (pd.DataFrame): a data set for plotting panels with controls
    """
    data1["uazY"] = data1["uazY"].astype("category")
    for column in cols:
        # Keep observations near the discontinuity: dzagr01 != 0, |dzagr01| < 0.2.
        data_df = data1.loc[(data1["dzagr01"] != 0) &
                            (abs(data1["dzagr01"]) < 0.2), [column, "uazY"]].dropna()
        data_df["constant"] = [1] * len(data_df.index)
        y, X = patsy.dmatrices("{}~constant".format(column), data=data_df,
                               return_type='dataframe')
        # Within-group demeaning (absorb uazY fixed effects), re-centered at
        # the overall mean so the constant stays interpretable.
        ybar = y.mean()
        y = y - y.groupby(data_df["uazY"]).transform('mean') + ybar
        Xbar = X.mean()
        X = X - X.groupby(data_df["uazY"]).transform('mean') + Xbar
        reg = smp.OLS(y, X).fit()
        y_hat = reg.predict()
        y_hat.shape = (len(y_hat), 1)
        residual = y - y_hat
        data1["{}_res".format(column)] = residual
    # Average residuals within each running-variable bin.
    # FIX: select with a list of labels -- tuple-style multi-label selection
    # on a groupby was deprecated and removed in modern pandas.
    res_cols = ["{}_res".format(column) for column in cols]
    df_fig1_contr = data1.groupby("dzagr01")[res_cols].mean()
    df_fig1_contr.reset_index(level=0, inplace=True)
    for column in cols:
        # Separate linear fits on each side of the cutoff (dzagr01 = 0).
        fig1_B1 = sm.ols(formula="{}_res ~ dzagr01".format(column),
                         data=df_fig1_contr[(df_fig1_contr["dzagr01"] < 0) &
                                            (abs(df_fig1_contr["dzagr01"]) < 0.2)]).fit()
        fig1_B2 = sm.ols(formula="{}_res ~ dzagr01".format(column),
                         data=df_fig1_contr[(df_fig1_contr["dzagr01"] > 0) &
                                            (abs(df_fig1_contr["dzagr01"]) < 0.2)]).fit()
        pred_B1 = fig1_B1.predict()
        pred_B2 = fig1_B2.predict()
        df_fig1_contr.loc[(df_fig1_contr["dzagr01"] < 0) &
                          (abs(df_fig1_contr["dzagr01"]) < 0.2),
                          "pred_{}1".format(column)] = pred_B1
        df_fig1_contr.loc[(df_fig1_contr["dzagr01"] > 0) &
                          (abs(df_fig1_contr["dzagr01"]) < 0.2),
                          "pred_{}2".format(column)] = pred_B2
    return df_fig1_contr
5eef05c567159a623fdaaafa5a5707c48c7fe7fa
3,644,109
import ctypes


def GetEffectiveRightsFromAclW(acl, sid):
    """Compute the effective access mask a SID has under an ACL.

    Wraps advapi32!GetEffectiveRightsFromAclW but takes a SID instead of a
    trustee! A TRUSTEE_W wrapping the SID is built internally.

    :param acl: ACL object exposing ``to_bytes()`` (serialized ACL blob).
    :param sid: SID object exposing ``to_bytes()`` (serialized SID blob).
    :return: the effective rights mask as an int.
    """
    _GetEffectiveRightsFromAclW = windll.advapi32.GetEffectiveRightsFromAclW
    _GetEffectiveRightsFromAclW.argtypes = [PVOID, PTRUSTEE_W, PDWORD] #[HANDLE, SE_OBJECT_TYPE, DWORD, PSID, PSID, PACL, PACL, PSECURITY_DESCRIPTOR]
    # restype hook raises unless the call returns ERROR_SUCCESS.
    _GetEffectiveRightsFromAclW.restype = RaiseIfNotErrorSuccess
    # Copy the SID bytes into a ctypes buffer so we can take its address.
    sid_data = sid.to_bytes()
    psid = ctypes.create_string_buffer(sid_data, len(sid_data))
    # Build a TRUSTEE_W pointing at the SID. TrusteeForm 0 is TRUSTEE_IS_SID
    # per the Win32 TRUSTEE_FORM enumeration.
    trustee = TRUSTEE_W()
    trustee.pMultipleTrustee = 0
    trustee.MultipleTrusteeOperation = 0
    trustee.TrusteeForm = 0
    trustee.TrusteeType = 0
    trustee.ptstrName = ctypes.c_void_p(ctypes.addressof(psid))
    # Out-parameter the API fills with the resulting ACCESS_MASK.
    effective_rigths_mask = DWORD(0)
    # Copy the ACL bytes into a ctypes buffer as well.
    acl_data = acl.to_bytes()
    pacl = ctypes.create_string_buffer(acl_data, len(acl_data))
    res = _GetEffectiveRightsFromAclW(pacl, trustee, byref(effective_rigths_mask))
    return effective_rigths_mask.value
3edb0080a98a7d9d0d040914435c76cd20f30e0a
3,644,110
def store(mnemonic, opcode):
    """Create a store instruction class for the given mnemonic/opcode.

    The generated class renders as ``l.<mnemonic> imm(ra), rb`` and carries
    the encoding patterns for the Orbis32 store token.
    """
    ra = Operand("ra", Or1kRegister, read=True)
    rb = Operand("rb", Or1kRegister, read=True)
    imm = Operand("imm", int)
    syntax = Syntax(["l", ".", mnemonic, " ", imm, "(", ra, ")", ",", " ", rb])
    members = dict(
        ra=ra,
        rb=rb,
        imm=imm,
        syntax=syntax,
        patterns={"opcode": opcode, "ra": ra, "rb": rb, "imm": imm},
        tokens=[Orbis32StoreToken],
    )
    return type(mnemonic.title(), (Orbis32Instruction,), members)
c9d1d7376b5c73eed87b5c3a7438cc54ecab9ad2
3,644,111
import ctypes


def hlmlDeviceGetPowerUsage(device: hlml_t.HLML_DEVICE.TYPE) -> int:
    """ Retrieves power usage for the device in mW

        Parameters:
            device (HLML_DEVICE.TYPE) - The handle for a habana device.

        Returns:
            power (int) - The given device's power usage in mW.
    """
    global _hlmlOBJ
    power_mw = ctypes.c_uint()
    get_power = _hlmlOBJ.get_func_ptr("hlml_device_get_power_usage")
    status = get_power(device, ctypes.byref(power_mw))
    check_return(status)
    return power_mw.value
ed2d64be06a8e319221b2c3e2017f07a6c16a028
3,644,112
def usgs_coef_parse(**kwargs):
    """
    Combine, parse, and format the provided dataframes
    :param kwargs: potential arguments include:
                   args: dictionary, used to run flowbyactivity.py
                   ('year' and 'source')
    :return: df, parsed and partially formatted to flowbyactivity
        specifications
    """
    # load arguments necessary for function
    args = kwargs['args']
    # Read directly into a pandas df, rename to the flowbyactivity
    # column names, and drop the unused percentile columns.
    df = (
        pd.read_csv(externaldatapath + "USGS_WU_Coef_Raw.csv")
        .rename(columns={"Animal Type": "ActivityConsumedBy",
                         "WUC_Median": "FlowAmount",
                         "WUC_Minimum": "Min",
                         "WUC_Maximum": "Max"})
        .drop(columns=["WUC_25th_Percentile", "WUC_75th_Percentile"])
    )
    # Hard-coded metadata required by the flowbyactivity format.
    df["Class"] = "Water"
    df["SourceName"] = "USGS_WU_Coef"
    df["Location"] = US_FIPS
    df['Year'] = args['year']
    df = assign_fips_location_system(df, '2005')
    df["Unit"] = "gallons/animal/day"
    df['DataReliability'] = 5  # tmp
    df['DataCollection'] = 5  # tmp
    return df
9cfa29cc5390717fd4a36360dcdb373614ae7345
3,644,113
def success_poly_overlap(gt_poly, res_poly, n_frame):
    """Compute the success curve over IoU thresholds 0, 0.05, ..., 1.0.

    :param gt_poly: ground-truth polygons, [Nx8]
    :param res_poly: result polygons, one per ground-truth entry
    :param n_frame: frame count used to normalize the success counts
    :return: array of success rates, one per threshold
    """
    thresholds = np.arange(0, 1.05, 0.05)
    ious = np.array([poly_overlap_ratio(gt_poly[idx], res_poly[idx])
                     for idx in range(gt_poly.shape[0])])
    return np.array([np.sum(ious > thr) / float(n_frame) for thr in thresholds])
3de9e308fd8a29fb7e7ed4a7132ce5157b5794eb
3,644,114
import io


def my_get_size_png(gg, height, width, dpi, limitsize):
    """
    Get actual size of ggplot image saved (with bbox_inches="tight")

    Saves the plot to an in-memory PNG and reads back the pixel
    dimensions, converting them to inches via *dpi*.
    """
    buffer = io.BytesIO()
    gg.save(buffer, format="png", height=height, width=width, dpi=dpi,
            units="in", limitsize=limitsize, verbose=False,
            bbox_inches="tight")
    buffer.seek(0)
    px_width, px_height = Image.open(buffer).size
    return px_width / dpi, px_height / dpi
fe6417f35480048b70f25bfab97978515fd7d7d1
3,644,115
def getRnnGenerator(vocab_size, hidden_dim, input_dim=512):
    """
    "Apply" the RNN to the input x
    For initializing the network, the vocab size needs to be known
    Default of the hidden layer is set tot 512 like Karpathy
    """
    feedback = LookupFeedback(vocab_size, input_dim, name='feedback')
    readout = Readout(
        readout_dim=vocab_size,
        source_names=["states"],  # transition.apply.states ???
        emitter=SoftmaxEmitter(name="emitter"),
        feedback_brick=feedback,
        name="readout",
    )
    transition = MySimpleRecurrent(
        name="transition",
        activation=Tanh(),
        dim=hidden_dim,
    )
    generator = SequenceGenerator(
        readout,
        transition,
        weights_init=IsotropicGaussian(0.01),
        biases_init=Constant(0),
        name="generator",
    )
    generator.push_initialization_config()
    generator.transition.weights_init = IsotropicGaussian(0.01)
    generator.initialize()
    return generator
b1c033da42a0079e8c539fd908b715b8e6cb076f
3,644,116
def first_true(iterable, default=False, pred=None):
    """Returns the first true value in the iterable.

    If no true value is found, returns *default*

    If *pred* is not None, returns the first item
    for which pred(item) is true.
    """
    # first_true([a,b,c], x) --> a or b or c or x
    # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
    if pred is None:
        pred = bool
    for item in iterable:
        if pred(item):
            return item
    return default
66c6b3e282cfdf60819d5df2d48cdea31484a4f1
3,644,117
def get_device_serial_no(instanceId, gwMgmtIp, fwApiKey):
    """ Retrieve the serial number from the FW.

    @param instanceId: Identifier of the FW instance (used for logging)
    @param gwMgmtIp: The IP address of the FW
    @type: ```str```
    @param fwApiKey: Api key of the FW
    @type: ```str```

    @return The serial number of the FW, None if it cannot be read,
            or False when the device/API is unreachable
    @rtype: ```str```
    """
    if gwMgmtIp is None:
        logger.error('Firewall IP could not be found. Can not interact with the device')
        return False
    logger.info('Retrieve the serial number from FW {} with IP: {}'.format(instanceId, gwMgmtIp))
    api_cmd = "/api/?type=op&key={}&cmd=<show><system><info/></system></show>".format(fwApiKey)
    response = execute_api_request(gwMgmtIp, 443, api_cmd)
    if response['result'] == False:
        logger.error('PAN Firewall: Fail to execute the show system info command for device: {} with IP: {}'.format(instanceId, gwMgmtIp))
        # Log every failure line the API returned before giving up.
        for line in response['data'].findall(".//line"):
            logger.error('Reason for failure: {}'.format(line.text))
        return False
    # Take the (last) <serial> element from the response, if any.
    serial_no = None
    for node in response['data'].findall(".//serial"):
        serial_no = node.text
    if not serial_no:
        logger.error("Unable to retrieve the serial number from device: {} with IP: {}".format(instanceId, gwMgmtIp))
    return serial_no
e13d90da032f4084b2c1cafcf4d3a77b189a5d58
3,644,120
from typing import Optional
import torch


def multilabel_cross_entropy(
    x: Tensor,
    target: Tensor,
    weight: Optional[Tensor] = None,
    ignore_index: int = -100,
    reduction: str = 'mean'
) -> Tensor:
    """Implements the cross entropy loss for multi-label targets

    Args:
        x (torch.Tensor[N, K, ...]): input tensor
        target (torch.Tensor[N, K, ...]): target tensor
        weight (torch.Tensor[K], optional): manual rescaling of each class
        ignore_index (int, optional): specifies target value that is ignored
            and do not contribute to gradient
        reduction (str, optional): reduction method
    Returns:
        torch.Tensor: loss reduced with `reduction` method
    """
    # log(P[class]) = log_softmax(score)[class]
    log_probs = F.log_softmax(x, dim=1)

    # Mask out the ignored class index so it contributes nothing.
    keep = torch.ones(log_probs.shape[1], dtype=torch.bool, device=x.device)
    if 0 <= ignore_index < x.shape[1]:
        keep[ignore_index] = False

    if weight is not None:
        # Match the input's tensor type before broadcasting over classes.
        if weight.type() != x.data.type():
            weight = weight.type_as(x.data)
        broadcast_shape = [1, -1] + [1] * (x.ndim - 2)
        log_probs = log_probs * weight.view(*broadcast_shape)  # type: ignore[attr-defined]

    # CE Loss
    loss = -(target * log_probs)

    # Loss reduction
    if reduction == 'sum':
        return loss[:, keep].sum()
    loss = loss[:, keep].sum(dim=1)
    if reduction == 'mean':
        loss = loss.mean()
    return loss
12f1bdb41955fc6ba05b125956cdef40e42ca94c
3,644,121
def dataset_string(dataset):
    """Build a short display string for *dataset*.

    A scalar is rendered through ``fn.VALUE_FORMAT``; an array with more
    than one element goes through ``fn.data_string``; anything else
    (probably a string) is shortened via ``fn.shortstr``.
    """
    data = dataset_data(dataset)
    try:
        # single value
        return fn.VALUE_FORMAT % data
    except TypeError:
        pass
    # array
    if dataset.size > 1:
        return fn.data_string(data)
    # probably a string
    return fn.shortstr('%s' % data)
25d82bc87ae83599857a6b8d83b671d25339df9f
3,644,122
from typing import Type
from typing import Callable


def create_constant_value_validator(
    constant_cls: Type, is_required: bool
) -> Callable[[str], bool]:
    """
    Create a validator func that validates a value is one of the valid values.

    Parameters
    ----------
    constant_cls: Type
        The constant class that contains the valid values.
    is_required: bool
        Whether the value is required.

    Returns
    -------
    validator_func: Callable[[str], bool]
        The validator func.
    """

    def is_valid(value: str) -> bool:
        """Return the validation status of *value*."""
        if value is None:
            # A missing value is acceptable only when the field is optional.
            return not is_required
        valid_values = get_all_class_attr_values(constant_cls)
        return value in valid_values

    return is_valid
d225c4a225a4e24c809ef8cc6d557cf989375542
3,644,123