Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def import_odim_hdf5(filename, **kwargs):
    """Import a precipitation field (and optionally the quality field) from a
    HDF5 file conforming to the ODIM specification.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Other Parameters
    ----------------
    qty : {'RATE', 'ACRR', 'DBZH'}
        The quantity to read from the file. The currently supported
        identifiers are: 'RATE'=instantaneous rain rate (mm/h),
        'ACRR'=hourly rainfall accumulation (mm) and 'DBZH'=max-reflectivity
        (dBZ). The default value is 'RATE'.

    Returns
    -------
    out : tuple
        A three-element tuple containing the OPERA product for the requested
        quantity and the associated quality field and metadata. The quality
        field is read from the file if it contains a dataset whose quantity
        identifier is 'QIND'.
    """
    if not h5py_imported:
        raise MissingOptionalDependency(
            "h5py package is required to import "
            "radar reflectivity composites using ODIM HDF5 specification "
            "but it is not installed"
        )

    qty = kwargs.get("qty", "RATE")
    if qty not in ["ACRR", "DBZH", "RATE"]:
        raise ValueError(
            "unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'" % qty
        )

    f = h5py.File(filename, "r")

    R = None
    Q = None

    for dsg in f.items():
        if dsg[0][0:7] == "dataset":
            what_grp_found = False
            # check if the "what" group is in the "dataset" group
            if "what" in list(dsg[1].keys()):
                qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
                    dsg[1]["what"]
                )
                what_grp_found = True

            for dg in dsg[1].items():
                if dg[0][0:4] == "data":
                    # check if the "what" group is in the "data" group
                    if "what" in list(dg[1].keys()):
                        qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
                            dg[1]["what"]
                        )
                    elif not what_grp_found:
                        raise DataModelError(
                            "Non ODIM compliant file: "
                            "no what group found from {} "
                            "or its subgroups".format(dg[0])
                        )

                    if qty_.decode() in [qty, "QIND"]:
                        ARR = dg[1]["data"][...]
                        MASK_N = ARR == nodata
                        MASK_U = ARR == undetect
                        MASK = np.logical_and(~MASK_U, ~MASK_N)

                        if qty_.decode() == qty:
                            R = np.empty(ARR.shape)
                            R[MASK] = ARR[MASK] * gain + offset
                            R[MASK_U] = 0.0
                            R[MASK_N] = np.nan
                        elif qty_.decode() == "QIND":
                            Q = np.empty(ARR.shape, dtype=float)
                            Q[MASK] = ARR[MASK]
                            Q[~MASK] = np.nan

    if R is None:
        raise IOError("requested quantity %s not found" % qty)

    where = f["where"]
    proj4str = where.attrs["projdef"].decode()
    pr = pyproj.Proj(proj4str)

    LL_lat = where.attrs["LL_lat"]
    LL_lon = where.attrs["LL_lon"]
    UR_lat = where.attrs["UR_lat"]
    UR_lon = where.attrs["UR_lon"]
    if (
        "LR_lat" in where.attrs.keys()
        and "LR_lon" in where.attrs.keys()
        and "UL_lat" in where.attrs.keys()
        and "UL_lon" in where.attrs.keys()
    ):
        LR_lat = float(where.attrs["LR_lat"])
        LR_lon = float(where.attrs["LR_lon"])
        UL_lat = float(where.attrs["UL_lat"])
        UL_lon = float(where.attrs["UL_lon"])
        full_cornerpts = True
    else:
        full_cornerpts = False

    LL_x, LL_y = pr(LL_lon, LL_lat)
    UR_x, UR_y = pr(UR_lon, UR_lat)
    if full_cornerpts:
        LR_x, LR_y = pr(LR_lon, LR_lat)
        UL_x, UL_y = pr(UL_lon, UL_lat)
        x1 = min(LL_x, UL_x)
        y1 = min(LL_y, LR_y)
        x2 = max(LR_x, UR_x)
        y2 = max(UL_y, UR_y)
    else:
        x1 = LL_x
        y1 = LL_y
        x2 = UR_x
        y2 = UR_y

    if "xscale" in where.attrs.keys() and "yscale" in where.attrs.keys():
        xpixelsize = where.attrs["xscale"]
        ypixelsize = where.attrs["yscale"]
    else:
        xpixelsize = None
        ypixelsize = None

    if qty == "ACRR":
        unit = "mm"
        transform = None
    elif qty == "DBZH":
        unit = "dBZ"
        transform = "dB"
    else:
        unit = "mm/h"
        transform = None

    if np.any(np.isfinite(R)):
        thr = np.nanmin(R[R > np.nanmin(R)])
    else:
        thr = np.nan

    metadata = {
        "projection": proj4str,
        "ll_lon": LL_lon,
        "ll_lat": LL_lat,
        "ur_lon": UR_lon,
        "ur_lat": UR_lat,
        "x1": x1,
        "y1": y1,
        "x2": x2,
        "y2": y2,
        "xpixelsize": xpixelsize,
        "ypixelsize": ypixelsize,
        "yorigin": "upper",
        "institution": "Odyssey datacentre",
        "accutime": 15.0,
        "unit": unit,
        "transform": transform,
        "zerovalue": np.nanmin(R),
        "threshold": thr,
    }

    f.close()

    return R, Q, metadata
650875bb3d04627f4570507892ee26b42912c39e
3,641,329
def sugerir(update: Update, _: CallbackContext) -> int:
    """Show new choice of buttons"""
    query = update.callback_query
    query.answer()
    keyboard = [
        [
            InlineKeyboardButton("\U0001F519 Volver", callback_data=str(NINE)),
            InlineKeyboardButton("\U0001F44B Salir", callback_data=str(TEN)),
        ]
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    query.edit_message_text(
        text="\U0001F91A Sugerir cuentos:\n\n Responde este mensaje para sugerir un personaje o para realizar el aporte de un cuento\n",
        reply_markup=reply_markup
    )
    return NINE
e278c6bdab82e4fdfc38c7a4bb58a5511a003515
3,641,330
def clone_subgraph(*, outputs, inputs, new_inputs, suffix="cloned"):
    """
    Take all of the tensorflow nodes between `outputs` and `inputs` and clone
    them but with `inputs` replaced with `new_inputs`.

    Args:
        outputs (List[tf.Tensor]): list of output tensors
        inputs (List[tf.Tensor]): list of input tensors
        new_inputs (List[tf.Tensor]): list of new input tensors
        suffix (str, optional): suffix to the transformed operation names

    Returns:
        List[T]: list of transformed outputs
    """
    return transform(outputs=outputs, inputs=inputs, transformed_inputs=new_inputs,
                     transformer=lambda op, inputs: clone_op(op, inputs, suffix=suffix))
b61d73d79635551f8277cbc0c2da97d0c5c2908e
3,641,331
async def refresh_replacements(db, sample_id: str) -> list:
    """
    Remove sample file `replacement` fields if the linked files have been deleted.

    :param db: the application database client
    :param sample_id: the id of the sample to refresh
    :return: the updated files list
    """
    files = await virtool.db.utils.get_one_field(db.samples, "files", sample_id)

    for file in files:
        replacement = file.get("replacement")
        if replacement and not await db.files.count_documents({"_id": replacement["id"]}):
            file["replacement"] = None

    document = await db.samples.find_one_and_update({"_id": sample_id}, {
        "$set": {
            "files": files
        }
    })

    return document["files"]
43667801bf6bb96edbeb59bf9d538b62c9bf9785
3,641,332
def torch_model(model_name, device, checkpoint_path=None):
    """Select an ImageNet model by name and load its weights."""
    if checkpoint_path:
        pretrained = False
    else:
        pretrained = True
    model = models.__dict__[model_name](pretrained)
    if hasattr(model, 'classifier'):
        if model_name == 'mobilenet_v2':
            model.classifier = nn.Sequential(
                nn.Dropout(0.2),
                nn.Linear(model.classifier[-1].in_features, 2))
        else:
            model.classifier = nn.Sequential(
                nn.Linear(model.classifier.in_features, 2))
    elif hasattr(model, 'fc'):
        model.fc = nn.Linear(model.fc.in_features, 2)
    model.to(device)
    if checkpoint_path:
        load_checkpoint(checkpoint_path, model, device)
    return model
831cf1edd83b76049e7f6d60434961cbd44e4bd9
3,641,333
from typing import Tuple
from datetime import datetime

def get_timezone() -> Tuple[datetime.tzinfo, str]:
    """Discover the current time zone and its standard string representation (for source{d})."""
    dt = get_datetime_now().astimezone()
    tzstr = dt.strftime("%z")
    tzstr = tzstr[:-2] + ":" + tzstr[-2:]
    return dt.tzinfo, tzstr
f73cedb8fb91c75a19104d4d8bef29f73bfb9b1a
3,641,334
def get_timed_roadmaps_grid_common(
    ins: Instance,
    T: int,
    size: int,
) -> list[TimedRoadmap]:
    """[deprecated] get grid roadmap shared by all agents

    Args:
        ins (Instance): instance
        T (int): assumed makespan
        size (int): size x size grid will be constructed

    Returns:
        list[TimedRoadmap]: timed roadmaps shared by all agents

    Note:
        use get_timed_roadmaps_grid_common_2d_fast in 2d environment
    """
    if ins.dim == 2:
        return get_timed_roadmaps_grid_common_2d_fast(ins, T, size)
    return get_common_roadmaps(ins, T, get_grid(size, ins.rads[0], ins))
9b8e283ad66db35132393b53af2bfa36fc4aaf83
3,641,337
def arithmetic_series(a: int, n: int, d: int = 1) -> int:
    """Returns the sum of the arithmetic sequence with parameters a, n, d.

    a: The first term in the sequence
    n: The total number of terms in the sequence
    d: The difference between any two consecutive terms in the sequence
    """
    return n * (2 * a + (n - 1) * d) // 2
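# Illustrative checks (not from the original source): the closed form
# n*(2a + (n-1)d)/2 reproduces the classic Gauss sum and a d=3 progression.
assert arithmetic_series(1, 100) == 5050  # 1 + 2 + ... + 100
assert arithmetic_series(2, 5, 3) == 40   # 2 + 5 + 8 + 11 + 14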
168f0b07cbe6275ddb54c1a1390b41a0f340b0a6
3,641,338
import re

def get_arc_proxy_user(proxy_file=None):
    """
    Returns the owner of the arc proxy. When *proxy_file* is *None*, it defaults
    to the result of :py:func:`get_arc_proxy_file`. Otherwise, when it evaluates
    to *False*, ``arcproxy`` is queried without a custom proxy file.
    """
    out = _arc_proxy_info(args=["--infoitem=identity"], proxy_file=proxy_file)[1].strip()
    try:
        return re.match(r".*\/CN\=([^\/]+).*", out.strip()).group(1)
    except:
        raise Exception("no valid identity found in arc proxy: {}".format(out))
01f1040cd1217d7722a691a78b5884125865cf39
3,641,339
def pass_hot_potato(names, num):
    """Pass hot potato.

    A hot potato is sequentially passed along a queue of people. After a
    number of passes, the one holding the hot potato is out. The passing
    game is then launched again, until only one person remains.
    """
    name_queue = Queue()
    for name in names:
        name_queue.enqueue(name)
    while name_queue.size() > 1:
        for i in range(num):
            name_queue.enqueue(name_queue.dequeue())
        name_queue.dequeue()
    return name_queue.dequeue()
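# Hypothetical usage sketch, assuming the Queue class referenced above
# (with enqueue/dequeue/size) is available. With the textbook inputs:
# pass_hot_potato(["Bill", "David", "Susan", "Jane", "Kent", "Brad"], 7)
# the survivor is "Susan".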
f78a635bdf3138809329ef8ad97934b125b9335a
3,641,340
import copy
import numpy as np
import pandas as pd

def convert_timeseries_dataframe_to_supervised(df: pd.DataFrame, namevars, target, n_in=1, n_out=0, dropT=True):
    """
    Transform a time series in dataframe format into a supervised learning dataset
    while keeping the dataframe intact.
    Returns the transformed pandas DataFrame, the name of the target column and the
    names of the predictor columns.

    Arguments:
        df: A timeseries dataframe that you want to convert to a supervised dataset.
        namevars: columns that you want to lag in the data frame. Other columns will
            be untouched.
        target: this is the target variable you intend to use in supervised learning
        n_in: Number of lag periods as input (X).
        n_out: Number of future periods (optional) as output for the target variable (y).
        dropT: Boolean - whether or not to drop columns at time 't'.

    Returns:
        df: This is the transformed data frame with the time series columns lagged.
            Note that the original columns are dropped if you set the 'dropT' argument
            to True. If not, they are preserved. This Pandas DataFrame of lagged time
            series data is immediately available for supervised learning.
        rtype: pd.DataFrame, str, List[str]
    """
    target = copy.deepcopy(target)
    df = copy.deepcopy(df)
    int_vars = df.select_dtypes(include='integer').columns.tolist()
    # Notice that we will create a sequence of columns from name vars with suffix (t-n,... t-1), etc.
    drops = []
    int_changes = []
    for i in range(n_in, -1, -1):
        if i == 0:
            for var in namevars:
                addname = var + '(t)'
                df = df.rename(columns={var: addname})
                drops.append(addname)
                if var in int_vars:
                    int_changes.append(addname)
        else:
            for var in namevars:
                addname = var + '(t-' + str(i) + ')'
                df[addname] = df[var].shift(i)
                if var in int_vars:
                    int_changes.append(addname)
    ## forecast sequence (t, t+1,... t+n)
    if n_out == 0:
        n_out = False
    for i in range(1, n_out):
        for var in namevars:
            addname = var + '(t+' + str(i) + ')'
            df[addname] = df[var].shift(-i)
    # drop rows with NaN values
    df = df.dropna()
    ### Make sure that whatever vars came in as integers return back as integers!
    if int_changes:
        ### only do this if there are some changes to implement ###
        df[int_changes] = df[int_changes].astype(np.int64)
    # put it all together for each target
    for each_target in target:
        df = df.rename(columns={each_target + '(t)': each_target})
    if dropT:
        ### If dropT is True, all the "t" series of the target column (in case it is
        ### in the namevars) will be removed if you don't want the target to learn
        ### from its "t" values. Similarly, we will also drop all the "t" series of
        ### name_vars if you set dropT to True.
        try:
            drops.remove(target)
        except:
            pass
        df.drop(drops, axis=1, inplace=True)
    preds = [x for x in list(df) if x not in target]
    return df, target, preds
b62296680f6a871f20078e55eefa20f09392b012
3,641,341
def build_graph(adj_mat):
    """Build a sparse diffusion graph. The adjacency matrix needs to preserve divergence."""
    # sources, targets = adj_mat.nonzero()
    # edgelist = list(zip(sources.tolist(), targets.tolist()))
    # g = Graph(edgelist, edge_attrs={"weight": adj_mat.data.tolist()}, directed=True)
    g = Graph.Weighted_Adjacency(adj_mat)
    return g
bdc8dc5d1c107086c4c548b500f6958bdbe48103
3,641,342
def retrieve_context_path_comp_service_end_point_end_point(uuid):  # noqa: E501
    """Retrieve end-point

    Retrieve operation of resource: end-point # noqa: E501

    :param uuid: ID of uuid
    :type uuid: str

    :rtype: List[str]
    """
    return 'do some magic!'
e3169e139b5992daf00411b694cf77436fb17fba
3,641,343
def get_external_repos(gh):
    """
    Get all external repositories from the `repos.config` file
    """
    external_repos = []
    with open("repos.config") as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    for entry in content:
        org_name, repo_name = entry.split('/')
        external_repos.append(gh.get_organization(org_name).get_repo(repo_name))
    return external_repos
a83515acd77c7ef9e30bf05d8d4478fa833ab5bc
3,641,344
import json

def load_fit_profile():
    """
    This method returns the FIT profile types based on the Profile.xlsx that is
    included in the Garmin FIT SDK (https://developer.garmin.com/fit/download/).
    The returned profile can be used to translate e.g. Garmin product names to
    their corresponding integer product ids.
    """
    fpath = _fit_profile_json_path()
    with fpath.open("r") as fit_profile_file:
        profile = json.load(fit_profile_file)
    return profile
13108546c2d88d77d090b222c1b3ff2b59208310
3,641,346
def mmethod(path, *args, **kwargs):
    """
    Returns a mapper function that runs the path method for each instance of
    the iterable collection.

    >>> mmethod('start')
    is equivalent to
    >>> lambda thread: thread.start()

    >>> mmethod('book_set.filter', number_of_pages__gte=100)
    is equivalent to
    >>> lambda author: author.book_set.filter(number_of_pages__gte=100)
    """
    return lambda x: mattr(path)(x)(*args, **kwargs)
6ded620d190d338d981c433514018a4182b7e207
3,641,347
def generate_test_demand_design_image() -> TestDataSet:
    """
    Returns
    -------
    test_data : TestDataSet
        2800 points of test data, uniformly sampled from (price, time, emotion).
        Emotion is transformed into img.
    """
    org_test: TestDataSet = generate_test_demand_design(False)
    treatment = org_test.treatment
    covariate = org_test.covariate
    target = org_test.structural
    emotion_arr = covariate[:, 1].astype(int)
    emotion_img = attach_image(emotion_arr, False, 42)
    covariate_img = np.concatenate([covariate[:, 0:1], emotion_img], axis=1)
    return TestDataSet(treatment=treatment, covariate=covariate_img, structural=target)
238cf11480e0d23f30b426ed19877126edc010fa
3,641,348
def value_iteration(game, depth_limit, threshold):
    """Solves for the optimal value function of a game.

    For small games only! Solves the game using value iteration, with the
    maximum error for the value function less than threshold. This algorithm
    works for sequential 1-player games or 2-player zero-sum games, with or
    without chance nodes.

    Arguments:
        game: The game to analyze, as returned by `load_game`.
        depth_limit: How deeply to analyze the game tree. Negative means no
            limit, 0 means root-only, etc.
        threshold: Maximum error for state values.

    Returns:
        A `dict` with string keys and float values, mapping string encoding of
        states to the values of those states.
    """
    if game.num_players() not in (1, 2):
        raise ValueError("Game must be a 1-player or 2-player game")
    if (game.num_players() == 2 and
            game.get_type().utility != pyspiel.GameType.Utility.ZERO_SUM):
        raise ValueError("2-player games must be zero sum games")
    # We expect Value Iteration to be used with perfect information games, in
    # which `str` is assumed to display the state of the game.
    states = get_all_states.get_all_states(
        game, depth_limit, True, False, to_string=str)
    values = {}
    transitions = {}
    _initialize_maps(states, values, transitions)
    error = threshold + 1  # A value larger than threshold
    min_utility = game.min_utility()
    while error > threshold:
        error = 0
        for key, state in states.items():
            if state.is_terminal():
                continue
            player = state.current_player()
            value = min_utility if player == 0 else -min_utility
            for action in state.legal_actions():
                next_states = transitions[(key, action)]
                q_value = sum(p * values[next_state]
                              for next_state, p in next_states)
                if player == 0:
                    value = max(value, q_value)
                else:
                    value = min(value, q_value)
            error = max(abs(values[key] - value), error)
            values[key] = value
    return values
2a9ae3903666ee16e86fe30a0458707394fe4695
3,641,349
def _import_and_infer(save_dir, inputs):
    """Import a SavedModel into a TF 1.x-style graph and run the default serving signature."""
    graph = ops.Graph()
    with graph.as_default(), session_lib.Session() as session:
        model = loader.load(session, [tag_constants.SERVING], save_dir)
        signature = model.signature_def[
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        assert set(inputs.keys()) == set(signature.inputs.keys())
        feed_dict = {}
        for arg_name in inputs.keys():
            feed_dict[graph.get_tensor_by_name(signature.inputs[arg_name].name)] = (
                inputs[arg_name])
        output_dict = {}
        for output_name, output_tensor_info in signature.outputs.items():
            output_dict[output_name] = graph.get_tensor_by_name(
                output_tensor_info.name)
        return session.run(output_dict, feed_dict=feed_dict)
1610c4d52fa8d18a770f1f347b9cd30b4652ab8b
3,641,351
def nth(seq, idx):
    """Return the nth item of a sequence.

    Constant time if list, tuple, or str; linear time if a generator.
    """
    return get(seq, idx)
cca44dca33d19a2e0db355be525009dce752445c
3,641,354
def _build_discretize_fn(value_type, stochastic, beta):
    """Builds a `tff.tf_computation` for discretization."""
    @computations.tf_computation(value_type, tf.float32, tf.float32)
    def discretize_fn(value, scale_factor, prior_norm_bound):
        return _discretize_struct(value, scale_factor, stochastic, beta,
                                  prior_norm_bound)
    return discretize_fn
75f9f50ec376b1a10b5fcb629527a873b8768235
3,641,356
def expand_mapping_target(namespaces, val):
    """Expand a mapping target, expressed as a comma-separated list of
    CURIE-like strings potentially prefixed with ^ to express inverse
    properties, into a list of (uri, inverse) tuples, where uri is a URIRef
    and inverse is a boolean."""
    vals = [v.strip() for v in val.split(',')]
    ret = []
    for v in vals:
        inverse = False
        if v.startswith('^'):
            inverse = True
            v = v[1:]
        ret.append((expand_curielike(namespaces, v), inverse))
    return ret
b4a4f08d39728c8f61b7b373a521890f88d6f912
3,641,357
def home(request):
    """Handle the default request, for when no endpoint is specified."""
    return Response('This is Michael\'s REST API!')
a37a2eaa68366de4d8542357c043c4e29ac7a9f9
3,641,358
def create_message(sender, to, subject, message_text, is_html=False):
    """Create a message for an email.

    Args:
        sender: Email address of the sender.
        to: Email address of the receiver.
        subject: The subject of the email message.
        message_text: The text of the email message.
        is_html: Whether message_text should be sent as HTML.

    Returns:
        An object containing a base64url encoded email object.
    """
    if is_html:
        message = MIMEText(message_text, "html")
    else:
        message = MIMEText(message_text)
    message["to"] = to
    message["from"] = sender
    message["subject"] = subject
    encoded_message = urlsafe_b64encode(message.as_bytes())
    return {"raw": encoded_message.decode()}
2b5dc225df5786df9f2650631d209c53e3e8145b
3,641,359
def get_agent(runmode, name):  # noqa: E501
    """get_agent

     # noqa: E501

    :param runmode:
    :type runmode: str
    :param name:
    :type name: str

    :rtype: None
    """
    return 'do some magic!'
065302bb7793eff12973208db5f35f3494a83930
3,641,360
def find_splits(array1: list, array2: list) -> list:
    """Find the split points of the given arrays of events."""
    keys = set()
    for event in array1:
        keys.add(event["temporalRange"][0])
        keys.add(event["temporalRange"][1])
    for event in array2:
        keys.add(event["temporalRange"][0])
        keys.add(event["temporalRange"][1])
    return list(sorted(keys))
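# Illustrative check (not from the original source), assuming events carry a
# "temporalRange" [start, end] pair as the function expects:
events_a = [{"temporalRange": [0, 4]}, {"temporalRange": [4, 9]}]
events_b = [{"temporalRange": [2, 9]}]
assert find_splits(events_a, events_b) == [0, 2, 4, 9]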
c52f696caddf35fa050621e7668eec06686cee14
3,641,361
def to_subtask_dict(subtask):
    """
    :rtype: ``dict``
    """
    result = {
        'id': subtask.id,
        'key': subtask.key,
        'summary': subtask.fields.summary
    }
    return result
5171d055cc693b1aa00976c063188a907a7390dc
3,641,362
from typing import Optional, Text, Tuple

def _partition_labeled_span(
    contents: Text, labeled_span: substitution.LabeledSpan
) -> Tuple[substitution.LabeledSpan, Optional[substitution.LabeledSpan],
           Optional[substitution.LabeledSpan]]:
    """Splits a labeled span into first line, intermediate, last line."""
    start, end = labeled_span.span
    first_newline = contents.find('\n', start, end)
    if first_newline == -1:
        return (labeled_span, None, None)
    first, remainder = _split_labeled_span_after(labeled_span, first_newline)
    last_newline = contents.rfind('\n', *remainder.span)
    if last_newline == -1:
        return (first, None, remainder)
    between, last = _split_labeled_span_after(remainder, last_newline)
    return (first, between, last)
6f22341d32c03ba0057fbfd6f08c88ac8736220f
3,641,363
def is_active(relation_id: RelationID) -> bool:
    """Retrieve an activation record from a relation ID."""
    # query to DB
    try:
        sups = db.session.query(RelationDB) \
            .filter(RelationDB.supercedes_or_suppresses == int(relation_id)) \
            .first()
    except Exception as e:
        raise DBLookUpError from e
    # return True if there is no superseder/suppressor
    return bool(sups is None)
352f44e2f025ac0918519d0fe8e513b3871be7b9
3,641,364
def vectorize_with_similarities(text, vocab_tokens, vocab_token_to_index, vocab_matrix):
    """
    Generate a vector representation of a text string based on a word similarity matrix.
    The resulting vector has n positions, where n is the number of words or tokens in the
    full vocabulary. The value at each position indicates the maximum similarity between
    that corresponding word in the vocabulary and any of the words or tokens in the input
    text string, as given by the input similarity matrix. Therefore, this is similar to
    an n-grams approach but uses the similarity between non-identical words or tokens to
    make the vector semantically meaningful.

    Args:
        text (str): Any arbitrary text string.
        vocab_tokens (list of str): The words or tokens that make up the entire vocabulary.
        vocab_token_to_index (dict of str:int): Mapping between words in the vocabulary
            and an index in rows and columns of the matrix.
        vocab_matrix (numpy.array): A pairwise distance matrix holding the similarity
            values between all possible pairs of words in the vocabulary.

    Returns:
        numpy.Array: A numerical vector with length equal to the size of the vocabulary.
    """
    doc_tokens = [token for token in text.split() if token in vocab_tokens]
    vector = [max([vocab_matrix[vocab_token_to_index[vocab_token]][vocab_token_to_index[doc_token]]
                   for doc_token in doc_tokens])
              for vocab_token in vocab_tokens]
    return vector
5b843ffbfdefbf691fb5766bbe6772459568cf78
3,641,365
def get_puppet_node_cert_from_server(node_name):
    """
    Init environment to connect to Puppet Master and retrieve the certificate
    for that node in the server (if it exists).

    :param node_name: Name of target node
    :return: Certificate for that node in Puppet Master or None if this
        information has not been found
    """
    _init_puppet_master_connection()
    return _execute_command(COMMAND_PUPPET_GET_CERT.format(node_name))
7f7fa2164bf7f289ce9dbc1b35f2d8aea546bb60
3,641,366
from typing import Optional

def get_notebook_workspace(account_name: Optional[str] = None,
                           notebook_workspace_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNotebookWorkspaceResult:
    """
    A notebook workspace resource

    :param str account_name: Cosmos DB database account name.
    :param str notebook_workspace_name: The name of the notebook workspace resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['notebookWorkspaceName'] = notebook_workspace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20190801:getNotebookWorkspace', __args__, opts=opts, typ=GetNotebookWorkspaceResult).value

    return AwaitableGetNotebookWorkspaceResult(
        id=__ret__.id,
        name=__ret__.name,
        notebook_server_endpoint=__ret__.notebook_server_endpoint,
        status=__ret__.status,
        type=__ret__.type)
d9020323c0ea520951730a31b2f457ab80fcc931
3,641,367
def get_current_player(player_one_turn: bool) -> str:
    """Return 'player one' iff player_one_turn is True; otherwise, return
    'player two'.

    >>> get_current_player(True)
    'player one'
    >>> get_current_player(False)
    'player two'
    """
    if player_one_turn:
        return P1
    else:
        return P2
6bade089054513943aef7656972cadd2d242807c
3,641,368
def is_word(s):
    """
    String `s` counts as a word if it has at least one letter.
    """
    for c in s:
        if c.isalpha():
            return True
    return False
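# Illustrative checks (not from the original source): a single letter anywhere
# in the string is enough to count as a word.
assert is_word("a1") is True
assert is_word("123!") is False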
524ed5cc506769bd8634a46d346617344485e5f7
3,641,370
def index_all_messages(empty_index):
    """
    Expected index of `initial_data` fixture when model.narrow = []
    """
    return dict(empty_index, **{'all_msg_ids': {537286, 537287, 537288}})
ea2c59a4de8e62d2293f87e26ead1b4c15f15a11
3,641,371
def compute_affine_matrix(in_shape, out_shape, crop=None, degrees=0.0,
                          translate=(0.0, 0.0), flip_h=False, flip_v=False,
                          resize=False, keep_ratio=False):
    """
    Similarity warp transformation of the image keeping center invariant.

    Args:
        in_shape (Sequence): the shape of the input image
        out_shape (Sequence): the shape of the output image
        crop (Sequence, optional): crop center location, width and height. The
            center location is relative to the center of the image. If
            :attr:`resize` is not ``True``, crop is simply a translation in the
            :attr:`in_shape` space.
        degrees (float or int, optional): degrees to rotate the crop.
            (default: ``(0.0)``)
        translate (Sequence, optional): horizontal and vertical translations.
            (default: ``(0.0, 0.0)``)
        flip_h (bool, optional): flip the image horizontally. (default: ``False``)
        flip_v (bool, optional): flip the image vertically. (default: ``False``)
        resize (bool, optional): resize the cropped image to fit the output's
            size. (default: ``False``)
        keep_ratio (bool, optional): match the smaller edge to the corresponding
            output edge size, keeping the aspect ratio after resize. Has no
            effect if :attr:`resize` is ``False``. (default: ``False``)
    """
    if crop is not None:
        T_crop_x, T_crop_y, crop_w, crop_h = crop
    else:
        T_crop_x, T_crop_y = 0, 0
        crop_w, crop_h = in_shape

    r = np.deg2rad(degrees)
    tx, ty = translate
    fh = 1 - 2 * float(flip_h)
    fv = 1 - 2 * float(flip_v)

    #
    # H = T_inshape*T_crop*R*S_resize*T_outshapeT
    #
    T_i_x = (in_shape[0] - 1) / 2
    T_i_y = (in_shape[1] - 1) / 2
    T_inshape = np.asarray([[fh, 0, T_i_x],
                            [0, fv, T_i_y],
                            [0, 0, 1]])

    T_crop = np.asarray([[1, 0, T_crop_x],
                         [0, 1, T_crop_y],
                         [0, 0, 1]])

    R = np.asarray([[+np.cos(r), -np.sin(r), 0],
                    [+np.sin(r), +np.cos(r), 0],
                    [0, 0, 1]])

    S_r_x = 1
    S_r_y = 1
    if resize:
        top_left, bot_right = R.dot([[-crop_w / 2, crop_w / 2],
                                     [-crop_h / 2, crop_h / 2],
                                     [1, 1]]).transpose()[:, 0:2]
        crop_w, crop_h = np.absolute(bot_right - top_left)
        S_r_x = crop_w / out_shape[0]
        S_r_y = crop_h / out_shape[1]
        if keep_ratio:
            scale_ratio = min(S_r_x, S_r_y)
            S_r_x = scale_ratio
            S_r_y = scale_ratio
    S_resize = np.asarray([[S_r_x, 0, 0],
                           [0, S_r_y, 0],
                           [0, 0, 1]])

    T_o_x = tx - (out_shape[0] - 1) / 2
    T_o_y = ty - (out_shape[1] - 1) / 2
    T_outshapeT = np.asarray([[1, 0, T_o_x],
                              [0, 1, T_o_y],
                              [0, 0, 1]])

    return T_inshape.dot(T_crop).dot(R).dot(S_resize).dot(T_outshapeT)
0c3786c44d35341e5e85d3756e50eb59dd473d64
3,641,372
def Bern_to_Fierz_nunu(C, ddll):
    """From semileptonic Bern basis to Fierz semileptonic basis for Class V.
    C should be the corresponding leptonic Fierz basis and `ddll` should be of
    the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
    ind = ddll.replace('l_', '').replace('nu_', '')
    return {
        'F' + ind + 'nu': C['nu1' + ind],
        'F' + ind + 'nup': C['nu1p' + ind],
    }
4f08f79d6614c8929c3f42096fac71b04bfe7b4b
3,641,373
def enforce_boot_from_volume(client):
    """Add boot from volume args in create server method call."""
    class ServerManagerBFV(servers.ServerManager):
        def __init__(self, client):
            super(ServerManagerBFV, self).__init__(client)
            self.bfv_image_client = images.ImageManager(client)

        def create(self, name, image, flavor, **kwargs):
            image_obj = self.bfv_image_client.get(image)
            if "block_device_mapping" not in image_obj.metadata.keys() and \
                    "block_device_mapping_v2" not in kwargs.keys() and \
                    "block_device_mapping" not in kwargs.keys():
                if 'volume_size' in kwargs:
                    vol_size = kwargs.pop('volume_size')
                else:
                    vol_size = CONF.nova_server_volume_size
                bv_map = [{
                    "source_type": "image",
                    "destination_type": "volume",
                    "delete_on_termination": "1",
                    "boot_index": 0,
                    "uuid": image,
                    "device_name": "vda",
                    "volume_size": str(vol_size)}]
                bdm_args = {
                    'block_device_mapping_v2': bv_map,
                }
                kwargs.update(bdm_args)
                image = ''
            return super(ServerManagerBFV, self).create(name, image, flavor, **kwargs)

    client.servers = ServerManagerBFV(client)
4ae4d2624f216c96722e811d9d44cb04caa46e1d
3,641,374
def img_to_yuv(frame, mode, grayscale=False):
    """Change color space of `frame` from any supported `mode` to YUV

    Args:
        frame: 3-D tensor in either [H, W, C] or [C, H, W]
        mode: A string, must be one of [YV12, YV21, NV12, NV21, RGB, BGR]
        grayscale: discard uv planes

    Return:
        3-D tensor of YUV in [H, W, C]
    """
    _planar_mode = ('YV12', 'YV21', 'NV12', 'NV21')
    _packed_mode = ('RGB', 'BGR')
    _allowed_mode = (*_planar_mode, *_packed_mode)
    if not isinstance(frame, list):
        raise TypeError("frame must be a list of numpy array")
    if mode not in _allowed_mode:
        raise ValueError("invalid mode: " + mode)
    if mode in _planar_mode:
        if mode in ('YV12', 'YV21'):
            y, u, v = frame
        elif mode in ('NV12', 'NV21'):
            y, uv = frame
            u = uv.flatten()[0::2].reshape([1, uv.shape[1] // 2, uv.shape[2]])
            v = uv.flatten()[1::2].reshape([1, uv.shape[1] // 2, uv.shape[2]])
        else:
            y = u = v = None
        y = np.transpose(y)
        u = np.transpose(u)
        v = np.transpose(v)
        if '21' in mode:
            u, v = v, u
        if not grayscale:
            up_u = np.zeros(shape=[u.shape[0] * 2, u.shape[1] * 2, u.shape[2]])
            up_v = np.zeros(shape=[v.shape[0] * 2, v.shape[1] * 2, v.shape[2]])
            up_u[0::2, 0::2, :] = up_u[0::2, 1::2, :] = u
            up_u[1::2, ...] = up_u[0::2, ...]
            up_v[0::2, 0::2, :] = up_v[0::2, 1::2, :] = v
            up_v[1::2, ...] = up_v[0::2, ...]
            yuv = np.concatenate([y, up_u, up_v], axis=-1)
            yuv = np.transpose(yuv, [1, 0, 2])  # PIL needs [W, H, C]
            img = Image.fromarray(yuv.astype('uint8'), mode='YCbCr')
        else:
            y = np.squeeze(y)
            img = Image.fromarray(np.transpose(y).astype('uint8'), mode='L')
    elif mode in _packed_mode:
        assert len(frame) == 1
        rgb = np.asarray(frame[0])
        if mode == 'BGR':
            rgb = rgb[..., ::-1]
        rgb = np.transpose(rgb, [1, 0, 2])
        if not grayscale:
            img = Image.fromarray(rgb, mode='RGB').convert('YCbCr')
        else:
            img = Image.fromarray(rgb, mode='RGB').convert('L')
    else:
        raise RuntimeError("unreachable!")
    # return img_to_array(image1) if turn_array else image1
    return img
002506b3a46fa6b601f4ca65255c8f06b990992d
3,641,375
def assemblenet_kinetics600() -> cfg.ExperimentConfig:
    """Video classification on Videonet with assemblenet."""
    exp = video_classification.video_classification_kinetics600()
    feature_shape = (32, 224, 224, 3)
    exp.task.train_data.global_batch_size = 1024
    exp.task.validation_data.global_batch_size = 32
    exp.task.train_data.feature_shape = feature_shape
    exp.task.validation_data.feature_shape = (120, 224, 224, 3)
    exp.task.train_data.dtype = 'bfloat16'
    exp.task.validation_data.dtype = 'bfloat16'

    model = AssembleNetModel()
    model.backbone.assemblenet.model_id = '50'
    model.backbone.assemblenet.blocks = flat_lists_to_blocks(
        asn50_structure, asn_structure_weights)
    model.backbone.assemblenet.num_frames = feature_shape[0]
    exp.task.model = model

    assert exp.task.model.backbone.assemblenet.num_frames > 0, (
        f'backbone num_frames '
        f'{exp.task.model.backbone.assemblenet}')

    return exp
3356b6ea758baf04cc98421d700f25e342884d5a
3,641,376
import math
import numpy as np
import torch
from scipy.spatial import distance
from sklearn.linear_model import Lasso

def channel_selection(inputs, module, sparsity=0.5, method='greedy'):
    """
    Selects the most important input channels of the current module, i.e. the
    input channels that can reproduce the original output most closely.

    :param inputs: torch.Tensor, input features map
    :param module: torch.nn.module, layer
    :param sparsity: float, 0 ~ 1, fraction of this layer's channels to prune
    :param method: str, how to select the channel
    :return: two lists of int, indices of channels to be kept and pruned
    """
    num_channel = inputs.size(1)  # number of channels
    num_pruned = int(math.ceil(num_channel * sparsity))  # number of channels to prune for the given sparsity
    num_stayed = num_channel - num_pruned

    print('num_pruned', num_pruned)
    if method == 'greedy':
        indices_pruned = []
        while len(indices_pruned) < num_pruned:
            min_diff = 1e10
            min_idx = 0
            for idx in range(num_channel):
                if idx in indices_pruned:
                    continue
                indices_try = indices_pruned + [idx]
                inputs_try = torch.zeros_like(inputs)
                inputs_try[:, indices_try, ...] = inputs[:, indices_try, ...]
                output_try = module(inputs_try)
                output_try_norm = output_try.norm(2)
                if output_try_norm < min_diff:
                    min_diff = output_try_norm
                    min_idx = idx
            indices_pruned.append(min_idx)
        print('indices_pruned !!! ', indices_pruned)
        indices_stayed = list(set([i for i in range(num_channel)]) - set(indices_pruned))

    elif method == 'greedy_GM':
        indices_stayed = []
        while len(indices_stayed) < num_stayed:
            max_farthest_channel_norm = 1e-10
            farthest_channel_idx = 0
            for idx in range(num_channel):
                if idx in indices_stayed:
                    continue
                indices_try = indices_stayed + [idx]
                inputs_try = torch.zeros_like(inputs)
                inputs_try[:, indices_try, ...] = inputs[:, indices_try, ...]
                output_try = module(inputs_try).view(num_channel, -1).cpu().detach().numpy()
                similar_matrix = distance.cdist(output_try, output_try, 'euclidean')
                similar_sum = np.sum(np.abs(similar_matrix), axis=0)
                similar_large_index = similar_sum.argsort()[-1]
                farthest_channel_norm = np.linalg.norm(similar_sum[similar_large_index])
                if max_farthest_channel_norm < farthest_channel_norm:
                    max_farthest_channel_norm = farthest_channel_norm
                    farthest_channel_idx = idx
            print(farthest_channel_idx)
            indices_stayed.append(farthest_channel_idx)
        print('indices_stayed !!! ', indices_stayed)
        indices_pruned = list(set([i for i in range(num_channel)]) - set(indices_stayed))

    elif method == 'lasso':
        y = module(inputs)
        if module.bias is not None:  # bias.shape = [N]
            bias_size = [1] * y.dim()  # bias_size: [1, 1, 1, 1]
            bias_size[1] = -1  # [1, -1, 1, 1]
            bias = module.bias.view(bias_size)  # bias.view([1, -1, 1, 1]) = [1, N, 1, 1]
            y -= bias  # subtract the bias from the output feature (y - b)
        else:
            bias = 0.
        y = y.view(-1).data.cpu().numpy()  # flatten all of outputs
        y_channel_spread = []
        for i in range(num_channel):
            x_channel_i = torch.zeros_like(inputs)
            x_channel_i[:, i, ...] = inputs[:, i, ...]
            y_channel_i = module(x_channel_i) - bias
            y_channel_spread.append(y_channel_i.data.view(-1, 1))
        y_channel_spread = torch.cat(y_channel_spread, dim=1).cpu()

        alpha = 1e-7
        solver = Lasso(alpha=alpha, warm_start=True, selection='random', random_state=0)

        # choice_idx = np.random.choice(y_channel_spread.size()[0], 2000, replace=False)
        # selected_y_channel_spread = y_channel_spread[choice_idx, :]
        # new_output = y[choice_idx]
        #
        # del y_channel_spread, y

        # increase alpha little by little until the desired number of channels is pruned
        alpha_l, alpha_r = 0, alpha
        num_pruned_try = 0
        while num_pruned_try < num_pruned:
            alpha_r *= 2
            solver.alpha = alpha_r
            # solver.fit(selected_y_channel_spread, new_output)
            solver.fit(y_channel_spread, y)
            num_pruned_try = sum(solver.coef_ == 0)

        # once an alpha that prunes enough channels is found, narrow the interval
        # around it to find a more precise alpha
        num_pruned_max = int(num_pruned)
        while True:
            alpha = (alpha_l + alpha_r) / 2
            solver.alpha = alpha
            # solver.fit(selected_y_channel_spread, new_output)
            solver.fit(y_channel_spread, y)
            num_pruned_try = sum(solver.coef_ == 0)

            if num_pruned_try > num_pruned_max:
                alpha_r = alpha
            elif num_pruned_try < num_pruned:
                alpha_l = alpha
            else:
                break

        # finally, convert the lasso coefficients into indices
        indices_stayed = np.where(solver.coef_ != 0)[0].tolist()
        indices_pruned = np.where(solver.coef_ == 0)[0].tolist()

    else:
        raise NotImplementedError

    inputs = inputs.cuda()
    module = module.cuda()

    return indices_stayed, indices_pruned
957cbcc799185fd6c2547662bfe79205389d44da
3,641,377
import six

def format_host(host_tuple):
    """
    Format a host tuple to a string
    """
    if isinstance(host_tuple, (list, tuple)):
        if len(host_tuple) != 2:
            raise ValueError('host_tuple has unexpected length: %s' % host_tuple)
        return ':'.join([six.text_type(s) for s in host_tuple])
    elif isinstance(host_tuple, six.string_types):
        return host_tuple
    else:
        raise ValueError('host_tuple unexpected type: (%s) %s' % (type(host_tuple), host_tuple))
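# Illustrative checks (not from the original source; assumes `six` is installed):
# tuples are joined with ':', strings pass through unchanged.
assert format_host(("localhost", 8080)) == "localhost:8080"
assert format_host("localhost:8080") == "localhost:8080"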
f4822aec5143a99ccc52bb2657e1f42477c65400
3,641,378
import psutil

def get_cpu_stats():
    """
    Obtains the system's CPU statistics.

    :returns: System CPU statistics.
    """
    return psutil.cpu_stats()
f538977db72083f42c710faa987a97511959c973
3,641,379
def get_minmax_array(X):
    """Utility method that returns the boundaries for each feature of the input array.

    Args:
        X (np.float array of shape (num_instances, num_features)): The input array.

    Returns:
        min (np.float array of shape (num_features,)): Minimum values for each feature in array.
        max (np.float array of shape (num_features,)): Maximum values for each feature in array.
    """
    min = np.min(X, axis=0)
    max = np.max(X, axis=0)
    return min, max
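# Illustrative usage (not from the original source; assumes numpy is imported
# as np, as the function body does). Bounds are taken column-wise, one per feature:
X = np.array([[1.0, 5.0], [3.0, 2.0], [2.0, 9.0]])
mn, mx = get_minmax_array(X)
# mn == [1.0, 2.0], mx == [3.0, 9.0]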
5453371759af5bf6d876aa8fe5d2caf88ee6eb08
3,641,383
def getAllHeaders(includeText=False):
    """
    Get a dictionary of dream numbers and headers. If includeText=True, also
    add the text of the dream to the dictionary as 'text' (note that this key
    is all lowercase so it will not conflict with the usual convention for
    header names, even if "Text" would be an odd header name).
    """
    dreams = {}
    for f in allDreamfiles():
        dream = {}
        textLines = []
        inHeaders = True
        for line in f:
            if not line.strip():  # end of headers
                if includeText:
                    inHeaders = False
                else:
                    break
            if inHeaders:
                header, value = (i.strip() for i in line.split(':\t'))
                dream[header] = value
            else:
                textLines.append(line)
        if includeText:
            # omit the first blank separator line
            dream['text'] = '\n'.join(i for i in textLines[1:])
        dreams[dream['Id']] = dream
    return dreams
2bbd78d9c9cbfaa50a62e99c25148844d7c5e330
3,641,384
def zscore(arr, period):
    """
    ZScore transformation of `arr` for rolling `period`.

    ZScore = (X - MEAN(X)) / STDEV(X)

    :param arr:
    :param period:
    :return:
    """
    if period <= 0:
        raise YaUberAlgoArgumentError("'{}' must be positive number".format(period))
    # Do quick sanity checks of arguments
    _check_series_args(arr=arr)
    try:
        if isinstance(arr, pd.Series):
            return pd.Series(_zscore(arr.values, period), index=arr.index)
        elif isinstance(arr, np.ndarray):
            return _zscore(arr, period)
    except ValueError as exc:
        raise YaUberAlgoInternalError(str(exc))
8a49afe3ecefc326b3bd889279085cccd1d19a61
3,641,385
import glob
import pandas

def _load_event_data(prefix, name):
    """Load per-event data for one single type, e.g. hits, or particles."""
    expr = '{!s}-{}.csv*'.format(prefix, name)
    files = glob.glob(expr)
    dtype = DTYPES[name]
    if len(files) == 1:
        return pandas.read_csv(files[0], header=0, index_col=False, dtype=dtype)
    elif len(files) == 0:
        raise Exception('No file matches \'{}\''.format(expr))
    else:
        raise Exception('More than one file matches \'{}\''.format(expr))
04b2e4a7483ba56fdd282dc6355e9acb2d6da7b1
3,641,386
from datetime import datetime

def check_file(file_id: str, upsert: bool = False) -> File:
    """Checks that the file with file_id exists in the DB

    Args:
        file_id: The id for the requested file.
        upsert: If the file doesn't exist create a placeholder file

    Returns:
        The file object

    Raises:
        NotFoundError: File with the requested ID doesn't exist and is expected to
        ModelValidationError: Incorrectly formatted ID is given
    """
    try:
        ObjectId(file_id)
    except (InvalidId, TypeError):
        raise ModelValidationError(
            f"Cannot create a file id with the string {file_id}. "
            "Requires 24-character hex string."
        )

    res = db.query_unique(File, id=file_id)
    if res is None:
        if upsert:
            create_file("BG_placeholder", 0, 0, file_id)
            res = db.query_unique(File, id=file_id)
        else:
            raise NotFoundError(f"Tried to fetch an unsaved file {file_id}")
    db.modify(res, updated_at=datetime.utcnow())
    return res
2f4e94a064d0bdfea8f001855eb39675f78ab6e5
3,641,387
def parse(volume_str):
    """Parse combined k8s volume string into a dict.

    Args:
        volume_str: The string representation for k8s volume,
            e.g. "claim_name=c1,mount_path=/path1".

    Return:
        A Python dictionary parsed from the given volume string.
    """
    kvs = volume_str.split(",")
    volume_keys = []
    parsed_volume_dict = {}
    for kv in kvs:
        k, v = kv.split("=")
        if k not in volume_keys:
            volume_keys.append(k)
        else:
            raise ValueError(
                "The volume string contains duplicate volume key: %s" % k
            )
        if k not in _ALLOWED_VOLUME_KEYS:
            raise ValueError(
                "%s is not in the allowed list of volume keys: %s"
                % (k, _ALLOWED_VOLUME_KEYS)
            )
        parsed_volume_dict[k] = v
    return parsed_volume_dict
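# Illustrative usage (not from the original source), assuming "claim_name" and
# "mount_path" are in the module-level _ALLOWED_VOLUME_KEYS, as the docstring
# example suggests:
# parse("claim_name=c1,mount_path=/path1")
# -> {"claim_name": "c1", "mount_path": "/path1"}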
f6984faf90081eb8ca3fbbb8ffaf636b040c7ffc
3,641,388
import numpy as np

def longest_common_substring(text1, text2):
    """Longest common substring; case-sensitive."""
    n = len(text1)
    m = len(text2)
    maxlen = 0
    span1 = (0, 0)
    span2 = (0, 0)
    if n * m == 0:
        return span1, span2, maxlen
    dp = np.zeros((n + 1, m + 1), dtype=np.int32)
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] > maxlen:
                    maxlen = dp[i][j]
                    span1 = (i - maxlen, i)
                    span2 = (j - maxlen, j)
    return span1, span2, maxlen
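# Illustrative usage (not from the original source): spans are half-open
# (start, end) indices into each string.
span1, span2, maxlen = longest_common_substring("abCdef", "xyCdez")
# maxlen == 3; "abCdef"[span1[0]:span1[1]] == "Cde" == "xyCdez"[span2[0]:span2[1]]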
ed892739d22ee0763a2fe5dd44b48b8d1902605e
3,641,389
def make_subclasses_dict(cls):
    """
    Return a dictionary of the subclasses inheriting from the argument class.
    Keys are string names of the classes, values the actual classes.
    :param cls:
    :return:
    """
    the_dict = {x.__name__: x for x in get_all_subclasses(cls)}
    the_dict[cls.__name__] = cls
    return the_dict
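# Illustrative sketch (not from the original source), assuming the helper
# get_all_subclasses walks the full inheritance tree:
# class Base: pass
# class Child(Base): pass
# make_subclasses_dict(Base) -> {"Base": Base, "Child": Child}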
36eb7c9242b83a84fcd6ee18b4ca9297038f9ee6
3,641,390
import time

def _parse_realtime_data(xmlstr):
    """
    Takes an xml string and returns a list of dicts containing realtime data.
    """
    doc = minidom.parseString(xmlstr)
    ret = []
    elem_map = {"LineID": "id",
                "DirectionID": "direction",
                "DestinationStop": "destination"}
    ack = _single_element(doc, "Acknowledge")
    if ack is None or ack.attributes["Result"].nodeValue != "ok":
        return None
    curtime = time.mktime(time.strptime(
        ack.attributes["TimeStamp"].nodeValue[:-10],
        "%Y-%m-%dT%H:%M:%S"))
    for elem in doc.getElementsByTagName("DISDeviation"):
        entry = {"is_realtime": False}
        for name, value in [(e.nodeName, _get_text(e.childNodes))
                            for e in elem.childNodes
                            if e.nodeType == e.ELEMENT_NODE]:
            if name in elem_map:
                entry[elem_map[name]] = unicode(value)
            elif name == "TripStatus":
                entry["is_realtime"] = value == "Real"
        if entry["is_realtime"]:
            timeele = _single_element(elem, "ExpectedDISDepartureTime")
        else:
            timeele = _single_element(elem, "ScheduledDISDepartureTime")
        parsed_time = time.strptime(
            _get_text(timeele.childNodes)[:-10], "%Y-%m-%dT%H:%M:%S")
        entry["time"] = parsed_time
        entry["wait_time"] = int(time.mktime(parsed_time) - curtime)
        ret.append(entry)
    return ret
90958c7f66072ecfd6c57b0da95293e35196354c
3,641,391
def tocopo_accuracy_fn(tocopo_logits: dt.BatchedTocopoLogits,
                       target_data: dt.BatchedTrainTocopoTargetData,
                       oov_token_id: int,
                       pad_token_id: int,
                       is_distributed: bool = True) -> AccuracyMetrics:
    """Computes accuracy metrics.

    Args:
        tocopo_logits: Predictions from model (unnormalized log scores).
        target_data: target data to compare prediction against.
        oov_token_id: Id of out of vocabulary token.
        pad_token_id: Id of pad token.
        is_distributed: Whether to perform cross-device aggregation.

    Returns:
        A `AccuracyMetrics` instance.
    """
    vocab_size = tocopo_logits.token_logits.shape[2]
    one_hot_target_tokens = jax.nn.one_hot(target_data.token_ids, vocab_size)  # (B, O, U)
    # Don't give credit for OOV tokens.
    one_hot_target_tokens = one_hot_target_tokens.at[:, :, oov_token_id].set(
        jnp.zeros_like(target_data.token_ids))

    # Disable predictions for all tokens when there is a pointer.
    # Mask indicating absence of a pointer at target.
    not_pointer_mask = target_data.is_target_pointer.sum(axis=2) == 0  # (B, O)
    one_hot_target_tokens = one_hot_target_tokens * jnp.expand_dims(
        not_pointer_mask, axis=2)

    few_hot_targets = jnp.concatenate([
        one_hot_target_tokens,
        target_data.is_target_copy,
        target_data.is_target_pointer
    ], axis=2)  # (B, O, U+2V)

    # Get the one hot predictions.
    tocopo_logits_stacked = jnp.concatenate([
        tocopo_logits.token_logits,
        tocopo_logits.copy_logits,
        tocopo_logits.pointer_logits
    ], axis=2)  # (B, O, U+2V)
    prediction_indices = jnp.argmax(tocopo_logits_stacked, axis=2)  # (B, O)
    one_hot_predictions = jax.nn.one_hot(
        prediction_indices, tocopo_logits_stacked.shape[2])  # (B, O, U+2V)

    # (B, O)
    is_pad = (target_data.token_ids == pad_token_id)

    # (B, O, U+2V) -> (B, O)
    # If the target is a pad token, then we remove it from consideration when
    # calculating accuracies. `element_correct_or_pad` array always assign a 1 to
    # padded prediction (this property is used in the sequence accuracy
    # computation).
    element_correct = jnp.sum(one_hot_predictions * few_hot_targets, axis=-1)
    element_correct_or_pad = jnp.where(is_pad, 1, element_correct)
    per_element_correct = jnp.sum(element_correct_or_pad * (1 - is_pad))
    per_element_attempts = jnp.sum(1 - is_pad)

    per_sequence_correct = jnp.sum(jnp.prod(element_correct_or_pad, axis=-1))
    per_sequence_attempts = element_correct_or_pad.shape[0]

    pointer_mask = jnp.logical_and(
        jnp.logical_not(not_pointer_mask), jnp.logical_not(is_pad))
    pointer_correct = jnp.sum(element_correct * pointer_mask)
    pointer_attempts = jnp.sum(pointer_mask)
    # Pointer sequence accuracy: construct an array of 1s everywhere except where
    # a pointer is incorrectly predicted. Note: this counts a sequence without
    # pointers as accurately predicted.
    pointer_correct_or_toco_or_pad = jnp.where(not_pointer_mask, 1,
                                               element_correct_or_pad)
    per_sequence_po_correct = jnp.sum(
        jnp.prod(pointer_correct_or_toco_or_pad, axis=-1))

    toco_mask = jnp.logical_and(not_pointer_mask, jnp.logical_not(is_pad))
    toco_correct = jnp.sum(element_correct * toco_mask)
    toco_attempts = jnp.sum(toco_mask)
    # ToCo sequence accuracy: construct an array of 1s everywhere except where
    # a To/Co is incorrectly predicted. Note: this counts a sequence without
    # ToCo as accurately predicted.
    toco_correct_or_po_or_pad = jnp.where(pointer_mask, 1,
                                          element_correct_or_pad)
    per_sequence_toco_correct = jnp.sum(
        jnp.prod(toco_correct_or_po_or_pad, axis=-1))

    # Correct predictions using the To head.
    is_prediction_token_mask = prediction_indices < vocab_size
    token_correct = jnp.sum(
        element_correct * jnp.logical_and(is_prediction_token_mask,
                                          jnp.logical_not(is_pad)))

    # Aggregate across devices.
    if is_distributed:
        per_element_correct = jax.lax.psum(per_element_correct, axis_name='i')
        per_element_attempts = jax.lax.psum(per_element_attempts, axis_name='i')
        per_sequence_correct = jax.lax.psum(per_sequence_correct, axis_name='i')
        per_sequence_attempts = jax.lax.psum(per_sequence_attempts, axis_name='i')
        pointer_correct = jax.lax.psum(pointer_correct, axis_name='i')
        pointer_attempts = jax.lax.psum(pointer_attempts, axis_name='i')
        toco_correct = jax.lax.psum(toco_correct, axis_name='i')
        token_correct = jax.lax.psum(token_correct, axis_name='i')
        toco_attempts = jax.lax.psum(toco_attempts, axis_name='i')
        per_sequence_po_correct = jax.lax.psum(
            per_sequence_po_correct, axis_name='i')
        per_sequence_toco_correct = jax.lax.psum(
            per_sequence_toco_correct, axis_name='i')

    return AccuracyMetrics(
        num_element_correct=per_element_correct,
        num_element_attempts=per_element_attempts,
        num_seq_correct=per_sequence_correct,
        num_seq_attempts=per_sequence_attempts,
        num_pointer_correct=pointer_correct,
        num_pointer_attempts=pointer_attempts,
        num_pointer_seq_correct=per_sequence_po_correct,
        num_toco_correct=toco_correct,
        num_token_correct=token_correct,
        num_toco_attempts=toco_attempts,
        num_toco_seq_correct=per_sequence_toco_correct)
828b7d3db40d488a7e05bbfe1f3d2d94f58d8efa
3,641,392
def cols_from_html_tbl(tbl):
    """
    Extracts columns from html-table tbl and puts the columns in a list.
    tbl must be a results-object from BeautifulSoup.
    """
    rows = tbl.tbody.find_all('tr')
    if rows:
        for row in rows:
            cols = row.find_all('td')
            for i, cell in enumerate(cols):
                if 'col_list' not in locals():
                    col_list = [[] for x in range(len(cols))]
                col_list[i].append(cell.text)
    else:
        col_list = []
    return col_list
94bef05b782073955738cf7b774af34d64520499
3,641,393
from typing import List, Tuple

def get_score_park(board: List[List[str]]) -> Tuple[int]:
    """
    Calculate the score for the building - park (PRK).

    Score 1: If ONLY 1 park.
    Score 3: If the park size is 2.
    Score 8: If the park size is 3.
    Score 16: If the park size is 4.
    Score 22: If the park size is 5.
    Score 23: If the park size is 6.
    Score 24: If the park size is 7.
    Score 25: If the park size is 8.
    Score 17 + x: For all park sizes > 8, where x = size of park

    Parameters
    ----------
    board: List[List[str]]
        2D array containing all the game detail, including column header,
        row header and placed buildings.

    Returns
    -------
    score: Tuple[int]
        A tuple containing all the scores for the specific building - park (PRK).
    """
    type = 'PRK'
    # Convert board into a logical matrix, where 1 represents a park and any
    # other type of building is represented by 0.
    grid = [[1 if type == col else 0 for col in row] for row in board]
    visited_location_set = set()
    score_list = []
    table = [
        [1, 2, 3, 4, 5, 6, 7, 8],
        [1, 3, 8, 16, 22, 23, 24, 25]
    ]
    for idx_row in range(len(grid)):
        for idx_col in range(len(grid[0])):
            score = 0
            size = get_island_size(idx_row, idx_col, grid, visited_location_set,
                                   direction=('up', 'down', 'left', 'right'))
            if 0 == size:
                continue
            if 8 > size:
                score_idx = table[0].index(size)
                score = table[1][score_idx]
            else:
                score = 17 + size
            score_list.append(score)
    return *score_list,
2bf1629aeb9937dfd871aa118e675cd9358b65ef
3,641,394
def kernel_epanechnikov(inst: np.ndarray) -> np.ndarray:
    """Epanechnikov kernel."""
    if inst.ndim != 1:
        raise ValueError("'inst' vector must be one-dimensional!")
    return 0.75 * (1.0 - np.square(inst)) * (np.abs(inst) < 1.0)
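# Illustrative usage (not from the original source; assumes numpy as np):
# K(u) = 0.75 * (1 - u^2) for |u| < 1, and 0 outside the support.
u = np.array([-2.0, 0.0, 0.5])
kernel_epanechnikov(u)  # -> [0.0, 0.75, 0.5625]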
7426e068c3a939595b77c129af4f8d30bbfc89fb
3,641,395
def submission_parser(reddit_submission_object):
    """Parses a submission and returns selected parameters."""
    post_timestamp = reddit_submission_object.created_utc
    post_id = reddit_submission_object.id
    score = reddit_submission_object.score
    ups = reddit_submission_object.ups
    downs = reddit_submission_object.downs
    # post_body = np.nan
    thread_title = reddit_submission_object.title
    thread_url = reddit_submission_object.url
    subreddit = reddit_submission_object.subreddit.display_name
    return post_timestamp, post_id, score, ups, downs, thread_title, thread_url, subreddit
d2b406f38e799230474e918df91d55e48d27f385
3,641,396
def dashboard():
    """Displays dashboard to logged in user."""
    user_type = session.get('user_type')
    user_id = session.get('user_id')

    if user_type is None:
        return redirect('/login')

    if user_type == 'band':
        band = crud.get_band_by_id(user_id)
        display_name = band.display_name
        age = band.age
        gender = band.gender
        influences = band.influences
        location = band.location
        description = band.description
        seeking = band.skills
        genres = band.genres
        return render_template('dashboard.html', user_type=user_type,
                               display_name=display_name, age=age, gender=gender,
                               influences=influences, location=location,
                               description=description, seeking=seeking,
                               genres=genres)

    if user_type == 'musician':
        musician = crud.get_musician_by_id(user_id)
        display_name = musician.display_name
        age = musician.age
        gender = musician.gender
        influences = musician.influences
        location = musician.location
        description = musician.description
        skills = musician.skills
        genres = musician.genres
        return render_template('dashboard.html', user_type=user_type,
                               display_name=display_name, age=age, gender=gender,
                               influences=influences, location=location,
                               description=description, skills=skills,
                               genres=genres)
1cec9fcd17a963921f23f03478a8c3195db9a18e
3,641,397
from bs4 import BeautifulSoup

def parse_site(site_content, gesture_id):
    """
    Parses the following attributes: title, image, verbs and other_gesture_ids

    :param site_content: a html string
    :param gesture_id: the current id
    :return: {
        title: str,
        img: str,
        id: number,
        compares: [
            {
                verb: [str],
                other_gesture_id: number
            }
        ]
    }
    """
    soup = BeautifulSoup(site_content, 'html.parser')
    img = soup.body.img
    img = img['src'] if img else False
    title = soup.body.font.b.contents[0].lower().strip()
    table = soup.body.table.tr
    rows = table.find_all('td')
    compares = []
    for td in rows:
        content = td.font.contents
        current_verb = []
        current_other = ''
        for line in content:
            if str(line) == '<br/>':
                compares.append({
                    'verb': current_verb,
                    'other_gesture_id': current_other,
                })
                current_verb = []
                current_other = ''
            elif hasattr(line, 'name') and line.name == 'a':
                current_other = line['href'].replace('.htm', '')
            else:
                current_verb.append(str(line).strip().replace('\\n', '').lower())
    return {
        'id': gesture_id,
        'title': title,
        'img': img,
        'compares': compares,
    }
b9719dbbd2ca7883257c53410423de5e3df3fe93
3,641,398
from multiprocessing import Pool
import multiprocessing

def test_multiprocessing_function():
    """Test parallel processing with multiprocessing."""
    logger = getLogger("ostap.test_multiprocessing_function")
    logger.info('Test job submission with module %s' % multiprocessing)

    ncpus = multiprocessing.cpu_count()
    pool = Pool(ncpus)

    jobs = pool.imap_unordered(make_histos, zip(count(), inputs))

    result = None
    for h in progress_bar(jobs, max_value=len(inputs)):
        if not result:
            result = h
        else:
            result.Add(h)

    pool.close()
    pool.join()

    logger.info("Histogram is %s" % result.dump(80, 20))
    logger.info("Entries %s/%s" % (result.GetEntries(), sum(inputs)))

    with wait(5), use_canvas('test_multiprocessing_function'):
        result.draw()

    return result
a59635b844b4ff80a090a1ec8e3661e340903269
3,641,399
import math

def fnCalculate_Bistatic_Coordinates(a, B):
    """
    Calculate the coordinates of the target in the bistatic plane.

    A, B, C = angles in the triangle
    a, b, c = length of the side opposite the angle

    Created: 22 April 2017
    """
    u = a * math.cos(B)
    v = a * math.sin(B)
    return u, v
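# Illustrative usage (not from the original source): with side length a and
# angle B in radians, the coordinates are (u, v) = (a*cos(B), a*sin(B)).
u, v = fnCalculate_Bistatic_Coordinates(2.0, math.pi / 6)
# u == 2*cos(pi/6) ~ 1.732, v == 2*sin(pi/6) == 1.0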
cc1dce6ef0506b987e42e3967cf36ea7b46a30d7
3,641,400
def _fn_lgamma_(self, b=1):
    """Gamma function: f = log(Gamma(ab))
    >>> f =
    >>> a = f.lgamma ( )
    >>> a = f.lgamma ( b )
    >>> a = lgamma ( f )
    """
    return _fn_make_fun_(self, b,
                         Ostap.MoreRooFit.LGamma,
                         'lgamma_%s_%s')
62183327967840e26dfc009c2357de2c31171082
3,641,401
def convolve_smooth(x, win=10, mode="same"): """Smooth data using a given window size, in units of array elements, using the numpy.convolve function.""" return np.convolve(x, np.ones((win,)), mode=mode) / win
b41edf8c0d58355e28b507a96b129c4720412a81
3,641,402
from numpy import array, eye, maximum, mean, newaxis, ones, reshape, zeros
# NOTE: the original snippet imported the stdlib `array` module, but the body
# uses numpy throughout; the numpy imports above are an assumption. `e` below
# is a small finite-difference step assumed to be defined at module level.

def descent(x0, fn, iterations=1000, gtol=10**(-6), bounds=None, limit=0, args=()):
    """A gradient descent optimisation solver.

    Parameters
    ----------
    x0 : array-like
        n x 1 starting guess of x.
    fn : obj
        The objective function to minimise.
    iterations : int
        Maximum number of iterations.
    gtol : float
        Mean residual of the gradient for convergence.
    bounds : list
        List of lower and upper bound pairs [lb, ub], None=unconstrained.
    limit : float
        Value of the objective function for which to terminate optimisation.
    args : tuple
        Additional parameters needed for fn.

    Returns
    -------
    float
        Final value of the objective function.
    array
        Values of x at the found local minimum.
    """
    r = 0.5
    c = 0.0001
    n = len(x0)
    x0 = reshape(array(x0), (n, 1))

    if bounds:
        bounds = array(bounds)
        lb = bounds[:, 0][:, newaxis]
        ub = bounds[:, 1][:, newaxis]
    else:
        lb = ones((n, 1)) * -10**20
        ub = ones((n, 1)) * +10**20

    zn = zeros((n, 1))
    g = zeros((n, 1))
    v = eye(n) * e

    def phi(x, mu, *args):
        p = mu * (sum(maximum(lb - x, zn)) + sum(maximum(x - ub, zn)))**2
        return fn(x, *args) + p

    i = 0
    mu = 1
    while i < iterations:
        p0 = phi(x0, mu, *args)
        for j in range(n):
            vj = v[:, j][:, newaxis]
            g[j, 0] = (phi(x0 + vj, mu, *args) - p0) / e
        D = sum(-g * g)
        a = 1
        x1 = x0 - a * g
        while phi(x1, mu, *args) > p0 + c * a * D:
            a *= r
            x1 = x0 - a * g
        x0 -= a * g
        mu *= 10
        res = mean(abs(g))
        i += 1
        f1 = phi(x0, mu, *args)
        if f1 < limit:
            break
        if res < gtol:
            break
        print('Iteration: {0} fopt: {1:.3g} gres: {2:.3g} step: {3}'.format(i, f1, res, a))

    return f1, x0
ec132e7857cf4a941c54fc5db9085bdf013fb7a2
3,641,404
def count_teams_for_party(party_id: PartyID) -> int:
    """Return the number of orga teams for that party."""
    return db.session \
        .query(DbOrgaTeam) \
        .filter_by(party_id=party_id) \
        .count()
07373325dd7d7ab21ef0cb1145d37b2d85292358
3,641,405
def num_series(datetime_series) -> pd.Series:
    """Return a datetime series with numeric values."""
    return datetime_series(LENGTH)
4d208bfbae5f3e7263663d06102aa0b290f4fd4e
3,641,406
import re

def obtain_ranks(outputs, targets, mode=0):
    """
    outputs : tensor of size (batch_size, 1), required_grad = False, model predictions
    targets : tensor of size (batch_size, ), required_grad = False, labels
        Assumed to be of format [1, 0, ..., 0, 1, 0, ..., 0, ..., 0]
    mode == 0: rank from distance (smaller is preferred)
    mode == 1: rank from similarity (larger is preferred)
    """
    if mode == 0:
        calculate_ranks = calculate_ranks_from_distance
    else:
        calculate_ranks = calculate_ranks_from_similarities
    all_ranks = []
    prediction = outputs.cpu().numpy().squeeze()
    label = targets.cpu().numpy()
    sep = np.array([0, 1], dtype=label.dtype)

    # fast way to find subarray indices in a large array, c.f.
    # https://stackoverflow.com/questions/14890216/return-the-indexes-of-a-sub-array-in-an-array
    end_indices = [(m.start() // label.itemsize) + 1
                   for m in re.finditer(sep.tostring(), label.tostring())]
    end_indices.append(len(label) + 1)
    start_indices = [0] + end_indices[:-1]
    for start_idx, end_idx in zip(start_indices, end_indices):
        distances = prediction[start_idx: end_idx]
        labels = label[start_idx:end_idx]
        positive_relations = list(np.where(labels == 1)[0])
        ranks = calculate_ranks(distances, positive_relations)
        all_ranks.append(ranks)
    return all_ranks
72fc737d72fe0d6d3ff4e08a5a16acf05e0e88cb
3,641,407
from typing import Dict
from typing import Any

import optuna
from torch import nn


def sample_a2c_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for A2C hyperparams.
    """
    lr_schedule = trial.suggest_categorical("lr_schedule", ["linear", "constant"])
    learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1)
    n_steps = trial.suggest_categorical("n_steps", [4, 8, 16, 32, 64, 128])
    gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    ent_coef = trial.suggest_loguniform("ent_coef", 0.0000001, 0.1)
    vf_coef = trial.suggest_uniform("vf_coef", 0, 1)
    normalize_advantage = trial.suggest_categorical("normalize_advantage", [False, True])
    max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])
    # Toggle PyTorch RMSProp (different from the TF one, cf. the docs)
    use_rms_prop = trial.suggest_categorical("use_rms_prop", [False, True])
    # Uncomment for gSDE (continuous actions)
    # log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
    # ortho_init = trial.suggest_categorical("ortho_init", [False, True])
    # Uncomment for network architecture setting
    # net_arch = trial.suggest_categorical("net_arch", ["small", "medium"])
    # sde_net_arch = trial.suggest_categorical("sde_net_arch", [None, "tiny", "small"])
    # full_std = trial.suggest_categorical("full_std", [False, True])
    # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu'])
    activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"])

    if lr_schedule == "linear":
        learning_rate = linear_schedule(learning_rate)

    # net_arch = {
    #     "small": [dict(pi=[64, 64], vf=[64, 64])],
    #     "medium": [dict(pi=[256, 256], vf=[256, 256])],
    # }[net_arch]

    activation_fn = {"tanh": nn.Tanh, "relu": nn.ReLU, "elu": nn.ELU, "leaky_relu": nn.LeakyReLU}[activation_fn]

    return {
        "learning_rate": learning_rate,
        "n_steps": n_steps,
        "gae_lambda": gae_lambda,
        "ent_coef": ent_coef,
        "vf_coef": vf_coef,
        "max_grad_norm": max_grad_norm,
        "use_rms_prop": use_rms_prop,
        "normalize_advantage": normalize_advantage,
        "policy_kwargs": dict(
            # log_std_init=log_std_init,
            # net_arch=net_arch,
            activation_fn=activation_fn,
            # ortho_init=ortho_init,
        ),
    }
f9f966f3c41a32a15253ba612d94e1254a586e86
3,641,408
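A minimal sketch of how a sampler like sample_a2c_params is typically plugged into an optuna study; the objective below is a stand-in (it does not actually train an A2C agent), and linear_schedule is a project helper not shown here.

import optuna

def objective(trial):
    hyperparams = sample_a2c_params(trial)
    # ... build an A2C model with **hyperparams, train it, and evaluate ...
    return 0.0  # stand-in for the real evaluation reward

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10)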
def location_parser(selected_variables, column):
    """
    Parse the location variable by creating a list of tuples.
    Remove the hyphen between the start/stop positions.
    Convert all elements to integers and create a list of tuples.

    Parameters:
        selected_variables (dataframe): The dataframe containing the location of the variables
            contained in the cps_selected_variables file
        column (character): The name of the column containing the start/stop positions

    Returns:
        selected_fields: A list of tuples containing the start/stop positions
    """
    fields = []
    for field in selected_variables[column]:
        field = field.split('-')
        field = [int(i) for i in field]
        fields.append(field)
    return fields
106f669269276c37652e92e62eb8c2c52dfe7637
3,641,409
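A minimal usage sketch for location_parser, assuming a small pandas DataFrame whose column holds hyphenated "start-stop" strings; the column name is illustrative.

import pandas as pd

selected_variables = pd.DataFrame({"LOCATION": ["1-15", "16-17", "18-25"]})
fields = location_parser(selected_variables, "LOCATION")
# fields == [[1, 15], [16, 17], [18, 25]]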
import torch
import math


def get_qmf_bank(h, n_band):
    """
    Modulates an input prototype filter into a bank of cosine modulated filters

    Parameters
    ----------
    h: torch.Tensor
        prototype filter
    n_band: int
        number of sub-bands
    """
    k = torch.arange(n_band).reshape(-1, 1)
    N = h.shape[-1]
    t = torch.arange(-(N // 2), N // 2 + 1)
    p = (-1)**k * math.pi / 4
    mod = torch.cos((2 * k + 1) * math.pi / (2 * n_band) * t + p)
    hk = 2 * h * mod
    return hk
87e8cf3b0d85a6717cce9dc09f7a0a3e3581e498
3,641,410
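A minimal usage sketch for get_qmf_bank, assuming an odd-length lowpass prototype designed with scipy.signal.firwin (odd length matters because the modulation time axis has 2*(N//2)+1 points); the cutoff and band count are illustrative.

import torch
from scipy.signal import firwin

n_band = 4
proto = torch.from_numpy(firwin(257, 1.0 / (2 * n_band))).float()
bank = get_qmf_bank(proto, n_band)  # shape: (n_band, 257)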
import math


def compare_one(col, cons_aa, aln_size, weights, aa_freqs, pseudo_size):
    """Compare column amino acid frequencies to overall via G-test."""
    observed = count_col(col, weights, aa_freqs, pseudo_size)
    G = 2 * sum(obsv * math.log(obsv / aa_freqs.get(aa, 0.0))
                for aa, obsv in observed.items())
    pvalue = chisqprob(G, 19)
    return pvalue
910431062ac9ddef467d4818d3960385a2d4392b
3,641,411
def open(uri, mode='a', eclass=_eclass.manifest):
    """Open a Blaze object via an `uri` (Uniform Resource Identifier).

    Parameters
    ----------
    uri : str
        Specifies the URI for the Blaze object.  It can be a regular file too.
        The URL scheme indicates the storage type:

          * carray: Chunked array
          * ctable: Chunked table
          * sqlite: SQLite table (the URI 'sqlite://' creates in-memory table)

        If no URI scheme is given, carray is assumed.

    mode : the open mode (string)
        Specifies the mode in which the object is opened.  The supported
        values are:

          * 'r' for read-only
          * 'w' for emptying the previous underlying data
          * 'a' for allowing read/write on top of existing data

    Returns
    -------
    out : an Array or Table object.
    """
    ARRAY = 1
    TABLE = 2

    uri = urlparse(uri)
    path = uri.netloc + uri.path
    parms = params(storage=path)

    if uri.scheme == 'carray':
        source = CArraySource(params=parms)
        structure = ARRAY
    elif uri.scheme == 'ctable':
        source = CTableSource(params=parms)
        structure = TABLE
    elif uri.scheme == 'sqlite':
        # Empty path means memory storage
        parms = params(storage=path or None)
        source = SqliteSource(params=parms)
        structure = TABLE
    else:
        # Default is to treat the URI as a regular path
        parms = params(storage=path)
        source = CArraySource(params=parms)
        structure = ARRAY

    # Don't want a deferred array (yet)
    # return NDArray(source)
    if structure == ARRAY:
        if eclass is _eclass.manifest:
            return Array(source)
        elif eclass is _eclass.delayed:
            return NDArray(source)
    elif structure == TABLE:
        if eclass is _eclass.manifest:
            return Table(source)
        elif eclass is _eclass.delayed:
            return NDTable(source)
c0a5069f5d7f39c87aae5af361df86b6f4fc4189
3,641,412
import pandas as pd


def create_df(dic_in, cols, input_type):
    """
    Convert JSON output from OpenSea API to pandas DataFrame

    :param dic_in: JSON output from OpenSea API
    :param cols: Keys in JSON output from OpenSea API
    :param input_type: <TBD> save the columns with dictionaries as entries separately
    :return: Cleaned DataFrame
    """
    # First pass: create dataframe where some of the values are a dictionary with multiple values
    df = pd.DataFrame(columns=cols)
    for col in cols:
        data = []
        for row in dic_in:
            data.append(row.get(col))
        df[col] = data
    # Second pass: get rid of columns with dictionaries, for now just forgetting about dictionary
    df_2 = df.copy()
    for col in df_2.columns:
        if col in map_dic:
            for df_index, df_row in df.iterrows():
                embed_dic_type = map_dic[col]
                df_2.at[df_index, col] = map_replace_dic[embed_dic_type]
    return df_2
7b6a9445c956cc5d2850516d4c7dc2208b7391f7
3,641,413
def file_updated_at(file_id, db_cursor):
    """
    Update the last time the file was checked
    """
    db_cursor.execute(queries.file_updated_at, {'file_id': file_id})
    db_cursor.execute(queries.insert_log, {'project_id': settings.project_id,
                                           'file_id': file_id,
                                           'log_area': 'file_updated_at',
                                           'log_text': db_cursor.query.decode("utf-8")})
    return True
bb0ec859c249b96e3ed066c3664e792100f5f23c
3,641,414
def action_to_upper(action):
    """
    Receives an action in PDDL action representation and returns it in upper case.

    :param action: An action in PddlActionRepresentation
    :return: PddlActionRepresentation: The action in upper case
    """
    if action:
        action.name = action.name.upper()
        action.types = [type.upper() for type in action.types]
        action.predicates = [pred.upper() for pred in action.predicates]
        action.requirements = [req.upper() for req in action.requirements]
        action.action = action.action.upper()
    return action
e9266ad79d60a58bf61d6ce81284fa2accbb0b8d
3,641,415
from typing import Type
from typing import Dict
from typing import Any


def generate_model_example(model: Type["Model"], relation_map: Dict = None) -> Dict:
    """
    Generates example to be included in schema in fastapi.

    :param model: ormar.Model
    :type model: Type["Model"]
    :param relation_map: dict with relations to follow
    :type relation_map: Optional[Dict]
    :return: dict with example values
    :rtype: Dict[str, int]
    """
    example: Dict[str, Any] = dict()
    relation_map = (
        relation_map
        if relation_map is not None
        else translate_list_to_dict(model._iterate_related_models())
    )
    for name, field in model.Meta.model_fields.items():
        populates_sample_fields_values(
            example=example, name=name, field=field, relation_map=relation_map
        )
    to_exclude = {name for name in model.Meta.model_fields}
    pydantic_repr = generate_pydantic_example(pydantic_model=model, exclude=to_exclude)
    example.update(pydantic_repr)
    return example
1aafb069ff129453f9012de79d09c326224ceb5b
3,641,417
def compare_folder(request):
    """
    Creates the compare folder path `dione-sr/tests/data/test_name/compare`.
    """
    return get_test_path('compare', request)
b78bc261373d47bd3444c24c54c57a600a3855ad
3,641,418
def _get_param_combinations(lists):
    """Recursive function which generates a list of all possible parameter values"""
    if len(lists) == 1:
        list_p_1 = [[e] for e in lists[0]]
        return list_p_1
    list_p_n_minus_1 = _get_param_combinations(lists[1:])
    list_p_1 = [[e] for e in lists[0]]
    list_p_n = [p_1 + p_n_minus_1
                for p_1 in list_p_1
                for p_n_minus_1 in list_p_n_minus_1]
    return list_p_n
b4903bea79aebeabf3123f03de986058a06a21f4
3,641,419
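A short usage sketch for _get_param_combinations, showing that it builds the Cartesian product of the input lists.

combos = _get_param_combinations([[1, 2], ['a', 'b']])
# combos == [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]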
import numpy as np


def system_mass_spring_dumper():
    """Design example for a mass-spring-damper system"""
    # define the system
    m = 1.0
    k = 1.0
    c = 1.0
    A = np.array([
        [0.0, 1.0],
        [-k/m, -c/m]
    ])
    B = np.array([
        [0],
        [1/m]
    ])
    C = np.eye(2)
    D = np.zeros((2, 1), dtype=float)
    W = np.diag([1.0, 1.0])
    S1, S2, A_, B_, T = optimal_hyperplane_vector(A, B, W)
    S = np.hstack((S1, S2))
    x, u = initialize_system(A, B)
    x[0] = 0.0
    x[1] = 10.0
    # define the gain of k = 10
    k = 10
    return C, D, S, k, x, u, A_, B_, T
8a054753d7bbaa06b7217ce98d38074122d41f32
3,641,420
from datetime import date

import requests


def get_green_button_xml(
    session: requests.Session, start_date: date, end_date: date
) -> str:
    """Download Green Button XML."""
    response = session.get(
        f'https://myusage.torontohydro.com/cassandra/getfile/period/custom/start_date/{start_date:%m-%d-%Y}/to_date/{end_date:%m-%d-%Y}/format/xml'
    )
    response.raise_for_status()
    return response.text
2ed71202a40214b75007db7b16d5c1806ae35406
3,641,422
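A minimal usage sketch for get_green_button_xml; it assumes a requests session that has already authenticated against the Toronto Hydro portal, which is not shown here.

from datetime import date
import requests

session = requests.Session()
# ... log in to myusage.torontohydro.com with this session first ...
xml_text = get_green_button_xml(session, date(2021, 1, 1), date(2021, 1, 31))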
from calendar import timegm
from time import strptime


def calculateSecFromEpoch(date, hour):
    """
    Calculates seconds from EPOCH
    """
    months = {
        '01': 'Jan', '02': 'Feb', '03': 'Mar', '04': 'Apr',
        '05': 'May', '06': 'Jun', '07': 'Jul', '08': 'Aug',
        '09': 'Sep', '10': 'Oct', '11': 'Nov', '12': 'Dec'
    }
    year = YEAR_PREFIX + date[0:2]
    month = months[date[2:4]]
    day = date[4:6]
    hourF = hour[0:2] + ':' + hour[2:4] + ':' + hour[4:6]
    dateFormatted = month + ' ' + day + ',' + ' ' + year + ' @ ' + hourF + ' ' + TIME_ZONE
    secs = timegm(strptime(dateFormatted, '%b %d, %Y @ %H:%M:%S ' + TIME_ZONE))
    return secs
29adf78dbe795c70cb84f66b1dc249674869c417
3,641,423
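A minimal usage sketch for calculateSecFromEpoch; YEAR_PREFIX and TIME_ZONE are module-level constants the function reads, and the values below are illustrative assumptions.

YEAR_PREFIX = '20'   # hypothetical: two-digit years are 20xx
TIME_ZONE = 'UTC'    # hypothetical: timestamps are UTC

secs = calculateSecFromEpoch('210304', '153000')  # 2021-03-04 15:30:00 UTC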
import numpy as np
from scipy import stats


def star_noise_simulation(Variance, Pk, nongaussian=False):
    """Simulates a star + noise signal.  Pk is a hyperprior on star variability
    and is flat at high frequencies, which corresponds to stationary noise."""
    Pk_double = np.concatenate((Pk, Pk))
    phases = np.random.uniform(0, 2 * np.pi, len(Pk))
    nodes0 = np.sqrt(Pk_double) * np.concatenate((np.cos(phases), np.sin(phases)))
    if nongaussian:
        flux = flux_nodes(nodes0, len(Variance))
        #average, sigma = prepare_data.normalization(flux)
        #flux /= sigma
        mask = np.random.random(len(flux)) < distribution_parameters[0]
        outliers = stats.nct.rvs(*distribution_parameters[1:], size=np.sum(mask))
        flux[mask] = outliers
        return flux / Variance
    else:
        return (flux_nodes(nodes0, len(Variance))) / Variance
5ccc89f455b7347c11cac36abead172b352f7b9c
3,641,424
from datetime import datetime
import time


def get_seq_num():
    """
    Simple function for creating sequence numbers.
    Truncate epoch time to 7 digits, which wraps roughly every four months.
    """
    t = datetime.now()
    mt = time.mktime(t.timetuple())
    nextnum = int(mt)
    retval = nextnum % 10000000
    return retval
34a2b3a7082d061987c7a0b67c91df040b86938c
3,641,425
import logging
from os.path import abspath, isdir, isfile


def get_packages_for_file_or_folder(source_file, source_folder):
    """
    Collects all the files based on the given parameters.  Exactly one of the
    parameters has to be specified.

    If source_file is given, it returns a list containing source_file.
    If source_folder is given, it searches the directory recursively and
    returns the list of found files.
    """
    if not bool(source_folder) ^ bool(source_file):
        log('source_folder XOR source_file has to be specified, exactly one of them.',
            logging.ERROR, source_file=source_file, source_folder=source_folder)
        return ()

    # validate path parameters, collect packages
    entries = ()
    if source_file:
        source = abspath(source_file)
        if isfile(source):
            entries = [source]
        else:
            log('Source file does not exist', logging.ERROR)
    else:
        source = abspath(source_folder)
        if isdir(source):
            entries = get_files(source)
        else:
            log('Source folder does not exist', logging.ERROR)
    return entries
fc047dd10dfd18fc8efecb240d06aeb91686c0cb
3,641,426
def sanitize_tag(tag: str) -> str:
    """Clean tag by replacing empty spaces with underscore.

    Parameters
    ----------
    tag: str

    Returns
    -------
    str
        Cleaned tag

    Examples
    --------
    >>> sanitize_tag(" Machine Learning ")
    'Machine_Learning'
    """
    return tag.strip().replace(" ", "_")
40ac78846f03e8b57b5660dd246c8a15fed8e008
3,641,427
import numpy as np
from scipy.special import i0, iv


def _vmf_normalize(kappa, dim):
    """Compute normalization constant using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and mu.
    """
    num = np.power(kappa, dim / 2.0 - 1.0)

    if dim / 2.0 - 1.0 < 1e-15:
        denom = np.power(2.0 * np.pi, dim / 2.0) * i0(kappa)
    else:
        denom = np.power(2.0 * np.pi, dim / 2.0) * iv(dim / 2.0 - 1.0, kappa)

    if np.isinf(num):
        raise ValueError("VMF scaling numerator was inf.")
    if np.isinf(denom):
        raise ValueError("VMF scaling denominator was inf.")
    if np.abs(denom) < 1e-15:
        raise ValueError("VMF scaling denominator was 0.")

    return num / denom
24d22469a572e7ff4b7e1c918fce7001731cec2a
3,641,428
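A short check of _vmf_normalize against the closed form for the circle: for dim=2 the von Mises-Fisher normalizer reduces to 1 / (2*pi*I0(kappa)).

import numpy as np
from scipy.special import i0

kappa = 1.5
C = _vmf_normalize(kappa, dim=2)
assert np.isclose(C, 1.0 / (2.0 * np.pi * i0(kappa)))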
import urllib.error


def twitter_map():
    """
    Gets all the required information and returns the start page or a map
    with people's locations, depending on the input
    """
    # get arguments from url
    account = request.args.get('q')
    count = request.args.get('count')
    if account and count:
        # create map and add custom styles to html or display error
        try:
            new_map = create_map(account, count)
            new_map += render_template('styles.html')
            return new_map
        except urllib.error.HTTPError:
            return render_template('error.html', error='User was not found.')
    else:
        # render start page
        return render_template('index.html')
54a37f91141e52d24f88214ea476a2f199c78674
3,641,429
def path_states(node):
    """The sequence of states to get to this node."""
    if node in (cutoff, failure, None):
        return []
    return path_states(node.parent) + [node.state]
21ed5eb98eca0113dd5f446066cd10df73665f10
3,641,430
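A minimal sketch of the node shape path_states walks, assuming AIMA-style search nodes with state and parent attributes; cutoff and failure are sentinel objects in that codebase, stubbed here.

class Node:
    def __init__(self, state, parent=None):
        self.state = state
        self.parent = parent

cutoff, failure = Node('cutoff'), Node('failure')  # stand-in sentinels

node = Node('C', parent=Node('B', parent=Node('A')))
print(path_states(node))  # ['A', 'B', 'C']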
def find_named_variables(mapping):
    """Find the correspondence between each variable and the relations and attributes it appears in."""
    var_dictionary = dict()
    for relation_instance in mapping.lhs:
        for i, variable in enumerate(relation_instance.variables):
            name = relation_instance.relation.name
            field = relation_instance.relation.fields[i]
            if variable not in var_dictionary.keys():
                var_dictionary.update({variable: []})
                var_dictionary[variable].append((name, field))
            else:
                if (name, field) not in var_dictionary[variable]:
                    var_dictionary[variable].append((name, field))
    return var_dictionary
0b9a78ca94b25e7a91fe88f0f15f8a8d408cb2fd
3,641,431
import urllib.parse


def attribute_formatter(attribute):
    """
    Translate non-alphabetic chars and spaces to a URL applicable format.

    :param attribute: text string that may contain chars that are not
        url compatible (e.g. ' 무작위의')
    :return: text string with riot API compatible url encoding
        (e.g. %20%EB%AC%B4%EC%9E%91%EC%9C%84%EC%9D%98)
    """
    tempdict = {'': attribute}
    formatted = urllib.parse.urlencode(tempdict)[1:].replace('+', '%20')
    return formatted
6c6745a5cea9a3f6bcee8cbcedb7a1493372dc96
3,641,432
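A short usage sketch for attribute_formatter, reusing the Korean example from its docstring.

encoded = attribute_formatter(' 무작위의')
print(encoded)  # %20%EB%AC%B4%EC%9E%91%EC%9C%84%EC%9D%98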
import json


def maestro_splits():
    """
    Get list of indices for each split.

    Stolen from my work on Perceptual Evaluation of AMT Resynthesized.
    Left here for reference.
    """
    d = asmd.Dataset().filter(datasets=['Maestro'])
    maestro = json.load(open(MAESTRO_JSON))
    train, validation, test = [], [], []
    for i in range(len(d)):
        filename = d.paths[i][0][0][23:]
        split = search_audio_filename_in_original_maestro(filename, maestro)
        if split == "train":
            train.append(i)
        elif split == "validation":
            validation.append(i)
        elif split == "test":
            test.append(i)
        else:
            raise RuntimeError(filename + " not found in maestro original json")
    return train, validation, test
119b033d3fd507b77bbb3d16d993237f8658b5f5
3,641,434
import numpy as np


def get_choice_selectivity(trials, perf, r):
    """
    Compute d' for choice.
    """
    N = r.shape[-1]
    L = np.zeros(N)
    L2 = np.zeros(N)
    R = np.zeros(N)
    R2 = np.zeros(N)
    nL = 0
    nR = 0
    for n, trial in enumerate(trials):
        if not perf.decisions[n]:
            continue
        stimulus = trial['epochs']['stimulus']
        r_n = r[stimulus, n]
        left_right = trial['left_right']
        if left_right < 0:
            L += np.sum(r_n, axis=0)
            L2 += np.sum(r_n**2, axis=0)
            nL += len(stimulus)
        else:
            R += np.sum(r_n, axis=0)
            R2 += np.sum(r_n**2, axis=0)
            nR += len(stimulus)
    mean_L = L/nL
    var_L = L2/nL - mean_L**2
    mean_R = R/nR
    var_R = R2/nR - mean_R**2
    # d' = (mean_L - mean_R) / sqrt((var_L + var_R)/2), sign-flipped
    return -utils.div(mean_L - mean_R, np.sqrt((var_L + var_R)/2))
f33593ad06bf3c54c950eda562a93e348320a5e1
3,641,435
def author_productivity(pub2author_df, colgroupby='AuthorId', colcountby='PublicationId', show_progress=False):
    """
    Calculate the total number of publications for each author.

    Parameters
    ----------
    pub2author_df : DataFrame, default None, Optional
        A DataFrame with the author2publication information.

    colgroupby : str, default 'AuthorId', Optional
        The DataFrame column with Author Ids.  If None then the database 'AuthorId' is used.

    colcountby : str, default 'PublicationId', Optional
        The DataFrame column with Publication Ids.  If None then the database 'PublicationId' is used.

    Returns
    -------
    DataFrame
        Productivity DataFrame with 2 columns: 'AuthorId', 'Productivity'
    """
    # we can use show_progress to pass a label for the progress bar
    if show_progress:
        show_progress = 'Author Productivity'
    newname_dict = zip2dict([str(colcountby) + 'Count', '0'], ['Productivity'] * 2)
    return groupby_count(pub2author_df, colgroupby, colcountby, count_unique=True,
                         show_progress=show_progress).rename(columns=newname_dict)
15c56b22cc9d5014fe4dcfab8be37a9e4b0ef329
3,641,436
def smoothed_epmi(matrix, alpha=0.75):
    """
    Performs smoothed epmi.  See smoothed_ppmi for more info.

    Derived from this:

        #(w,c) / #(TOT)
        -------------------------------------
        (#(w) / #(TOT)) * (#(c)^a / #(TOT)^a)

    ==>

        #(w,c) / #(TOT)
        ---------------------------------
        (#(w) * #(c)^a) / #(TOT)^(a+1)

    ==>

        #(w,c)
        ---------------------------
        (#(w) * #(c)^a) / #(TOT)^a

    ==>

        #(w,c) * #(TOT)^a
        -----------------
        #(w) * #(c)^a
    """
    row_sum = matrix.sum(axis=1)
    col_sum = matrix.sum(axis=0).power(alpha)
    total = row_sum.sum(axis=0).power(alpha)[0, 0]

    inv_col_sum = 1 / col_sum  # shape (1, n)
    inv_row_sum = 1 / row_sum  # shape (n, 1)
    inv_col_sum = inv_col_sum * total

    mat = matrix * inv_row_sum
    mat = mat * inv_col_sum
    return mat
e2f72c4169aee2f394445f42e4835f1b55f347c9
3,641,437
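The final formula above is what the code implements: epmi(w, c) = #(w,c) * #(TOT)^a / (#(w) * #(c)^a). A dense-numpy sketch of the same computation, for illustration only (the original operates on a sparse matrix type with a .power method):

import numpy as np

def smoothed_epmi_dense(counts, alpha=0.75):
    row_sum = counts.sum(axis=1, keepdims=True)           # #(w)
    col_sum = counts.sum(axis=0, keepdims=True) ** alpha  # #(c)^a
    total = counts.sum() ** alpha                         # #(TOT)^a
    return counts * total / (row_sum * col_sum)

counts = np.array([[4.0, 0.0], [2.0, 2.0]])
print(smoothed_epmi_dense(counts))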
import six


def encode(input, errors='strict'):
    """
    Convert from unicode text (with possible UTF-16 surrogates) to wtf-8
    encoded bytes.

    If this is a python narrow build this will actually produce UTF-16
    encoded unicode text (e.g. with surrogates).
    """
    # method to convert surrogate pairs to unicode code points permitting
    # lone surrogate pairs (aka potentially ill-formed UTF-16)
    def to_code_point(it):
        hi = None
        try:
            while True:
                c = ord(next(it))
                if c >= 0xD800 and c <= 0xDBFF:
                    # high surrogate
                    hi = c
                    c = ord(next(it))
                    if c >= 0xDC00 and c <= 0xDFFF:
                        # paired
                        c = 0x10000 + ((hi - 0xD800) << 10) + (c - 0xDC00)
                    else:
                        yield hi
                    hi = None
                yield c
        except StopIteration:
            if hi is not None:
                yield hi

    buf = six.binary_type()
    for code in to_code_point(iter(input)):
        if (0 == (code & 0xFFFFFF80)):
            # 1 byte: ASCII, no continuation byte needed
            buf += six.int2byte(code)
            continue
        elif (0 == (code & 0xFFFFF800)):
            # 2 bytes
            buf += six.int2byte(((code >> 6) & 0x1F) | 0xC0)
        elif (0 == (code & 0xFFFF0000)):
            # 3 bytes
            buf += six.int2byte(((code >> 12) & 0x0F) | 0xE0)
            buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
        elif (0 == (code & 0xFFE00000)):
            # 4 bytes; mask covers the 21-bit limit of 4-byte UTF-8
            # (was 0xFF300000, which wrongly rejected code points >= U+100000)
            buf += six.int2byte(((code >> 18) & 0x07) | 0xF0)
            buf += six.int2byte(((code >> 12) & 0x3F) | 0x80)
            buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
        # final continuation byte, shared by all multi-byte branches
        buf += six.int2byte((code & 0x3F) | 0x80)
    return buf, len(buf)
525199690f384304a72176bd1eaeeb1b9cb30880
3,641,438
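A minimal usage sketch for encode: the UTF-16 surrogate pair U+D83D U+DE00 is combined into the single code point U+1F600 and encoded as its four-byte UTF-8 form.

wtf8_bytes, length = encode('\ud83d\ude00')
print(wtf8_bytes)  # b'\xf0\x9f\x98\x80'  (UTF-8 for U+1F600)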
def forgot_password(request, mobile=False):
    """Password reset form.

    This view sends an email with a reset link.
    """
    if request.method == "POST":
        form = PasswordResetForm(request.POST)
        valid = form.is_valid()
        if valid:
            form.save(use_https=request.is_secure(),
                      token_generator=default_token_generator,
                      email_template_name='users/email/pw_reset.ltxt')
        if mobile:
            if valid:
                return HttpResponseRedirect(reverse('users.mobile_pw_reset_sent'))
        else:
            if not valid:
                return {'status': 'error',
                        'errors': dict(form.errors.iteritems())}
            else:
                return {'status': 'success'}
    else:
        form = PasswordResetForm()

    if mobile:
        return jingo.render(request, 'users/mobile/pw_reset_form.html',
                            {'form': form})
ea27378253a7ed1b98cb91fd52fe724e79f35e26
3,641,439
import tensorflow as tf


def rotation_components(x, y, eps=1e-12, costh=None):
    """Components for the operator Rotation(x, y)

    Together with `rotation_operator` achieves best memory complexity:
    O(N_batch * N_hidden)

    Args:
        x: a tensor from where we want to start
        y: a tensor at which we want to finish
        eps: the cutoff for the normalizations (avoiding division by zero)

    Returns:
        Five components: u, v, [u, v] and `2x2 rotation by theta`, cos(theta)
    """
    size_batch = tf.shape(x)[0]
    hidden_size = tf.shape(x)[1]

    # construct the 2x2 rotation
    u = tf.nn.l2_normalize(x, 1, epsilon=eps)
    if costh is None:
        costh = tf.reduce_sum(u * tf.nn.l2_normalize(y, 1, epsilon=eps), 1)
    sinth = tf.sqrt(1 - costh ** 2)
    step1 = tf.reshape(costh, [size_batch, 1])
    step2 = tf.reshape(sinth, [size_batch, 1])
    Rth = tf.reshape(tf.concat([step1, -step2, step2, step1], axis=1),
                     [size_batch, 2, 2])

    # get v and concatenate u and v
    v = tf.nn.l2_normalize(
        y - tf.reshape(tf.reduce_sum(u * y, 1), [size_batch, 1]) * u,
        1, epsilon=eps)
    step3 = tf.concat([tf.reshape(u, [size_batch, 1, hidden_size]),
                       tf.reshape(v, [size_batch, 1, hidden_size])], axis=1)

    # do the batch matmul
    step4 = tf.reshape(u, [size_batch, hidden_size, 1])
    step5 = tf.reshape(v, [size_batch, hidden_size, 1])

    return step4, step5, step3, Rth, costh
79cec86425bce65ac92ce8cf9c720f98857d7e1a
3,641,440
import numpy as np


def erode(np_image_bin, struct_elem='rect', size=3):
    """Execute erode morphological operation on binarized image

    Keyword arguments:
        np_image_bin -- binarized image
        struct_elem:
            cross -- cross structural element
            rect -- rectangle structural element
            circ -- circle structural element (maybe implemented)
        size: size of struct element, should be 2N+1

    Return:
        Binarized image after erode operation
    """
    np_image_bin = np_image_bin.astype(np.uint8)
    np_image_er = np.zeros(np_image_bin.shape, dtype=np.uint8)
    #np_image_bin = np.arange(625).reshape((25,25))
    #rectangle
    dir_size = int((size - 1) / 2)
    #print(x_max, y_max)
    for index, x in np.ndenumerate(np_image_bin):
        # bs.getWindow is a project helper that extracts the neighbourhood
        # window of the given pixel for the chosen structural element
        np_window = bs.getWindow(np_image_bin, index, dir_size, struct_elem)
        if np_window.max() == 255:
            np_image_er[index[0], index[1]] = 255
    return np_image_er
4692b40555a8047d70ad8c4b33de636a0c6c87b0
3,641,441
def setup_counter_and_timer(nodemap):
    """
    This function configures the camera to set up a Pulse Width Modulation signal using
    Counter and Timer functionality.  By default, the PWM signal will be set to run at
    50 Hz, with a duty cycle of 70%.

    :param nodemap: Device nodemap.
    :type nodemap: INodeMap
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    print('Configuring Pulse Width Modulation signal')

    try:
        result = True

        # Set Counter Selector to Counter 0
        node_counter_selector = PySpin.CEnumerationPtr(nodemap.GetNode('CounterSelector'))

        # Check to see if camera supports Counter and Timer functionality
        if not PySpin.IsAvailable(node_counter_selector):
            print('\nCamera does not support Counter and Timer Functionality. Aborting...\n')
            return False

        if not PySpin.IsWritable(node_counter_selector):
            print('\nUnable to set Counter Selector (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_0 = node_counter_selector.GetEntryByName('Counter0')
        if not PySpin.IsAvailable(entry_counter_0) or not PySpin.IsReadable(entry_counter_0):
            print('\nUnable to set Counter Selector (entry retrieval). Aborting...\n')
            return False

        counter_0 = entry_counter_0.GetValue()
        node_counter_selector.SetIntValue(counter_0)

        # Set Counter Event Source to MHzTick
        node_counter_event_source = PySpin.CEnumerationPtr(nodemap.GetNode('CounterEventSource'))
        if not PySpin.IsAvailable(node_counter_event_source) or not PySpin.IsWritable(node_counter_event_source):
            print('\nUnable to set Counter Event Source (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_event_source_mhz_tick = node_counter_event_source.GetEntryByName('MHzTick')
        if not PySpin.IsAvailable(entry_counter_event_source_mhz_tick) \
                or not PySpin.IsReadable(entry_counter_event_source_mhz_tick):
            print('\nUnable to set Counter Event Source (entry retrieval). Aborting...\n')
            return False

        counter_event_source_mhz_tick = entry_counter_event_source_mhz_tick.GetValue()
        node_counter_event_source.SetIntValue(counter_event_source_mhz_tick)

        # Set Counter Duration to 14000
        node_counter_duration = PySpin.CIntegerPtr(nodemap.GetNode('CounterDuration'))
        if not PySpin.IsAvailable(node_counter_duration) or not PySpin.IsWritable(node_counter_duration):
            print('\nUnable to set Counter Duration (integer retrieval). Aborting...\n')
            return False

        node_counter_duration.SetValue(14000)

        # Set Counter Delay to 6000
        node_counter_delay = PySpin.CIntegerPtr(nodemap.GetNode('CounterDelay'))
        if not PySpin.IsAvailable(node_counter_delay) or not PySpin.IsWritable(node_counter_delay):
            print('\nUnable to set Counter Delay (integer retrieval). Aborting...\n')
            return False

        node_counter_delay.SetValue(6000)

        # Determine Duty Cycle of PWM signal
        duty_cycle = float(node_counter_duration.GetValue()) / (float(node_counter_duration.GetValue()
                                                                      + node_counter_delay.GetValue())) * 100
        print('\nThe duty cycle has been set to {}%'.format(duty_cycle))

        # Determine pulse rate of PWM signal
        pulse_rate = 1000000 / float(node_counter_duration.GetValue() + node_counter_delay.GetValue())
        print('\nThe pulse rate has been set to {} Hz'.format(pulse_rate))

        # Set Counter Trigger Source to Frame Trigger Wait
        node_counter_trigger_source = PySpin.CEnumerationPtr(nodemap.GetNode('CounterTriggerSource'))
        if not PySpin.IsAvailable(node_counter_trigger_source) or not PySpin.IsWritable(node_counter_trigger_source):
            print('\nUnable to set Counter Trigger Source (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_trigger_source_ftw = node_counter_trigger_source.GetEntryByName('FrameTriggerWait')
        if not PySpin.IsAvailable(entry_counter_trigger_source_ftw) \
                or not PySpin.IsReadable(entry_counter_trigger_source_ftw):
            print('\nUnable to set Counter Trigger Source (entry retrieval). Aborting...\n')
            return False

        counter_trigger_source_ftw = entry_counter_trigger_source_ftw.GetValue()
        node_counter_trigger_source.SetIntValue(counter_trigger_source_ftw)

        # Set Counter Trigger Activation to Level High
        node_counter_trigger_activation = PySpin.CEnumerationPtr(nodemap.GetNode('CounterTriggerActivation'))
        if not PySpin.IsAvailable(node_counter_trigger_activation) or \
                not PySpin.IsWritable(node_counter_trigger_activation):
            print('\nUnable to set Counter Trigger Activation (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_trigger_source_lh = node_counter_trigger_activation.GetEntryByName('LevelHigh')
        if not PySpin.IsAvailable(entry_counter_trigger_source_lh) \
                or not PySpin.IsReadable(entry_counter_trigger_source_lh):
            print('\nUnable to set Counter Trigger Activation (entry retrieval). Aborting...\n')
            return False

        counter_trigger_level_high = entry_counter_trigger_source_lh.GetValue()
        node_counter_trigger_activation.SetIntValue(counter_trigger_level_high)

    except PySpin.SpinnakerException as ex:
        print('Error: {}'.format(ex))
        return False

    return result
9874b17ce49aca766504891bd9828aad1e075e21
3,641,443
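A short worked check of the figures the function prints: with the MHzTick event source (1,000,000 ticks per second), CounterDuration=14000 and CounterDelay=6000 give exactly the 70% duty cycle and 50 Hz pulse rate stated in the docstring.

duration, delay = 14000, 6000
duty_cycle = duration / (duration + delay) * 100  # 70.0 (%)
pulse_rate = 1_000_000 / (duration + delay)       # 50.0 (Hz)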
def concat(l1, l2):
    """
    Join two possibly None lists
    """
    if l1 is None:
        return l2
    if l2 is None:
        return l1
    return l1 + l2
9e87bead7eedc4c47f665808b9e0222437bc01b5
3,641,444