content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import re
import tempfile
from pathlib import Path


async def submit_changesheet(
    uploaded_file: UploadFile = File(...),
    mdb: MongoDatabase = Depends(get_mongo_db),
    user: User = Depends(get_current_active_user),
):
    """
    Validate an uploaded metadata changesheet, persist it to GridFS, and
    register it as a DRS object tagged "metadata-changesheet".

    Example changesheet [here](https://github.com/microbiomedata/nmdc-runtime/blob/main/metadata-translation/notebooks/data/changesheet-without-separator3.tsv).
    """
    # Hard-coded allowlist: only these usernames may apply changesheets for now.
    allowed_to_submit = ("dehays", "dwinston")
    if user.username not in allowed_to_submit:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail=(
                f"Only users {allowed_to_submit} "
                "are allowed to apply changesheets at this time."
            ),
        )
    # Parse and validate the sheet before persisting anything; validation
    # failures raise inside these helpers.
    sheet_in = await raw_changesheet_from_uploaded_file(uploaded_file)
    df_change = df_from_sheet_in(sheet_in, mdb)
    _ = _validate_changesheet(df_change, mdb)

    # create object (backed by gridfs). use "gfs0" id shoulder for drs_object access_id.
    sheet_id = generate_one_id(mdb, ns="changesheets", shoulder="gfs0")
    mdb_fs = GridFS(mdb)
    # Sanitize the client-supplied filename down to a portable character set.
    filename = re.sub(r"[^A-Za-z0-9\.\_\-]", "_", sheet_in.name)
    PortableFilename(filename)  # validates

    sheet_text = sheet_in.text
    drs_id = local_part(sheet_id)
    DrsId(drs_id)  # validates
    # Store the raw sheet text in GridFS keyed by the DRS-local id.
    mdb_fs.put(
        sheet_text,
        _id=drs_id,
        filename=filename,
        content_type=sheet_in.content_type,
        encoding="utf-8",
    )
    # Write the sheet to a real file so drs_metadata_for can derive
    # checksums/size from a filesystem path.
    with tempfile.TemporaryDirectory() as save_dir:
        filepath = str(Path(save_dir).joinpath(filename))
        with open(filepath, "w") as f:
            f.write(sheet_text)
        object_in = DrsObjectIn(
            **drs_metadata_for(
                filepath,
                base={
                    "description": f"changesheet submitted by {user.username}",
                    "access_methods": [{"access_id": drs_id}],
                },
            )
        )
    self_uri = f"drs://{HOSTNAME_EXTERNAL}/{drs_id}"
    drs_obj_doc = _create_object(
        mdb, object_in, mgr_site="nmdc-runtime", drs_id=drs_id, self_uri=self_uri
    )
    # Tag the freshly created DRS object as a changesheet and return the
    # post-update document.
    doc_after = mdb.objects.find_one_and_update(
        {"id": drs_obj_doc["id"]},
        {"$set": {"types": ["metadata-changesheet"]}},
        return_document=ReturnDocument.AFTER,
    )
    return doc_after
ebe9306aba0fef88c906c3c584f87e7c783fe9d8
3,645,893
import json


def get_notes(request, course, page=DEFAULT_PAGE, page_size=DEFAULT_PAGE_SIZE, text=None):
    """
    Returns paginated list of notes for the user.

    Arguments:
        request: HTTP request object
        course: Course descriptor
        page: requested or default page number
        page_size: requested or default page size
        text: text to search. If None then return all results for the
            current logged in user.

    Returns:
        Paginated dictionary with these keys:
            start: start of the current page
            current_page: current page number
            next: url for next page
            previous: url for previous page
            count: total number of notes available for the sent query
            num_pages: number of pages available
            results: list with notes info dictionary. each item in this list will be a dict

    Raises:
        EdxNotesParseError: the notes service returned non-JSON or a payload
            with unexpected keys.
    """
    # 'search' hits the notes-api full-text search endpoint; 'annotations'
    # lists all of the current user's notes.
    path = 'search' if text else 'annotations'
    response = send_request(request.user, course.id, page, page_size, path, text)
    try:
        collection = json.loads(response.content.decode('utf-8'))
    except ValueError:
        log.error("Invalid JSON response received from notes api: response_content=%s", response.content)
        raise EdxNotesParseError(_("Invalid JSON response received from notes api."))  # lint-amnesty, pylint: disable=raise-missing-from
    # Verify response dict structure
    expected_keys = ['total', 'rows', 'num_pages', 'start', 'next', 'previous', 'current_page']
    keys = list(collection.keys())
    if not keys or not all(key in expected_keys for key in keys):
        log.error("Incorrect data received from notes api: collection_data=%s", str(collection))
        raise EdxNotesParseError(_("Incorrect data received from notes api."))
    filtered_results = preprocess_collection(request.user, course, collection['rows'])
    # Notes API is called from:
    # 1. The annotatorjs in courseware. It expects these attributes to be named "total" and "rows".
    # 2. The Notes tab Javascript proxied through LMS. It expects these attributes to be called "count" and "results".
    collection['count'] = collection['total']
    del collection['total']
    collection['results'] = filtered_results
    del collection['rows']
    # Rewrite the service's pagination links so they point back through the LMS.
    collection['next'], collection['previous'] = construct_pagination_urls(
        request, course.id, collection['next'], collection['previous']
    )
    return collection
3256cacd845cf2fd07027cf6b3f2547a59cefd0f
3,645,894
import numpy


def convert_hdf_to_gaintable(f):
    """
    Convert HDF root to a GainTable

    :param f: HDF root node holding a serialized GainTable
    :return: the reconstructed GainTable
    """
    # Refuse anything that was not written as a GainTable.
    model_tag = f.attrs['ARL_data_model']
    assert model_tag == "GainTable", "Not a GainTable"
    return GainTable(
        data=numpy.array(f['data']),
        receptor_frame=ReceptorFrame(f.attrs['receptor_frame']),
        frequency=numpy.array(f.attrs['frequency']),
    )
dd816eb0730b0f9993efe07c2b28db692ac6a06e
3,645,895
import pathlib


def list_files(directory):
    """Return the visible (non-dot) regular files directly inside *directory*."""
    entries = pathlib.Path(directory).iterdir()
    return [entry for entry in entries
            if entry.is_file() and not entry.name.startswith('.')]
a8c5fea794198c17c2aff41a1a07009984a8e61f
3,645,896
def condition_conjunction(conditions):
    """Combine *conditions* with a SQL AND.

    A single condition is returned unchanged; an empty collection yields None.
    """
    if not conditions:
        return None
    if len(conditions) == 1:
        return conditions[0]
    return sql.expression.and_(*conditions)
acf26bd9b8e47d27ad83815be70216db0e4ad091
3,645,897
def get_claimed_referrals(char):
    """ Return how many claimed referrals this character has. """
    # web2py DAL query: count referral rows whose referrer is `char` AND whose
    # `claimed` flag is set. Note `==True` here is a DAL expression, not a
    # Python truthiness test, so it must stay a literal comparison.
    return db((db.referral.referrer==char) & (db.referral.claimed==True)).count()
5820cdd21cbb77f6a43537ae18dc227ad4fec1b8
3,645,898
def groupsplit(X, y, valsplit):
    """
    Split the dataset into train and test sets grouped by datapoint_id.

    All rows sharing a datapoint_id land entirely in one side of the split.
    With a separate validation set ("yes") the split is 70/30 train/test;
    otherwise 60/40 so the larger remainder can later be divided into test
    and validation portions.

    Args:
        X: data excluding the target_variable
        y: target variable with datapoint_id
        valsplit: "yes" if a dedicated validation dataframe exists, else "no"

    Returns:
        X_train, y_train, X_test, y_test_complete (the last keeps the
        datapoint_id alongside the target variable)
    """
    logger.info("groupsplit with valsplit: %s", valsplit)
    if valsplit == 'yes':
        splitter = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42)
    else:
        splitter = GroupShuffleSplit(n_splits=2, test_size=.4, random_state=42)
    train_ix, test_ix = next(splitter.split(X, y, groups=X.datapoint_id))
    return X.loc[train_ix], y.loc[train_ix], X.loc[test_ix], y.loc[test_ix]
e8ba393270a32e2464c30409a13b2c5e9528afdd
3,645,899
import urllib


def application(environ, start_response):
    """ make Passenger interpret PATH_INFO the same way that the WSGI
    standard does """
    # Passenger hands us a percent-encoded path; decode it before delegating.
    raw_path = environ["PATH_INFO"]
    environ["PATH_INFO"] = urllib.parse.unquote(raw_path)
    return app.app(environ, start_response)
3de57b31206a374da0378788f20e7bd8b1eca9af
3,645,901
from railrl.torch.pytorch_util import set_gpu_mode
import random


def run_experiment_here(
        experiment_function,
        variant=None,
        exp_id=0,
        seed=0,
        use_gpu=True,
        # Logger params:
        exp_prefix="default",
        snapshot_mode='last',
        snapshot_gap=1,
        git_infos=None,
        script_name=None,
        logger=default_logger,
        trial_dir_suffix=None,
        randomize_seed=False,
        **setup_logger_kwargs
):
    """
    Run an experiment locally without any serialization.

    :param experiment_function: Function. `variant` will be passed in as its
    only argument.
    :param exp_prefix: Experiment prefix for the save file.
    :param variant: Dictionary passed in to `experiment_function`.
    :param exp_id: Experiment ID. Should be unique across all
    experiments. Note that one experiment may correspond to multiple seeds,.
    :param seed: Seed used for this experiment.
    :param use_gpu: Run with GPU. By default False.
    :param script_name: Name of the running script
    :param randomize_seed: If True, draw a fresh random seed regardless of
    the `seed` argument.
    :param log_dir: If set, set the log directory to this. Otherwise,
    the directory will be auto-generated based on the exp_prefix.
    :return: result of `experiment_function(variant)`
    """
    if variant is None:
        variant = {}
    # Record the experiment id in the variant so it is serialized with it.
    variant['exp_id'] = str(exp_id)

    # Draw a fresh seed when asked to, or when none was provided either as an
    # argument or inside the variant.
    if randomize_seed or (seed is None and 'seed' not in variant):
        seed = random.randint(0, 100000)
        variant['seed'] = str(seed)
    # Clear any logger/global state left over from a previous run.
    reset_execution_environment(logger=logger)

    actual_log_dir = setup_logger(
        exp_prefix=exp_prefix,
        variant=variant,
        exp_id=exp_id,
        seed=seed,
        snapshot_mode=snapshot_mode,
        snapshot_gap=snapshot_gap,
        git_infos=git_infos,
        script_name=script_name,
        logger=logger,
        trial_dir_suffix=trial_dir_suffix,
        **setup_logger_kwargs
    )

    set_seed(seed)
    set_gpu_mode(use_gpu)

    # Persist the full call configuration next to the logs so the run can be
    # reproduced later.
    run_experiment_here_kwargs = dict(
        variant=variant,
        exp_id=exp_id,
        seed=seed,
        use_gpu=use_gpu,
        exp_prefix=exp_prefix,
        snapshot_mode=snapshot_mode,
        snapshot_gap=snapshot_gap,
        git_infos=git_infos,
        script_name=script_name,
        **setup_logger_kwargs
    )
    save_experiment_data(
        dict(
            run_experiment_here_kwargs=run_experiment_here_kwargs
        ),
        actual_log_dir
    )
    return experiment_function(variant)
ee8b0f727027d8dcee804565606a7f82f2c77ca9
3,645,902
from typing import Tuple


def normalize_chunks(chunks: Tuple[Tuple[int, int]]) -> Tuple[Tuple[int, int]]:
    """
    Minimize the amount of chunks needed to describe a smaller portion of a file.

    Adjacent chunks whose offsets touch (end of one == start of the next)
    are merged into a single (start, end) pair.

    :param chunks: A tuple with (start, end,) offsets
    :return: A tuple containing as few as possible (start, end,) offsets
    """
    # Robustness fix: the original raised IndexError on an empty input.
    if not chunks:
        return ()
    out = []
    start1, end1 = chunks[0]
    for start2, end2 in chunks[1:]:
        if start2 == end1:
            # Contiguous with the running chunk -- extend it.
            end1 = end2
        else:
            out.append((start1, end1))
            start1, end1 = start2, end2
    out.append((start1, end1))
    return tuple(out)
d49d1abed0573a86e0eeee5d2e5ed2e129f3274e
3,645,903
def learning_rate_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step, first_lr_drop_step, second_lr_drop_step, global_step): """Handles linear scaling rule, gradual warmup, and LR decay.""" # lr_warmup_init is the starting learning rate; the learning rate is linearly # scaled up to the full learning rate after `lr_warmup_steps` before decaying. linear_warmup = (lr_warmup_init + (tf.cast(global_step, dtype=tf.float32) / lr_warmup_step * (adjusted_learning_rate - lr_warmup_init))) learning_rate = tf.where(global_step < lr_warmup_step, linear_warmup, adjusted_learning_rate) lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step], [0.01, second_lr_drop_step]] for mult, start_global_step in lr_schedule: learning_rate = tf.where(global_step < start_global_step, learning_rate, adjusted_learning_rate * mult) return learning_rate
26021c7cbdb264ddc84fad94d4a01b51913f3a72
3,645,904
from typing import List
import logging


def set_power_state_server(power_state: ServerPowerState) -> List[float]:
    """Record the current power limit and set power limit using nvidia-smi.

    Also applies the requested CPU frequency when one is given.

    Returns a ServerPowerState carrying the previous GPU power limits (or
    None when no GPU limit change was requested) so callers can restore them.
    """
    # Bug fix: `current_limits` was unbound when power_state.power_limit was
    # falsy, causing a NameError at the return statement below.
    current_limits = None

    if power_state.power_limit:
        # Record current power limits.
        cmd = "nvidia-smi --query-gpu=power.limit --format=csv,noheader,nounits"
        logging.info(f"Getting current GPU power limits: {cmd}")
        output = run_command(cmd, get_output=True, tee=False)
        current_limits = [float(line) for line in output]

        # Set power limit to the specified value.
        cmd = f"sudo nvidia-smi -pl {power_state.power_limit}"
        logging.info(f"Setting current GPU power limits: {cmd}")
        run_command(cmd)

    if power_state.cpu_freq:
        set_cpufreq(power_state.cpu_freq)

    return ServerPowerState(current_limits, None)
8a9ac60dbd58dedf5f39386a7233b88b7cc5aa79
3,645,905
from typing import Union


def score_normalization(extracted_score: Union[str, None]):
    """
    Sofa score normalization.
    If available, returns the integer value of the SOFA score.

    Returns None when the extracted value is missing, non-numeric, or
    outside the plausible 0-29 range.
    """
    score_range = list(range(0, 30))
    if extracted_score is None:
        return None
    try:
        value = int(extracted_score)
    except ValueError:
        # Robustness fix: the original crashed on non-numeric captures
        # (e.g. OCR noise) instead of treating them as unavailable.
        return None
    if value in score_range:
        return value
74501e9351296037ecc90ae647155e3c6b76ae01
3,645,906
def wrap(wrapping_key_public, plaintext):
    """
    RSA-OAEP key wrapping.

    Args:
        wrapping_key_public: The public key of the RSA wrapping key
        plaintext: The plaintext key to wrap

    Returns:
        The RSA-OAEP ciphertext of ``plaintext``.
    """
    # OAEP configured with SHA-256 as the label/main hash but SHA-1 inside
    # MGF1. NOTE(review): this mixed hash configuration looks deliberate
    # (some HSM/KMS peers default to MGF1-SHA1) -- confirm the unwrapping
    # side expects exactly this, since a mismatch makes decryption fail.
    rsa_cipher = PKCS1_OAEP.new(
        key=wrapping_key_public,
        hashAlgo=SHA256,
        mgfunc=lambda x, y: pss.MGF1(x, y, SHA1))
    return rsa_cipher.encrypt(plaintext)
171074a46440184138ccb1684754f328afc50efe
3,645,908
def compute_horizontal_vessel_purchase_cost(W, D, F_M):
    """
    Return the purchase cost [Cp; in USD] of a horizontal vessel,
    including the cost of platforms and ladders.

    Parameters
    ----------
    W : float
        Weight [lb].
    D : float
        Diameter [ft].
    F_M : float
        Vessel material factor.

    Notes
    -----
    The purchase cost is given by [1]_. See source code for details.
    The purchase cost is scaled according to BioSTEAM's Chemical
    Plant Cost Index, `biosteam.CE`.

    References
    ----------
    .. [1] Seider, W. D., Lewin, D. R., Seader, J. D., Widagdo, S.,
        Gani, R., & Ng, M. K. (2017). Product and Process Design
        Principles. Wiley. Cost Accounting and Capital Cost Estimation
        (Chapter 16)

    """
    log_W = ln(W)
    # Empty-vessel cost correlation in terms of weight.
    vessel_cost = exp(5.6336 - 0.4599*log_W + 0.00582*log_W**2)
    # Platforms-and-ladders cost scales with diameter.
    platform_cost = 2275*D**0.20294
    # Rescale from the correlation's base CE index (567) to the current one.
    return bst.CE/567 * (F_M * vessel_cost + platform_cost)
22d38ffc38dddb992d2fd7b2c20c3dc1d0ddb53d
3,645,910
def format_dev_sub_dev_id(pciIdPair):
    """Render a (pci device id, pci sub device id) pair as zero-padded hex.

    The sub device id may be None, in which case it is printed literally.
    """
    device_id, sub_device_id = pciIdPair
    if sub_device_id is None:
        return "(0x%08X, None)" % device_id
    return "(0x%08X, 0x%08X)" % (device_id, sub_device_id)
fded71eee57f4fac60175bfb015845bf1eba58f7
3,645,911
def mychats():
    """
    Show Chats where I can write
    :return: { error: 0, chats: [...Chat] }
    """
    result = {'error': 0, 'chats': []}
    # Only a logged-in user has chats; anonymous callers get an empty list.
    if 'user_id' in session:
        uid = session['user_id']
        result['chats'] = query_db(
            'SELECT * FROM chats WHERE user1_id = ? OR user2_id = ?',
            [uid, uid])
    return result
4af7cd34fb8649ed10723b258e7a864e3e12edc2
3,645,912
def polynom_prmzt(x, t, order):
    """
    Polynomial (deterministic) parameterization of fast variables (Y).

    NB: Only valid for system settings of Wilks'2005.

    Note: In order to observe an improvement in DA performance w
    higher orders, the EnKF must be reasonably tuned with
    There is very little improvement gained above order=1.
    """
    if order == 4:
        # From Wilks
        return 0.262 + 1.45*x - 0.0121*x**2 - 0.00713*x**3 + 0.000296*x**4
    if order == 3:
        # From Arnold
        return 0.341 + 1.30*x - 0.0136*x**2 - 0.00235*x**3
    if order == 1:
        # From me -- see AdInf/illust_parameterizations.py
        return 0.74 + 0.82*x
    if order == 0:
        # From me -- see AdInf/illust_parameterizations.py
        return 3.82
    if order == -1:
        # Leave as dxdt_trunc
        return 0
    raise NotImplementedError
80d3f9563c5f8a04a65de7d2d22f5d49d35c71fe
3,645,913
def decode(s):
    """ Deserialize an EDS object from an EDS string. """
    # Tokenize line by line, then hand the token stream to the parser.
    lines = s.splitlines()
    return _decode_eds(_EDSLexer.lex(lines))
3c7eb8ac7e570aeb1297b052e35c804dd27b0f49
3,645,915
def allowed_file(filename):
    """ Verifies if file extension is compatible """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
75e0047eff0787e33f687e7a9b689ad8661b7501
3,645,917
def clap_convert(txt):
    """convert string of clap values on medium to actualy number

    Args:
        txt (str): claps values

    Returns:
        number on claps (int)
    """
    # Medium abbreviates thousands with a trailing "K" (e.g. "3.5K").
    if txt[-1] == "K":
        thousands = float(txt[:-1])
        return int(thousands * 1000)
    return int(txt)
253e0e2be4f37f1994637bbfc80edfc5d72bc4e5
3,645,918
import io


def write_phase1_capsummary(inst, isStringIO=True):
    """
    Write out a multiweek summary of capacity, demand, understaffing.

    :param inst: Model instance
    :param isStringIO: True (default) to return StringIO object, False to return string
    :return: capacity summary as StringIO object or a string.
    """
    # CSV header line.
    param = 'period,day,week,dmd,cap,us1,us2,ustot\n'
    # One row per (period, day, week) cell of the model: demand, coverage,
    # both understaffing variables, and their total.
    rows = [(i, j, w,
             inst.dmd_staff[i, j, w],
             inst.cov[i, j, w].value,
             inst.under1[i, j, w].value,
             inst.under2[i, j, w].value,
             inst.under1[i, j, w].value + inst.under2[i, j, w].value)
            for i in inst.PERIODS
            for j in inst.DAYS
            for w in inst.WEEKS
            ]
    for row in rows:
        row = [str(r) for r in row]
        data_row = ','.join(row)
        data_row += '\n'
        param += data_row

    if isStringIO:
        # NOTE(review): this branch returns getvalue(), i.e. a plain string,
        # not the StringIO object the docstring promises -- confirm callers'
        # expectations before changing either.
        param_out = io.StringIO()
        param_out.write(param)
        return param_out.getvalue()
    else:
        return param
6d6e7d083693b74ea27e7f10cec4899735f32541
3,645,919
def glorot_uniform(shape):
    """
    :param shape: tuple with the shape of the wanted output (filters_amount, depth, height, width)
    :return: array (it's shape=param shape) with initialized values using 'glorot uniform' initializer
    """
    fan_in, fan_out = _calc_fans(shape)
    # Glorot/Xavier scaling: variance ~ 2 / (fan_in + fan_out).
    variance_scale = 2. / (fan_in + fan_out)
    bound = np.sqrt(3.0 * variance_scale)
    return np.random.uniform(low=-bound, high=bound, size=shape)
0cec12b0342db827286248a722b32852cab2bdad
3,645,920
import warnings


def second_order_moments(n_components, e2, m1, alpha0):
    """Second-Order Moments

    To prevent creating 2nd order moments explicitly, we construct its
    decomposition with `n_components`. check reference [?] section 5.2
    for details.

    Parameters
    ----------
    n_components: int
        Number of components

    e2: sparse matrix, shape=(n_features, n_features)
        Expectation of word pairs. e2[i, j] is the expectation of word `i`
        and `j` in the same document.

    m1: array, shape=(n_features,)
        Expectation of each words.

    alpha0: double
        Sum of topic topic concentration parameter

    Returns
    -------
    m2_vals : array, shape=(n_components,)
        eigen values of second-order moments

    m2_vecs : array, shape=(n_features, n_components)
        eigen vectors of second-order moments
    """
    # eigen values and vectors of E2
    n_features = e2.shape[0]
    #print("%d ; %d" % (n_features, n_components))
    if n_components == n_features:
        # run full svd, convert e2 to dense array first
        # (sparse svds cannot return all singular vectors).
        e2_vecs, e2_vals, _ = LA.svd(e2.toarray())
    else:
        #e2_vals, e2_vecs = sp.linalg.eigsh(e2, k=n_components, which='LM')
        # Truncated sparse SVD: keep only the top `n_components` singular pairs.
        e2_vecs, e2_vals, _ = sp.linalg.svds(e2, k=n_components, which='LM',
                                             return_singular_vectors=True)

    # Rescale by (alpha0 + 1) per the moment formula.
    e2_vals *= (alpha0 + 1.)
    # Project the first moment into the reduced eigenspace.
    m1_p = np.dot(e2_vecs.T, m1)
    # section 5.2 part 1.
    # Build M2' = (alpha0+1)*diag(vals) - alpha0 * (m1' m1'^T) in reduced space.
    m2_p = (-1. * alpha0) * (m1_p * m1_p[:, np.newaxis])
    m2_p[np.diag_indices_from(m2_p)] += e2_vals
    # section 5.2 part 1.
    # eigen values and vectors of M2 prime
    try:
        m2p_vecs, m2p_vals, _ = LA.svd(m2_p)
        m2_vals = m2p_vals
        # Lift the reduced-space eigenvectors back to feature space.
        m2_vecs = np.dot(e2_vecs, m2p_vecs)
    except LA.LinAlgError:
        # In order to pass `check_estimator` test.
        # convert this error to warnings.
        warnings.warn("SVD in second_order_moments did not converge. "
                      "the algorithm will not work.", ConvergenceWarning)
        m2_vals = np.ones(m2_p.shape[0])
        m2_vecs = m2_p
    return (m2_vals, m2_vecs)
4b2ac9d43352d856875d86cd1975ec59ac5664c8
3,645,922
def remove_names(df: pd.DataFrame) -> pd.DataFrame:
    """Convert personal names to numerical values."""
    # Promote the index to a column, then discard the 'Name' column so rows
    # are identified only by the new numeric index.
    reindexed = df.reset_index()
    return reindexed.drop(columns='Name')
9dab1803a153d5effd2e08b6e6ff5df30fee8407
3,645,924
import torch


def handle_epoch_metrics(step_metrics, epoch_labels, epoch_predictions):
    """
    Aggregate per-step results into a single epoch summary.

    Inputs:
        step_metrics - Dictionary containing the results of the steps of an epoch
        epoch_labels - List of labels from the different steps
        epoch_predictions - List of predictions from the different steps
    Outputs:
        epoch_metrics - Dictionary with the averaged loss, accuracy and f1
    """
    # Average the per-step losses into one scalar, rounded for reporting.
    stacked = torch.stack(step_metrics['losses'], dim=0)
    mean_loss = round(torch.mean(stacked, dim=0).item(), 4)

    # Accuracy / F1 over all predictions and labels of the epoch.
    accuracy, f1 = compute_accuracy_f1(step_metrics['predictions'], step_metrics['labels'])

    return {'loss': mean_loss, 'accuracy': accuracy, 'f1': f1}
a1d0180095535eec641258dd921c90808aa6858f
3,645,925
def project_disk_sed(bulge_sed, disk_sed):
    """Project the disk SED onto the space where it is bluer

    For the majority of observed galaxies, it appears that the difference
    between the bulge and the disk SEDs is roughly monotonic, making the
    disk bluer. This projection operator projects colors that are redder
    onto the same difference in color as the previous wavelength, similar
    to the way monotonicity works for the morphological `S` matrix of the
    model. While a single iteration of this model is unlikely to yield
    results that are as good as those in `project_disk_sed_mean`, after
    many iterations it is expected to converge to a better value.
    """
    projected = disk_sed.copy()
    color_diff = bulge_sed - disk_sed
    # Walk interior wavelengths; wherever the bulge-disk color becomes
    # redder than at the previous wavelength, clamp to the previous
    # difference (enforcing the monotonic trend).
    for idx in range(1, len(color_diff) - 1):
        previous = color_diff[idx - 1]
        if color_diff[idx] < previous:
            projected[idx] = projected[idx] + previous
            color_diff[idx] = previous
    return projected
5faf8f7d8d0d780f61586f7fae39f4ba04d3752d
3,645,926
def qlog_numpy(q):
    """
    Applies logarithm map to q
    :param q: (4,)
    :return: (3,)
    """
    vec = q[1:]
    # A pure-real quaternion maps to the zero vector.
    if all(vec == 0):
        return np.zeros(3)
    angle = np.arccos(q[0])
    return angle * vec / np.linalg.norm(vec)
82cf0ff2054c02e4cc3dc3a6500b1c8a0e3eb870
3,645,928
def get_ML_features(df: pd.DataFrame, protease: str='trypsin', **kwargs) -> pd.DataFrame:
    """
    Add feature columns used by the subsequent ML scoring step.

    Note: despite what an earlier docstring said, this function does NOT
    filter PSMs or apply an FDR threshold -- it only derives features
    (decoy flag, mass-error, sequence statistics, cleavage counts and the
    X!Tandem score) from the search results.

    Args:
        df (pd.DataFrame): psms table of search results from alphapept.
        protease (str, optional): string specifying the protease that was used for proteolytic digestion. Defaults to 'trypsin'.

    Returns:
        pd.DataFrame: df including additional scores for subsequent ML.
    """
    # Decoy sequences are marked by a lowercase final residue.
    df['decoy'] = df['sequence'].str[-1].str.islower()
    df['abs_delta_m_ppm'] = np.abs(df['delta_m_ppm'])
    # Strip modification annotations: keep only uppercase amino-acid letters.
    df['naked_sequence'] = df['sequence'].apply(lambda x: ''.join([_ for _ in x if _.isupper()]))
    df['n_AA']= df['naked_sequence'].str.len()
    # Each residue can contribute at most two fragment ions (b and y).
    df['matched_ion_fraction'] = df['hits']/(2*df['n_AA'])
    df['n_missed'] = df['naked_sequence'].apply(lambda x: count_missed_cleavages(x, protease))
    df['n_internal'] = df['naked_sequence'].apply(lambda x: count_internal_cleavages(x, protease))
    df['x_tandem'] = get_x_tandem_score(df)
    return df
4ac4202fa5c86b78b1bda1a2b96d5ed4b8552b4f
3,645,929
def revcomp(sequence):
    """
    Find reverse complementary sequence

    :param sequence: The RNA sequence in string form

    :return: The reverse complement sequence in string form
    """
    complement = {"A": "U", "U": "A", "C": "G", "G": "C", "N": "N"}
    # Walk the sequence backwards and complement each base (case-insensitive).
    return "".join(complement[base.upper()] for base in reversed(sequence))
c66b9ad967e612fa97f18bb2932e7eb4bbee8245
3,645,930
def J(X, mean, r, K=None):
    """K-means objective function (to be minimized).

    Sum over all points of r[n, k] * ||X[n] - mean[k]||^2.

    Args:
        X: data points, indexable as X[n].
        mean: cluster centers, indexable as mean[k].
        r: responsibility matrix, r[n, k] is 1 (or a weight) when point n
           belongs to cluster k.
        K: number of clusters. Defaults to r.shape[1], replacing the
           original implicit dependence on a module-level global `K`.

    Returns:
        float: the weighted sum of squared distances.
    """
    if K is None:
        # Generalization: derive the cluster count from r itself instead of
        # relying on a global, keeping the old call signature working.
        K = r.shape[1]
    total = 0.0
    for n in range(len(X)):
        for k in range(K):
            total += r[n, k] * np.linalg.norm(X[n] - mean[k]) ** 2
    return total
1d2dd241fc30cb5897b0224285c5c7f2f2fec675
3,645,931
import time


def timesince():
    """Return the number of seconds elapsed since the Unix epoch
    (00:00 on 1 January 1970), without any formatting applied.
    """
    return time.time()
7e6944d74172947c4ac990c0fa993524ab865e18
3,645,933
def gencoords_outside(N, d, rad=None, truncmask=False, trunctype='circ'):
    """Generate coordinates of all points in an NxN..xN grid with d dimensions.

    Coords in each dimension are [-N/2, N/2); N should be even.

    Args:
        N: grid size per dimension (even).
        d: number of dimensions.
        rad: optional truncation radius as a fraction of N/2.
        truncmask: when False, return only the truncated coordinates; when
            True, return (coords, truncated coords, mask).
        trunctype: 'circ' (Euclidean) or 'square' (Chebyshev) truncation.

    Returns:
        truncated coords when truncmask is False, else the
        (coords, truncated coords, mask) triple.
    """
    if not truncmask:
        # Bug fix: the recursive call previously dropped `trunctype`,
        # silently forcing circular truncation.
        _, truncc, _ = gencoords_outside(N, d, rad, True, trunctype)
        return truncc

    c = geometry.gencoords_base(N, d)

    if rad is not None:
        if trunctype == 'circ':
            r2 = np.sum(c**2, axis=1)
            trunkmask = r2 > (rad*N/2.0)**2
        elif trunctype == 'square':
            r = np.max(np.abs(c), axis=1)
            trunkmask = r > (rad*N/2.0)
        else:
            # Bug fix: an unknown trunctype previously fell through to a
            # NameError; fail with a clear message instead.
            raise ValueError("trunctype must be 'circ' or 'square', got %r"
                             % (trunctype,))
        truncc = c[trunkmask, :]
    else:
        # Bug fix: np.bool8 was deprecated and removed in NumPy 1.24;
        # the builtin bool dtype is the supported spelling.
        trunkmask = np.ones((c.shape[0],), dtype=bool)
        truncc = c

    return c, truncc, trunkmask
0b4f3db165cb495e5d540412cb77bd36e8a42c62
3,645,934
def map_orientation(cur_orientation, cur_count):
    # Enumerate every legal placement of a piece orientation on a 6x6 board
    # encoded as a 36-bit integer (bit 0 = top-left, row-major), appending
    # each placement into the global `peice_orientation_list` starting at
    # index `cur_count`. Returns the index one past the last entry written.
    """
    . . . . . x
    . . . . . x
    . . . . . x
    . . . . . x
    . . . . . x
    . . . . . x
    """
    right_edge = 34905131040
    """
    . . . . . .
    . . . . . .
    . . . . . .
    . . . . . .
    . . . . . .
    x x x x x x
    """
    bottom_edge = 67645734912
    """
    we will check if each position of the game peice is valid by
    investigating if it touches the right edge or the bottom edge using
    a logica AND (&) operation. The & will be 0 if there is no overlap
    and <> 0 if there is

    Pass in peices positioned in the upper left corner so that this
    check can walk right and down to checkk all conditions
    """
    room_to_move_right = True
    room_to_move_down = True
    safe_down = True
    while safe_down:
        room_to_move_right = True
        safe_right = True
        # Remember the leftmost position of this row so we can restart from
        # it when moving down.
        row_start = cur_orientation
        while safe_right:
            # Record the current placement.
            peice_orientation_list[cur_count] = cur_orientation
            cur_count += 1
            """
            moving piece right 1 bit is the same as multiplying by 2^1
            . x . . . .
            x x x . . . = 450

            . . x . . .
            . x x x . . = 900
            """
            if room_to_move_right:
                cur_orientation = cur_orientation << 1
                # Stop the row once the shifted piece overlaps the right edge.
                room_to_move_right = ((cur_orientation & right_edge) == 0)
            else:
                safe_right = False
        """
        moving down is the same as shifting right 6 times or multiplying
        by 2^6, aka 64
        . x . . . .
        x x x . . . = 450

        . x . . . .
        x x x . . . = 28,800
        """
        if room_to_move_down:
            # Drop one row from the row's starting position.
            cur_orientation = row_start << 6
            room_to_move_down = ((cur_orientation & bottom_edge) == 0)
        else:
            safe_down = False
    return cur_count
5fa5e0c386da56cab336f33e560bf9591814060c
3,645,935
import ctypes


def glGetShaderInfoLog( baseOperation, obj ):
    """Retrieve the shader's error messages as a Python string

    returns string which is '' if no message
    """
    # Ask GL how long the info log is (including the NUL terminator).
    target = GLsizei()
    glGetShaderiv(obj, GL_INFO_LOG_LENGTH, target)
    length = target.value
    if length > 0:
        # Allocate a C buffer of the reported size and let GL fill it.
        log = ctypes.create_string_buffer(length)
        baseOperation(obj, length, None, log)
        return log.value.strip(_NULL_8_BYTE)  # null-termination
    return ''
0173aa2cbeac8c8b2cb9072d0d56584285af2e0d
3,645,936
def _CheckGrdTranslations(grd_file, grd_lines, wanted_locales):
    """Check all <file> elements that correspond to an .xtb output file.

    Args:
        grd_file: Input .grd file path.
        grd_lines: List of input .grd lines.
        wanted_locales: set of wanted Chromium locale names.
    Returns:
        List of error message strings. Empty on success.
    """
    # The default locale has no .xtb translation file, so exclude it.
    locales = wanted_locales - {_DEFAULT_LOCALE}
    intervals = _BuildIntervalList(grd_lines, _IsTranslationGrdOutputLine)
    errors = []
    for start, end in intervals:
        errors.extend(_CheckGrdElementRangeLang(grd_lines, start, end, locales))
        errors.extend(
            _CheckGrdTranslationElementRange(grd_lines, start, end, locales))
    return errors
49b73187cd2ac3c7b9796a8a139d89f9a74c91a3
3,645,937
def choose_diverging_palette(as_cmap=False):
    """Launch an interactive widget to choose a diverging color palette.

    This corresponds with the :func:`diverging_palette` function. This kind
    of palette is good for data that range between interesting low values
    and interesting high values with a meaningful midpoint. (For example,
    change scores relative to some baseline value).

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    diverging_palette : Create a diverging color palette or colormap.
    choose_colorbrewer_palette : Interactively choose palettes from the
                                 colorbrewer set, including diverging palettes.

    """
    # The widget callback below mutates these; the function returns them
    # after the widget is wired up (the palette updates live thereafter).
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    @interact
    def choose_diverging_palette(
        h_neg=IntSlider(min=0,
                        max=359,
                        value=220),
        h_pos=IntSlider(min=0,
                        max=359,
                        value=10),
        s=IntSlider(min=0, max=99, value=74),
        l=IntSlider(min=0, max=99, value=50),  # noqa: E741
        sep=IntSlider(min=1, max=50, value=10),
        n=(2, 16),
        center=["light", "dark"]
    ):
        # Re-generate the palette each time a slider changes, updating the
        # shared cmap in place or rebinding the list contents.
        if as_cmap:
            colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
            _update_lut(cmap, colors)
            _show_cmap(cmap)
        else:
            pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
            palplot(pal)

    if as_cmap:
        return cmap
    return pal
0c2ffc8710a56e643e6c4ccdd453bd00cc59e6a2
3,645,938
def get_lm_model(args, device, config):
    """Get language model(based on GPT-2) used for sequence prediction."""
    model = transformer_lm.TransformerLM(
        config["vocab_size"],
        config["ninp"],
        config["nhead"],
        config["nhid"],
        config["dropout"],
        config["initrange"],
        config["num_decoder_layers"],
    )
    # With SSD offload the parameters must stay on the host; otherwise move
    # the model to the requested device.
    if args.ssd_offload:
        return model
    return model.to(device)
071f43b9537ca8024f980271c64750e7afae864e
3,645,939
def get_meminfo():
    """
    Get and format the content of /proc/meminfo

    Returns one comma-separated string of "Key:ValueUnit" entries with all
    whitespace stripped, e.g. "MemTotal:16384kB,MemFree:1024kB,...".
    """
    # Resource fix: use a context manager so the file handle is closed
    # deterministically (the original left it to the garbage collector).
    with open('/proc/meminfo') as f:
        buf = f.read()
    return ','.join(v.replace(' ', '') for v in buf.split('\n') if v)
47a0d57d1b8c90b4907fab8e39154b8e4ad5b7ee
3,645,940
def effective_area(true_energy, reco_energy, simu_area):
    """
    Compute the effective area from a list of simulated energy and reconstructed energy
    Parameters
    ----------
    true_energy: 1d numpy array
    reco_energy: 1d numpy array
    simu_area: float - area on which events are simulated

    Returns
    -------
    float = effective area
    """
    # Effective area = simulated area scaled by the fraction of events
    # that survived reconstruction.
    survival_fraction = len(reco_energy) / len(true_energy)
    return simu_area * survival_fraction
b17efa390a1ae14bb8ecb959740bad8c391b1d2e
3,645,941
def execute(queries, arglists, fetchone=False):
    """Execute multiple queries to the sqlite3 jobtracker database.
        All queries will be executed as a single transaction.

        Return the result of the last query, or the ID of the
        last INSERT, whichever is applicaple.

        Inputs:
            queries: A list of queries to be execute.
            arglists: A list (same length as queries). Each entry
                contains the paramters to be substituted into
                the corresponding query.
            fetchone: If True, fetch and return only a single row.
                Otherwise, fetch and return all rows.
                (Only applies for SELECT statements.
                Default: fetch all rows).

        Outputs:
            results: Single row, or list of rows (for SELECT statements),
                depending on 'fetchone'. Or, the ID of the last
                entry INSERT'ed (for INSERT statements).
    """
    not_connected = True
    count = 0
    while not_connected:
        try:
            db_conn = sqlite3.connect(config.background.jobtracker_db, timeout=40.0)
            db_conn.isolation_level = 'DEFERRED'
            db_conn.row_factory = sqlite3.Row
            db_cur = db_conn.cursor()
            # All statements run inside one DEFERRED transaction.
            for q, args in zip(queries, arglists):
                db_cur.execute(q, args)
            db_conn.commit()
            if db_cur.lastrowid:
                results = db_cur.lastrowid
            else:
                results = db_cur.fetchone() if fetchone else db_cur.fetchall()
            db_conn.close()
            not_connected = False
        # Syntax fix: "except X, e" is Python-2-only; the rest of the
        # codebase is Python 3.
        except sqlite3.OperationalError as e:
            try:
                db_conn.rollback()
                db_conn.close()
            except NameError:
                # Connection wasn't established, 'db_conn' is not defined.
                pass
            # Report once a minute rather than flooding the console.
            if (count % 60) == 0:
                print("Couldn't connect to DB for %d seconds. Will continue trying. "
                      "Error message: %s" % (count, str(e)))
            time.sleep(1)
            count += 1
    return results
a64e12262150514dbc5e6e7f4c193481ab8162aa
3,645,942
def physical_cpu_mhz(vir_connection):
    """ Get the CPU frequency in MHz using libvirt.

    :param vir_connection: A libvirt connection object.
     :type vir_connection: virConnect

    :return: The CPU frequency in MHz.
     :rtype: int
    """
    # getInfo() packs node facts into a list; index 3 is the CPU MHz field.
    node_info = vir_connection.getInfo()
    return node_info[3]
f6a404a6d531940fbc762f493e90355e2fc78690
3,645,943
def addstream(bot, input):
    """Add a stream from the notify list"""
    # Admin-only command; silently ignore missing arguments.
    if not input.admin:
        return False
    if not input.group(2):
        return
    stream_name = input.group(2).lower()
    if stream_name in bot.config.streams:
        bot.reply("{0} is already in the stream list".format(stream_name))
    else:
        bot.config.set_add('streams', stream_name)
        bot.reply("Added {0} to stream list".format(stream_name))
48465633ea58968efca31231eb5e1a47a537c979
3,645,944
def get_author(search):
    """
    Queries google scholar to find an author given a search string.

    Raises ValueError unless exactly one author matches the search string;
    returns that author's record filled with basics, indices and
    publications.
    """
    authors = list(scholarly.search_author(search))
    if len(authors) > 1:
        # Bug fix: the f-string referenced an undefined name `searc`,
        # which raised NameError instead of the intended ValueError.
        raise ValueError(f'Found >1 authors with search string: {search}, try something more specific')
    elif not authors:
        raise ValueError(f'Could not find authors with search string: {search}')

    return authors[0].fill(sections=['basics', 'indices', 'publications'])
8fffd75f588194db0707ddd7249823fb73324549
3,645,945
import torch


def _named_tensor_generic_operation(
        tensor: torch.Tensor,
        tensor_ops_pre: callable = dummy,
        tensor_ops_post: callable = dummy,
        name_ops: callable = dummy) -> torch.Tensor:
    """Generic base function used by other named-tensor helpers.

    Many tensor operations reject named tensors, so this helper stores the
    names, strips them, applies the requested operations, and re-attaches
    the (possibly transformed) names at the end.

    Args:
        tensor (): the named tensor to work on
        tensor_ops_pre (): operation applied while the tensor still carries
            its names
        tensor_ops_post (): operation applied after the names are removed
        name_ops (): operation mapping the old name tuple to the new one

    Returns:
        The transformed tensor with the new names refined onto it.
    """
    preprocessed = tensor_ops_pre(tensor)
    saved_names = preprocessed.names        # remember names before stripping
    unnamed = preprocessed.rename(None)     # drop names so any op is allowed
    updated_names = name_ops(saved_names)   # compute the new name tuple
    transformed = tensor_ops_post(unnamed)  # transform the raw tensor
    return transformed.refine_names(*updated_names)
8a343f2ab2c4aeaebcf64e8fc5e75cb3d8776241
3,645,946
def normalize_address_components(parsed_addr):
    # type: (MutableMapping[str, str]) -> MutableMapping[str, str]
    """Normalize parsed sections of address as appropriate.

    Runs the parsed address through each normalization pass in a fixed
    order: numbered streets, directionals, street types, occupancy type.

    :param parsed_addr: address parsed into ordereddict per usaddress.
    :type parsed_addr: Mapping
    :return: parsed_addr with normalization processing applied to elements.
    :rtype: dict
    """
    normalization_passes = (
        normalize_numbered_streets,
        normalize_directionals,
        normalize_street_types,
        normalize_occupancy_type,
    )
    for normalize in normalization_passes:
        parsed_addr = normalize(parsed_addr)
    return parsed_addr
48730c85ee7930b27260b97a6ad876bcecf1b5cc
3,645,947
def is_key_in_store(loc, key):
    """
    A quick check to determine whether the :class:`pandas.HDFStore` has data
    for ``key``

    :ARGS:

        loc: :class:`string` of path to :class:`pandas.HDFStore`

        key: :class:`string` of the ticker to check if currently available

    :RETURNS:

        whether ``key`` is currently a part of the data set, or ``None``
        when ``loc`` is not a valid path (a message is printed)
    """
    try:
        store = pandas.HDFStore(path=loc, mode='r')
    except IOError:
        # BUGFIX: was a Python 2 `print` statement (SyntaxError on Python 3).
        # Preserve the original best-effort behavior of returning None.
        print(loc + " is not a valid path to an HDFStore Object")
        return
    try:
        store_keys = store.keys()
    finally:
        # BUGFIX: previously the store leaked if keys() raised.
        store.close()
    # HDFStore reports keys with a leading '/', e.g. '/AAPL'.
    return key in [k.strip('/') for k in store_keys]
273bd534daa0f70831e77da88808033e4f1683eb
3,645,949
def transform_rows_nonlinear06(data, **kwargs):
    """
    Nonlinear row transformation 06. 12 simulated data sources; Functions:
    1.0, 0.5*(x+1)^2, sin(pi*x), sin(2*pi*x), cos(pi*x), cos(2*pi*x), x^5,
    exp2, log10(x-x.min()), boxcox(2), boxcox(4), boxcox(6).
    """
    def _shifted(x):
        # Shift inputs so log10/boxcox always receive strictly positive data.
        return x + (-1.0 * x.min()) + 0.01

    sources_transformers = [
        1.0,
        lambda x: 0.5 * np.power((x + 1), 2),
        lambda x: np.sin(np.pi * x),
        lambda x: np.sin(2.0 * np.pi * x),
        lambda x: np.cos(np.pi * x),
        lambda x: np.cos(2.0 * np.pi * x),
        lambda x: np.power(x, 5),
        lambda x: np.exp2(x),
        lambda x: np.log10(_shifted(x)),
        lambda x: boxcox(_shifted(x), 2.00),
        lambda x: boxcox(_shifted(x), 4.00),
        lambda x: boxcox(_shifted(x), 6.00),
    ]

    return _generic_data_transformation(data, sources_transformers, **kwargs)
dd51c838d9721fe310463fa8a0cdb0505e9c4f0f
3,645,950
def catch_parameter(opt):
    """Change the captured parameters names.

    Maps a short command-line flag (e.g. '-h') to its canonical
    parameter name (e.g. 'help').

    :param opt: short flag string
    :return: canonical parameter name
    :raises Exception: if the flag is not a recognized option
    """
    switch = {'-h': 'help', '-o': 'one_timestamp', '-a': 'activity',
              '-f': 'file_name', '-i': 'imp', '-l': 'lstm_act',
              '-d': 'dense_act', '-p': 'optim', '-n': 'norm_method',
              '-m': 'model_type', '-z': 'n_size', '-y': 'l_size',
              '-c': 'folder', '-b': 'model_file', '-x': 'is_single_exec',
              '-t': 'max_trace_size', '-e': 'splits', '-g': 'sub_group'}
    try:
        return switch[opt]
    except KeyError:
        # BUGFIX: was a bare `except:` which would also mask unrelated
        # errors; only a missing key means an invalid option.
        raise Exception('Invalid option ' + opt) from None
ad3a25e3786b657947893f96a76e80f17eb3b0f0
3,645,951
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
    """
    Fetches info about mgr pods in the cluster

    Args:
        mgr_label (str): label associated with mgr pods
            (default: defaults.MGR_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : of mgr pod objects
    """
    namespace = namespace or config.ENV_DATA['cluster_namespace']
    # Wrap each raw pod record in a Pod object.
    return [Pod(**pod_info) for pod_info in get_pods_having_label(mgr_label, namespace)]
8079d291d5e2b996547ecd615fbc00a8c70aa4e9
3,645,954
from typing import Union


def normalise_architecture(architecture: Union[str, int]):
    """Convert any valid architecture alias to either 'x86_64' or 'i686'.

    Raise an error for invalid input.
    """
    # First canonical name whose alias set contains the input wins.
    for true_name, aliases in architecture_aliases.items():
        if architecture not in aliases:
            continue
        return true_name
    raise ValueError(
        f"Invalid architecture {repr(architecture)}. "
        f"Legal 64 bit values are:\n {architecture_aliases['x86_64']}\n"
        f"And legal 32 bit values are:\n {architecture_aliases['i686']}\n"
    )
a7e99a2e8cc527028b82c7e628bd18f9c63c7f61
3,645,955
def process_m(filename, m, estimator):
    """Returns the list of file sizes and PSNR values for compression
    method m.
    """
    # One (size, psnr) pair per quality setting 0, 5, ..., 100.
    measurements = [process_q(filename, q, m, estimator) for q in range(0, 101, 5)]
    filesize = [size / 1024 for size, _ in measurements]  # in kilobyte(s)
    psnr = [value for _, value in measurements]
    return filesize, psnr
7bf1bbcdf31709393788006d8d5cd1bef3bf5509
3,645,956
def fmt(n):
    """format number with a space in front if it is single digit"""
    return " " + str(n) if n < 10 else str(n)
976acc22cafd6d6bdb4e251853f49a114b63ec21
3,645,957
def test_registration():
    """Test registering a magic and getting a copy of it and de-registering."""
    manager.MagicManager.clear_magics()

    def sample_magic(cell=None, line=None):
        """This is a magic."""
        cell = cell or 'foo'
        line = line or 'bar'
        return f'{cell}{line}'

    sample_magic.magic_name = 'magical_function'
    sample_magic.fn = sample_magic

    # Plain registration: the magic is retrievable and callable.
    manager.MagicManager.register_magic(sample_magic)
    registered = manager.MagicManager.get_magic('magical_function')
    assert registered() == 'foobar'

    sample_magic.magic_name = 'other_magic'

    def never_active():
        return False

    # A falsy conditional suppresses registration.
    manager.MagicManager.register_magic(sample_magic, conditional=never_active)
    registered = manager.MagicManager.get_magic('other_magic')
    assert registered is None

    # Re-registering without the conditional makes it available again.
    manager.MagicManager.register_magic(sample_magic)
    registered = manager.MagicManager.get_magic('other_magic')
    assert registered() == 'foobar'

    manager.MagicManager.deregister_magic('other_magic')
    registered = manager.MagicManager.get_magic('other_magic')
    assert registered is None

    manager.MagicManager.deregister_magic('magical_function')
    registered = manager.MagicManager.get_magic('magical_function')
    assert registered is None

    # De-registering an unknown magic raises.
    with pytest.raises(KeyError):
        manager.MagicManager.deregister_magic('does_not_exist')
899573442f12e6e6544f32dcd472fd495eb9dc3b
3,645,958
def stop():
    """Stop cleaning
    This is using docstrings for specifications.
    ---
    definitions:
      stop:
        type: object
        properties:
          did:
            type: string
          siid:
            type: integer
          aiid:
            type: integer
          code:
            type: integer
          out:
            type: array
            items: {}
    security:
      - Bearer: []
    responses:
      200:
        description: OK
        schema:
          $ref: '#/definitions/stop'
      400:
        description: Bad Request
      401:
        description: Unauthorized
    """
    # NOTE(review): this endpoint is named `stop` but shells out to the
    # `play_sound` miio command -- confirm the intended vacuum command.
    consoleOutput = (
        popen("miiocli dreamevacuum --ip " + creds.ip + " --token " + creds.token + " play_sound")
        .read()
        .strip()
        .rstrip("\n")
    )
    # 400
    if consoleOutput.find("Error") != -1:
        return Response(response=consoleOutput.rstrip("\n"), status=400, mimetype="text/plain")
    # 200
    result = consoleOutput.partition("\n")[2]
    print(result)
    if result.find("{'did'") != -1:
        return Response(response=result.replace("'", '"'), status=200, mimetype="application/json")
    # BUGFIX: previously this path fell through and returned None, which
    # makes Flask raise a TypeError; return the raw output instead.
    return Response(response=result, status=200, mimetype="text/plain")
e9d7558f4433e73a92229fbf79628eb48357e12b
3,645,959
import json


def save_key(access_key, output_filename=DEFAULT_ACCESS_KEY_FILE):
    """ saves access key to .yc json file """
    # json.dump writes the same pretty-printed payload as dumps()+write().
    with open(output_filename, "w+") as key_file:
        json.dump(access_key, key_file, indent=4)
    return output_filename
7f15a469ad9b74a39452d8bde46223ef214300d9
3,645,960
def PutObject(object_id: str):
    """Add/replace DRS object with a user-supplied ID.

    Args:
        object_id: Identifier of DRS object to be created/updated.

    Returns:
        Identifier of created/updated DRS object.
    """
    payload = request.json
    return register_object(data=payload, object_id=object_id)
faf0aa633ef149c34f3fe0e80d8fdcc9df68dfec
3,645,961
def get_handler_name(method: str, url_path: str, path_params: dict):
    """
    Build the handler name used for reflective method dispatch.

    :param method: HTTP method
    :param url_path: request URL path
    :param path_params: mapping of parameter name -> concrete value in the path
    :return: handler name, e.g. 'get_users_user_id'
    """
    handler = url_path.replace('/', '_')
    # Substitute each concrete path value back with its parameter name.
    for param_name, param_value in path_params.items():
        handler = handler.replace(param_value, param_name)
    return method.lower() + handler
e8060538a6bf73e6291ecbcbec14f11997a53507
3,645,962
import json


def plot_AP(file_path: str):
    """Plot a horizontal bar chart of per-class AP values.

    Reads an evaluation result JSON (per-class entries plus an 'mAP' key)
    and returns the matplotlib figure and axes.
    """
    with open(file_path, encoding='utf-8') as f:
        result = json.load(f)

    AP = []
    classes = []
    for name, metrics in result.items():
        # Skip the aggregate entry; it goes into the title instead.
        if name == 'mAP':
            continue
        AP.append(metrics['AP'])
        classes.append(name)

    fig, ax = plt.subplots(1, 1, num='AP 柱状图')
    ax.barh(range(len(AP)), AP, height=0.6, tick_label=classes)
    ax.set(xlabel='AP', title=f'mAP: {result["mAP"]:.2%}')
    return fig, ax
7d691161d07d5f4a70c2b46b8971f54c93972a7b
3,645,964
def partner_data_ingest_new_files(source, destination):
    """
    :param source : list of files to process:
    :param destination: destination to copy validated files

    Check the s3 path for new files and trigger partner_data_ingest
    for the ones not yet present at the destination.
    """
    sync_hook = S3SyncHook(aws_conn_id="aws_default", verify=True)
    new_files = sync_hook.diff(source, destination)
    return partner_data_ingest(new_files=new_files, destination=destination)
49685cac11c12af7aa3e2e9ecc152dc46f1b2c5e
3,645,965
import six


def _mofval(value, indent, maxline, line_pos=0, end_space=0):
    """
    Low level function that returns the MOF representation of a non-string
    value (i.e. a value that cannot not be split into multiple parts, for
    example a numeric or boolean value).

    If the value does not fit on the remainder of the current line, it is
    emitted on a new indented line instead. Unlike pywbem's mofval, the new
    line is emitted even when it exceeds maxline: this function favors
    producing output over raising exceptions.

    Parameters:

      value (:term:`unicode string`): The non-string value. Must not be `None`.

      indent (:term:`integer`): Number of spaces to indent any new lines
        that are generated.

      maxline (:term:`integer`): Maximum line length for the generated MOF.

      line_pos (:term:`integer`): Length of content already on the current
        line.

      end_space (:term:`integer`): Length of space to be left free on the
        last line.

    Returns:

      tuple of
        * :term:`unicode string`: MOF string.
        * new line_pos
    """
    assert isinstance(value, six.text_type)

    # Space remaining on the current line after reserving end_space.
    room = maxline - line_pos - end_space
    if line_pos == 0 or len(value) <= room:
        # Fits on the line (or the line is empty, in which case we emit it
        # here regardless of length).
        return value, line_pos + len(value)

    # Start a new indented line; deliberately allowed to exceed maxline.
    return u'\n' + _indent_str(indent) + value, indent + len(value)
964e788a228ac88305fb8d82e7e9b9a4a8cd1a2f
3,645,966
def to_vector(texto, model, idf):
    """ Receives a sentence string along with a word embedding model and
    returns the vector representation of the sentence"""
    words = normalizer(texto).split()  # normalized, whitespace-tokenized words
    sentence_vec = np.zeros(300)  # embedding dimensionality is fixed at 300
    for word in words:
        # A word contributes only when present in both the embedding model
        # and the idf table; each embedding is weighted by its idf score.
        if (word in model) & (word in idf):
            sentence_vec += model[word] * idf[word]
    norm = np.linalg.norm(sentence_vec)
    # Return a unit vector unless the sentence produced a zero vector.
    return sentence_vec / norm if norm > 0 else sentence_vec
24f811110f9b6d9b0fc8a0f6ffcf2d37e1cd6feb
3,645,968
def evaluate_interval_detection(labels, predictions, event_val, def_val, seq_length, other_vals=[]):
  # NOTE(review): mutable default `other_vals=[]` is never mutated here, but a
  # tuple default would be safer.
  """Evaluate interval detection for sequences by calculating tp, fp, and fn.

  Extends the metric outlined by Kyritsis et al. (2019) in Modeling wrist
  micromovements to measure in-meal eating behavior from inertial sensor data
  https://ieeexplore.ieee.org/abstract/document/8606156/
  by introducing additional possible events.

  Args:
    labels: The ground truth [batch_size, seq_length], encoding relevant
      sequences using the vals given in parameters.
    predictions: The predictions [batch_size, seq_length], encoding relevant
      sequences using the vals given in parameters.
    event_val: The value for true events.
    def_val: The default value for non-events.
    other_vals: List or 1-D tensor of vals for other events.
    seq_length: The sequence length.
  Returns:
    tp: True positives (number of true sequences of event_vals predicted with
      at least one predicting event_val) - scalar
    fp_1: False positives type 1 (number of excess predicting event_vals
      matching a true sequence of event_val in excess) - scalar
    fp_2: False positives type 2 (number of predicting event_vals matching
      def_val instead of event_val) - scalar
    fp_3: False positives type 3 (number of predicting event_vals matching
      other_vals instead of event_val) - 1D tensor with value for each
      element in other_vals
    fn: False negatives (number of true sequences of event_vals not matched
      by at least one predicting event_val)
  """
  def sequence_masks(labels, event_val, def_val, batch_size, seq_length):
    """Generate masks [labels, max_seq_count, seq_length] for all event sequences in the labels"""
    # Mask non-event elements as False and event elements as True
    event_mask = tf.equal(labels, event_val)
    # Mask elements that are not equal to previous elements
    diff_mask = tf.not_equal(event_mask[:, 1:], event_mask[:, :-1])
    prev_mask = tf.concat([tf.ones_like(labels[:, :1], tf.bool), diff_mask], axis=1)
    next_mask = tf.concat([diff_mask, tf.ones_like(labels[:, :1], tf.bool)], axis=1)
    # Test if there are no sequences
    empty = tf.equal(tf.reduce_sum(tf.cast(event_mask, tf.int32)), 0)
    # Mask sequence starts and ends
    seq_start_mask = tf.logical_and(prev_mask, event_mask)
    seq_end_mask = tf.logical_and(next_mask, event_mask)
    # Scatter seq_val
    seq_count_per_batch = tf.reduce_sum(tf.cast(seq_start_mask, tf.int32), axis=[1])
    max_seq_count = tf.reduce_max(seq_count_per_batch)
    # Flat indices of the slots that hold an actual sequence (ragged -> flat).
    seq_val_idx_mask = tf.reshape(tf.sequence_mask(seq_count_per_batch, maxlen=max_seq_count), [-1])
    seq_val_idx = tf.boolean_mask(tf.range(tf.size(seq_val_idx_mask)), seq_val_idx_mask)
    seq_vals = tf.boolean_mask(labels, seq_start_mask)
    seq_val = tf.scatter_nd(
      indices=tf.expand_dims(seq_val_idx, axis=1),
      updates=seq_vals,
      shape=tf.shape(seq_val_idx_mask))
    seq_val = tf.reshape(seq_val, [batch_size, max_seq_count])
    # Set elements of seq_val that are not event_val to def_val
    seq_val = tf.where(
      tf.not_equal(seq_val, tf.fill(tf.shape(seq_val), event_val)),
      x=tf.fill(tf.shape(seq_val), def_val),
      y=seq_val)
    # Scatter seq_start
    seq_start_idx = tf.where(seq_start_mask)[:,1]
    seq_start = tf.scatter_nd(
      indices=tf.expand_dims(seq_val_idx, axis=1),
      updates=seq_start_idx,
      shape=tf.shape(seq_val_idx_mask))
    seq_start = tf.reshape(seq_start, [batch_size, max_seq_count])
    # Scatter seq_end
    seq_end_idx = tf.where(seq_end_mask)[:,1]
    seq_end = tf.scatter_nd(
      indices=tf.expand_dims(seq_val_idx, axis=1),
      updates=seq_end_idx,
      shape=tf.shape(seq_val_idx_mask))
    seq_end = tf.reshape(seq_end, [batch_size, max_seq_count])
    def batch_seq_masks(starts, ends, length, vals, def_val):
      """Return seq masks for one batch"""
      def seq_mask(start, end, length, val, def_val):
        """Return one seq mask"""
        # def_val padding before the sequence, val inside, def_val after.
        return tf.concat([
          tf.fill([start], def_val),
          tf.fill([end-start+1], val),
          tf.fill([length-end-1], def_val)], axis=0)
      return tf.map_fn(
        fn=lambda x: seq_mask(x[0], x[1], length, x[2], def_val),
        elems=(starts, ends, vals),
        dtype=tf.int32)
    seq_masks = tf.cond(empty,
      lambda: tf.fill([batch_size, 0, seq_length], def_val),
      lambda: tf.map_fn(
        fn=lambda x: batch_seq_masks(x[0], x[1], seq_length, x[2], def_val),
        elems=(seq_start, seq_end, seq_val),
        dtype=tf.int32))
    return seq_masks, max_seq_count
  labels = tf.cast(labels, dtype=tf.int32)
  predictions = tf.cast(predictions, dtype=tf.int32)
  def_val = tf.cast(def_val, dtype=tf.int32)
  event_val = tf.cast(event_val, dtype=tf.int32)
  # Dimensions (requires a statically known batch size)
  batch_size = labels.get_shape()[0]
  # Compute whether labels are empty (no event_val sequences)
  event_mask = tf.equal(labels, event_val)
  empty = tf.equal(tf.reduce_sum(tf.cast(event_mask, tf.int32)), 0)
  # Derive positive ground truth mask; reshape to [n_gt_seq, seq_length]
  pos_mask, max_seq_count = sequence_masks(labels, event_val=event_val,
    def_val=def_val, batch_size=batch_size, seq_length=seq_length)
  pos_mask = tf.reshape(pos_mask, [-1, seq_length])
  # Mask of default events
  def_mask = tf.equal(labels, def_val)
  # Masks for other events
  other_masks = tf.map_fn(fn=lambda x: tf.equal(labels, x),
    elems=tf.convert_to_tensor(other_vals, dtype=tf.int32), dtype=tf.bool)
  # Retain only event_val in predictions
  predictions = tf.where(
    tf.not_equal(predictions, tf.fill(tf.shape(predictions), event_val)),
    x=tf.fill(tf.shape(predictions), def_val),
    y=predictions)
  # Stack predictions accordingly (one copy per ground-truth sequence slot)
  pred_stacked = tf.reshape(tf.tile(tf.expand_dims(predictions, axis=1), [1, max_seq_count, 1]), [-1, seq_length])
  # Remove empty masks and according preds
  keep_mask = tf.greater(tf.reduce_sum(tf.cast(tf.not_equal(pos_mask, def_val), tf.int32), axis=1), 0)
  pos_mask = tf.cond(empty, lambda: pos_mask, lambda: tf.boolean_mask(pos_mask, keep_mask))
  pred_stacked = tf.cond(empty, lambda: pred_stacked, lambda: tf.boolean_mask(pred_stacked, keep_mask))
  # Calculate number predictions per pos sequence
  # Reduce predictions to elements in pos_mask that equal event_val, then count them
  pred_sums = tf.map_fn(
    fn=lambda x: tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(x[0], tf.equal(x[1], event_val)), event_val), tf.int32)),
    elems=(pred_stacked, pos_mask), dtype=tf.int32)
  # Calculate true positive, false positive and false negative count
  tp = tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 0, lambda: 1, lambda: 0), pred_sums))
  fn = tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 0, lambda: 0, lambda: 1), pred_sums))
  fp_1 = tf.cond(empty, lambda: 0, lambda: tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 1, lambda: count-1, lambda: 0), pred_sums)))
  # False positives of type 2 are any detections on default events
  fp_2 = tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(predictions, def_mask), event_val), tf.int32))
  # False positives of type 3 are any detections on other events
  fp_3 = tf.map_fn(
    fn=lambda x: tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(predictions, x), event_val), tf.int32)),
    elems=other_masks, dtype=tf.int32)
  tp = tf.cast(tp, tf.float32)
  fp_1 = tf.cast(fp_1, tf.float32)
  fp_2 = tf.cast(fp_2, tf.float32)
  fp_3 = tf.cast(fp_3, tf.float32)
  fn = tf.cast(fn, tf.float32)
  return tp, fp_1, fp_2, fp_3, fn
4149eaf357d28236077e9cfac7d7ed8ee113818c
3,645,969
def _div(v):
    """Pure spatial divergence"""
    # Append a zero component so _div_id sees the stacked field, and
    # disable the l1 term entirely.
    padded = np.vstack((v, [np.zeros_like(v[0])]))
    return _div_id(padded, l1_ratio=0.)
706a97e4d5930067b6524210738fce5b27f407c5
3,645,970
def _always_run(*args, **kwargs) -> bool: """ This returns False to indicate that the step is not already completed. """ return False
db31e0ac20ac0eef410fb051928308ce7414f5b6
3,645,972
def generate_urls(search):
    """Build a Google Image search page URL for every term in ``search``."""
    urls = []
    for term in search:
        # URL-encode the term before splicing it between the fixed parts.
        urls.append(BASE_URL + quote(term) + GOOGLE_PICTURE_ID)
    return urls
4d7d13cdf15fb3e029f11bb2e3f28920cf7c2f97
3,645,973
def batch_provider(data, batch_size, processor=None, worker_count=1, queue_size=16, report_progress=True):
    """ Return an object that produces a sequence of batches from input data

    Input data is split into batches of size :attr:`batch_size` which are
    processed with function :attr:`processor`.

    Data is split and processed by separate threads and dumped into a queue
    allowing continuous provision of data. The main purpose of this primitive
    is to provide easy to use tool for parallel batch processing/generation
    in background while main thread runs the main algorithm. Batches are
    processed in parallel, allowing better utilization of CPU cores and disk
    that may improve GPU utilization for DL tasks with Storage/IO bottleneck.

    There are many purposes that function :attr:`processor` can be used for,
    depending on your use case:

        - Reading data from disk or db
        - Data decoding, e.g. from JPEG.
        - Augmenting data, flipping, rotating adding nose, etc.
        - Concatenation of data, stacking to single ndarray, conversion to a
          tensor, uploading to GPU.
        - Data generation.

    Note:
        Sequential order of batches is guaranteed only if number of workers
        is 1 (Default), otherwise batches might be supplied out of order.

    Args:
        data (list): Input data, each entry in the list should be a separate
            data point.
        batch_size (int): Size of a batch. If size of data is not divisible
            by :attr:`batch_size`, then the last batch will have smaller
            size.
        processor (Callable[[list], Any], optional): Function for processing
            batches. Receives slice of the :attr:`data` list as input. Can
            return object of any type. Defaults to None (identity).
        worker_count (int, optional): Number of workers, should be greater
            or equal to one. To process data in parallel and fully load CPU
            :attr:`worker_count` should be close to the number of CPU cores.
            Defaults to one.
        queue_size (int, optional): Maximum size of the queue, which is
            number of batches to buffer. Should be larger than
            :attr:`worker_count`. Typically, one would want this to be as
            large as possible to amortize all disk IO and computational
            costs. Downside of large value is increased RAM consumption.
            Defaults to 16.
        report_progress (bool, optional): Print a progress bar similar to
            `tqdm`. You still may use `tqdm` if you set
            :attr:`report_progress` to False. Defaults to True.

    Returns:
        Iterator: An object that produces a sequence of batches.
        :meth:`next()` method of the iterator will return object that was
        produced by :attr:`processor` function

    Raises:
        StopIteration: When all data was iterated through. Stops the for
        loop.
    """
    class State:
        # Shared, lock-protected bookkeeping for all worker threads.
        def __init__(self):
            self.current_batch = 0  # next batch index to hand out
            self.lock = Lock()
            self.data_len = len(data)
            # Ceiling division: a partial final batch still counts.
            self.batch_count = self.data_len // batch_size + (1 if self.data_len % batch_size != 0 else 0)
            self.quit_event = Event()
            self.queue = Queue(queue_size)
            self.batches_done_count = 0
            self.progress_bar = None
            if report_progress:
                self.progress_bar = ProgressBar(self.batch_count)

        def get_next_batch_it(self):
            # Hand out the next batch index, or StopIteration when exhausted
            # or shutting down. StopIteration is used as a control signal
            # that the worker loop catches.
            try:
                self.lock.acquire()
                if self.quit_event.is_set() or self.current_batch == self.batch_count:
                    raise StopIteration
                cb = self.current_batch
                self.current_batch += 1
                return cb
            finally:
                self.lock.release()

        def push_done_batch(self, batch):
            # NOTE(review): queue.put here can block while the lock is held
            # if the queue is full -- presumably tolerated because consumers
            # drain the queue; confirm if adding more producers.
            try:
                self.lock.acquire()
                self.queue.put(batch)
                self.batches_done_count += 1
            finally:
                self.lock.release()

        def all_done(self):
            return self.batches_done_count == self.batch_count and self.queue.empty()

    if processor is None:
        # Default: identity, the raw data slice is the batch.
        def processor(x):
            return x

    def _worker(state):
        # Each worker repeatedly claims a batch index, processes the
        # corresponding data slice, and enqueues the result.
        while not state.quit_event.is_set():
            try:
                cb = state.get_next_batch_it()
                data_slice = data[cb * batch_size:min((cb + 1) * batch_size, state.data_len)]
                b = processor(data_slice)
                state.push_done_batch(b)
            except StopIteration:
                break

    class Iterator:
        def __init__(self):
            self.state = State()
            self.workers = []
            for i in range(worker_count):
                # Daemon threads: they won't keep the process alive.
                worker = Thread(target=_worker, args=(self.state, ))
                worker.daemon = True
                worker.start()
                self.workers.append(worker)

        def __len__(self):
            return self.state.batch_count

        def __iter__(self):
            return self

        def __next__(self):
            if not self.state.quit_event.is_set() and not self.state.all_done():
                item = self.state.queue.get()
                self.state.queue.task_done()
                if self.state.progress_bar is not None:
                    self.state.progress_bar.increment()
                return item
            else:
                self.state.quit_event.set()
                raise StopIteration

        def __del__(self):
            # Signal workers to quit, drain any unconsumed batches so a
            # blocked put can complete, then join the workers.
            self.state.quit_event.set()
            while not self.state.queue.empty():
                self.state.queue.get(False)
                self.state.queue.task_done()
            for worker in self.workers:
                worker.join()

    return Iterator()
2760e9bc9977f4fcdc07624bf896d6b48ce1276d
3,645,974
import random
# NOTE(review): the `random.split` / `random.randint(key, ...)` usage below
# matches jax.random, not the stdlib random module -- this import is likely
# meant to be `from jax import random`; confirm.


def simplex(key, log_L_constraint, live_points_U,
            loglikelihood_from_constrained, prior_transform, sampler_state, replace_id):
    """
    Samples from the prior restricted to the likelihood constraint.
    This undoes the shrinkage at each step to approximate a bound on the
    contours. First it does a scaling on each dimension.

    Args:
        key: PRNG key for the sampling loop.
        log_L_constraint: log-likelihood threshold the new point must exceed.
        live_points_U: [N, D] live points in the unit cube.
        loglikelihood_from_constrained: callable evaluating log L from a
            constrained-space sample (called with **x_test).
        prior_transform: maps a unit-cube point to constrained space.
        sampler_state: namedtuple-like state carrying `knn_indices`.
        replace_id: row of live_points_U / knn_indices to replace.

    Returns:
        CubesResults namedtuple (key, num_likelihood_evaluations, u_new,
        x_new, log_L_new, sampler_state).
    """
    N, D = live_points_U.shape
    key, width_key = random.split(key, 2)

    def body(state):
        # One rejection-sampling attempt; loops until log_L exceeds the
        # constraint. The `i` slot doubles as the likelihood-evaluation
        # counter in the carried state.
        (key, i, u_test, x_test, log_L_test) = state
        key, sample_key, select_key, R_key = random.split(key, 4)
        i = random.randint(select_key, shape=(), minval=0, maxval=N + 1)
        # M,M
        R = random_ortho_matrix(R_key, D)
        # initial L, R for each direction
        # t_R[i] = max_(k) (points[k,j] - spawn_point_U[j]) @ R[j,i]
        # t_L[i] = max_(k) (points[k,j] - spawn_point_U[j]) @ -R[j,i]
        # N, M
        dx = live_points_U[sampler_state.knn_indices[i, :], :] - live_points_U[i, :]
        # [N, M]
        t = dx @ R
        # [M]
        t_R = jnp.maximum(jnp.max(t, axis=0), 0.)
        t_L = jnp.minimum(jnp.min(t, axis=0), 0.)
        # Uniform draw inside the kNN-spanned box, rotated back and clipped
        # to the unit cube.
        u_test = live_points_U[i, :] + R @ random.uniform(sample_key, shape=[D], minval=t_L, maxval=t_R)
        u_test = jnp.clip(u_test, 0., 1.)
        x_test = prior_transform(u_test)
        log_L_test = loglikelihood_from_constrained(**x_test)
        return (key, i + 1, u_test, x_test, log_L_test)

    (key, num_likelihood_evaluations, u_new, x_new, log_L_new) = while_loop(
        lambda state: state[-1] <= log_L_constraint,
        body,
        (key, 0, live_points_U[0, :], prior_transform(live_points_U[0, :]), log_L_constraint))
    # Refresh the kNN bookkeeping for the replaced slot: distances from the
    # new point to the (updated) live set, self-distance masked out.
    new_dist = jnp.linalg.norm(u_new - dynamic_update_slice(live_points_U, u_new[None, :], [replace_id, 0]), axis=1)
    new_dist = jnp.where(new_dist == 0., jnp.inf, new_dist)
    new_indices = jnp.argsort(new_dist)[:D + 1]
    knn_indices = dynamic_update_slice(sampler_state.knn_indices, new_indices[None, :], [replace_id, 0])
    sampler_state = sampler_state._replace(knn_indices=knn_indices)
    CubesResults = namedtuple('CubesResults', ['key', 'num_likelihood_evaluations', 'u_new', 'x_new', 'log_L_new', 'sampler_state'])
    return CubesResults(key, num_likelihood_evaluations, u_new, x_new, log_L_new, sampler_state)
f88038eca201b87fdc8f4b4722357a4eafd0366e
3,645,975
def has_anonymous_link(node, auth):
    """check if the node is anonymous to the user

    :param Node node: Node which the user wants to visit
    :param str link: any view-only link in the current url
    :return bool anonymous: Whether the node is anonymous to the user or not
    """
    view_only_link = auth.private_link
    return view_only_link.anonymous if view_only_link else False
c5941bce3f0110dfcd5e9bbb19bae0682c5e731f
3,645,976
def lstm(c_prev, x):
    """Long Short-Term Memory units as an activation function.

    This function implements LSTM units with forget gates. Let the previous
    cell state :math:`c_{\\text{prev}}` and the incoming signal :math:`x`.

    First, the incoming signal :math:`x` is split into four arrays
    :math:`a, i, f, o` of the same shapes along the second axis. It means
    that :math:`x` 's second axis must have 4 times the length of
    :math:`c_{\\text{prev}}`.

    The splitted input signals are corresponding to:

        - :math:`a` : sources of cell input
        - :math:`i` : sources of input gate
        - :math:`f` : sources of forget gate
        - :math:`o` : sources of output gate

    Second, it computes outputs as:

    .. math::

        c &= \\tanh(a) \\text{sigmoid}(i)
           + c_{\\text{prev}} \\text{sigmoid}(f), \\\\
        h &= \\tanh(c) \\text{sigmoid}(o).

    These are returned as a tuple of two variables.

    Args:
        c_prev (~chainer.Variable): Variable that holds the previous cell
            state. The cell state should be a zero array or the output of
            the previous call of LSTM.
        x (~chainer.Variable): Variable that holds the incoming signal. It
            must have the second dimension four times of that of the cell
            state.

    Returns:
        tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c``
        is the updated cell state. ``h`` indicates the outgoing signal.

    See the original paper proposing LSTM with forget gates:
    `Long Short-Term Memory in Recurrent Neural Networks
    <http://www.felixgers.de/papers/phd.pdf>`_.

    .. admonition:: Example

        Assuming ``y`` is the current input signal, ``c`` is the previous
        cell state, and ``h`` is the previous output signal from an ``lstm``
        function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
        Most typical preparation of ``x`` is:

        >>> model = FunctionSet(w=F.Linear(n_units, 4 * n_units),
        ...                     v=F.Linear(n_units, 4 * n_units),
        ...                     ...)
        >>> x = model.w(y) + model.v(h)
        >>> c, h = F.lstm(c, x)

        It corresponds to calculate the input sources :math:`a, i, f, o`
        from the current input ``y`` and the previous output ``h``.
        Different parameters are used for different kind of input sources.
    """
    # Thin wrapper: the actual computation lives in the LSTM function
    # object (defined elsewhere in this module).
    return LSTM()(c_prev, x)
795fb92554c04be29a75f770fe0fb88d4224f94a
3,645,977
import torch
import time


def hals(video, video_factorization, maxiter_hals=30, nnt=False,
         verbose=False, indent='', device='cuda', **kwargs):
    """Perform maxiter HALS updates to temporal & spatial components.

    Parameter:
        video: LowRankVideo class object
        video_factorization: localized NMF factors
        maxiter_hals: maximum number of iterations to tune hals
        nnt: whether or not temporal components should be constrained to be
            nonnegative
        verbose: whether or not print status update
        indent: previous identation for printing status update
        device: computation device
        **kwargs: optional additional input arguments

    Return:
        hals iteration counter (number of iterations performed)
    """
    # BUGFIX: guard against maxiter_hals <= 0, which previously made the
    # trailing `return itr + 1` raise NameError (loop variable unset).
    if maxiter_hals <= 0:
        return 0

    def _sync():
        # GPU kernels run asynchronously; sync so wall-clock timings are real.
        if device == 'cuda':
            torch.cuda.synchronize()

    for itr in range(maxiter_hals):
        if verbose:
            _sync()
            print(indent + '|--v HALS Iteration {:g}'.format(itr + 1))
        # BUGFIX: the original called bare `time()`, which fails under
        # `import time`; use the module-qualified call.
        itr_t0 = time.time()
        step_t0 = itr_t0

        # Spatial Update Step
        video_factorization.update_spatial(video)
        if verbose:
            _sync()
            print(indent + '| |--> Spatial update took {:g} seconds'.format(time.time() - step_t0))
        # BUGFIX: was `step_t0 = itr_t0`, which timed every later step from
        # the iteration start instead of from the end of the previous step.
        step_t0 = time.time()

        # Remove Empty Components
        video_factorization.prune_empty_components()
        video_factorization.normalize_spatial()
        if verbose:
            _sync()
            print(indent + '| |--> Component prune after spatial update took {:g} seconds'.format(time.time() - step_t0))
        step_t0 = time.time()

        # Temporal Update Step
        video_factorization.update_temporal(video, nonnegative=nnt)
        if verbose:
            _sync()
            print(indent + '| |--> Temporal update took {:g} seconds'.format(time.time() - step_t0))
            print(indent + '| \'-total : {:g} seconds'.format(time.time() - itr_t0))
        step_t0 = time.time()

        # Remove Empty Components
        video_factorization.prune_empty_components()
        if verbose:
            _sync()
            print(indent + '| |--> Component prune after temporal update took {:g} seconds'.format(time.time() - step_t0))
    return itr + 1
a993f9e434c3196110c0569cb124d9edbb794dec
3,645,978
def generate_random_sd(error, seq=None):
    """ generates random sd with error% error rate
    If seq is specified, random sd is generated from a substring of it.

    :param error: total edit error to introduce, split between small-edit
        (sED) and large-edit portions
    :param seq: optional source sequence; when given, seq1 is a random
        substring of it instead of a freshly generated sequence
    :return: tuple (seq1, seq2, sED) of the base sequence, the mutated
        sequence, and the small-edit distance used
    """
    # IDIOM FIX: `seq == None` -> `seq is None` (identity check for None).
    if seq is None:
        seq1 = randSeq(rand(minLen, maxLen))
    else:
        length = rand(minLen, maxLen)
        start = rand(0, len(seq) - length - 1)
        seq1 = seq[start:start + length]
    # Split the total error budget between small and large edits.
    sED = rand(max(0, error - maxLED), min(maxSED, error))
    seq2 = makeSmall(seq1, sED)[0]
    seq2 = makeLarge(seq2, error - sED)[0]
    return seq1, seq2, sED
34694895cd37e5714666c3f9f80ae5a010310d3c
3,645,979
def is_successful(gsm_log):
    """Report whether a GSM run succeeded.

    Success means the log contains a converged transition-state marker
    ('-XTS-' or '-TS-') on some line.
    """
    with open(gsm_log) as log_file:
        for record in log_file:
            if '-XTS-' in record or '-TS-' in record:
                return True
    return False
9bab6837c8e6b818cceb025c5df9aed78074edcf
3,645,980
def indicator(function_array_to_be_indicated, its_domain, barrier):
    """Apply an indicator on the *argument* (x-domain), not the value.

    Walks the domain in lockstep with the function samples and keeps a
    sample only where its domain point exceeds *barrier*; every other
    position is replaced by 0. Assumes both sequences have equal length.
    """
    return [
        value if point > barrier else 0
        for point, value in zip(its_domain, function_array_to_be_indicated)
    ]
440f423b7b25b0d152bc691acd3d7dea6c785aed
3,645,981
def _causes_name_clash(candidate, path_list, allowed_occurences=1): """Determine if candidate leads to a name clash. Args: candidate (tuple): Tuple with parts of a path. path_list (list): List of pathlib.Paths. allowed_occurences (int): How often a name can occur before we call it a clash. Returns: bool """ duplicate_counter = -allowed_occurences for path in path_list: parts = tuple(reversed(path.parts)) if len(parts) >= len(candidate) and parts[: len(candidate)] == candidate: duplicate_counter += 1 return duplicate_counter > 0
3b874e4ea6d8780483100e464e3325321c82689e
3,645,983
def run_eqm(results: Results, options: Options, state: PromisedObject) -> dict: """Run the eqm jobs.""" # set user-defined valuess results['job_opts_eqm'] = edit_calculator_options( options, ['eqm', 'xtpdft', 'esp2multipole']) cmd_eqm_write = create_promise_command( "xtp_parallel -e eqm -o {} -f {} -s 0 -j write", results['job_opts_eqm']['eqm'], state) results['job_setup_eqm'] = call_xtp_cmd( cmd_eqm_write, options.scratch_dir, expected_output={"eqm_jobs": "eqm.jobs"}) # Select the number of jobs to run based on the input provided by the user results['job_select_eqm_jobs'] = edit_jobs_file( results['job_setup_eqm']['eqm_jobs'], options.eqm_jobs) jobs_eqm = distribute_eqm_jobs(results, options, state) # Finally move all the OR_FILES to the same folder in the scratch_dir names = ('molecule_orb', 'dft_orb', 'mps_file') return move_results_to_workdir(jobs_eqm, names, options.scratch_dir)
941d51a0de22dd4d66f5de68fc315bf318d112cf
3,645,984
def is_within_boundary(boundary_right_most_x, boundary_top_most_y,
                       boundary_left_most_x, boundary_bottom_most_y, cursor):
    """Check whether *cursor* lies inside (or on) the given rectangle.

    Coordinates follow screen convention: top_most_y <= bottom_most_y.

    :param boundary_right_most_x: largest allowed x
    :param boundary_top_most_y: smallest allowed y
    :param boundary_left_most_x: smallest allowed x
    :param boundary_bottom_most_y: largest allowed y
    :param cursor: object exposing ``.x`` and ``.y``
    :return: bool — True when the cursor is within the boundary (inclusive)
    """
    inside_vertically = boundary_top_most_y <= cursor.y <= boundary_bottom_most_y
    inside_horizontally = boundary_left_most_x <= cursor.x <= boundary_right_most_x
    return inside_vertically and inside_horizontally
d53106c9d525eb1bb51cfe4c30bc7e143ac6a517
3,645,985
def save_matchmaking_auth_key(auth_key: str) -> bool:
    """Register a new matchmaking auth key.

    !This will overwrite the existing matchmaking key for this chain!

    Args:
        auth_key: auth_key to add for matchmaking

    Returns:
        Boolean if successful
    """
    # `redis` is a project-level client wrapper (provides set_sync) and
    # MATCHMAKING_KEY_LOCATION a module constant — both defined elsewhere
    # in this package; assumed, not visible here.
    try:
        redis.set_sync(MATCHMAKING_KEY_LOCATION, auth_key)
        return True
    except Exception:
        # Deliberate best-effort contract: any storage failure is reported
        # as False rather than propagated to the caller.
        return False
5f18614f2b2950a942e7a98773911b7f58aabd74
3,645,987
import requests
from bs4 import BeautifulSoup


def get_game_page(url):
    """Fetch and parse the Xbox Store page for a game.

    Args:
        url: URL of the game's page in the Xbox Store.

    Returns:
        BeautifulSoup document for the page, or None when the URL is
        malformed or the request fails at the connection level.
    """
    try:
        response = requests.get(url)
    # requests' ConnectionError does NOT subclass the builtin
    # ConnectionError, so catch the library's own exception type —
    # the original tuple let real network failures propagate.
    except (requests.exceptions.MissingSchema,
            requests.exceptions.ConnectionError):
        return None
    game_page = BeautifulSoup(response.content, "html.parser")
    return game_page
40d578ce8cda0b5139515e03f8308911169e0442
3,645,988
from typing import List
import logging
import itertools
import asyncio


async def generate_spam_round_tx_xdrs(pool, prioritizers: List[Keypair], prioritized_builders, unprioritized_builders, rnd):
    """Generate transaction XDRs for a single spam round (ledger) according to given builders.

    Some of the generated transactions are prioritized using given prioritizer seeds,
    and some are unprioritized and not signed by a prioritizer account.

    All prioritized transactions are expected to be included in the next ledger.
    Only one out of all unprioritized transactions is expected to be included in the next ledger.

    Return a metadata dictionary with the generated XDRs along with additional information,
    keyed by transaction hash; each entry records the round number, whether the
    transaction was prioritized, and its XDR blob.
    """
    logging.info('generating transaction xdrs for round %d', rnd)

    # make a cyclic list of builders.
    # we will use this list to fetch a destination address for each payment tx,
    # making all builders send a tx to the next builder right after them
    # in line in a cyclic manner. this is done in order to cycle through
    # destination addresses instead of sending call txs to a single destination
    # account.
    cycl = itertools.cycle(unprioritized_builders)
    next(cycl)  # make sure the next cycle call will return the next builder after the current one

    # generate unprioritized payment transactions
    # we generate them first, thus will submit them first,
    # because we want to test if prioritized transactions actually get priority over them
    loop = asyncio.get_running_loop()
    futurs = []
    for builder in unprioritized_builders:
        dest_address = next(cycl).keypair.address().decode()
        # build_and_sign is CPU-bound signing work, so it is pushed onto the
        # executor pool; None means "no prioritizer seed" for this tx.
        f = loop.run_in_executor(
            pool, build_and_sign, builder, dest_address, PAYMENT_AMOUNT, None)
        futurs.append(f)

    if not futurs:
        raise RuntimeError('no futures to gather')

    tx_metadata = {}
    for tx_hash, tx_xdr in await asyncio.gather(*futurs):
        tx_metadata[tx_hash] = {'round': rnd, 'prioritized': False, 'xdr': tx_xdr}

    # generate prioritized transactions
    # NOTE(review): unlike the unprioritized path above, this cycle is NOT
    # advanced with next() before the loop, so the first next(cycl) returns
    # the builder itself — each prioritized builder pays its own address.
    # Confirm whether the skip was intentionally omitted here.
    futurs = []
    cycl = itertools.cycle(prioritized_builders)
    for builder, prioritizer in zip(prioritized_builders, prioritizers):
        dest_address = next(cycl).keypair.address().decode()
        f = loop.run_in_executor(
            pool, build_and_sign, builder, dest_address, PAYMENT_AMOUNT, prioritizer.secret_seed)
        futurs.append(f)

    if not futurs:
        raise RuntimeError('no futures to gather')

    for tx_hash, tx_xdr in await asyncio.gather(*futurs):
        tx_metadata[tx_hash] = {'round': rnd, 'prioritized': True, 'xdr': tx_xdr}

    return tx_metadata
7e33455718f6c99ccb9fc1ad6a7c3de47964ec98
3,645,989
import collections.abc
import datetime
from decimal import Decimal


def json_custom_parser(obj):
    """`default=` hook for json.dumps: handle Decimal, iterables and dates.

    - Decimal -> float
    - non-string iterables (sets, generators, tuples, ...) -> list
    - datetime/date -> ISO string truncated at seconds precision
    Anything else raises TypeError, as json.dumps expects.
    """
    if isinstance(obj, Decimal):
        return float(obj)
    # Python 3: `basestring` is gone (use str) and the Iterable ABC moved to
    # collections.abc (the collections alias was removed in 3.10).
    elif not isinstance(obj, str) and isinstance(obj, collections.abc.Iterable):
        return list(obj)
    elif isinstance(obj, (datetime.datetime, datetime.date)):
        dot_ix = 19  # 'YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM'.find('.')
        return obj.isoformat()[:dot_ix]
    else:
        raise TypeError(obj)
fb5d14b4416df4540ed3091dcf229aa7b037003d
3,645,990
def fs_open(path, flag, mode=default_file_mode):
    """
    Open a file, potentially creating it.

    Return the new fd's id or else -1 if file can not be opened
    (or potentially created)
    """
    # Check if file should be created if it doesn't exist
    O_CREAT = 64  # mirrors os.O_CREAT
    # Use the named constant instead of repeating the magic literal 64.
    create = flag & O_CREAT

    # If requested, try to create the file
    if create:
        try:
            filesys.add_file(path, mode, 0)
        except AlreadyExistsError:
            # File may already exist, which is ok with O_CREAT
            pass
        except Exception:
            # Best-effort contract: any other creation failure maps to -1.
            return -1

    # Call the virtual fs to open the file
    try:
        inodeid = filesys.open_file(path)
    except DoesNotExistError:
        return -1

    # Add an fd for this file to the open files state
    return fstate.create_fd(inodeid)
218940a6fc14c47f7a3df6d9a4e1bbc971b6b0b5
3,645,992
def new_user():
    """Build a fresh Users instance (name, surname, email, password)
    for use by this module."""
    return Users(['Daudi', 'Jesee', 'dj@mail.com', 'password'])
4d5b2c4cad858113fceef150143b9688488000f4
3,645,993
import math


def normalise_angle(angle: float) -> float:
    """Wrap *angle* into the half-open interval (-pi, pi].

    args:
        angle (rad): The angle to normalise.

    return:
        angle (rad): The equivalent angle within (-pi, pi].
    """
    full_turn = 2.0 * math.pi
    # Shift down while above the upper bound, up while at/below the lower one;
    # the asymmetric comparisons make the interval open at -pi, closed at pi.
    while angle > math.pi:
        angle -= full_turn
    while angle <= -math.pi:
        angle += full_turn
    return angle
0a4cfa6e9da58bfdbb6cd4a04e7a742e8c432002
3,645,994
def get_hdf_len(*path):
    """Return the number of rows (int) in the 'data' table of an HDF file.

    *path components are joined via construct_path before opening the store.
    """
    hdf_path = construct_path(*path)
    with pd.HDFStore(hdf_path) as store:
        return store.get_storer('data').nrows
ad188b2733612e7ed1950a2df0ef5164f9cda021
3,645,995
def matmul(A, B, transpose_A=False, transpose_B=False, master='/gpu:0'):
    """ distributed matrix multiplication.
    A: DistMat, B: single tensor or a list of tensors.
    Note: returns a single tensor or a list of tensors, Not a DistMat.

    A DistMat is assumed to expose `.tensors` (per-device shards) and
    `.partition` (row offsets of each shard) — defined elsewhere in the
    `distmat` module; confirm against that class.
    `master` is the device on which partial results are reduced (add_n).
    transpose_B is not supported; transpose_A selects which axis of A is
    the distributed (sharded) one.
    """
    # Plain tensor x tensor: fall back to regular tf.matmul.
    if isinstance(A, tf.Tensor) or isinstance(A, tf.Variable):
        if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
            return tf.matmul(A, B)
        else:
            raise NotImplementedError
    if transpose_B:
        raise NotImplementedError
    else:
        if transpose_A:
            # distributed dim is inner axis: A^T B = sum_i A_i^T B_i,
            # so each device computes a partial product that is reduced
            # on `master`.
            if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
                # broadcast: slice B by A's row partition so shard i of A
                # multiplies the matching row block of B.
                partial_sums = []
                for i, t in enumerate(A.tensors):
                    with tf.device(t.device):
                        partial_sums.append(tf.matmul(t, B[A.partition[i]:A.partition[i+1], :], transpose_a=True))
                with tf.device(master):
                    return tf.add_n(partial_sums)
            else:
                # B is itself sharded: pair shards positionally; assumes the
                # two DistMats share the same partitioning (see the
                # commented-out device assertion below).
                partial_sums = []
                for t_A, t_B in zip(A.tensors, B.tensors):
                    #print(t_A.device)
                    #print(t_B.device)
                    #assert t_A.device == t_B.device
                    with tf.device(t_A.device):
                        partial_sums.append(tf.matmul(t_A, t_B, transpose_a=True))
                with tf.device(master):
                    return tf.add_n(partial_sums)
            # distributed computation necessary
            #return tf.add_n([tf.matmul(Apart, Bpart) for Apart, Bpart in zip(A.tensors, B.tensors)])
        else:
            # non-distributed dim is inner axis: merely broadcast B and
            # multiply each shard locally; result stays sharded.
            if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
                slices = []
                for t in A.tensors:
                    with tf.device(t.device):
                        slices.append(tf.matmul(t, B))
                return distmat.DistMat(slices)
            else:
                raise NotImplementedError
268068cd73b56ef747142ebc2df839d124d406d5
3,645,996
def celerybeat_started():
    """
    Returns true/false depending on whether the celerybeat service is
    started or not
    """
    if is_systemd():
        # Compare the stripped status against 'active' exactly: the original
        # substring test ('active' in output) also matched "inactive", so a
        # stopped service was reported as running.
        status = fabric.api.sudo('systemctl is-active %s' % celerybeat_service_name())
        return status.strip() == 'active'
    # Non-systemd hosts: delegate to fabtools' service check.
    return fabtools.service.is_running(celerybeat_service_name())
b3578b6dbe91b9a16342c53c488fe01fc37275cd
3,645,997
def highest_greedy_score(board, disks):
    """
    Compute the highest possible score that can be obtained by dropping
    each of the given disks on the given board in a greedy way.
    - The disks must be dropped in the order in which they appear in the
      given list of disks. Each disk is dropped in the best column as
      computed by the function best_drop_for_disk.
    - Upon exit from the function, the board reflects the state obtained
      from dropping the disks. If not all the given disks can be dropped
      because the board gets completely filled, the function only drops
      the disks it can drop.
    - The function returns a tuple of (1) the highest score followed by
      (2) a tuple of columns in which the successive disks have been dropped.
    - Upon return, the given list of disks only stores disks that have not
      been dropped on the board.
    - The function will not take into account possible raises of level
      while dropping disks, i.e. the resulting score only reflects scores
      obtained from dropping disks as computed by the function drop_disk_at.
    - This function must be implemented in a RECURSIVE way.

    ASSUMPTIONS
    - The given board is a playable board, and each of the given disks
      is a proper disk for the given board.
    - None of the given disks is cracked.
    """
    score = 0
    columns = ()
    # Base case: nothing left to drop, or no room left on the board.
    if len(disks) == 0 or Board.is_full(board):
        # No more disks to drop
        return score, columns
    else:
        disk_to_drop = disks[0]
        # NOTE(review): best_drop_for_disk is presumed to actually drop the
        # disk on the board (the docstring says the board reflects the drops)
        # — confirm against its definition.
        column_best_drop, score_best_drop = best_drop_for_disk(board, disk_to_drop)
        # Consume the dropped disk so the caller's list only keeps the rest.
        del disks[0]
        # Recurse on the remaining disks, then prepend this drop's results.
        score, columns = highest_greedy_score(board, disks)
        columns = (column_best_drop,) + columns
        score += score_best_drop
        return score, columns
22fc70db81d6051158bdb9bf80a42d81a215dba1
3,645,998
def normalize_and_discard(df: pd.DataFrame) -> pd.DataFrame:
    """
    Normalize numeric values between 0 and 1 and discard records that are
    out of bounds (x outside [0, 120], y outside [0, 160/3]).

    Returns a new DataFrame restricted to the tracked columns; the input
    frame is left untouched.
    """
    # ## 2. Discard values out of range of x and y
    # .copy() makes the scaled assignments below act on an independent frame
    # rather than a view of `df` (avoids SettingWithCopyWarning and silent
    # no-ops under pandas copy-on-write).
    df_cleaned = df[(df.x >= 0) & (df.x <= 120) & (df.y >= 0) & (df.y <= (160 / 3))].copy()
    print(f'Shape difference {df.shape[0] - df_cleaned.shape[0]}')

    # ## 3. Normalize x, y , s, a, dis, o, dir on scale 0-1
    # thresholds are determined by examining data from all weeks
    # NOTE(review): x/y are divided by the *original* frame's observed max,
    # not the fixed field bounds (120 and 160/3) — confirm this is intended.
    df_cleaned.x = df_cleaned.x / df.x.max()
    df_cleaned.y = df_cleaned.y / df.y.max()
    df_cleaned.s = df_cleaned.s / SPEED_MAX_THRESHOLD
    df_cleaned.a = df_cleaned.a / ACCELERATION_MAX_THRESHOLD
    df_cleaned.dis = df_cleaned.dis / DISTANCE_MAX_THRESHOLD
    df_cleaned.o = df_cleaned.o / 360
    df_cleaned.dir = df_cleaned.dir / 360

    # Keep only the modeling columns; .copy() again so the in-place
    # divisions below do not target a view.
    df_n2 = df_cleaned[[
        'time', 'x', 'y', 's', 'a', 'dis', 'o', 'dir', 'event', 'frameId',
        'team', 'gameId', 'playId', 'quarter', 'homeHasPossession', 'down',
        'playType', 'defendersInTheBox', 'numberOfPassRushers', 'passResult',
        'isDefensivePI'
    ]].copy()
    df_n2.quarter /= 5.0  # max quarters (incl. overtime)
    df_n2.down /= 4.0  # max downs
    df_n2.defendersInTheBox /= 11.0
    df_n2.numberOfPassRushers /= 11.0
    return df_n2
0b1a1e6ed76c72797cf7b3f65058592c6ec95b03
3,645,999
def gravatar_url(email, size=16):
    """Build the gravatar identicon URL for *email* at the given pixel size.

    The address is canonicalised (stripped, lowercased) before hashing,
    per the gravatar spec.
    """
    digest = md5(email.strip().lower().encode('utf-8')).hexdigest()
    return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % (digest, size)
d3e24e1898d41df791368e7909461135c8118f90
3,646,000
def x2bin(v):
    """
    convert a value into a binary string

    v: int, bytes, bytearray
    bytes, bytearray must be in *big* endian.

    Ints are zero-padded to 8 bits (wider values keep all their bits);
    byte strings are padded to 8 bits per input byte.
    Raises TypeError for any other type (the original fell through to an
    UnboundLocalError).
    """
    if isinstance(v, int):
        bits = bin(v)
        size = 8
    elif isinstance(v, (bytes, bytearray)):
        bits = bin(int.from_bytes(v, "big"))
        size = len(v) * 8
    else:
        raise TypeError(f"unsupported type for x2bin: {type(v).__name__}")
    # bin() prefixes '0b'; strip it, then left-pad with zeros.
    return bits[2:].zfill(size)
fcb4f1ab05b5a3878939c84074e70fc2d5ee6397
3,646,001
import re


def normalize_url(url):
    """Normalize *url* so it can serve as a document id.

    Drops http:// and https:// scheme prefixes wherever they occur and
    turns every '/' into '__'.

    Returns:
        the normalized url string.
    """
    # The patterns contain no regex metacharacters, so plain str.replace
    # is equivalent to the former re.sub calls.
    without_scheme = url.replace('http://', '').replace('https://', '')
    return without_scheme.replace('/', '__')
79197b9fa1c47da601bdb9c34d626d236b649173
3,646,002
import fsspec


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """
    Validates if filesystem has remote protocol.

    Args:
        fs (``fsspec.spec.AbstractFileSystem``): An abstract super-class for
            pythonic file-systems, e.g. :code:`fsspec.filesystem(\'file\')`
            or :class:`datasets.filesystems.S3FileSystem`

    Returns True for any non-None filesystem whose protocol is not the
    local "file" protocol.
    """
    return fs is not None and fs.protocol != "file"
c40f9bb4845bbd1fc1a4cf9fce2c1b366cd22354
3,646,003
def get_element_attribute_or_empty(element, attribute_name):
    """
    Args:
        element (element): The xib's element.
        attribute_name (str): The desired attribute's name.

    Returns:
        The attribute's value, or an empty str if none exists.
    """
    if element.hasAttribute(attribute_name):
        return element.attributes[attribute_name].value
    return ""
dbc7f5c24d321c40b46f1c78950d7cf254719b5c
3,646,004
def Matcher(y_true, y_pred_logits, y_pred_bbox):
    """
    Hungarian matching between predicted queries and ground-truth objects
    (DETR-style).

    y_true: GT list of len batch with each element is an array of shape
        (n_gt_objects, 5) ; n_gt_objects are number of objects in that image
        sample and 5 -> (cx,cy,w,h,class_label) where cordinates are in
        [0,1] range
    y_pred_logits: model output of shape (batch, num_queries, classes)
    y_pred_bbox: model output of shape (batch, num_queries, 4) in [0,1]
        range -> cx,cy,w,h

    Returns: list of per-sample (row_ind, col_ind) index arrays pairing
        query rows with GT columns, as produced by
        scipy.optimize.linear_sum_assignment.
    """
    y_pred_bbox = y_pred_bbox.numpy()
    # NOTE(review): out_loss is never updated or returned — apparent leftover.
    out_loss = 0
    batch = len(y_true)
    b, num_queries, _ = y_pred_logits.shape
    assert b == batch, 'Batch mismatch!!'
    batch_query_indices = []
    # Softmax over class logits so the class term below is a probability.
    y_pred_logits = tf.math.softmax(y_pred_logits).numpy()
    for i in range(batch):
        # Class cost: negative predicted probability of each GT's label,
        # gathered per query -> shape (num_queries, n_gt_objects).
        out_cls_loss = -y_pred_logits[i][:, (y_true[i][:, -1]).astype(int)]
        # Center-distance cost between predicted and GT boxes (cx,cy,w,h).
        out_cdist = distance.cdist(y_pred_bbox[i], y_true[i][:, :4], 'euclidean')
        out_iou = []
        for j in range(len(y_true[i])):
            # tfa giou_loss of all queries against GT j (boxes converted to
            # corner format first).
            giou = tfa.losses.giou_loss(cxcywh_to_xyxy(y_pred_bbox[i]), cxcywh_to_xyxy(y_true[i][j, :4][np.newaxis, :]))
            out_iou.append(giou)
        # NOTE(review): giou_loss already returns 1 - GIoU (a loss); negating
        # it here rewards *high* loss — confirm the intended sign convention.
        out_iou = -np.array(out_iou).transpose(1, 0)
        # Total assignment cost mixes the three terms with equal weight.
        comb_loss = out_cls_loss + out_cdist + out_iou
        row_ind, col_ind = linear_sum_assignment(comb_loss)
        batch_query_indices.append((row_ind, col_ind))
    return batch_query_indices
0918becd40feca73a54ce158a3cb86946cb377ff
3,646,005
import warnings


def _addBindInput(self, name, type = DEFAULT_TYPE_STRING):
    """(Deprecated) Add a BindInput to this shader reference.

    Thin compatibility shim: emits a DeprecationWarning and forwards to
    addInput. `type` intentionally shadows the builtin to preserve the
    public keyword-argument name.
    """
    # stacklevel=2 points the warning at the caller, not this shim.
    warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2)
    return self.addInput(name, type)
79404122d895814f64000876e8f926ecc7d54e3e
3,646,007
import mpmath


def sf(x, c, d, scale):
    """
    Survival function of the Burr type XII distribution.

    S(x) = (1 + (x/scale)**c)**(-d) for x >= 0; the support starts at 0,
    so S(x) = 1 for any negative x.
    """
    _validate_params(c, d, scale)
    with mpmath.extradps(5):
        # Promote every argument to mpf before computing.
        x, c, d, scale = (mpmath.mpf(value) for value in (x, c, d, scale))
        if x < 0:
            # Below the support: survival probability is exactly 1.
            return mpmath.mp.one
        return (1 + (x/scale)**c)**(-d)
2e2649eeb4d32739027eb6ad5a0f3b8c50f7e341
3,646,008
def substitute_word(text):
    """Apply word-level substitutions to make transcripts consistent.

    Currently maps "mister" -> "mr" and "missus" -> "mrs"; every other
    word passes through unchanged. Splitting/joining on single spaces
    preserves the original spacing structure.
    """
    replacements = {"mister": "mr", "missus": "mrs"}
    return " ".join(replacements.get(word, word) for word in text.split(" "))
0709f4223cb06ddfdc5e9704048f418f275429d1
3,646,009
import re

import requests
from bs4 import BeautifulSoup


def google_wiki(keyword, langid='en', js=None):
    """Google query targets, output if English wikipedia entry is found.

    Searches Google for a Wikipedia page about *keyword*; for the first
    matching wikipedia.org result whose title contains the keyword, collects
    (title, registered_domain, wiki_url) triples from the infobox links and
    from the page's external links.

    Returns (targets, sorted_external_domains); ([], []) when no suitable
    Wikipedia page is found.
    """
    # Mutable-default fix: a fresh dict per call instead of a shared one.
    if js is None:
        js = {}
    targets = []
    headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0',}
    googlerx = re.compile('(http[s]?[^\&]*)')  # /url?q=https://fr.wikipedia.org/wiki/La_Banque_postale&sa=U&ei=Zn...
    infoboxrx = re.compile('infobox')
    # query = 'http://www.google.com/search?q=wikipedia%20{}%20{}'.format(langid, keyword)
    query = 'http://www.google.com/search?q=wikipedia%20{}'.format(keyword)
    r = requests.get(query, headers=headers)
    soup = BeautifulSoup(r.content)
    # Kept for its (possible) side effects in extract_keywords; the result
    # only feeds the commented-out phish_tokens experiment below.
    keywords = extract_keywords(js)
    # phish_tokens = set([word for li in keywords for word in li])
    # print(phish_tokens)
    for a in soup.find_all('a'):
        search = googlerx.search(a.get('href', ''))
        if not search:
            continue
        url = search.groups()[0]
        mld, rd = registered_domain(url)
        if rd == 'wikipedia.org' and '#' not in url:
            # if '.wikipedia.org' in url and '#' not in url:
            # if url.startswith('https://{}.wikipedia.org'.format(langid)) and '#' not in url:
            wikiurl = url
            r = requests.get(url)
            wikisoup = BeautifulSoup(r.content)
            # Guard before dereferencing: find() returns None when the page
            # has no firstHeading (the original crashed on title.text).
            title_el = wikisoup.find(id="firstHeading")
            title = title_el.text if title_el is not None else None
            if not title or keyword not in title.lower():
                continue
            print(wikiurl)
            infobox = wikisoup.find(class_=infoboxrx)
            if infobox:
                for anchor in infobox.find_all('a'):
                    if 'href' in anchor.attrs:
                        targeturl = anchor['href']
                        # is the link internal
                        if targeturl.startswith('/'):
                            continue
                        reg_domain = registered_domain(targeturl)[1]
                        if reg_domain:
                            t = (title, reg_domain, wikiurl)
                            print(reg_domain)
                            targets.append(t)
            external_links = wikisoup.find_all('a', class_="external text")
            external_domains = set()
            # find_all already returns the anchors — the original called
            # .find_all('a') on the ResultSet, which raises AttributeError.
            for anchor in external_links:
                if 'href' in anchor.attrs:
                    targeturl = anchor['href']
                    # is the link internal
                    if targeturl.startswith('/'):
                        continue
                    reg_domain = registered_domain(targeturl)[1]
                    if reg_domain:
                        # was `wiki_url` — a NameError; the variable is wikiurl
                        external_domains.add((title, reg_domain, wikiurl))
            return targets, sorted(external_domains)
    # No matching Wikipedia page found: keep the documented 2-tuple shape.
    return targets, []
23a449b0b8825d043d1ca3722cba4c373fcc5c3c
3,646,010
def escape(line, chars):
    """Escapes characters 'chars' with '\\' in 'line'.

    Every character of *line* that appears in *chars* is prefixed with a
    single backslash; all other characters pass through unchanged.
    """
    return u"".join(
        "\\" + ch if ch in chars else ch
        for ch in line
    )
f69409c92eacbbcab4232f7bb0ee244c77a4f219
3,646,012