content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import requests
import pandas as pd


def _getPVGIS(lat, lon):
    """
    This function uses the non-interactive version of PVGIS to extract a tmy
    dataset to be used to predict VRE yields for future periods.

    ------
    inputs
    ------
    Latitude, in decimal degrees, south is negative.
    Longitude, in decimal degrees, west is negative.

    -------
    returns
    -------
    tmy as dataframe with datetime as index, containing 9 timeseries:
    temperature, humidity, global horizontal, beam normal, diffuse horizontal,
    infrared horizontal, wind speed, wind direction and pressure.

    From PVGIS [https://ec.europa.eu/jrc/en/PVGIS/tools/tmy]:
    "A typical meteorological year (TMY) is a set of meteorological data with
    data values for every hour in a year for a given geographical location.
    The data are selected from hourly data in a longer time period (normally
    10 years or more). The TMY is generated in PVGIS following the procedure
    described in ISO 15927-4. The solar radiation database (DB) used is the
    default DB for the given location, either PVGIS-SARAH, PVGIS-NSRDB or
    PVGIS-ERA5. The other meteorological variables are obtained from the
    ERA-Interim reanalysis."
    """
    outputformat = "json"
    request_url = f"https://re.jrc.ec.europa.eu/api/tmy?lat={lat}&lon={lon}&outputformat={outputformat}"
    response = requests.get(request_url)
    if response.status_code != 200:
        raise ValueError("API get request not successful, check your input")
    # store to private df
    df = pd.DataFrame(response.json()['outputs']['tmy_hourly'])
    # send to private function to set the date column as index with parser
    tmy = _tmy_dateparser(df)
    # for dataframe off-line / in-session storage
    tmy['lat'] = lat
    tmy['lon'] = lon
    tmy.columns = ['T', *tmy.columns[1:6].values, 'WS', 'WD', 'SP', 'lat', 'lon']
    return tmy
e4d47cb3efab61bae1e5d38a87c642c687176ed3
3,641,559
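A minimal usage sketch for _getPVGIS above. It is illustrative only: it requires network access to the PVGIS API and the module's _tmy_dateparser helper, and the coordinates (Paris) are made up.

# Hypothetical call; requires network access and _tmy_dateparser.
tmy = _getPVGIS(48.85, 2.35)
print(tmy[['T', 'WS', 'WD', 'SP']].head())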
def get_metric_key_samples(metricDict, metricNames, keyVal="means"):
    """
    Returns a dictionary of samples for the given metric name, but only
    extracts the samples for the given key

    Args:
        metricDict (dict): Dictionary of sampled metrics
        metricNames (list): Names of the keys of the metric to return
        keyVal (str): The value of the key for which data is to be extracted.
            Must be one of {"mins", "maxs", "means", "vars", "sums"}

    Returns:
        Dictionary of samples for the given key
    """
    assert keyVal in ["mins", "maxs", "means", "vars", "sums"]
    retDict = get_metric_samples(metricDict, metricNames)
    for key in retDict:
        retDict[key] = retDict[key][keyVal]
    return retDict
f6b2bb32218654d90404812654623580ab4425df
3,641,560
import requests


def swapi_films(episode):
    """
    Gets the films listed in the api.

    :param episode: episode number to look up
    :return: the requests Response object
    """
    response = requests.get(SWAPI_API + 'films/' + str(episode))
    return response
fab283eeb2c96db1e509d4262fed79f7f4652fca
3,641,562
def prepare_qualifications(request, bids=None, lotId=None):
    """ creates Qualification for each Bid """
    new_qualifications = []
    tender = request.validated["tender"]
    # Use None instead of a mutable default argument; falls back to tender.bids.
    if not bids:
        bids = tender.bids
    if tender.lots:
        active_lots = [lot.id for lot in tender.lots if lot.status == "active"]
        for bid in bids:
            if bid.status not in ["invalid", "deleted"]:
                for lotValue in bid.lotValues:
                    if lotValue.status == "pending" and lotValue.relatedLot in active_lots:
                        if lotId:
                            if lotValue.relatedLot == lotId:
                                qualification = Qualification({"bidID": bid.id, "status": "pending", "lotID": lotId})
                                qualification.date = get_now()
                                tender.qualifications.append(qualification)
                                new_qualifications.append(qualification.id)
                        else:
                            qualification = Qualification(
                                {"bidID": bid.id, "status": "pending", "lotID": lotValue.relatedLot}
                            )
                            qualification.date = get_now()
                            tender.qualifications.append(qualification)
                            new_qualifications.append(qualification.id)
    else:
        for bid in bids:
            if bid.status == "pending":
                qualification = Qualification({"bidID": bid.id, "status": "pending"})
                qualification.date = get_now()
                tender.qualifications.append(qualification)
                new_qualifications.append(qualification.id)
    return new_qualifications
53399716f029d4b7bebc45ddef8e6f39272e33d1
3,641,563
def int_format(x):
    """
    Format an integer:

        - upcast to a (u)int64
        - determine buffer size
        - use snprintf
    """
    x = upcast(x)
    buf = flypy.runtime.obj.core.newbuffer(flypy.types.char, ndigits(x) + 1)
    formatting.sprintf(buf, getformat(x), x)
    return flypy.types.String(buf)
363b4998bca8c45eb6a5a3b825270ce48bbb237e
3,641,564
import re


def pyccparser2cbmc(srcfile, libs):
    """
    Transforms the result of a parsed file from pycparser to a valid cbmc input.
    """
    with open(srcfile, "r") as fd:
        src = fd.read()
    # Replace the definition of __VERIFIER_error with the one for CBMC
    if "extern void __VERIFIER_error();" in src:
        vererr = "extern void __VERIFIER_error() __attribute__ ((__noreturn__));" + '\n'
        src = re.sub(r"extern void __VERIFIER_error\(\);", vererr, src)
    # Remove the stripped lines with the original libs
    if "_____STARTSTRIPPINGFROMHERE_____" in src:
        pos = src.find("typedef int _____STARTSTRIPPINGFROMHERE_____;", 0, len(src))
        libstr = ""
        for lib in reversed(libs):
            libstr += '#include <' + lib + '>' + '\n'
        src = src[:pos] + libstr + '\n' + src[pos:]
        src = strip(src)
    newfile = srcfile + "_cbmc.c"
    with open(newfile, "w") as fd:
        fd.write(src)
    return newfile
499208680da71382d652d655a95c227d29129ee5
3,641,565
import dill
import base64


def check_finished(worker, exec_id):
    """
    Poll a remote worker for the status of a job.

    :param worker: remote worker client exposing a ``status`` call
    :param exec_id: execution id of the job to check
    :return: tuple of (finished, status)
    """
    result = worker.status(exec_id)
    status = dill.loads(base64.b64decode(result.data))
    if status["status"] == "FAILED":
        raise Exception("Remote job execution failed")
    elif status["status"] == "INVALID ID":
        raise Exception("Invalid Id")
    elif status["status"] == "COMPLETED":
        return True, status
    else:
        return False, status
285090fd0fcdfce6964aa43f4af0fae836175ab1
3,641,566
def round_filters(filters, global_params):
    """ Calculate and round number of filters based on depth multiplier. """
    multiplier = global_params.width_coefficient
    if not multiplier:
        return filters
    divisor = global_params.depth_divisor
    min_depth = global_params.min_depth
    filters *= multiplier
    min_depth = min_depth or divisor
    new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # prevent rounding down by more than 10%
        new_filters += divisor
    return int(new_filters)
b39ca8a0b77ae1c134983e20725297fa6bccdac8
3,641,567
def admin_user_detail():
    """Admin user edit detail page"""
    if not g.user.is_admin:
        return redirect('/')
    if request.method == 'GET':
        # Get parameters
        admin_id = request.args.get('admin_id')
        if not admin_id:
            abort(404)
        try:
            admin_id = int(admin_id)
        except Exception as e:
            current_app.logger.error(e)
            return render_template('admin/admin_text_edit.html', data={"errmsg": "Invalid parameters"})
        # Query the user by id
        admin_user_dict = None
        try:
            admin_user_dict = User.query.get(admin_id)
        except Exception as e:
            current_app.logger.error(e)
        if not admin_user_dict:
            return render_template('admin/admin_text_edit.html', data={"errmsg": "No matching record found"})
        # Return data
        data = {
            "admin_user_dict": admin_user_dict.to_dict(),
        }
        return render_template('admin/admin_user_detail.html', data=data)

    # Get POST request parameters
    admin_id = request.form.get("admin_id")
    nick_name = request.form.get("nick_name")
    password = request.form.get("password")
    mobile = request.form.get("mobile")
    signature = request.form.get("signature")
    gender = request.form.get("gender")
    avatar_url = request.files.get("avatar_url")
    # 1.1 Check that the required values are present
    if not all([nick_name, admin_id, mobile, gender]):
        return jsonify(errno=RET.PARAMERR, errmsg="Invalid parameters")
    # Query the user with the given id
    try:
        user = User.query.get(admin_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="Invalid parameters")
    if not user:
        return jsonify(errno=RET.NODATA, errmsg="No matching user found")
    # 1.2 Try to read the image
    if avatar_url:
        try:
            wxcode_image = avatar_url.read()
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.PARAMERR, errmsg="Invalid parameters")
        # 2. Upload the avatar image to Qiniu
        try:
            key = storage(wxcode_image)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.THIRDERR, errmsg="Image upload failed")
        user.avatar_url = constants.QINIU_DOMIN_PREFIX + key
    if password:
        user.password = password
    # 3. Set the remaining fields
    user.nick_name = nick_name
    user.mobile = mobile
    user.signature = signature
    user.gender = gender
    return jsonify(errno=RET.OK, errmsg='OK')
2b8ec2201688d0e5fcc49e77fd1a238413d259e3
3,641,568
def splitBinNum(binNum):
    """Split an alternate block number into latitude and longitude parts.

    Args:
        binNum (int): Alternative block number

    Returns:
        :tuple Tuple:
            1. (int) Latitude portion of the alternate block number.
               Example: ``614123`` => ``614``
            2. (int) Longitude portion of the alternate block number.
               Example: ``614123`` => ``123``
    """
    latBin = int(binNum / 1000)
    longBin = binNum - (latBin * 1000)
    return (latBin, longBin)
da9b9cc67d592e73da842f4b686c0d16985f3457
3,641,569
def load_model_from_params_file(model):
    """
    case 0: CHECKPOINT.CONVERT_MODEL = True: Convert the model
    case 1: CHECKPOINT.RESUME = False and TRAIN.PARAMS_FILE is not none: load params_file
    case 2: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is not none:
        case 2a: if checkpoint exist: use checkpoint
        case 2b: if checkpoint not exist: use params_file
    case 3: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is none:
        case 3a: if checkpoint exist: use checkpoint
        case 3b: if checkpoint not exist: set start_model_iter = 0
    """
    use_checkpoint = cfg.CHECKPOINT.RESUME and find_checkpoint()
    logger.info("Resume training: {}".format(cfg.CHECKPOINT.RESUME))
    if cfg.TRAIN.PARAMS_FILE and cfg.CHECKPOINT.CONVERT_MODEL:
        # After converting the model, it should use an affine layer
        assert cfg.MODEL.USE_AFFINE
        converted_checkpoint = convert_model(cfg.TRAIN.PARAMS_FILE)
        logger.info('Checkpoint model converted')
        cfg.TRAIN.PARAMS_FILE = converted_checkpoint
    if cfg.TRAIN.PARAMS_FILE and not use_checkpoint:
        logger.info('Initializing from pre-trained file...')
        start_model_iter, prev_lr = initialize_params_from_file(
            model=model,
            weights_file=cfg.TRAIN.PARAMS_FILE,
            load_momentum=False,  # We don't load momentum if it is pretrained.
        )
        logger.info('Loaded: start_model_iter: {}; prev_lr: {:.8f}'.format(
            start_model_iter, prev_lr))
        model.current_lr = prev_lr
        # Correct start_model_iter if pretraining uses a different batch size
        # (mainly used for 1-node warmup).
        if cfg.TRAIN.RESUME_FROM_BATCH_SIZE > 0:
            start_model_iter = misc.resume_from(start_model_iter)
        # If we only want the weights.
        if cfg.TRAIN.RESET_START_ITER:
            start_model_iter = 0
    elif use_checkpoint:
        logger.info('Initializing from checkpoints...')
        start_model_iter, prev_lr = initialize_params_from_file(
            model=model, weights_file=get_checkpoint_resume_file())
        logger.info('Loaded: start_model_iter: {}; prev_lr: {:.8f}'.format(
            start_model_iter, prev_lr))
        model.current_lr = prev_lr
    else:
        start_model_iter = 0
        logger.info('No checkpoint found; training from scratch...')
    return start_model_iter
4f7c862829135e8b01038c6c9a540aeb1f55e285
3,641,570
def getPool(pool_type='avg', gmp_lambda=1e3, lse_r=10):
    """
    NOTE: this function is not used in writer_ident, see the constructor of
    ResNet50Encoder

    params
        pool_type: the allowed pool types
        gmp_lambda: the initial regularization parameter for GMP
        lse_r: the initial regularization parameter for LSE
    """
    if pool_type == 'gmp':
        pool_layer = GMP(lamb=gmp_lambda)
    elif pool_type == 'avg':
        pool_layer = nn.AdaptiveAvgPool2d(1)
    elif pool_type == 'max':
        pool_layer = nn.AdaptiveMaxPool2d(1)
    elif pool_type == 'mixed-pool':
        pool_layer = MixedPool(0.5)
    elif pool_type == 'lse':
        pool_layer = LSEPool(lse_r)
    else:
        raise RuntimeError('{} is not a valid pooling'
                           ' strategy.'.format(pool_type))
    return pool_layer
751bd851d57d37f7cf0749ba2183b67d59722c83
3,641,571
import numpy as np
import cv2


def draw_transperency(image, mask, color_f, color_b):
    """
    image (np.uint8)
    mask (np.float32) range from 0 to 1
    """
    mask = mask.round()
    alpha = np.zeros_like(image, dtype=np.uint8)
    alpha[mask == 1, :] = color_f
    alpha[mask == 0, :] = color_b
    image_alpha = cv2.add(image, alpha)
    return image_alpha
900269f7a36a4daa8c87cb2e2b5adc5b9be8728e
3,641,572
def split_in_pairs(s, padding="0"):
    """
    Takes a string and splits into an iterable of strings of two characters
    each. Made to break up a hex string into octets, so default is to pad an
    odd length string with a 0 in front. An alternative character may be
    specified as the second argument.
    """
    if not isinstance(padding, str) or len(padding) != 1:
        raise TypeError("Padding must be a single character.")
    s = padding + s if len(s) % 2 else s
    v = iter(s)
    return (a + b for a, b in zip(v, v))
8807448bb8125c80fa78ba32f887a54ba9bab1dd
3,641,573
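A quick usage sketch for split_in_pairs above; the input strings are illustrative.

# Odd-length input is left-padded with "0" before pairing.
print(list(split_in_pairs("abc")))       # ['0a', 'bc']
print(list(split_in_pairs("deadbeef")))  # ['de', 'ad', 'be', 'ef']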
def make_slicer_query_with_totals_and_references(
    database,
    table,
    joins,
    dimensions,
    metrics,
    operations,
    filters,
    references,
    orders,
    share_dimensions=(),
):
    """
    :param database:
    :param table:
    :param joins:
    :param dimensions:
    :param metrics:
    :param operations:
    :param filters:
    :param references:
    :param orders:
    :param share_dimensions:
    :return:
    """
    """
    The following two loops will run over the spread of the two sets including
    a NULL value in each set:
     - reference group (WoW, MoM, etc.)
     - dimension with roll up/totals enabled (totals dimension)

    This will result in at least one query where the reference group and
    totals dimension is NULL, which shall be called base query.

    The base query will ALWAYS be present, even if there are zero reference
    groups or totals dimensions.

    For a concrete example, check the test case in:
    fireant.tests.queries.test_build_dimensions.QueryBuilderDimensionTotalsTests
    #test_build_query_with_totals_cat_dimension_with_references
    """
    totals_dimensions = find_totals_dimensions(dimensions, share_dimensions)
    totals_dimensions_and_none = [None] + totals_dimensions[::-1]

    reference_groups = find_and_group_references_for_dimensions(dimensions, references)
    reference_groups_and_none = [(None, None)] + list(reference_groups.items())

    queries = []
    for totals_dimension in totals_dimensions_and_none:
        (dimensions_with_totals, filters_with_totals) = adapt_for_totals_query(
            totals_dimension, dimensions, filters
        )
        for reference_parts, references in reference_groups_and_none:
            dimensions_with_ref, metrics_with_ref, filters_with_ref = adapt_for_reference_query(
                reference_parts,
                database,
                dimensions_with_totals,
                metrics,
                filters_with_totals,
                references,
            )
            query = make_slicer_query(
                database,
                table,
                joins,
                dimensions_with_ref,
                metrics_with_ref,
                filters_with_ref,
                orders,
            )
            # Add these to the query instance so when the data frames are
            # joined together, the correct references and totals can be
            # applied when combining the separate result set from each query.
            query._totals = totals_dimension
            query._references = references
            queries.append(query)
    return queries
ea77cf6729cc8b677758801d53338d96e67b167f
3,641,574
import numpy as np
from scipy.stats import pearsonr, spearmanr


def corr_na(array1, array2, corr_method: str = 'spearmanr', **addl_kws):
    """Correlation method that tolerates missing values. Can take pearsonr or spearmanr.

    Args:
        array1: Vector of values
        array2: Vector of values
        corr_method: Which method to use, pearsonr or spearmanr.
        **addl_kws: Additional keyword args to pass to scipy.stats corr methods.

    Returns:
        R and p-value from correlation of 2 vectors.
    """
    corr_funcs = {'pearsonr': pearsonr, 'spearmanr': spearmanr}
    if corr_method not in corr_funcs:
        raise ValueError(
            'Method %s is not a valid correlation method, must be: %s'
            % (corr_method, ','.join(corr_funcs))
        )
    nonull = np.logical_and(not_na(array1), not_na(array2))
    if sum(nonull) > 2:
        # Look up the function directly instead of using eval().
        return corr_funcs[corr_method](array1[nonull], array2[nonull], **addl_kws)
    return np.nan, np.nan
b534898dee50b06488514de5b21d6ea7fcf025f6
3,641,575
def has_global(node, name):
    """ check whether node has name in its globals list """
    return hasattr(node, "globals") and name in node.globals
7a2ef301cb25cba242d8544e2c191a537f63bf19
3,641,577
import tensorflow as tf
from tensorflow.keras import layers


def make_generator_model(input_dim=100) -> tf.keras.Model:
    """Build the Generator model.

    Args:
        input_dim (int, optional): Input dimension. Defaults to 100.

    Returns:
        tf.keras.Model: the Generator model
    """
    dense_size = (7, 7, 256)
    conv2d1_channel = 128
    conv2d2_channel = 64
    conv2d3_channel = 1
    model = tf.keras.Sequential()
    model.add(
        layers.Dense(
            dense_size[0] * dense_size[1] * dense_size[2],
            use_bias=False,
            input_shape=(input_dim,),
        )
    )
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape(dense_size))
    assert model.output_shape == (None, dense_size[0], dense_size[1], dense_size[2])
    _add_conv2d_transpose_layer(
        model,
        conv2d1_channel,
        (5, 5),
        (1, 1),
        (None, dense_size[0], dense_size[1], conv2d1_channel),
    )
    _add_conv2d_transpose_layer(
        model,
        conv2d2_channel,
        (5, 5),
        (2, 2),
        (None, dense_size[0] * 2, dense_size[1] * 2, conv2d2_channel),
    )
    model.add(
        layers.Conv2DTranspose(
            conv2d3_channel,
            (5, 5),
            strides=(2, 2),
            padding="same",
            use_bias=False,
            activation="tanh",
        )
    )
    assert model.output_shape == (
        None,
        dense_size[0] * 4,
        dense_size[1] * 4,
        conv2d3_channel,
    )
    return model
3214afc37153471dae0c599a93cb95def1da8971
3,641,578
from os import environ, makedirs
from os.path import exists, join
from subprocess import call  # was mistakenly imported from unittest.mock


def deploy_gradle(app, deltas={}):
    """Deploy a Java application using Gradle"""
    java_path = join(ENV_ROOT, app)
    build_path = join(APP_ROOT, app, 'build')
    env_file = join(APP_ROOT, app, 'ENV')
    env = {
        'VIRTUAL_ENV': java_path,
        "PATH": ':'.join([join(java_path, "bin"), join(app, ".bin"), environ['PATH']])
    }
    if exists(env_file):
        env.update(parse_settings(env_file, env))
    if not exists(java_path):
        makedirs(java_path)
    if not exists(build_path):
        echo("-----> Building Java Application")
        call('gradle build', cwd=join(APP_ROOT, app), env=env, shell=True)
    else:
        echo("-----> Removing previous builds")
        echo("-----> Rebuilding Java Application")
        call('gradle clean build', cwd=join(APP_ROOT, app), env=env, shell=True)
    return spawn_app(app, deltas)
d1be9ecd675389c05324d4e1f0e077414db814a5
3,641,579
from typing import Optional


def find_badge_by_slug(slug: str) -> Optional[Badge]:
    """Return the badge with that slug, or `None` if not found."""
    badge = db.session \
        .query(DbBadge) \
        .filter_by(slug=slug) \
        .one_or_none()
    if badge is None:
        return None
    return _db_entity_to_badge(badge)
ec4102cf529b247c0b725e7c32d4b9de9c3a1e98
3,641,580
import logging


def validate_color(color, default, color_type):
    """Validate a color against known PIL values.

    Return the validated color if valid; otherwise return a default.

    Keyword arguments:
    color: color to test.
    default: default color string value if color is invalid.
    color_type: string name for color type, used for alerting users of defaults.
    """
    # Use exception handling: if a given color throws an error, return the default.
    try:
        ImageColor.getcolor(color, 'RGB')
        return color
    except ValueError:
        logging.warning('"%s" is not a valid color specifier. Defaulting to "%s" for %s color.',
                        color, default, color_type)
        return default
2a91a9f5db2cbed3d530af12e8c383b65c5e2fa8
3,641,582
from numpy import fft  # assuming NumPy's FFT module is the `fft` used here


def d_xx_yy_tt(psi):
    """Return the second derivatives of the field psi by fft

    Parameters
    --------------
    psi : array of complex64 for the field

    Returns
    --------------
    cxx psi_xx + cyy psi_yy + ctt psi_tt : weighted sum of the second
    derivatives, computed spectrally with the global LAPL operator
    """
    # TODO: remove the dependence on the global LAPL
    return fft.ifft2(LAPL * fft.fft2(psi))
12980ca705f5a1f3f3514d792cfc4e06529d0600
3,641,583
from typing import Iterable


def negate_objective(objective):
    """Take the negative of the given objective (converts a gain into a loss and vice versa)."""
    if isinstance(objective, Iterable):
        return list(map(negate_objective, objective))
    else:
        return -objective
e24877d00b7c84e04c0cb38b5facdba85694890f
3,641,584
from typing import Any
import json


def process_vm_size(file_name: str) -> Any:
    """
    Extract VMs instance specification.

    :file_name (str) File name
    Return VMs specification object
    """
    current_app.logger.info(f'Processing VM Size {file_name}...')
    # Use a context manager so the file handle is closed after loading.
    with open(file_name) as file:
        data = json.load(file)
    return data
7afe372fa82769ac6add9e473bce082f0e268318
3,641,585
def gen_key(password, salt, dkLen=BLOCKSIZE):
    """
    Implement PBKDF2 to make short passwords match the BLOCKSIZE.

    Parameters
    ---------
    password : str
    salt : str
    dkLen : int

    Returns
    -------
    str
    """
    # Pass the dkLen argument through instead of always using BLOCKSIZE.
    return KDF.PBKDF2(password, salt, dkLen=dkLen)
134d6c7b17f2aea869bfb79f72a0126367d44b36
3,641,586
import six
import tensorflow as tf


def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    if isinstance(value, six.string_types):
        value = six.binary_type(value, encoding='utf-8')
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
85bdab9a6445ec224f8e5f54be5b775008582d48
3,641,587
def parse_plot_set(plot_set_string):
    """
    Given one of the string arguments to the --plot-sets option, parse out a
    data structure representing which conditions ought to be compared against
    each other, and what those comparison plots/tables should be called.

    The syntax of a plot set is [title:]condition[,condition[,condition...]].
    The first condition is the comparison baseline, when applicable.

    Returns a tuple of a plot set title, or None if unspecified, and a list
    of condition names.
    """
    colon_pos = plot_set_string.find(':')
    if colon_pos != -1:
        # Pull out the title before the colon
        title = plot_set_string[0:colon_pos]
        # And the rest of the specifier after it
        plot_set_string = plot_set_string[colon_pos + 1:]
    else:
        # No title given
        title = None
    # Return the title and condition list tuple
    return (title, plot_set_string.split(','))
1df83681aa3110dfd9302bd7918f15dfbfa497ab
3,641,588
import datetime as dt
import pandas as pd


def check_types_excel(row: tuple) -> bool:
    """Returns true if row from excel file has correct types"""
    if not isinstance(row[1], (pd.Timestamp, str)):
        return False
    if not ((isinstance(row[2], dt.time) and isinstance(row[3], dt.time)) or
            (isinstance(row[2], str) and isinstance(row[3], str))):
        return False
    if not all(isinstance(x, str) for x in row[4:5]):
        return False
    if not isinstance(row[6], (str, int)):
        return False
    if not isinstance(row[7], (str, int, float)):
        # 3.27, 3.27a and 137 should all be supported
        return False
    return True
80ac33feff968de076bd29f34350bcf518cd34d5
3,641,589
def add(num1, num2):
    """
    Adds two numbers

    >>> add(2,4)
    6
    """
    return num1 + num2
932981ca91c01817242e57e1be55c35441337fc4
3,641,590
def is_palindrome1(s):
    """
    Create slice with negative step and confirm equality with s.
    """
    # Renamed the parameter to avoid shadowing the built-in str.
    return s[::-1] == s
39dbc19d0d73b956c9af24abc1babae18c816d73
3,641,591
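A brief, illustrative check of the palindrome helper above.

print(is_palindrome1("racecar"))  # True
print(is_palindrome1("hello"))    # False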
from datetime import datetime
from uuid import uuid4


def number_generetor(view, form):
    """ Payment number generator (default) """
    if is_py2:
        uuid_fields = uuid4().get_fields()
    else:
        uuid_fields = uuid4().fields
    return u'{:%Y%m%d}-{:08x}'.format(datetime.now(), uuid_fields[0])
005cd8347b903be3adffe56d7c8c53ba79ebf2e8
3,641,592
def get_underlay_info():
    """
    :return: the module-level underlay_info object
    """
    return underlay_info
a48f2ede459a4ca8969e095e94ba09b99e59300d
3,641,593
async def get_guild_roles(id_: int):
    """
    Get the roles of a guild

    :param id_: Guild ID
    :return: List of roles
    """
    guild = await router.bot.rest.fetch_guild(id_)
    if guild is None:
        return status.HTTP_404_NOT_FOUND
    roles = await guild.fetch_roles()
    return [to_dict(role) for role in roles]
4d5084f62f29a5038dc3111b047b1644a96a958a
3,641,594
def prior_min_field(field_name, field_value):
    """
    Creates the prior "min" field from the given field definition.

    :param field_name: prior name (field name initial)
    :param field_value: field initial properties
    :return: name of the min field, updated field properties
    """
    name = field_name
    value = field_value.copy()
    value.update({
        'label': 'Min',
        'required': False,
    })
    return name + '_min', value
9f331ee58e699318e678d881c0028486b746c05c
3,641,595
def checkpoint_save_config():
    """Fixture to create a config for saving attributes of a detector."""
    toolset = {
        "test_id": "Dummy_test",
        "saved_attributes": {
            "FeatureExtraction": [
                "dummy_dict",
                "dummy_list",
                "dummy_tuple",
                "dummy_tensor",
                "dummy_val",
            ],
        },
        "save_attributes": True,
        "attributes": {},
        "save_elementwise": True,
    }
    return toolset
6cb7e05a5eb680f6915fc58f40e72403787eea8b
3,641,596
import numpy as np


def matrix_sum_power(A, T):
    """Take the sum of the powers of a matrix, i.e., sum_{t=1}^T A^t.

    :param A: Matrix to be powered
    :type A: np.ndarray
    :param T: Maximum order for the matrix power
    :type T: int
    :return: Powered matrix
    :rtype: np.ndarray
    """
    At = np.eye(A.shape[0])
    As = np.zeros((A.shape[0], A.shape[0]))
    for _ in range(T):
        At = A @ At
        As += At
    return As
b590f0751c114bd7cfeaa39d3d03a3de49007c62
3,641,597
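A small sanity check for matrix_sum_power above; the 1x1 input is illustrative.

import numpy as np

A = np.array([[2.0]])
print(matrix_sum_power(A, 3))  # [[14.]] since 2 + 4 + 8 = 14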
import numpy as np


def mean_zero_unit_variance(arr, mean_vector=None, std_vector=None, samples_in='row'):
    """
    Normalize input data to have zero mean and unit variance.

    Return the normalized data, the mean, and the calculated standard
    deviation which was used to normalize the data

    [normalized, meanvec, stddev] = mean_zero_unit_variance(data)
    or
    [normalized, meanvec, stddev] = mean_zero(data, mean_vector=provided_mean_vector)
    etc.
    """
    samplesIn = 1 if samples_in == 'col' else 0
    dimsIn = int(not samplesIn)
    nSamples = arr.shape[samplesIn]
    nDims = arr.shape[dimsIn]
    theshape = [1, 1]
    theshape[dimsIn] = nDims
    # Compare against None explicitly: truth-testing a provided ndarray raises.
    if mean_vector is None:
        mean_vector = arr.mean(axis=samplesIn).reshape(theshape)
    if std_vector is None:
        std_vector = arr.std(axis=samplesIn).reshape(theshape)
    # If you have a row with absolutely no information, you will divide by zero. Hence...
    std_vector[std_vector < 1e-6] = 1
    norma = (arr - mean_vector) / std_vector
    return norma, mean_vector, std_vector
38a1ca262362b3f04aed06f3f0d21836eca8d5ad
3,641,598
import torch


def soft_precision(scores: torch.FloatTensor, mask: torch.FloatTensor) -> torch.FloatTensor:
    """
    Helper function for computing soft precision in batch.

    # Parameters

    scores : torch.FloatTensor
        Tensor of scores with shape: (num_refs, num_cands, max_ref_len, max_cand_len)
    mask : torch.FloatTensor
        Mask for the candidate tensor with shape: (num_cands, max_cand_len)
    """
    max_scores, _ = scores.max(dim=-2)
    masked_max_scores = max_scores * mask.unsqueeze(dim=0)
    precision = masked_max_scores.sum(dim=-1) / mask.sum(dim=-1).view(1, -1)
    return precision
e76552bde3ae58f5b976abbf58e5dac1d4995117
3,641,599
from pathlib import Path
import datetime
import numpy as np
import pandas as pd
import scipy.optimize


def fit_sir(times, T_real, gamma, population, store, pathtoloc, tfmt='%Y-%m-%d',
            method_solver='DOP853', verbose=True, b_scale=1):
    """
    Fit the dynamics of the SIR starting from real data contained in `pathtoloc`.
    The initial condition is taken from the real data.

    The method assumes that in the `store` at the indicated path, there are
    entries in the format %Y-%m-%d that describe the infectivity matrices for
    the times `times[:-1]`.

    `population` is the vector with the population per community.

    OUTPUT:
      * Xs
      * ts
      * scales

    For the output the dumping interval is one day.
    """
    # initializations
    nt = len(times)
    t = times[0]
    B = read_df(t, tfmt, store, pathtoloc).to_numpy()
    N = B.shape[0]
    Y_real = np.einsum('ta,a->t', T_real, population) / np.sum(population)
    X = np.zeros((2, N), dtype=np.float_)
    I = T_real[0]
    S = 1 - I
    X = sir_SI_to_X(S, I)
    y = get_sir_omega_X(X, population)
    ts = [t]
    Xs = [X.reshape(2, N)]
    Ys = [y]
    b_scales = []
    blo = 0.
    for i in range(1, nt):
        if verbose:
            print(f'Integrating day {t}')
        mykey = str(Path(pathtoloc) / t.strftime(tfmt))
        if mykey in store.keys():
            B = read_df(t, tfmt, store, pathtoloc).to_numpy()
        elif verbose:
            print("Infectivity matrix not updated!")
        tnew = times[i]
        dt = int((tnew - t).days)
        ypred = Y_real[i]
        # root finding method
        func_root = lambda b: get_sir_omega_X(
            compute_sir_X(X, dt, b * B, gamma, method_solver), population) - ypred
        # initial bracketing
        bhi = b_scale
        fscale = 3.
        for k in range(1, 10):
            f = func_root(bhi)
            if f > 0:
                break
            else:
                bhi *= fscale
        if f < 0:
            raise ValueError("Problem in bracketing!")
        # find the root
        sol = scipy.optimize.root_scalar(func_root, bracket=(blo, bhi), method='brentq',
                                         options={'maxiter': 100})
        if not sol.converged:
            raise ValueError("root finding failed!")
        b_scale = sol.root
        # compute next state with optimal scale
        t_eval = np.arange(dt + 1)
        Xnews = compute_sir_X(X, dt, b_scale * B, gamma, method_solver, t_eval=t_eval)
        Xnew = Xnews[-1]
        y = get_sir_omega_X(Xnew, population)
        print(f"b = {b_scale}, y = {y}, ypred = {ypred}, y-ypred = {y - ypred}")
        # dump (one state per day of the integration window)
        Xs += [Xnew.reshape(2, N) for Xnew in Xnews]
        ts += [t + datetime.timedelta(days=int(dt)) for dt in t_eval[1:]]
        Ys.append(y)
        b_scales.append(b_scale)
        # update
        t = tnew
        X = Xnew
    b_scales.append(None)  # B has ndays-1 entries
    print("Fitting complete")
    # prepare export of results
    S = np.array([X[0] for X in Xs])
    I = np.array([X[1] for X in Xs])
    clusters = np.arange(N, dtype=np.uint)
    df_S = pd.DataFrame(data=S, index=ts, columns=clusters)
    df_I = pd.DataFrame(data=I, index=ts, columns=clusters)
    df_fit = pd.DataFrame(data=np.array([b_scales, Ys]).T, index=times,
                          columns=["scale", "frac_infected_tot"])
    return df_S, df_I, df_fit
7a7da41fc178c805cc334e5a0060a2f9cc5f29d3
3,641,600
from collections import OrderedDict
from typing import Dict

import mysql.connector


def panelist_debuts_by_year(database_connection: mysql.connector.connect) -> Dict:
    """Returns an OrderedDict of show years with a list of panelists' debut
    information"""
    show_years = retrieve_show_years(database_connection)
    panelists = retrieve_panelists_first_shows(database_connection)
    years_debut = OrderedDict()
    for year in show_years:
        years_debut[year] = []
    for panelist in panelists:
        panelist_info = panelists[panelist]
        years_debut[panelist_info["year"]].append(panelist_info)
    return years_debut
40ba0cd67991b7c83b33e77522065b8bb75232c1
3,641,601
def _stirring_conditions_html(stirring: reaction_pb2.StirringConditions) -> str:
    """Generates an HTML-ready description of stirring conditions.

    Args:
        stirring: StirringConditions message.

    Returns:
        String description of the stirring conditions.
    """
    if stirring.type == stirring.NONE:
        return ""
    txt = ""
    if stirring.type != stirring.UNSPECIFIED:
        txt += {
            stirring.CUSTOM: stirring.details,
            stirring.STIR_BAR: "stir bar",
            stirring.OVERHEAD_MIXER: "overhead mixer",
            stirring.AGITATION: "agitation",
        }[stirring.type]
    if stirring.rate.rpm:
        txt += f" ({stirring.rate.rpm} rpm)"
    return txt
0f03c67602163da3b732dfdcb0d367c6a0806c0d
3,641,602
def set_effective_property_value_for_node(
    nodeId: dom.NodeId, propertyName: str, value: str
) -> dict:
    """Find a rule with the given active property for the given node and set
    the new value for this property

    Parameters
    ----------
    nodeId: dom.NodeId
        The element id for which to set property.
    propertyName: str
    value: str
    """
    return {
        "method": "CSS.setEffectivePropertyValueForNode",
        "params": {"nodeId": int(nodeId), "propertyName": propertyName, "value": value},
    }
36cf035bd878ac4c4936cebbacc115273807b892
3,641,605
def classroom_page(request, unique_id):
    """ Classroom Setting Page. """
    classroom = get_object_or_404(Classroom, unique_id=unique_id)
    pending_members = classroom.pending_members.all()
    admins = classroom.special_permissions.all()
    members = admins | classroom.members.all()
    is_admin = classroom.special_permissions.filter(username=request.user.username).exists()
    # classroom update
    if request.method == "POST":
        form = CreateclassForm(request.POST, request.FILES, instance=classroom)
        if form.is_valid():
            form.save()
            return redirect(reverse('subjects', kwargs={'unique_id': classroom.unique_id}))
    else:
        form = CreateclassForm(instance=classroom)
    params = {
        'members': members.distinct(),
        'admins': admins,
        'pending_members': pending_members,
        'classroom': classroom,
        'is_admin': is_admin,
        'form': form,
    }
    return render(request, 'classroom_settings.html', params)
fc37979a44da63fb0dc174799523f3a77fefb1e4
3,641,606
import numpy as np


def concat_hists(hist_array: np.ndarray):
    """Concatenate multiple histograms in an array by adding them up with error prop."""
    # Note: accumulates into the first histogram in place.
    hist_final = hist_array[0]
    for hist in hist_array[1:]:
        hist_final.addhist(hist)
    return hist_final
e659ceb97f38620f561920ddab6339ecb901ee55
3,641,607
def renorm_flux_lightcurve(flux, fluxerr, mu):
    """ Normalise flux light curves with distance modulus. """
    d = 10 ** (mu / 5 + 1)
    dsquared = d ** 2
    norm = 1e18
    fluxout = flux * dsquared / norm
    fluxerrout = fluxerr * dsquared / norm
    return fluxout, fluxerrout
97f2606d54b106d2051983dfc29d942112e7a1e3
3,641,608
import numpy as np


def find_focus(stack):
    """
    Parameters
    ----------
    stack: (nd-array) Image stack of dimension (Z, ...) to find focus

    Returns
    -------
    (int, int) Indices of the maximum- and minimum-focus planes of the stack
    """
    def brenner_gradient(im):
        assert len(im.shape) == 2, 'Input image must be 2D'
        return np.mean((im[:-2, :] - im[2:, :]) ** 2)

    focus_scores = []
    for img in stack:
        focus_score = brenner_gradient(img)
        focus_scores.append(focus_score)
    # argmin/argmax return the first index of the extreme value.
    focus_scores = np.asarray(focus_scores)
    focus_idx_min = int(np.argmin(focus_scores))
    focus_idx_max = int(np.argmax(focus_scores))
    return focus_idx_max, focus_idx_min
234cecb9c43f9427cd8c5d1e9b2ae24c14239835
3,641,610
def get_amr_line(input_f):
    """Read the amr file. AMRs are separated by a blank line."""
    cur_amr = []
    has_content = False
    for line in input_f:
        if line[0] == "(" and len(cur_amr) != 0:
            cur_amr = []
        if line.strip() == "":
            if not has_content:
                continue
            else:
                break
        elif line.strip().startswith("#"):
            # omit the comment in the AMR file
            continue
        else:
            has_content = True
            cur_amr.append(delete_pattern(line.strip(), r'~e\.[0-9]+(,[0-9]+)*'))
    return "".join(cur_amr)
5b0c980a8c68143d8fdeb413185ee445b11cd30b
3,641,611
import netifaces


def getHwAddrForIp(ip):
    """
    Returns the MAC address for the first interface that matches the given IP
    Returns None if not found
    """
    for i in netifaces.interfaces():
        addrs = netifaces.ifaddresses(i)
        try:
            if_mac = addrs[netifaces.AF_LINK][0]['addr']
            if_ip = addrs[netifaces.AF_INET][0]['addr']
        except (IndexError, KeyError):
            # Ignore ifaces that don't have MAC or IP
            # (the old Python 2 `except IndexError, KeyError:` syntax silently
            # bound KeyError as the exception variable).
            if_mac = if_ip = None
        if if_ip == ip:
            return if_mac
    return None
efbeb494ed0a3fb135e87a66a170a94f4ca78231
3,641,612
import numpy as np


def rbf_multiquadric(r, epsilon=1.0, beta=2.5):
    """ multiquadric """
    # Note: beta is currently unused.
    return np.sqrt((epsilon * r) ** 2 + 1.0)
068ab09a609a47e631d91f90634fe4a5810e0fd1
3,641,613
def is_valid_sudoku(board):
    """
    Checks if an input sudoku board is valid

    Algorithm: For all non-empty squares on the board, check whether the value
    at that square already exists in that square's row, column, or minor
    square. If it does, return False.
    """
    cols = [set() for _ in range(9)]
    squares = [[set() for _ in range(3)] for _ in range(3)]
    for row in range(9):
        rows = set()
        for col in range(9):
            if board[row][col] == ".":
                continue
            # Check row
            if board[row][col] in rows:
                return False
            else:
                rows.add(board[row][col])
            # Check col
            if board[row][col] in cols[col]:
                return False
            else:
                cols[col].add(board[row][col])
            # Check square
            if board[row][col] in squares[row // 3][col // 3]:
                return False
            else:
                squares[row // 3][col // 3].add(board[row][col])
    return True
001a02a47acbaa192215d985f3d743c42a9fb42b
3,641,614
def lab_to_nwb_dict(lab_key):
    """
    Generate a dictionary containing all relevant lab and institution info

    :param lab_key: Key specifying one entry in element_lab.lab.Lab
    :return: dictionary with NWB parameters
    """
    lab_info = (lab.Lab & lab_key).fetch1()
    return dict(
        institution=lab_info.get("institution"),
        lab=lab_info.get("lab_name"),
    )
dcde08b3421d56003d23ca19747430c6d95bf431
3,641,615
from typing import Set, TypeVar

# Element type variable; the original mistakenly imported `A` from the
# `re` module (where A is the re.ASCII flag).
A = TypeVar("A")


def length(self: Set[A]) -> int:
    """
    Returns the length (number of elements) of the set. `size` is an alias
    for length.

    Returns:
        The length of the set
    """
    return len(self)
cab214f7b06fc8ae604286cd40d6d558d05b7175
3,641,616
import time


def timestamp(tdigits=8):
    """Return a unique timestamp string for the session.

    useful for ensuring unique function identifiers, etc.
    """
    # time.clock() was removed in Python 3.8; perf_counter is its replacement.
    return str(time.perf_counter()).replace(".", "").replace("-", "")[: tdigits + 1]
b209795f67735ada82238e5fa47f5132efa61384
3,641,617
def is_wrapped_exposed_object(obj):
    """
    Return True if ``obj`` is a Lua (lupa) wrapper for a BaseExposedObject
    instance
    """
    if not hasattr(obj, 'is_object') or not callable(obj.is_object):
        return False
    return bool(obj.is_object())
117a43f9dcc886dc88a77c2ace016b89e43b3c4c
3,641,619
def no_transform(image):
    """Pass through the original image without transformation.

    Returns a tuple with None to maintain compatibility with processes that
    evaluate the transform.
    """
    return (image, None)
25b45a5c77d3c2864ebc7a046e0f47b2fafb067b
3,641,620
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
    """Builds a menu with the given style using the provided buttons

    :return: list of buttons
    """
    menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
    if header_buttons:
        menu.insert(0, [header_buttons])
    if footer_buttons:
        menu.append([footer_buttons])
    return menu
f068ef9222b7e16cf19d901961f0315b2d6aebe3
3,641,621
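An illustrative call of build_menu above, using made-up button labels.

buttons = ['a', 'b', 'c', 'd', 'e']
print(build_menu(buttons, 2))
# [['a', 'b'], ['c', 'd'], ['e']]
print(build_menu(buttons, 2, header_buttons='H'))
# [['H'], ['a', 'b'], ['c', 'd'], ['e']]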
import numpy as np


def SSderivative(ds):
    """
    Given a time-step ds, and a single input time history u, this SS model
    returns the output y=[u, du/ds], where du/ds is computed with second
    order accuracy.
    """
    A = np.array([[0]])
    Bm1 = np.array([0.5 / ds])
    B0 = np.array([[-2 / ds]])
    B1 = np.array([[1.5 / ds]])
    C = np.array([[0], [1]])
    D = np.array([[1], [0]])
    # change state
    Aout, Bout, Cout, Dout = SSconv(A, B0, B1, C, D, Bm1)
    return Aout, Bout, Cout, Dout
c255937fd1f727932d5b09fc70c586e7bdb10bf1
3,641,623
import lxml.html


def clean_post(value):
    """Remove unwanted elements in post content"""
    doc = lxml.html.fragment_fromstring(value)
    # Re-root the fragment as a <div> (e.g. when it was parsed as an <li>)
    doc.tag = 'div'
    doc.attrib.clear()
    # remove comment owner info
    for e in doc.xpath('//div[@class="weblog_keywords"]'):
        e.drop_tree()
    return lxml.html.tostring(doc)
c7670d5632760b577aa7ac9dae24de15bf164c67
3,641,624
def get_houdini_version(as_string=True):
    """
    Returns version of the executed Houdini

    :param as_string: bool, Whether to return the string version or not
    :return: variant, tuple of ints or str
    """
    if as_string:
        return hou.applicationVersionString()
    else:
        return hou.applicationVersion()
efcc18a89552f8dd1c4807be2042b51db2c2fb61
3,641,625
import socket


def check_port_open(port: int) -> bool:
    """
    Check whether the given port is free.

    Part of the port_validation logic.
    """
    try:
        sock = socket.socket()
        sock.bind(("", port))
        sock.close()
        print(f"Port {port} is free")
        return True
    except OSError:
        print(f"Port {port} is in use")
        return False
76ba3ddd03bf1672b8b4ce5fd048561c3a9e78e8
3,641,626
from datetime import datetime, timezone


def convert_date_to_tick_tick_format(datetime_obj, tz: str):
    """
    Parses ISO 8601 Format to Tick Tick Date Format

    It first converts the datetime object to UTC time based off the passed
    time zone, and then returns a string with the TickTick required date
    format.

    !!! info Required Format
        ISO 8601 Format Example: 2020-12-23T01:56:07+00:00

        TickTick Required Format: 2020-12-23T01:56:07+0000 -> Where the last
        colon is removed for timezone

    Arguments:
        datetime_obj (datetime): Datetime object to be parsed.
        tz: Time zone string.

    Returns:
        str: The TickTick accepted date string.

    ??? info "Import Help"
        ```python
        from ticktick.helpers.time_methods import convert_date_to_tick_tick_format
        ```

    ??? example
        ```python
        date = datetime(2022, 12, 31, 14, 30, 45)
        converted_date = convert_date_to_tick_tick_format(date, 'US/Pacific')
        ```

    ??? success "Result"
        The proper format for a date string to be used with TickTick dates.
        ```python
        '2022-12-31T22:30:45+0000'
        ```
    """
    date = convert_local_time_to_utc(datetime_obj, tz)
    # Use the timezone class from the datetime module; the bare `datetime`
    # class has no `timezone` attribute.
    date = date.replace(tzinfo=timezone.utc).isoformat()
    date = date[::-1].replace(":", "", 1)[::-1]
    return date
9f8efc2136b75310649d31328d4359d2030aff97
3,641,627
import tensorflow as tf


def measurement(resp, p):
    """model measurement effects in the filters by translating the response
    at each location and stimulus (first 3 axes of resp) toward the
    filterwise mean (4th axis) according to proportion p. p=1 means that all
    filters reduce to their respective means; p=0 does nothing; p<0 is
    possible but probably not something you want."""
    resp = tf.convert_to_tensor(resp)
    # average the filter dim
    meanresp = tf.reduce_mean(resp, axis=3, keepdims=False)
    # make resp the origin of meanresp and scale by p
    transresp = (meanresp[:, :, :, None] - resp) * p
    return resp + transresp
99d24b3b790c0aa1d2873ca5521144a1e326b661
3,641,628
def irpf(salario, base=12.5, prorrateo=0):
    """Takes the salary and the base rate, plus an optional flag to prorate.
    If no base value is given, the default is 12.5."""
    if type(salario) == float and type(base) == float:
        if prorrateo == True:
            return (salario * (1 + 2 / 12)) * (base / 100)
        elif prorrateo == False:
            return salario * (base / 100)
    else:
        return None
b549e78f2cbd3227cc99d4ce7277a90058696895
3,641,629
def get2p3dSlaterCondonUop(Fdd=(9, 0, 8, 0, 6), Fpp=(20, 0, 8), Fpd=(10, 0, 8),
                           Gpd=(0, 3, 0, 2)):
    """
    Return a 2p-3d U operator containing a sum of different Slater-Condon
    processes.

    Parameters
    ----------
    Fdd : tuple
    Fpp : tuple
    Fpd : tuple
    Gpd : tuple
    """
    # Calculate F_dd^{0,2,4}
    FddOp = getUop(l1=2, l2=2, l3=2, l4=2, R=Fdd)
    # Calculate F_pp^{0,2}
    FppOp = getUop(l1=1, l2=1, l3=1, l4=1, R=Fpp)
    # Calculate F_pd^{0,2}
    FpdOp1 = getUop(l1=1, l2=2, l3=2, l4=1, R=Fpd)
    FpdOp2 = getUop(l1=2, l2=1, l3=1, l4=2, R=Fpd)
    FpdOp = addOps([FpdOp1, FpdOp2])
    # Calculate G_pd^{1,3}
    GpdOp1 = getUop(l1=1, l2=2, l3=1, l4=2, R=Gpd)
    GpdOp2 = getUop(l1=2, l2=1, l3=2, l4=1, R=Gpd)
    GpdOp = addOps([GpdOp1, GpdOp2])
    # Add operators
    uOp = addOps([FddOp, FppOp, FpdOp, GpdOp])
    return uOp
6ae077b1913bf40f93adcdbbbbc882baa9d56eea
3,641,630
from typing import AnyStr
import pickle


def read_meta_fs(filename: AnyStr):
    """ Read meta data from disk. """
    settings.Path(filename).mkdir(parents=True, exist_ok=True)
    filepath = settings.pj(filename, "meta.pkl")
    with open(filepath, "rb") as fh:
        return pickle.load(fh)
8fdf4c74d34c623cd1ac7d15f32f891685f1d863
3,641,631
def compile(model, ptr, vtr, num_y_per_branch=1):
    """Create a list with ground truth, loss functions and loss weights."""
    yholder_tr = []
    losses = []
    loss_weights = []
    num_blocks = int(len(model.output) / (num_y_per_branch + 1))
    printcn(OKBLUE, 'Compiling model with %d outputs per branch and %d branches.'
            % (num_y_per_branch, num_blocks))
    for i in range(num_blocks):
        for j in range(num_y_per_branch):
            yholder_tr.append(ptr)
            losses.append(elasticnet_loss_on_valid_joints)
            loss_weights.append(1.)
        yholder_tr.append(vtr)
        losses.append('binary_crossentropy')
        loss_weights.append(0.01)
    printcn(OKBLUE, 'loss_weights: ' + str(loss_weights))
    model.compile(loss=losses, optimizer=RMSprop(), loss_weights=loss_weights)
    return yholder_tr
24af75f3b5bc6ba06d88f81023c2c7011f1d6922
3,641,632
import html

import bleach


def strip_clean(input_text):
    """Strip out undesired tags.

    This removes tags like <script>, but leaves characters like & unescaped.
    The goal is to store the raw text in the database without the XSS
    nastiness. By doing this, the content in the database is raw and Django
    can continue to assume that it's unsafe by default.
    """
    return html.unescape(bleach.clean(input_text, strip=True))
83e2bd3cb5c2645dd4ea611fd0e0577d118b8326
3,641,633
def setup(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
          draw_probability=DRAW_PROBABILITY, backend=None, env=None):
    """Setups the global environment.

    :param env: the specific :class:`TrueSkill` object to be the global
                environment. It is optional.

    >>> Rating()
    trueskill.Rating(mu=25.000, sigma=8.333)
    >>> setup(mu=50)  #doctest: +ELLIPSIS
    trueskill.TrueSkill(mu=50.000, ...)
    >>> Rating()
    trueskill.Rating(mu=50.000, sigma=8.333)
    """
    if env is None:
        env = TrueSkill(mu, sigma, beta, tau, draw_probability, backend)
    global_env.__trueskill__ = env
    return env
ce797c9994e477bc618f8f52cc63babcc61b78fd
3,641,634
def _bytepad(x, length):
    """Zero pad byte string as defined in NIST SP 800-185"""
    to_pad = _left_encode(length) + x
    # Note: this implementation works with byte aligned strings,
    # hence no additional bit padding is needed at this point.
    npad = (length - len(to_pad) % length) % length
    return to_pad + b'\x00' * npad
b02304fbb0e4bc42a80bc3fdc246c4fc9d55c816
3,641,635
def get_scalefactor(metadata):
    """Add scaling factors to the metadata dictionary

    :param metadata: dictionary with CZI or OME-TIFF metadata
    :type metadata: dict
    :return: dictionary with additional keys for scaling factors
    :rtype: dict
    """
    # set default scale factors to 1
    scalefactors = {'xy': 1.0,
                    'zx': 1.0
                    }
    try:
        # get the factor between XY scaling
        scalefactors['xy'] = metadata['XScale'] / metadata['YScale']
        # get the scale factor between ZX scaling
        scalefactors['zx'] = metadata['ZScale'] / metadata['YScale']
    except KeyError as e:
        print('Key not found: ', e)
    return scalefactors
0619d5fa8f24008ddf4364a965268755c07d09c3
3,641,637
import numpy as np


def alignmentEntropy(align, statistic='absolute', removeGaps=False, k=1, logFunc=np.log):
    """Calculates the entropy in bits of each site (or kmer) in a sequence
    alignment. Also can compute:
        - "uniqueness" which I define to be the fraction of unique sequences
        - "uniquenum" which is the number of unique sequences

    Parameters
    ----------
    align : pd.Series() or list
        Alignment of sequences.
    statistic : str
        Statistic to be computed: absolute, uniqueness
        Uniqueness is the fraction of unique sequences.
        Uniquenum is the number of unique AA at each position.
    removeGaps : bool
        Remove from the alignment at each position, kmers that start with a
        gap character. Also use "non-gapped kmers" (ie skipping gaps)
    k : int
        Length of the kmer to consider at each start position in the
        alignment. (default 1 specifies site-wise entropy)
    logFunc : function
        Default is natural log, returning nats. Can also use log2 for bits.

    Return
    ------
    out : float
        Output statistic."""
    if removeGaps:
        grabKmerFlag = 1
    else:
        grabKmerFlag = 0
    align = padAlignment(align)
    L = len(align[align.index[0]])
    nKmers = L - k + 1
    entropy = np.zeros(nKmers, dtype=float)
    for aai in np.arange(nKmers):
        kmers = [grabKmer(seq, aai, k)[grabKmerFlag] for seq in align]
        # kmers that start with a gap or that are at the end and are of
        # insufficient length, will be None
        kmers = [mer for mer in kmers if mer is not None]
        oh = objhist(kmers)
        if statistic == 'absolute':
            entropy[aai] = oh.entropy()
        elif statistic == 'uniqueness':
            entropy[aai] = oh.uniqueness()
        elif statistic == 'uniquenum':
            entropy[aai] = len(list(oh.keys()))
    return entropy
ea06ae01cd1aa69cfc7dd19c72caafc5478fda38
3,641,638
def NodeToString(xml_node):
    """Returns an XML string.

    Args:
        xml_node: xml.dom.Node object

    Returns:
        String containing XML
    """
    return xml_node.toxml()
043072bbb40f33947febedf967679e3e39931834
3,641,639
def difference(data, interval):
    """
    difference dataset

    parameters:
        data: dataset to be differenced
        interval: the interval between the two elements to be differenced.

    return:
        dataset: with the length = len(data) - interval
    """
    return [data[i] - data[i - interval] for i in range(interval, len(data))]
611f4ad36935000ae7dc16f76aef7cbb494b36ac
3,641,640
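A short, illustrative run of the difference helper above.

series = [1, 3, 6, 10]
print(difference(series, 1))  # [2, 3, 4]
print(difference(series, 2))  # [5, 7]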
def merge_dictionaries(dict1, dict2):
    """
    Merges two dictionaries handling embedded lists and dictionaries.
    In a case of simple type, values from dict1 are preserved.

    Args:
        dict1, dict2: dictionaries to merge

    Return:
        merged dictionaries
    """
    for k2, v2 in dict2.items():
        if k2 not in dict1:
            dict1[k2] = v2
        else:
            if isinstance(v2, list):
                dict1[k2] = merge_lists(dict1[k2], v2)
            elif isinstance(v2, dict):
                dict1[k2] = merge_dictionaries(dict1[k2], v2)
            else:
                # if the type is int or string we do nothing,
                # it's already in dict1
                pass
    return dict1
8d46ce04496be2b5ba0e66788aed1a4e5ec1c85c
3,641,641
import tensorflow as tf


def build(model_def, model_name, optimizer, loss_name, custom_objects=None):
    """build keras model instance in FastEstimator

    Args:
        model_def (function): function definition of tf.keras model or path of model file(h5)
        model_name (str, list, tuple): model name(s)
        optimizer (str, optimizer, list, tuple): optimizer(s)
        loss_name (str, list, tuple): loss name(s)
        custom_objects (dict): dictionary that maps custom object names to
            their classes, used when loading a saved model

    Returns:
        model: model(s) compiled by FastEstimator
    """
    with fe.distribute_strategy.scope() if fe.distribute_strategy else NonContext():
        if isinstance(model_def, str):
            model = tf.keras.models.load_model(model_def, custom_objects=custom_objects)
        else:
            model = model_def()
    model = to_list(model)
    model_name = to_list(model_name)
    optimizer = to_list(optimizer)
    loss_name = to_list(loss_name)
    assert len(model) == len(model_name) == len(optimizer) == len(loss_name)
    for idx, (m, m_n, o, l_n) in enumerate(zip(model, model_name, optimizer, loss_name)):
        model[idx] = _fe_compile(m, m_n, o, l_n)
    if len(model) == 1:
        model = model[0]
    return model
28cf56036b00790cf3e6350cc2741d93dd047e3a
3,641,642
import wave


def check_audio_file(audio_file):
    """
    Check if the audio file contents and format match the needs of the speech
    service. The expected format is 16 bit PCM, mono or stereo, sampled at
    8 kHz or above; all others will be rejected.

    :param audio_file: file to check
    :return: audio duration, if file matches the format expected, otherwise None
    """
    # Verify that all wave files are in the right format
    try:
        with wave.open(audio_file) as my_wave:
            frame_rate = my_wave.getframerate()
            if frame_rate >= 8000 and my_wave.getnchannels() in [1, 2] \
                    and my_wave.getsampwidth() == 2 and my_wave.getcomptype() == 'NONE':
                audio_duration = my_wave.getnframes() / frame_rate
                return audio_duration
            else:
                raise InvalidAudioFormatError(
                    "File {0} is not in the right format, it must be: Mono/Stereo, 16bit, PCM, 8KHz or above. "
                    "Found: ChannelCount={1}, SampleWidth={2}, CompType={3}, FrameRate={4}. Ignoring input!".format(
                        audio_file,
                        my_wave.getnchannels(),
                        my_wave.getsampwidth(),
                        my_wave.getcomptype(),
                        frame_rate
                    )
                )
    except Exception as e:
        raise InvalidAudioFormatError(
            "Invalid wave file {0}, reason: {1} :{2}".format(audio_file, type(e).__name__, e))
a6807cddefa7440b2f1cb11b2b3b309579f372e0
3,641,643
def uniform(name):
    """
    Calls the findUniform function from util.py to return the uniform bounds
    for the given molecule.

    Input: name of molecule
    Output: array of length [2] with the upper and lower bounds for the
    uniform prior
    """
    prior = findUniform(name, 'd_h')
    return prior
e01b8c5056d199a8e0048e148170d5fc4c5c28a1
3,641,644
def merge_two_dicts(x, y):
    """Merges two dicts, returning a new copy."""
    z = x.copy()
    z.update(y)
    return z
9126ada395d9d7f3da5a45b7d46c5b440b5cf23d
3,641,645
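An illustrative call of merge_two_dicts above, showing that values from the second dict win on key conflicts and the inputs are untouched.

x = {'a': 1, 'b': 2}
y = {'b': 3, 'c': 4}
print(merge_two_dicts(x, y))  # {'a': 1, 'b': 3, 'c': 4}
print(x)                      # {'a': 1, 'b': 2}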
def num_utterances(dataset: ds.DatasetSplit):
    """Returns the total number of utterances in the dataset."""
    return sum([len(interaction) for interaction in dataset.examples])
0927b96666f2f409c9fb0ec3c63576632810b6dc
3,641,646
def __virtual__():
    """
    Only return if requests and boto are installed.
    """
    if HAS_LIBS:
        return __virtualname__
    else:
        return False
633ec9294e7585a6d5fc8a1dba2b436a20a4ab7a
3,641,647
def register():
    """Register user"""
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        username = request.form.get("username")
        email = request.form.get("email")
        password = request.form.get("password")
        # Look up the username and email in the database
        rows = db.execute("SELECT * FROM users WHERE username = ?", username)
        email_check = db.execute("SELECT * FROM users WHERE email = ?", email)
        # Check if Username is taken or not
        if len(rows) != 0:
            flash("Username Already Taken!", "danger")
            return redirect("/register")
        # Check if Email is taken or not
        if len(email_check) != 0:
            flash("Email Already Taken!", "danger")
            return redirect("/register")
        # Create a hashed password based on the sha256 hashing function and store it in the database
        hashed_password = generate_password_hash(password, method='pbkdf2:sha256', salt_length=8)
        db.execute("INSERT INTO users(email, username, hash) VALUES(?, ?, ?)", email, username, hashed_password)
        # Redirect user back to login page after registering
        flash("Register Successfully!", "success")
        return redirect("/login")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("register.html")
1c37ad0eac8f6a2230106cfd9e3754d6053956ff
3,641,648
def _build_tmp_access_args(method, ip, ttl, port, direction, comment):
    """
    Builds the cmd args for temporary access/deny opts.
    """
    opt = _get_opt(method)
    args = "{0} {1} {2}".format(opt, ip, ttl)
    if port:
        args += " -p {0}".format(port)
    if direction:
        args += " -d {0}".format(direction)
    if comment:
        args += " #{0}".format(comment)
    return args
17a00e10af84519edb1a5dd8d89be614cb548ea1
3,641,650
def add_two_values(value1, value2):
    """
    Adds two integers

    Arguments:
        value1: first integer value e.g. 10
        value2: second integer value e.g. 2
    """
    return value1 + value2
10f71fcbde9d859f094724c94568eee55a7b989a
3,641,651
import pandas


def combine_nearby_breakends(events, distance=5000):
    """
    1d clustering, prioritizing assembled breakpoint coords
    """
    breakends = []
    positions = get_positions(events)
    for (chrom, orientation), cur_events in positions.groupby(["chrom", "orientation"]):
        cur_events = cur_events.sort_values("pos")
        groups = ((cur_events["pos"] - cur_events["pos"].shift()) > distance).cumsum()
        for i, cur_group in cur_events.groupby(groups):
            if cur_group["assembled"].any():
                cur_combined = cur_group.loc[cur_group["assembled"]].copy()
                cur_combined["assembled"] = True
            else:
                cur_orientations = cur_group["orientation"].unique()
                cur_combined = pandas.DataFrame({"orientation": cur_orientations})
                cur_combined["chrom"] = chrom
                cur_combined["pos"] = int(cur_group["pos"].mean())
                cur_combined["assembled"] = False
            breakends.append(cur_combined)
    return pandas.concat(breakends, ignore_index=True)
dad6867e7dfa406f8785b131fb2c93694fe60f0d
3,641,652
def get_mongo_database(connection, database_name):
    """
    Access the database

    Args:
        connection (MongoClient): Mongo connection to the database
        database_name (str): database to be accessed

    Returns:
        Database: the Database object
    """
    try:
        return connection.get_database(database_name)
    except Exception:
        # Avoid a bare except; return None on any failure as before.
        return None
9299cbe0b697dec2e548fb5e26e2013214007575
3,641,653
from typing import Callable, Dict


def make_mappings() -> Dict[str, Callable[[], None]]:
    """Builds a mapping from sample names to the functions to run."""
    # noinspection PyDictCreation
    m = {}
    extlib.regist_modules(m)
    return m
598decb0b3197b1c64c982354de1fea9fdb3ce3d
3,641,654
def S(state):
    """Stringify state"""
    if state == State.IDLE:
        return "IDLE"
    if state == State.TAKING_OFF:
        return "TAKING_OFF"
    if state == State.HOVERING:
        return "HOVERING"
    if state == State.WAITING_ON_ASSIGNMENT:
        return "WAITING_ON_ASSIGNMENT"
    if state == State.FLYING:
        return "FLYING"
    if state == State.IN_FORMATION:
        return "IN_FORMATION"
    if state == State.GRIDLOCK:
        return "GRIDLOCK"
    if state == State.COMPLETE:
        return "\033[32;1mCOMPLETE\033[0m"
    if state == State.TERMINATE:
        return "\033[31;1mTERMINATE\033[0m"
58c6005dcf8549225c233cc1af486fca9578111d
3,641,655
def trace_get_watched_net(trace, i):
    """
    trace_get_watched_net(Int_trace trace, unsigned int i) -> Int_net

    Parameters
    ----------
    trace: Int_trace
    i: unsigned int
    """
    return _api.trace_get_watched_net(trace, i)
f7140cbfcc27d511b3212ba7adf97f0b6c91582b
3,641,657
from typing import Optional, OrderedDict


def dist_batch_tasks_for_all_layer_mdl_vs_adapted_mdl(
        mdl: nn.Module,
        spt_x: Tensor, spt_y: Tensor, qry_x: Tensor, qry_y: Tensor,
        layer_names: list[str],
        inner_opt: DifferentiableOptimizer,
        fo: bool,
        nb_inner_train_steps: int,
        criterion: nn.Module,
        metric_comparison_type: str = 'pwcca',
        iters: int = 1,
        effective_neuron_type: str = 'filter',
        downsample_method: Optional[str] = None,
        downsample_size: Optional[int] = None,
        subsample_effective_num_data_method: Optional[str] = None,
        subsample_effective_num_data_param: Optional[int] = None,
        metric_as_sim_or_dist: str = 'dist',
        force_cpu: bool = False,
        training: bool = True,
        copy_initial_weights: bool = False,
        track_higher_grads: bool = False
) -> list[OrderedDict[LayerIdentifier, float]]:
    """
    :param mdl:
    :param spt_x: not as a tuple due to having to move them to gpu potentially.
    :param spt_y:
    :param qry_x:
    :param qry_y:
    :param layer_names:
    :param inner_opt:
    :param fo:
    :param nb_inner_train_steps:
    :param criterion:
    :param metric_comparison_type:
    :param iters:
    :param effective_neuron_type:
    :param downsample_method:
    :param downsample_size:
    :param subsample_effective_num_data_method:
    :param subsample_effective_num_data_param:
    :param metric_as_sim_or_dist:
    :param force_cpu:
    :param training:
    :param copy_initial_weights:
    :param track_higher_grads:
    :return:
    """
    # - [B, M, C, H, W] -> [B, L]
    L: int = len(layer_names)
    B: int = spt_x.size(0)
    dists_per_batch_per_layer: list[OrderedDict[LayerIdentifier, float]] = []
    for t in range(B):
        spt_x_t, spt_y_t, qry_x_t, qry_y_t = spt_x[t], spt_y[t], qry_x[t], qry_y[t]
        # - adapt the model on the support set of this task
        adapted_mdl: FuncModel = get_maml_adapted_model_with_higher_one_task(
            mdl, inner_opt, spt_x_t, spt_y_t,
            training, copy_initial_weights, track_higher_grads, fo,
            nb_inner_train_steps, criterion)
        # - [M, C, H, W], [L] -> [L]
        X: Tensor = qry_x_t
        dists_per_layer: OrderedDict[LayerIdentifier, float] = dist_data_set_per_layer(
            mdl1=mdl, mdl2=adapted_mdl,
            X1=X, X2=X,
            layer_names1=layer_names, layer_names2=layer_names,
            metric_comparison_type=metric_comparison_type,
            iters=iters,
            effective_neuron_type=effective_neuron_type,
            downsample_method=downsample_method,
            downsample_size=downsample_size,
            subsample_effective_num_data_method=subsample_effective_num_data_method,
            subsample_effective_num_data_param=subsample_effective_num_data_param,
            metric_as_sim_or_dist=metric_as_sim_or_dist,
            force_cpu=force_cpu,
        )
        assert len(dists_per_layer) == L
        # - appending to [B, L]
        dists_per_batch_per_layer.append(dists_per_layer)
        # del adapted_mdl
        # gc.collect()
    assert len(dists_per_batch_per_layer) == B
    # Invariant due to asserts: [B, L] list
    # - [B, L] distances ready!
    return dists_per_batch_per_layer
72830d75e195b8363936d78a8c249b9f6bbd7125
3,641,658
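A hedged sketch of the tensor layout the function above expects: a meta-batch of B tasks, each with M examples of C x H x W images. The per-task slicing mirrors the loop body; the model adaptation via higher and the distance computation are omitted.

import torch

B, M, C, H, W = 4, 5, 3, 32, 32  # illustrative sizes
spt_x = torch.randn(B, M, C, H, W)
qry_x = torch.randn(B, M, C, H, W)

for t in range(B):
    spt_x_t, qry_x_t = spt_x[t], qry_x[t]  # one task: [M, C, H, W]
    assert spt_x_t.shape == (M, C, H, W)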
from typing import Callable
import numbers


def adjust_payload(tree: FilterableIntervalTree,
                   a_node: FilterableIntervalTreeNode,
                   adjustment_interval: Interval,
                   adjustments: dict,
                   filter_vector_generator: Callable[[dict], int] = None) \
        -> FilterableIntervalTreeNode:
    """
    Adjusts the payload of a node in its tree

    :param tree: tree to be adjusted
    :param a_node: node to adjust
    :param adjustment_interval: the interval for which we would like to see the adjustments made
    :param adjustments: the changes that we want to see made to the node's payload (only works for dictionaries)
    :param filter_vector_generator: a function that returns a filter vector for each payload
    :return: the newly created node covering the adjustment interval
    """
    if filter_vector_generator is None:
        filter_vector_generator = lambda x: a_node.filter_vector
    old_interval = a_node.key
    remaining_intervals = old_interval.remove(adjustment_interval)
    new_payload = a_node.payload.copy()
    relevant_keys = adjustments.keys()
    for key in relevant_keys:
        old_property_value = new_payload.get(key)
        if isinstance(old_property_value, numbers.Number):
            new_payload[key] += adjustments[key]
        else:
            new_payload[key] = adjustments[key]
    filter_vector = filter_vector_generator(new_payload)
    remaining_nodes = \
        [FilterableIntervalTreeNode(_, a_node.payload.copy(), a_node.filter_vector)
         for _ in remaining_intervals]
    new_node = FilterableIntervalTreeNode(adjustment_interval, new_payload, filter_vector)
    result_list = [new_node] + remaining_nodes
    result_list = sorted(result_list, key=lambda node: node.key)
    added_nodes = set()
    first_item = result_list[0]
    last_item = result_list[-1]
    first_payload = first_item.payload
    last_payload = last_item.payload
    pre_node = get_predecessor_for_node(tree, a_node, qualifier=lambda x: x == first_payload)
    post_node = get_successor_for_node(tree, a_node, qualifier=lambda x: x == last_payload)
    delete_node(tree, a_node)
    if pre_node and Interval.touches(pre_node.key, first_item.key) and pre_node.payload == first_item.payload:
        consolidate_nodes(pre_node, first_item, tree)
        added_nodes.add(first_item)
    if post_node and Interval.touches(post_node.key, last_item.key) and post_node.payload == last_item.payload:
        consolidate_nodes(last_item, post_node, tree)
        added_nodes.add(last_item)
    for node in result_list:
        if node not in added_nodes:
            add_node(tree, node)
    return new_node
fa93deede3e7fee950834e5e02bc79bb98e68f03
3,641,659
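A standalone sketch of the payload-merge rule adjust_payload applies: numeric values are incremented, everything else is overwritten. All names here are illustrative.

import numbers

payload = {'count': 3, 'owner': 'alice'}
adjustments = {'count': 2, 'owner': 'bob'}
new_payload = payload.copy()
for key in adjustments:
    old = new_payload.get(key)
    if isinstance(old, numbers.Number):
        new_payload[key] += adjustments[key]
    else:
        new_payload[key] = adjustments[key]
assert new_payload == {'count': 5, 'owner': 'bob'}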
import numpy as np


def get_max(data, **kwargs):
    """
    Assuming the dataset is loaded as type `np.array`, and has
    shape (num_samples, num_features).

    :param data: Provided dataset, assume each row is a data sample and \
    each column is one feature.
    :type data: `np.ndarray`
    :param kwargs: Dictionary of differential privacy arguments \
    for computing the maximum value of each feature across all samples, \
    e.g., epsilon and delta, etc.
    :type kwargs: `dict`
    :return: A vector of shape (num_features,) storing the maximum value \
    of each feature across all samples.
    :rtype: `np.array` of `float`
    """
    try:
        max_vec = np.max(data, axis=0)
    except Exception as ex:
        raise FLException('Error occurred when calculating '
                          'the maximum value. ' + str(ex))
    return max_vec
03697d2a2bc6afe3c1d576bd9f8766c97e86626d
3,641,661
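A quick check of get_max on a toy dataset; the FLException stand-in is only there so the snippet runs on its own.

import numpy as np

class FLException(Exception):
    pass

data = np.array([[1.0, 9.0], [4.0, 2.0], [3.0, 7.0]])
assert np.array_equal(get_max(data), np.array([4.0, 9.0]))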
def find_u_from_v(matrix, v, singular_value):
    """
    Finds the u column vector of the U matrix in the SVD UΣV^T.

    Parameters
    ----------
    matrix : numpy.ndarray
        Matrix for which the SVD is calculated
    v : numpy.ndarray
        A column vector of V matrix, it is the eigenvector of the Gramian of `matrix`.
    singular_value : float
        A singular value of `matrix` corresponding to the `v` vector.

    Returns
    -------
    numpy.ndarray
        u column vector of the U matrix in the SVD.
    """
    return matrix @ v / singular_value
ef2871c86bf7ddc4c42446a54230068282ad85df
3,641,662
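A sanity check against numpy's own SVD: recovering the first left singular vector from the first right singular vector and singular value, since A v = σ u.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 3))
U, s, Vt = np.linalg.svd(A, full_matrices=False)
u0 = find_u_from_v(A, Vt[0], s[0])
assert np.allclose(u0, U[:, 0])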
import numpy as np
import torch


def transform(dataset, perm_idx, model, view):
    """
    for view1 utterance, simply encode using view1 encoder
    for view 2 utterances:
        - encode each utterance, using view 1 encoder, to get utterance embeddings
        - take average of utterance embeddings to form view 2 embedding
    """
    model.eval()
    latent_zs, golds = [], []
    n_batch = (len(perm_idx) + BATCH_SIZE - 1) // BATCH_SIZE
    for i in range(n_batch):
        indices = perm_idx[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
        v1_batch, v2_batch = list(zip(*[dataset[idx][0] for idx in indices]))
        golds += [dataset[idx][1] for idx in indices]
        if view == 'v1':
            latent_z = model(v1_batch, encoder='v1')
        elif view == 'v2':
            latent_z_l = [model(conv, encoder='v1').mean(dim=0) for conv in v2_batch]
            latent_z = torch.stack(latent_z_l)
        latent_zs.append(latent_z.cpu().data.numpy())
    latent_zs = np.concatenate(latent_zs)
    return latent_zs, golds
484adb7d53f80366b591ef45551b245dce00acca
3,641,663
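The ceiling-division batching used above, shown in isolation; the BATCH_SIZE value here is illustrative.

BATCH_SIZE = 4
perm_idx = list(range(10))
n_batch = (len(perm_idx) + BATCH_SIZE - 1) // BATCH_SIZE
batches = [perm_idx[i*BATCH_SIZE:(i+1)*BATCH_SIZE] for i in range(n_batch)]
assert n_batch == 3 and batches[-1] == [8, 9]  # last batch holds the remainder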
from typing import List


def double(items: List[str]) -> List[str]:
    """
    Returns a new list that is the input list, repeated twice.
    """
    return items + items
9e4b6b9e84a80a9f5cbd512ca820274bb8cad924
3,641,664
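Usage is straightforward; the input list is never mutated.

assert double(['a', 'b']) == ['a', 'b', 'a', 'b']
assert double([]) == []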
def system_from_problem(problem: Problem) -> System:
    """Extracts the "system" part of a problem.

    Args:
        problem: Problem description

    Returns:
        A :class:`System` object containing a copy of the relevant parts
        of the problem.
    """
    return System(
        id=problem.id,
        name=problem.name,
        apps=tuple(w.app for w in problem.workloads),
        instance_classes=problem.instance_classes,
        performances=problem.performances,
    )
42c0db09d00043ba61ae164bb58a0ecb48599027
3,641,665
def get_service_endpoints(ksc, service_type, region_name):
    """Get endpoints for a given service type from the Keystone catalog.

    :param ksc: An instance of a Keystone client.
    :type ksc: :class:`keystoneclient.v3.client.Client`
    :param str service_type: An endpoint service type to use.
    :param str region_name: A name of the region to retrieve endpoints for.
    :raises: :class:`keystone_exceptions.EndpointNotFound`
    """
    try:
        catalog = {
            endpoint_type: ksc.service_catalog.url_for(
                service_type=service_type,
                endpoint_type=endpoint_type,
                region_name=region_name)
            for endpoint_type in ['publicURL', 'internalURL', 'adminURL']}
    except keystone_exceptions.EndpointNotFound:
        # EndpointNotFound is raised when a service does not exist, as well
        # as when the service exists but has no endpoints.
        log.error('could not retrieve any {} endpoints'.format(service_type))
        raise
    return catalog
c962ad44e4d73a102f9c09803f94c68cee2aeb51
3,641,666
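A hedged usage sketch: this needs a reachable Keystone endpoint, and the URL and credentials below are placeholders.

from keystoneclient.v3 import client

ksc = client.Client(auth_url='http://keystone:5000/v3',
                    username='admin', password='secret',
                    project_name='admin',
                    user_domain_name='Default',
                    project_domain_name='Default')
endpoints = get_service_endpoints(ksc, 'compute', 'RegionOne')
# endpoints maps 'publicURL' / 'internalURL' / 'adminURL' to URL strings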
def get_task_for_node(node_id):
    """ Get a new task or previously assigned task for node """
    # get ACTIVE task that was previously assigned to this node
    query = Task.query.filter_by(node_id=node_id).filter_by(status=TaskStatus.ACTIVE)
    task = query.first()
    if task:
        return task
    node = Node.query.filter_by(id=node_id).one()
    return _assign_task(node)
5a01869f40f5c0840dfdc2ed1e3417c694f51aca
3,641,667
def cik_list():
    """Get CIK list and use it as a fixture."""
    return UsStockList()
ec845471860dcf4ce9dcf0e82e2effda21bcbf0b
3,641,670
def get_eval_config(hidden_dim,
                    max_input_length=None,
                    num_input_timesteps=None,
                    model_temporal_relations=True,
                    node_position_dim=1,
                    num_input_propagation_steps=None,
                    token_vocab_size=None,
                    node_text_pad_token_id=None,
                    num_transformer_attention_heads=None,
                    num_edge_types=None,
                    num_time_edge_types=None,
                    use_relational_bias=False,
                    max_output_length=None,
                    type_vocab_size=None,
                    output_vocab_size=None,
                    num_output_propagation_steps=None,
                    use_pointer_candidate_masking=False,
                    jax2tf_compatible=None,
                    dropout_rate: float = 0.1):
    """Returns a model config for evaluating, which disables drop-out."""
    return create_model_config(
        is_training=False,
        hidden_dim=hidden_dim,
        max_input_length=max_input_length,
        num_input_timesteps=num_input_timesteps,
        model_temporal_relations=model_temporal_relations,
        node_position_dim=node_position_dim,
        num_input_propagation_steps=num_input_propagation_steps,
        token_vocab_size=token_vocab_size,
        node_text_pad_token_id=node_text_pad_token_id,
        dropout_rate=dropout_rate,
        num_transformer_attention_heads=num_transformer_attention_heads,
        num_edge_types=num_edge_types,
        num_time_edge_types=num_time_edge_types,
        use_relational_bias=use_relational_bias,
        max_output_length=max_output_length,
        type_vocab_size=type_vocab_size,
        output_vocab_size=output_vocab_size,
        num_output_propagation_steps=num_output_propagation_steps,
        use_pointer_candidate_masking=use_pointer_candidate_masking,
        jax2tf_compatible=jax2tf_compatible)
90ff743a372a2db3eb52927bf8c6d996a11137cb
3,641,671
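A hedged call sketch; this relies on create_model_config from the same module, and the argument values below are illustrative only.

eval_config = get_eval_config(
    hidden_dim=256,
    max_input_length=512,
    token_vocab_size=32000,
    num_transformer_attention_heads=8)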
from json import dumps

from flask import request


def classNew(u_id):
    """ Allow an ADMIN to create a new class (ADMIN ONLY)

    Returns: an empty JSON object
    """
    myDb, myCursor = dbConnect()
    data = request.get_json()
    createNewClass(myCursor, myDb, data)
    dbDisconnect(myCursor, myDb)
    return dumps({})
29532ea5c979b725b46c1dd775c1f093006b1a43
3,641,672
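A hedged client-side sketch for exercising classNew; the host, route path, and payload shape are assumptions, since the Flask route decorator and the schema expected by createNewClass are not shown in the snippet.

import requests

resp = requests.post('http://localhost:5000/admin/1234/class/new',
                     json={'name': 'Biology 101'})
print(resp.json())  # expected: {}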