content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import json


def deliver_hybrid():
    """Endpoint for submissions intended for dap and legacy systems.

    POST request requires the submission JSON to be uploaded as
    "submission", the zipped transformed artifact as "transformed",
    and the filename passed in the query parameters.
    """
    logger.info('Processing Hybrid submission')
    meta = MetaWrapper(request.args.get("filename"))
    uploaded = request.files
    raw_submission = uploaded[SUBMISSION_FILE].read()
    transformed_bytes = uploaded[TRANSFORMED_FILE].read()
    meta.set_legacy(json.loads(raw_submission.decode()), transformed_bytes)
    return process(meta, transformed_bytes)
87bb05f376c1791668bd5e160cc5940377363f64
3,644,125
def midi_to_chroma(pitch):
    """Map a MIDI pitch (e.g. 60 == C) to its chroma class value.

    Chroma classes are indexed from A: A == 0, A# == 1, ..., G# == 11.
    """
    pitch_class = pitch % 12  # 0 corresponds to C in MIDI convention
    # Shift by 3 semitones so that A lands on chroma class 0.
    return (pitch_class + 3) % 12
25ef72f78269c3f494ca7431f1291891ddea594a
3,644,127
import re def _snippet_items(snippet): """Return all markdown items in the snippet text. For this we expect it the snippet to contain *nothing* but a markdown list. We do not support "indented" list style, only one item per linebreak. Raises SyntaxError if snippet not in proper format (e.g. contains anything other than a markdown list). """ unformatted = snippet.text and snippet.text.strip() # treat null text value as empty list if not unformatted: return [] # parse out all markdown list items items = re.findall(r'^[-*+] +(.*)$', unformatted, re.MULTILINE) # if there were any lines that didn't yield an item, assume there was # something we didn't parse. since we never want to lose existing data # for a user, this is an error condition. if len(items) < len(unformatted.splitlines()): raise SyntaxError('unparsed lines in user snippet: %s' % unformatted) return items
bdeb5b5c5e97ef3a8082b7131d46990de02a59af
3,644,128
def get_collection(*args, **kwargs):
    """Return the event collection schema.

    :param event_collection: string, the event collection from which the
        schema is to be returned; if left blank the schema for all
        collections is returned
    """
    # Lazily build the module-level client before delegating to it.
    _initialize_client_from_environment()
    return _client.get_collection(*args, **kwargs)
95698a5c750b2d40caad0f0ddfe9e17a8354be03
3,644,129
def get_tf_generator(data_source: extr.PymiaDatasource):
    """Wrap a :class:`.PymiaDatasource` as a generator for the tensorflow data handling.

    The returned generator can be used with `tf.data.Dataset.from_generator
    <https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_generator>`_
    in order to build a tensorflow dataset.

    Args:
        data_source (.PymiaDatasource): the datasource to be wrapped.

    Returns:
        generator: Function that loops over the entire datasource and yields all entries.
    """
    def generator():
        # Walk the datasource front to back, yielding one entry at a time.
        index = 0
        total = len(data_source)
        while index < total:
            yield data_source[index]
            index += 1

    return generator
2b786b111c2e2b17c3ee2887f93aff02de63f369
3,644,130
def is_mechanical_ventilation_heat_recovery_active(bpr, tsd, t):
    """
    Control of activity of heat exchanger of mechanical ventilation system

    Author: Gabriel Happle
    Date: APR 2017

    :param bpr: Building Properties
    :type bpr: BuildingPropertiesRow
    :param tsd: Time series data of building
    :type tsd: dict
    :param t: time step / hour of the year
    :type t: int
    :return: Heat exchanger ON/OFF status
    :rtype: bool
    """

    # Heating season: heat recovery runs whenever mechanical ventilation
    # with a heat exchanger runs.
    if is_mechanical_ventilation_active(bpr, tsd, t)\
            and has_mechanical_ventilation_heat_recovery(bpr)\
            and control_heating_cooling_systems.is_heating_season(t, bpr):
        # heat recovery is always active if mechanical ventilation is active (no intelligent by pass)
        # this is the usual system configuration according to Clayton Miller
        return True
    # Cooling season: heat recovery only when the indoor air of the
    # previous time step is colder than the current outdoor air
    # (recovering "coolth" from exhaust air).
    elif is_mechanical_ventilation_active(bpr, tsd, t)\
            and has_mechanical_ventilation_heat_recovery(bpr)\
            and control_heating_cooling_systems.is_cooling_season(t, bpr)\
            and tsd['T_int'][t-1] < tsd['T_ext'][t]:
        return True
    elif is_mechanical_ventilation_active(bpr, tsd, t) \
            and control_heating_cooling_systems.is_cooling_season(t, bpr) \
            and tsd['T_int'][t-1] >= tsd['T_ext'][t]:
        # heat recovery is deactivated in the cooling case,
        # if outdoor air conditions are colder than indoor (free cooling)
        return False
    else:
        # Any other configuration (ventilation off, no heat exchanger, ...)
        return False
626e24da9f0676be27e15a4422676034a94e1702
3,644,131
import aiohttp


async def fetch_user(user_id):
    """Asynchronously retrieve a user from the API by their ID.

    :param user_id: identifier interpolated into
        ``{MAIN_URL}/api/user/{user_id}``
    :return: the decoded JSON payload (as its Python equivalent) on
        HTTP 200, otherwise the raw error body returned by the server.
    """
    # Fix: the previous code closed the session *before* consuming the
    # response body; in aiohttp the connection is released on close, so
    # ``await res.json()`` / ``res.content`` after ``session.close()``
    # is unreliable.  Context managers guarantee the body is read first
    # and the session is closed even when an exception is raised.
    async with aiohttp.ClientSession() as session:
        async with session.get(
                url=f'{MAIN_URL}/api/user/{user_id}',
                headers=headers) as res:
            # Reminder: 2XX is a success.
            if res.status != 200:
                # Read the error body while the connection is still open.
                return await res.read()
            # Successful: return the JSON data as a Python object.
            return await res.json()
725c4f7f89efc242948799c48541a25a2bd17d8c
3,644,132
from typing import List

import requests
from bs4 import BeautifulSoup


def category(category: str) -> List[str]:
    """Get the list of emojis in the given category.

    :param category: category slug appended to the emojipedia URL
    :return: the emoji symbols found on the page, or an empty list if
        the page contains no parseable emoji list
    """
    emoji_url = f"https://emojipedia.org/{category}"
    page = requests.get(emoji_url)
    soup = BeautifulSoup(page.content, 'lxml')
    symbols: List[str]
    try:
        ul = soup.find('ul', class_="emoji-list")
        spans = ul.find_all('span', class_='emoji')
        symbols = [span.get_text() for span in spans]
    except AttributeError:
        # ``soup.find`` returned None (no emoji list on the page).
        # The previous bare ``except:`` silently hid every other error
        # (network issues, keyboard interrupts, ...) as well.
        symbols = []
    return symbols
61eaff867e9d9c75582f31435a6c22f3b92fd85a
3,644,133
from typing import Optional


def calc_cumulative_bin_metrics(
    labels: np.ndarray,
    probability_predictions: np.ndarray,
    number_bins: int = 10,
    decimal_points: Optional[int] = 4) -> pd.DataFrame:
  """Calculates performance metrics for cumulative bins of the predictions.

  Args:
    labels: An array of true binary labels represented by 1.0 and 0.0.
    probability_predictions: An array of predicted probabilities between 0.0
      and 1.0.
    number_bins: Number of cumulative bins that we want to divide the ranked
      predictions into. Default is 10 bins such that the 1st bin contains the
      highest 10% of the predictions, 2nd bin contains the highest 20% of the
      predictions and so on.
    decimal_points: Number of decimal points to use when outputting the
      calculated performance metrics.

  Returns:
    bin_metrics: Following metrics calculated for each cumulative bin.
      cumulative_bin_number: Bin number starting from 1.
      bin_size: Total numbers of instances in the bin,
      bin_size_proportion: Proportion of instances in the bin out of all the
        instances in the labels.
      positive_instances: Numbers of positive instances in the bin,
      precision: Proportion of positive instances out of all the instances
        in the bin,
      coverage (recall): Proportion of positives instances in the bin out of
        all the positive instances in the labels,
      prop_label_positives: Proportion of positive instances in the labels,
      precision_uplift: Uplift of precision of the bin compared to the
        precision of the random prediction (prop_label_positives).
  """
  # Input validation (project helpers; presumably raise on bad input).
  utils.assert_label_values_are_valid(labels)
  utils.assert_prediction_values_are_valid(probability_predictions)
  utils.assert_label_and_prediction_length_match(labels,
                                                 probability_predictions)

  # Separate the probability_predictions into bins.
  # Rank all instances by predicted probability, highest first.
  label_predictions = pd.DataFrame(
      list(zip(labels, probability_predictions)),
      columns=['label', 'prediction'])
  label_predictions = label_predictions.sort_values(
      by='prediction', ascending=False)

  number_total_instances = label_predictions.shape[0]
  equal_bin_size = number_total_instances / number_bins
  number_total_positive_instances = label_predictions[
      label_predictions['label'] > 0].shape[0]
  # Baseline precision: the positive rate of the whole label set.
  prop_label_positives = round(
      number_total_positive_instances / number_total_instances,
      decimal_points)

  cumulative_bin_metrics_list = list()
  for i in range(1, (number_bins + 1)):
    # The i-th cumulative bin is the top i/number_bins fraction of the
    # ranked predictions.
    current_bin_size = round(equal_bin_size * i)
    bin_size_proportion = round(current_bin_size / number_total_instances,
                                decimal_points)
    bin_instances = label_predictions.head(current_bin_size)
    number_bin_positive_instances = bin_instances[
        bin_instances['label'] > 0].shape[0]
    bin_precision = round(number_bin_positive_instances / current_bin_size,
                          decimal_points)
    bin_recall = round(
        number_bin_positive_instances / number_total_positive_instances,
        decimal_points)
    # Uplift over a random prediction with the baseline positive rate.
    bin_precision_uplift = round(bin_precision / prop_label_positives,
                                 decimal_points)
    cumulative_bin_metrics_list.append(
        (i, current_bin_size, bin_size_proportion,
         number_bin_positive_instances, bin_precision, bin_recall,
         prop_label_positives, bin_precision_uplift))

  return pd.DataFrame(
      cumulative_bin_metrics_list,
      columns=[
          'cumulative_bin_number', 'bin_size', 'bin_size_proportion',
          'positive_instances', 'precision', 'coverage (recall)',
          'prop_label_positives', 'precision_uplift'
      ])
c3574c8e74d5c6fd649ea4258b9a8518811210f6
3,644,134
def rootbeta_cdf(x, alpha, beta_, a, b, bounds=(), root=2.):
    """
    Calculates the cumulative density function of the root-beta
    distribution, i.e.::

        F(z; alpha, beta_) = I_z(alpha, beta_)

    where ``z = (x**(1/root) - a**(1/root)) / (b**(1/root) - a**(1/root))``
    and ``I_z(alpha, beta_)`` is the regularized incomplete beta function.

    NOTE(review): the previous docstring described the *log*-beta
    distribution (``z = (ln(x) - ln(a)) / (ln(b) - ln(a))``), which does
    not match the root transform actually applied below -- presumably a
    copy-paste from a ``logbeta_cdf`` sibling.

    Parameters
    ----------
    x : float or array_like, shape (n,)
        Realization.
    alpha : float
        Shape parameter 1.
    beta_ : float
        Shape parameter 2.
    a : float
        Minimum.
    b : float
        Maximum.
    bounds : tuple
        Tuple of minimum and maximum attainable realizations.
    root : float
        Root of the transform (2. means square root).

    Returns
    -------
    p : float or array_like, shape (n,)
        Probability.
    """
    # Validate min/max and the remaining distribution inputs.
    _chk_root_mmm_inp(a, b)
    if not bounds:
        # Fall back to the distribution support when no explicit bounds.
        bounds = (a, b)
    _chk_beta_inp(alpha, beta_)
    _chk_dist_inp(x, bounds)
    # ``sqrt(v, root)`` is the project's generalized root, presumably
    # ``v**(1/root)`` -- TODO confirm against the helper's definition.
    return beta_cdf(sqrt(x, root), alpha, beta_,
                    sqrt(a, root), sqrt(b, root))
e0b951c177f288bc89536494485904e1839af7de
3,644,135
def get_scores(treatment, outcome, prediction, p, scoring_range=(0,1),
               plot_type='all'):
    """Calculate AUC scoring metrics.

    Parameters
    ----------
    treatment : array-like
    outcome : array-like
    prediction : array-like
    p : array-like
        Treatment policy (probability of treatment for each row).
    scoring_range : 2-tuple
        Fractional range over which frost score is calculated. First
        element must be less than second, and both must be less than 1.
    plot_type : str
        Which curve(s) to score: 'qini', 'cgains', 'aqini', or 'all'.

    Returns
    -------
    scores : dict
        A dictionary containing the following values. Each is also
        appended with `_cgains` and `_aqini` for the corresponding values
        for the cumulative gains curve and adjusted qini curve,
        respectively.

        q1:
            Traditional Q score normalized by the theoretical maximal
            qini. Note the theoretical max here goes up with a slope of 2.
        q2:
            Traditional Q score normalized by the practical maximal qini.
            This curve increases with a slope of 1.
        Q:
            Area between qini curve and random selection line. This is
            named after the notation in Radcliffe & Surry 2011, but note
            that they normalize their curves differently.
        Q_max:
            Maximal possible qini score, which is used for normalization
            of qini to get frost score. Only obtainable by overfitting.
        Q_practical_max:
            Practical maximal qini score, if you are not overfitting.
            This assumes that all (outcome, treatment) = (1,1) were
            persuadables, but that there are also an equal number of
            persuadables in the control group. This is the best possible
            scenario, but likely assumes too few "sure things".
        overall_lift:
            The lift expected from random application of treatment.
    """
    treatment = _ensure_array(treatment)
    outcome = _ensure_array(outcome)
    prediction = _ensure_array(prediction)
    p = _ensure_array(p)
    # Propensity-weighted cell counts by (treatment, outcome) combination.
    Nt1o1, Nt0o1, Nt1o0, Nt0o0 = _get_counts(treatment, outcome, p)
    Nt1, Nt0, N = _get_tc_counts(Nt1o1, Nt0o1, Nt1o0, Nt0o0)

    def riemann(x, y):
        # Trapezoidal Riemann sum of the curve (x, y).
        avgy = [(a+b)/2 for (a,b) in zip(y[:-1], y[1:])]
        dx = [b-a for (a,b) in zip(x[:-1], x[1:])]
        return sum([a*b for (a,b) in zip(dx, avgy)])

    # Areas under the theoretical (overfit) and practical maximal curves.
    qini_riemann = riemann(*_maximal_qini_curve(
        _get_overfit_counts, Nt1o1, Nt0o1, Nt1o0, Nt0o0))
    practical_qini_riemann = riemann(*_maximal_qini_curve(
        _get_no_sure_thing_counts, Nt1o1, Nt0o1, Nt1o0, Nt0o0))

    overall_lift = (Nt1o1/Nt1-Nt0o1/Nt0)
    # Subtract the area under the random-selection line (slope overall_lift).
    qini_max = qini_riemann - 0.5*overall_lift
    practical_qini_max = practical_qini_riemann - 0.5*overall_lift

    # The predicted Qini curve.
    # First we need to reorder the y values and y_pred based on this
    # reordering. We calculate TOT roughly here so we have a way of
    # distinguishing those that (ordered, treated) and those that
    # (ordered, untreated): y is +outcome for treated, -outcome for control.
    y = (2*treatment - 1)*outcome

    def sortbyprediction(vec):
        # Return vec reordered by descending prediction.
        list2 = list(zip(prediction,vec))
        # Sort by prediction. Included the tup[0] because otherwise we run
        # into problems when there are only a few predicted values -- it
        # orders by index i instead -- not what we want!
        list2.sort(key=lambda tup: tup[0], reverse=True)
        # Extract the vector, sorted by prediction.
        _, vec_ordered = zip(*list2)
        return vec_ordered

    y_ordered = sortbyprediction(y)
    tr_ordered = sortbyprediction(treatment)
    p_ordered = sortbyprediction(p)

    def auc(method='qini'):
        # Calculate the area under the chosen uplift curve over
        # scoring_range, plus the maximum pointwise gain.
        # NOTE(review): ``uplift_next`` is unbound if ``method`` is not
        # one of 'qini'/'cgains'/'aqini' -- callers only pass these three.
        uplift_last = 0
        nt1o1 = 0
        nt0o1 = 0
        nt1 = EPS  # EPS guards the nt1/nt0 divisions below
        nt0 = EPS
        pred_riemann = 0
        uplifts = []
        for i in range(round(scoring_range[0]*len(treatment)),
                       round(scoring_range[1]*len(treatment))):
            # Inverse-propensity-weighted running counts; y_ordered > 0
            # means (treated, outcome=1), < 0 means (control, outcome=1).
            if y_ordered[i] > 0:
                nt1o1 += 0.5*(1/p_ordered[i])
            elif y_ordered[i] < 0:
                nt0o1 += 0.5*(1/(1-p_ordered[i]))
            if tr_ordered[i] == 1:
                nt1 += 0.5*(1/p_ordered[i])
            else:
                nt0 += 0.5*(1/(1-p_ordered[i]))
            if method=='qini':
                uplift_next = nt1o1/Nt1-nt0o1/Nt0
            elif method=='cgains':
                uplift_next = (nt1o1/nt1-nt0o1/nt0)*(nt1+nt0)/N
            elif method=='aqini':
                uplift_next = nt1o1/Nt1-nt0o1*nt1/(nt0*Nt1 + EPS)
            uplifts.append(uplift_next)
            # each point corresponds to an x delta of 1/N
            pred_riemann += 1/2*(uplift_next+uplift_last)/N
            uplift_last = uplift_next
        # Subtract the random-selection area over the same x range.
        AUC = pred_riemann - 0.5*overall_lift*(scoring_range[1]**2 -
                                               scoring_range[0]**2)
        maxgain = np.amax(uplifts)
        return AUC, maxgain

    # Dictionary to store all scores.
    scores = {}

    # Raw max scores.
    scores['Q_max'] = qini_max
    scores['overall_lift'] = overall_lift
    scores['Q_practical_max'] = practical_qini_max

    if (plot_type=='qini') or (plot_type=='all'):
        # Qini curve scores.
        scores['Q_qini'], scores['max_qini'] = auc(method='qini')
        scores['q1_qini'] = scores['Q_qini']/scores['Q_max']
        scores['q2_qini'] = scores['Q_qini']/scores['Q_practical_max']
    if (plot_type=='cgains') or (plot_type=='all'):
        # Scores for cumulative gains curve.
        scores['Q_cgains'], scores['max_cgains'] = auc(method='cgains')
        scores['q1_cgains'] = scores['Q_cgains']/scores['Q_max']
        scores['q2_cgains'] = scores['Q_cgains']/scores['Q_practical_max']
    if (plot_type=='aqini') or (plot_type=='all'):
        # Scores for adjusted qini curve.
        scores['Q_aqini'], scores['max_aqini'] = auc(method='aqini')
        scores['q1_aqini'] = scores['Q_aqini']/scores['Q_max']
        scores['q2_aqini'] = scores['Q_aqini']/scores['Q_practical_max']

    return scores
c59cc98e08cfff6b01eff5c3ff4f74973ababf34
3,644,136
def get_arima_nemo_pipeline():
    """Build a complex pipeline with the following structure::

        arima  \
                 linear
        nemo   |
    """
    # Two primary (input) nodes feeding one secondary (root) node.
    arima_node = PrimaryNode('arima')
    nemo_node = PrimaryNode('exog_ts')
    root_node = SecondaryNode('linear', nodes_from=[arima_node, nemo_node])
    return Pipeline(root_node)
1ae171d29624ecc615f213f343c4a88c733d3554
3,644,137
from typing import Counter

import math


def conditional_entropy(x,
                        y,
                        nan_strategy=REPLACE,
                        nan_replace_value=DEFAULT_REPLACE_VALUE):
    """
    Calculates the conditional entropy of x given y: S(x|y)

    Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy

    **Returns:** float

    Parameters
    ----------
    x : list / NumPy ndarray / Pandas Series
        A sequence of measurements
    y : list / NumPy ndarray / Pandas Series
        A sequence of measurements
    nan_strategy : string, default = 'replace'
        How to handle missing values: can be either 'drop' to remove
        samples with missing values, or 'replace' to replace all missing
        values with the nan_replace_value. Missing values are None and
        np.nan.
    nan_replace_value : any, default = 0.0
        The value used to replace missing values with. Only applicable
        when nan_strategy is set to 'replace'.
    """
    # Handle missing values up front according to the chosen strategy.
    if nan_strategy == REPLACE:
        x, y = replace_nan_with_value(x, y, nan_replace_value)
    elif nan_strategy == DROP:
        x, y = remove_incomplete_samples(x, y)
    y_counts = Counter(y)
    xy_counts = Counter(zip(x, y))
    n = sum(y_counts.values())
    # S(x|y) = sum over (x,y) of p(x,y) * log(p(y) / p(x,y))
    result = 0.0
    for pair, count in xy_counts.items():
        p_xy = count / n
        p_y = y_counts[pair[1]] / n
        result += p_xy * math.log(p_y / p_xy)
    return result
c0a9c943efdd4da1ad2f248ef7eaa2e4b1b7be06
3,644,138
def peaks_in_time(dat, troughs=False):
    """Find indices of peaks or troughs in data.

    Parameters
    ----------
    dat : ndarray (dtype='float')
        vector with the data
    troughs : bool
        if True, will return indices of troughs instead of peaks

    Returns
    -------
    ndarray of int
        indices of peaks (or troughs) in dat

    Note
    ----
    This function does not deal well with flat signal; when the signal is
    not increasing, it is assumed to be decreasing. As a result, this
    function finds troughs where the signal begins to increase after
    either decreasing or remaining constant.
    """
    # Mark each step as 1 where the signal is strictly increasing, else 0.
    deltas = diff(dat)
    rising = zeros(len(deltas))
    rising[deltas > 0] = 1
    # A -1 transition (rising -> not rising) marks a peak;
    # a +1 transition (not rising -> rising) marks a trough.
    transitions = diff(rising)
    wanted = 1 if troughs else -1
    # +1 converts from step index back to sample index.
    return where(transitions == wanted)[0] + 1
acafee26ac6bc236aa68f48fbea5953020faa471
3,644,139
def read_submod_def(line):
    """Attempt to read SUBMODULE definition line.

    Returns None when the line is not a submodule definition, otherwise
    the tuple ('smod', SMOD_info(name, parent_name)). Either field of
    SMOD_info may be None when the corresponding word is absent.
    """
    submod_match = SUBMOD_REGEX.match(line)
    if submod_match is None:
        return None
    else:
        parent_name = None
        name = None
        # Strip the matched SUBMODULE prefix and any trailing '!' comment.
        trailing_line = line[submod_match.end(0):].split('!')[0]
        trailing_line = trailing_line.strip()
        # First word after the prefix: the parent module/submodule name.
        parent_match = WORD_REGEX.match(trailing_line)
        if parent_match is not None:
            parent_name = parent_match.group(0).lower()
            if len(trailing_line) > parent_match.end(0)+1:
                # Skip past the parent word plus one separator character.
                trailing_line = trailing_line[parent_match.end(0)+1:].strip()
            else:
                trailing_line = ''
        # Next word, if any: the submodule's own name.
        name_match = WORD_REGEX.match(trailing_line)
        if name_match is not None:
            name = name_match.group(0).lower()
        return 'smod', SMOD_info(name, parent_name)
27ed8d88fdb8fd112b072f50dba00bad783eb9f3
3,644,140
def predict(model, images, labels=None):
    """Predict class indices (or label names) for a batch of images.

    Parameters
    ----------
    model : tf.keras.Model
        Model used to predict labels.
    images : List(np.ndarray)
        Images to classify.
    labels : List(str), optional
        Label names to map predicted indices to. If None, the raw
        argmax indices are returned.

    Returns
    -------
    tf.Tensor of predicted class indices, or a list of label strings
    when ``labels`` is given.
    """
    # Stack a plain list of arrays into one batched tensor.
    # (isinstance replaces the non-idiomatic ``type(images) == list``.)
    if isinstance(images, list):
        images = tf.stack(images)
    predictions = model(images)
    # Class index with the highest score for each image.
    predictions = tf.math.argmax(predictions, axis=1)
    if labels is not None:
        predictions = [labels[pred] for pred in predictions]
    return predictions
a6c2261e7fea262fb1372f870ba3096a9faf2a68
3,644,141
import codecs
import re


def process_span_file(doc, filename):
    """Reads event annotation from filename, and add to doc

    :type filename: str
    :type doc: nlplingo.text.text_theory.Document

    Expected annotation block format::

        <Event type="CloseAccount">
        CloseAccount 0 230
        anchor 181 187
        CloseAccount/Source 165 170
        CloseAccount/Source 171 175
        CloseAccount/Source 176 180
        CloseAccount/Target 191 198
        CloseAccount/Target 207 214
        CloseAccount/Target 215 229
        </Event>
    """
    lines = []
    """:type: list[str]"""
    with codecs.open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            lines.append(line.strip())

    i = 0
    while i < len(lines):
        line = lines[i]
        if line.startswith('<Event type='):
            # Start of an event block: create the Event, then consume
            # span lines until the closing </Event> tag.
            event_type = re.search(r' type="(.*?)"', line).group(1)
            event_id = '{}.e-{}'.format(doc.docid, len(doc.events))
            event = Event(event_id, event_type)
            i += 1
            line = lines[i]
            while not line.startswith('</Event>'):
                # Each span line is: <info> <start-offset> <end-offset>
                tokens = line.split()
                info = tokens[0]
                offset = IntPair(int(tokens[1]), int(tokens[2]))

                if info == event_type or info == 'anchor' or '/' in info:
                    text = doc.get_text(offset.first, offset.second)
                    if text is None or text == '':
                        # Offsets fall outside the document text.
                        logger.warning('skipping annotation span {} {}-{} (doc length: {}, #sentences:{})'.format(doc.docid, offset.first, offset.second, doc.text_length(), len(doc.sentences)))
                    else:
                        # sometimes, the UI captures an extra trailing space.
                        # Check for that and adjust ending offset
                        if text[-1] == ' ':
                            text = text[0:-1]
                            offset.second = offset.second - 1

                        if info == event_type:
                            # this is an event span
                            id = '{}.s-{}'.format(event_id, len(event.event_spans))
                            event.add_event_span(EventSpan(id, offset, text, event_type))
                        elif info == 'anchor':
                            # anchor span
                            id = '{}.t-{}'.format(event_id, len(event.anchors))
                            #print('Spannotator, adding ANCHOR with text "{}"'.format(text))
                            newtext, newoffset = remove_trailing_periods(text, offset)
                            if text != newtext:
                                print('- revising anchor, text=[%s] offset=(%d,%d) newtext=[%s] newoffset=(%d,%d)' % (text, offset.first, offset.second, newtext, newoffset.first, newoffset.second))
                            event.add_anchor(Anchor(id, newoffset, newtext, event_type))
                        elif '/' in info:
                            # argument span
                            em_id = 'm-{}-{}'.format(offset.first, offset.second)
                            newtext, newoffset = remove_trailing_periods(text, offset)
                            if text != newtext:
                                print('- revising argument, text=[%s] offset=(%d,%d) newtext=[%s] newoffset=(%d,%d)' % (text, offset.first, offset.second, newtext, newoffset.first, newoffset.second))
                            # we just use a dummy em first, for creating the EventArgument (notice that this em is not added to the doc)
                            # later, when we annotate sentence, we will find an actual EntityMention that is backed by tokens
                            # and use that to back the EventArgument
                            # Ref: text_theory.annotate_sentence_with_events()
                            em = EntityMention(em_id, newoffset, newtext, 'dummy')
                            arg_role = info[info.index('/') + 1:]
                            arg_id = '{}.t-{}'.format(event_id, len(event.arguments))
                            event.add_argument(EventArgument(arg_id, em, arg_role))
                i += 1
                line = lines[i]
            doc.add_event(event)
        i += 1
    return doc
e2ae8f32947a6c99dfba69b0da06adcfffa3fc3c
3,644,142
from typing import Tuple


def mask_frame_around_position(
    frame: np.ndarray,
    position: Tuple[float, float],
    radius: float = 5,
) -> np.ndarray:
    """
    Zero out everything in ``frame`` outside a circular mask of the
    given ``radius`` centered at the given ``position``.

    This is sometimes required for the ``Gaussian2D``-based photometry
    methods to prevent the Gaussian from trying to fit some part of the
    data that is far from the target ``position``.

    Args:
        frame: A 2D numpy array of shape `(x_size, y_size)` containing
            the data on which to run the aperture photometry.
        position: A tuple `(x, y)` specifying the position at which to
            estimate the flux. The position should be in astropy /
            photutils coordinates.
        radius: The radius of the mask; this should approximately match
            the size of a planet signal.

    Returns:
        A masked version of the given ``frame`` on which we can perform
        photometry based on fitting a 2D Gaussian to the data.
    """
    result = np.array(np.copy(frame))

    # numpy indexing is (row, col), so flip the (x, y) position.
    keep = get_circle_mask(
        mask_size=(frame.shape[0], frame.shape[1]),
        radius=radius,
        center=position[::-1],
    )

    # Everything outside the circle is set to zero.
    result[~keep] = 0
    return result
cf616a0193cf9150821ed00c8e20c61a88b64d9e
3,644,143
import numpy as np


def apogeeid_digit(arr):
    """
    NAME: apogeeid_digit
    PURPOSE: Extract digits from apogeeid because its too painful to deal
        with APOGEE ID in h5py
    INPUT:
        arr (ndarray): apogee_id
    OUTPUT:
        apogee_id with digits only (ndarray)
    HISTORY:
        2017-Oct-26 - Written - Henry Leung (University of Toronto)
    """
    # Idiom fixes: tuple isinstance instead of an ``or`` chain,
    # enumerate instead of range(len(...)); the redundant str() around
    # ''.join(...) is dropped (join already returns str).
    if isinstance(arr, (np.ndarray, list)):
        arr_copy = np.array(arr)  # copy so the caller's array is untouched
        for i, item in enumerate(arr_copy):
            arr_copy[i] = ''.join(filter(str.isdigit, item))
        return arr_copy
    return ''.join(filter(str.isdigit, arr))
48e21ab69c9f733dbf7b612994bfed35b8980424
3,644,144
def transform_user_weekly_artist_chart(chart):
    """Convert lastfm api weekly artist chart data into neo4j friendly
    weekly artist chart data.

    Args:
        chart (dict): lastfm api weekly artist chart

    Returns:
        list - neo4j friendly artist data
    """
    weekly = chart['weeklyartistchart']
    return [transform_artist(entry) for entry in weekly['artist']]
1034211f6c21774044d767aeb7861b6aa80b4023
3,644,145
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('isuag')
    ctx = get_autoplot_context(fdict, get_description())
    # 50F is the soil-temperature threshold of interest.
    threshold = 50
    threshold_c = temperature(threshold, 'F').value('C')
    hours1 = ctx['hours1']
    hours2 = ctx['hours2']
    station = ctx['station']
    # Legacy ISUAG station providing the pre-ISUSM period of record.
    oldstation = XREF[station]
    # Legacy network: first warm-up above threshold lasting hours1+,
    # paired with each later cool-down below threshold (4 inch soil temp
    # is column c300, in Fahrenheit).
    df = read_sql("""
    with obs as (
        select valid, c300, lag(c300) OVER (ORDER by valid ASC)
        from hourly where station = %s),
    agg1 as (
        select valid,
        case when c300 > %s and lag < %s then 1
             when c300 < %s and lag > %s then -1
             else 0 end as t from obs),
    agg2 as (
        SELECT valid, t from agg1 where t != 0),
    agg3 as (
        select valid, lead(valid) OVER (ORDER by valid ASC), t from agg2),
    agg4 as (
        select extract(year from valid) as yr, valid, lead,
        rank() OVER (PARTITION by extract(year from valid)
                     ORDER by valid ASC)
        from agg3 where t = 1 and (lead - valid) >= '%s hours'::interval),
    agg5 as (
        select extract(year from valid) as yr, valid, lead
        from agg3 where t = -1)
    select f.yr, f.valid as fup, f.lead as flead,
    d.valid as dup, d.lead as dlead
    from agg4 f JOIN agg5 d ON (f.yr = d.yr)
    where f.rank = 1 and d.valid > f.valid ORDER by fup ASC
    """, pgconn, params=(oldstation, threshold, threshold, threshold,
                         threshold, hours1), index_col=None)
    if df.empty:
        raise NoDataFound("No Data Found")
    # Same query shape against the current ISUSM network, whose
    # tsoil_c_avg column is in Celsius (hence threshold_c).
    df2 = read_sql("""
    with obs as (
        select valid, tsoil_c_avg,
        lag(tsoil_c_avg) OVER (ORDER by valid ASC)
        from sm_hourly where station = %s),
    agg1 as (
        select valid,
        case when tsoil_c_avg > %s and lag < %s then 1
             when tsoil_c_avg < %s and lag > %s then -1
             else 0 end as t from obs),
    agg2 as (
        SELECT valid, t from agg1 where t != 0),
    agg3 as (
        select valid, lead(valid) OVER (ORDER by valid ASC), t from agg2),
    agg4 as (
        select extract(year from valid) as yr, valid, lead,
        rank() OVER (PARTITION by extract(year from valid)
                     ORDER by valid ASC)
        from agg3 where t = 1 and (lead - valid) >= '%s hours'::interval),
    agg5 as (
        select extract(year from valid) as yr, valid, lead
        from agg3 where t = -1)
    select f.yr, f.valid as fup, f.lead as flead,
    d.valid as dup, d.lead as dlead
    from agg4 f JOIN agg5 d ON (f.yr = d.yr)
    where f.rank = 1 and d.valid > f.valid ORDER by fup ASC
    """, pgconn, params=(station, threshold_c, threshold_c, threshold_c,
                         threshold_c, hours1), index_col=None)
    if df2.empty:
        raise NoDataFound("No Data Found")
    (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
    d2000 = utc(2000, 1, 1, 6)
    for d in [df, df2]:
        for _, row in d.iterrows():
            if row['dlead'] is None:
                continue
            # Project all timestamps into year 2000 so different years
            # can share one x axis (seconds since 1 Jan 2000 06z).
            f0 = (row['fup'].replace(year=2000) - d2000).total_seconds()
            f1 = (row['flead'].replace(year=2000) - d2000).total_seconds()
            d0 = (row['dup'].replace(year=2000) - d2000).total_seconds()
            d1 = (row['dlead'].replace(year=2000) - d2000).total_seconds()
            if d1 < d0:
                # The year replacement wrapped; skip this interval.
                continue
            # Red bar: the first warm period; blue bars: cool-downs,
            # dark blue when lasting hours2+ hours.
            ax.barh(row['fup'].year, (f1-f0), left=f0, facecolor='r',
                    align='center', edgecolor='r')
            color = 'lightblue' if (d1 - d0) < (hours2 * 3600) else 'b'
            ax.barh(row['fup'].year, (d1-d0), left=d0, facecolor=color,
                    align='center', edgecolor=color)
    # Month boundaries as x ticks, still in seconds-since-d2000 space.
    xticks = []
    xticklabels = []
    for i in range(1, 13):
        d2 = d2000.replace(month=i)
        xticks.append((d2 - d2000).total_seconds())
        xticklabels.append(d2.strftime("%-d %b"))
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticklabels)
    # Limit the view to March through July.
    ax.set_xlim(xticks[2], xticks[6])
    ax.grid(True)
    nt = NetworkTable("ISUSM")
    nt2 = NetworkTable("ISUAG", only_online=False)
    ab = nt.sts[station]['archive_begin']
    if ab is None:
        raise NoDataFound("Unknown station metadata.")
    ax.set_title(("[%s] %s 4 Inch Soil Temps\n[%s] %s used for pre-%s dates"
                  ) % (station, nt.sts[station]['name'], oldstation,
                       nt2.sts[oldstation]['name'], ab.year))
    # NOTE(review): y-limits mix df (legacy) min year and df2 (ISUSM) max
    # year -- presumably intentional to span both periods; confirm.
    ax.set_ylim(df['yr'].min() - 1, df2['yr'].max() + 1)
    # Proxy artists for the legend colors.
    p0 = plt.Rectangle((0, 0), 1, 1, fc="r")
    p1 = plt.Rectangle((0, 0), 1, 1, fc="lightblue")
    p2 = plt.Rectangle((0, 0), 1, 1, fc="b")
    ax.legend((p0, p1, p2), (
        'First Period Above %s for %s+ Hours' % (threshold, hours1),
        'Below %s for 1+ Hours' % (threshold, ),
        'Below %s for %s+ Hours' % (threshold, hours2)),
        ncol=2, fontsize=11, loc=(0., -0.2))
    # Shrink the axes upward to make room for the legend below the plot.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    return fig, df
f8a412065700ab111f5bf846938721aa397803b3
3,644,146
def config_namespace(config_file=None, auto_find=False,
                     verify=True, **cfg_options):
    """
    Return configuration options as a Namespace.

    .. code:: python

        reusables.config_namespace(os.path.join("test", "data",
                                                "test_config.ini"))
        # <Namespace: {'General': {'example': 'A regul...>

    :param config_file: path or paths to the files location
    :param auto_find: look for a config type file at this location or below
    :param verify: make sure the file exists before trying to read
    :param cfg_options: options to pass to the parser
    :return: Namespace of the config files
    """
    # Build the plain dict first, then wrap it in the Namespace type.
    options = config_dict(config_file, auto_find, verify, **cfg_options)
    return ConfigNamespace(**options)
c3293fa36e32d2ebea610a88a6e29ba47906ab7b
3,644,147
import pandas
import numpy
import torch
# Fix: the original did `import tqdm` and then called `tqdm(...)`, which
# invokes the *module* and raises TypeError; import the callable instead.
from tqdm import tqdm


def extract_peaks(peaks, sequences, signals, controls=None, chroms=None,
        in_window=2114, out_window=1000, max_jitter=128, min_counts=None,
        max_counts=None, verbose=False):
    """Extract sequences and signals at coordinates from a peak file.

    Takes genome-wide sequences, signals, and optionally controls, extracts
    the values of each at the coordinates in the peak file, and returns them
    as tensors. ``signals``/``controls`` are lists; their lengths n_s/n_c
    become the middle dimension of the returned tensors, i.e. the shapes are
    (len(peaks), n_s/n_c, (out_window/in_window)+max_jitter*2). Each of
    ``sequences``/``signals``/``controls`` may be a filepath (loaded with
    pyfaidx or pyBigWig) or a dictionary of numpy arrays keyed by chromosome.

    Parameters
    ----------
    peaks: str or pandas.DataFrame
        Path to a bed file, or a DataFrame with three columns: chromosome,
        start, and end of each peak.
    sequences: str or dict
        Fasta path, or dict of chromosome -> one-hot encoded numpy array.
    signals: list of str or list of dict
        Bigwig paths or dicts of chromosome -> numpy array.
    controls: list of str or list of dict or None, optional
        Same as signals; if None, no control tensor is returned. Default None.
    chroms: list or None, optional
        Chromosomes to extract peaks from; peaks on other chromosomes are
        ignored. If None, all peaks are used. Default None.
    in_window: int, optional
        Input window size. Default 2114.
    out_window: int, optional
        Output window size. Default 1000.
    max_jitter: int, optional
        Maximum jitter, in either direction, added to the midpoints.
        Default 128.
    min_counts: float or None, optional
        Minimum summed counts (over length and tasks) for an example to be
        kept. If None, no minimum. Default None.
    max_counts: float or None, optional
        Maximum summed counts for an example to be kept. If None, no maximum.
        Default None.
    verbose: bool, optional
        Whether to display a progress bar while loading. Default False.

    Returns
    -------
    seqs: torch.tensor, shape=(n, 4, in_window+2*max_jitter)
        Extracted sequences, in peak-file order (after chromosome filtering).
    signals: torch.tensor, shape=(n, len(signals), out_window+2*max_jitter)
        Extracted signals; second dimension follows the order of ``signals``.
    controls: torch.tensor, shape=(n, len(controls), out_window+2*max_jitter)
        Extracted controls; only returned when ``controls`` is given.
    """
    seqs, signals_, controls_ = [], [], []
    in_width, out_width = in_window // 2, out_window // 2

    # Load the sequences from disk when a fasta path is given
    if isinstance(sequences, str):
        sequences = pyfaidx.Fasta(sequences)

    # Load the peaks or rename the columns to be consistent
    names = ['chrom', 'start', 'end']
    if isinstance(peaks, str):
        peaks = pandas.read_csv(peaks, sep="\t", usecols=(0, 1, 2),
            header=None, index_col=False, names=names)
    else:
        peaks = peaks.copy()
        peaks.columns = names

    if chroms is not None:
        peaks = peaks[numpy.isin(peaks['chrom'], chroms)]

    # Open bigwig handles for any filenames. Fix: build new lists instead of
    # mutating the caller's `signals`/`controls` arguments in place.
    signals = [pyBigWig.open(s, "r") if isinstance(s, str) else s
        for s in signals]
    if controls is not None:
        controls = [pyBigWig.open(c, "r") if isinstance(c, str) else c
            for c in controls]

    desc = "Loading Peaks"
    d = not verbose
    for chrom, start, end in tqdm(peaks.values, disable=d, desc=desc):
        mid = start + (end - start) // 2

        # Signals are extracted over the (smaller) output window
        start = mid - out_width - max_jitter
        end = mid + out_width + max_jitter

        signals_.append([])
        for signal in signals:
            if isinstance(signal, dict):
                signal_ = signal[chrom][start:end]
            else:
                signal_ = signal.values(chrom, start, end, numpy=True)
                signal_ = numpy.nan_to_num(signal_)
            signals_[-1].append(signal_)

        # Sequences and controls use a window the size of the input
        start = mid - in_width - max_jitter
        end = mid + in_width + max_jitter

        if controls is not None:
            controls_.append([])
            for control in controls:
                if isinstance(control, dict):
                    control_ = control[chrom][start:end]
                else:
                    control_ = control.values(chrom, start, end, numpy=True)
                    control_ = numpy.nan_to_num(control_)
                controls_[-1].append(control_)

        if isinstance(sequences, dict):
            seq = sequences[chrom][start:end].T
        else:
            seq = one_hot_encode(sequences[chrom][start:end].seq.upper(),
                alphabet=['A', 'C', 'G', 'T', 'N']).T

        seqs.append(seq)

    seqs = torch.tensor(numpy.array(seqs), dtype=torch.float32)
    signals_ = torch.tensor(numpy.array(signals_), dtype=torch.float32)

    # Filter examples by total read counts, when requested
    idxs = torch.ones(signals_.shape[0], dtype=torch.bool)
    if max_counts is not None:
        idxs = (idxs) & (signals_.sum(dim=(1, 2)) < max_counts)
    if min_counts is not None:
        idxs = (idxs) & (signals_.sum(dim=(1, 2)) > min_counts)

    if controls is not None:
        controls_ = torch.tensor(numpy.array(controls_), dtype=torch.float32)
        return seqs[idxs], signals_[idxs], controls_[idxs]
    return seqs[idxs], signals_[idxs]
f3a3696f2e31b7b91384df50dd0374c2e4e46443
3,644,148
def map_feature(value, f_type):
    """ Builds the Tensorflow feature for the given feature information

    :param value: the value to wrap in a feature
    :param f_type: numpy dtype of the value
    :return: the corresponding Tensorflow feature
    :raises ValueError: when the dtype is not supported
    """
    # The dtypes below are mutually exclusive, so ordering does not matter.
    if f_type == np.dtype('bool'):
        return int64_feature(value.astype('int'))
    if f_type == np.dtype('object'):
        return bytes_feature(value)
    if f_type == np.dtype('int'):
        return int64_feature(value)
    if f_type == np.dtype('float'):
        return float64_feature(value)
    raise ValueError('Do not know how to store value {} with type {}'
                     .format(value, f_type))
26416b27737542c8ac6100168775f47b271206a3
3,644,150
def is_text_area(input):
    """
    Template tag to check whether a form input renders as a textarea
    :param input: Input field
    :return: True if the field's widget is a Textarea, False if not
    """
    widget = input.field.widget
    return type(widget).__name__ == "Textarea"
4657a93809e123aaa27ee0a202b33e0383ac23cc
3,644,151
def print_album_list(album_list):
    """Print album list and return the album name choice.

    If return is all then all photos on page will be download.

    :param album_list: list of dicts with at least 'name' and 'count' keys
    :return: zero-based index of the chosen album (-1 means "all")
    """
    for i, album in enumerate(album_list):
        print("{}. {} ({} photo(s))".format(i + 1, album['name'],
                                            album['count']))
    # Fix: raw_input() only exists on Python 2; input() is the Python 3 name.
    choice = input("Please enter your choice (0 for all): ")
    return int(choice) - 1
2a3c4fde9fc56da179ea43c88f966735fc5c7beb
3,644,152
import struct


def read_bool(data):
    """ Read 1 byte of data as `bool` type.

    Parameters
    ----------
    data : io.BufferedReader
        File open to read in binary mode

    Returns
    -------
    bool
        True or False
    """
    # "=" forces native byte order with standard sizes.
    fmt = "=%s" % get_type("bool")
    (value,) = struct.unpack(fmt, data.read(1))
    return value
9302a3f4831143c44b0a67cfe0f146463e8ba27e
3,644,156
def sectorize(position):
    """ Returns a tuple representing the sector for the given `position`.

    Parameters
    ----------
    position : tuple of len 3

    Returns
    -------
    sector : tuple of len 3
        The y component is always 0 -- sectors are columns in world space.
    """
    x, y, z = normalize(position)
    size = GameSettings.SECTOR_SIZE
    return (x // size, 0, z // size)
689fc3ee350e5493d037df290c5df05d50621b7e
3,644,157
import random


def add_random_phase_shift(hkl, phases, fshifts=None):
    """ Introduce a random phase shift, at most one unit cell length
    along each axis.

    Parameters
    ----------
    hkl : numpy.ndarray, shape (n_refls, 3)
        Miller indices
    phases : numpy.ndarray, shape (n_refls,)
        phase values in degrees, ordered as hkl
    fshifts : numpy.ndarray, shape (3,), optional
        fractional shifts along (a,b,c) to apply; if None, apply random shifts

    Returns
    -------
    shifted_phases : numpy.ndarray, shape (n_refls,)
        phase values in degrees, ordered as hkl
    fshifts : numpy.ndarray, shape (3,)
        fractional shifts applied along (a,b,c)
    """
    if fshifts is None:
        # One random fractional shift per cell axis.
        fshifts = np.array([random.random(), random.random(), random.random()])
    shift_deg = 360 * np.dot(hkl, fshifts).ravel()
    shifted_phases = wrap_phases(phases - shift_deg)
    return shifted_phases, fshifts
7739d99b58bec80283a5e49fc2e6eaa6161286ae
3,644,158
import itertools
import re


def parse_cluster_file(filename):
    """ Parse the output of the CD-HIT clustering and return a dictionary
    of clusters.

    Adapted from a small wrapper script
    ([source code on Github](https://github.com/Y-Lammers/CD-HIT-Filter/blob/master/CD-HIT-Filter.py),
    author: Youri Lammers).

    :param filename: path to a CD-HIT ``.clstr`` file
    :return: dict mapping cluster name (spaces replaced by underscores) to a
        list of ``[sequence_id, is_reference]`` pairs, where ``is_reference``
        is True for the cluster's representative sequence.
    """
    cluster_dic = {}

    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filename) as cluster_file:
        # groupby on "line starts with '>'" yields alternating groups:
        # a header group (">Cluster N") followed by its member lines.
        groups = (lines for _, lines in itertools.groupby(
            cluster_file, key=lambda line: line.startswith('>')))

        for header_lines in groups:
            # First header line carries the cluster name.
            name = next(header_lines).strip()
            name = re.sub(' ', '_', name[1:])

            # The next group holds the member sequences of this cluster.
            members = [entry.split('>')[1].split('...')
                       for entry in next(groups)]

            # The representative sequence is marked with a trailing " *".
            members = [[parts[0], parts[1] == ' *\n'] for parts in members]
            cluster_dic[name] = members

    return cluster_dic
d50eaeb926be3a7b8d1139c82142e4a1b595c1a0
3,644,162
def app(par=None):
    """
    Return the Miniweb object instance.

    :param par: Dictionary with configuration parameters. (optional parameter)
    :return: Miniweb object instance.
    """
    # Miniweb is a singleton; get_instance handles creation/reuse.
    instance = Miniweb.get_instance(par)
    return instance
3d2b0d1a9fd87e9e5c26ea9a141e40fbe342b764
3,644,163
def openTopics(topicsFile='topics'):
    """Opens topics file and returns its whitespace-separated tokens.

    :param topicsFile: path of the file to read; defaults to ``'topics'``,
        preserving the original hard-coded behaviour.
    :return: list of topics
    """
    # `with` guarantees the handle is closed even if read() raises.
    with open(topicsFile) as f:
        return f.read().split()
e6d43ff6717122532a71355b71134d6f78f9db85
3,644,164
from django.forms.boundfield import BoundField
from django.utils.inspect import func_supports_parameter, func_accepts_kwargs


def fix_behaviour_widget_render_forced_renderer(utils):
    """
    Restore the behaviour where the "renderer" parameter of Widget.render()
    may not be supported by subclasses.
    """
    # Keep a reference to the unpatched method so the wrapper can delegate.
    original_as_widget = BoundField.as_widget

    def as_widget(self, widget=None, attrs=None, only_initial=False):
        # Wrapper around BoundField.as_widget: if this widget's render()
        # accepts neither a "renderer" parameter nor **kwargs, patch the
        # instance so the extra argument is swallowed instead of crashing.
        widget = widget or self.field.widget
        if not (
            func_supports_parameter(widget.render, "renderer")
            or func_accepts_kwargs(widget.render)
        ):
            original_widget_render = widget.render
            # NOTE(review): RemovedInDjango21Warning is not imported in this
            # snippet -- presumably provided elsewhere in the module; verify.
            utils.emit_warning(
                "Add the `renderer` argument to the render() method of %s. "
                "It will be mandatory in Django 2.1." % widget.__class__,
                RemovedInDjango21Warning,
                stacklevel=2,
            )

            def instance_render(name, value, attrs=None, renderer=None):
                del renderer  # restore non-mandatory support for this parameter
                return original_widget_render(name=name, value=value, attrs=attrs)

            utils.inject_callable(
                widget, "render", instance_render
            )  # beware, function stored in INSTANCE
        return original_as_widget(
            self, widget=widget, attrs=attrs, only_initial=only_initial
        )

    utils.inject_callable(BoundField, "as_widget", as_widget)
7d55ecc18fae91af221b806448fa30203fdd9cd4
3,644,165
from typing import List


def _axis_slice(block, axis, start, stop):
    """Return a new Block cut from `block` along `axis` using [start:stop)."""
    index = [slice(None)] * 3
    index[axis] = slice(start, stop)
    index = tuple(index)
    return Block(block.X[index], block.Y[index], block.Z[index])


def _find_step_size(total_cells, gcd_value, ncells_per_block, denominator):
    """Find a node step size that preserves multigrid (GCD) compatibility.

    Searches backward first, then forward; raises ValueError when no valid
    step exists. (The original used ``assert('message')`` here, which is
    always true and silently continued with a step size of -1.)
    """
    step_size = __step_search(total_cells, gcd_value, ncells_per_block,
                              denominator, direction='backward')
    if step_size == -1:
        step_size = __step_search(total_cells, gcd_value, ncells_per_block,
                                  denominator, direction='forward')
    if step_size == -1:
        raise ValueError('no valid step size found, do you have multi-block? gcd > 1')
    return step_size


def _split_along_axis(block, axis, axis_max, step_size):
    """Cut `block` into sub-blocks every `step_size` nodes along `axis`.

    Neighbouring sub-blocks share their boundary face: the last node plane of
    one slice is the first node plane of the next.
    """
    sub_blocks = []
    prev = 0
    for pos in range(step_size, axis_max, step_size):
        if (pos + 1) > axis_max:
            break
        sub_blocks.append(_axis_slice(block, axis, prev, pos + 1))
        prev = pos
    if prev + 1 < axis_max:
        # Remainder: everything from the last cut to the end of the block.
        sub_blocks.append(_axis_slice(block, axis, prev, None))
    return sub_blocks


def split_blocks(blocks: List[Block], ncells_per_block: int, direction: Direction = None):
    """Split blocks is used to divide an array of blocks based on number of
    cells per block.

    Maintains the greatest common denominator of the parent block's node
    counts so multigrid levels survive the split (GCD of 4 means multigrid of
    3: grid/4 coarse, /2 fine, /1 finest). ``ncells_per_block`` is only an
    estimate; the actual size snaps to the GCD. If no direction is given, one
    is chosen automatically per block.

    Bug fixes vs. the original implementation: the j-direction loop tested
    against IMAX instead of JMAX, and its slices dropped the shared boundary
    face (``jprev:j`` instead of ``jprev:j+1``); all three directions now
    behave identically.

    Args:
        blocks (List[Block]): List of blocks
        ncells_per_block (int): number of cells desired per block
        direction (Direction): direction to split the blocks in,
            Direction.(i,j,k). Defaults to None (auto-pick per block).

    Returns:
        Blocks (List[Block]): list of blocks split in the specified direction
    """
    new_blocks = list()
    for block in blocks:
        total_cells = block.IMAX * block.JMAX * block.KMAX

        direction_to_use = direction
        if direction_to_use is None:
            # NOTE(review): the docstring says the direction with the
            # *greater* extent is picked, but the original code used argmin;
            # the original behaviour is preserved here -- confirm intent.
            indx = int(np.argmin(np.array([block.IMAX, block.JMAX, block.KMAX])))
            direction_to_use = (Direction.i, Direction.j, Direction.k)[indx]

        if total_cells > ncells_per_block:
            # GCD of the cell counts keeps every sub-block compatible with
            # the parent block's multigrid levels.
            gcd_value = gcd(block.IMAX - 1, gcd(block.JMAX - 1, block.KMAX - 1))

            if direction_to_use == Direction.i:
                axis, axis_max = 0, block.IMAX
                denominator = block.JMAX * block.KMAX
            elif direction_to_use == Direction.j:
                axis, axis_max = 1, block.JMAX
                denominator = block.IMAX * block.KMAX
            else:
                axis, axis_max = 2, block.KMAX
                denominator = block.IMAX * block.JMAX

            step_size = _find_step_size(total_cells, gcd_value,
                                        ncells_per_block, denominator)
            new_blocks.extend(_split_along_axis(block, axis, axis_max, step_size))
        # NOTE(review): blocks already at or below ncells_per_block are
        # dropped from the output, matching the original behaviour -- confirm
        # this is intended rather than passing them through unchanged.
    return new_blocks
3,644,166
def get_entry_details(db_path, entry_id):
    """Get all information about an entry in database.

    Args:
        db_path: path to database file
        entry_id: string

    Return:
        out: dictionary (empty if no entry with that id exists)
    """
    s = connect_database(db_path)

    # find entry
    try:
        sim = s.query(Main).filter(Main.entry_id == entry_id).one()
    except NoResultFound:
        print("No entry found with entry_id {} in {}.".format(entry_id, db_path))
        s.close()  # fix: the original leaked the session on this path
        return {}

    # details from main table
    out = sim.__dict__

    # groups
    out["groups"] = [g.name for g in sim.groups]

    # tags: keywords without a value. These are plain Python objects at this
    # point (not SQLAlchemy column expressions), so identity checks apply.
    out["tags"] = [t.name for t in sim.keywords if t.value is None]

    # keywords with a value
    out["keywords"] = {k.name: k.value for k in sim.keywords if k.value is not None}

    # meta data
    meta = {}
    for meta_group in sim.meta.all():
        meta[meta_group.name] = {m.name: m.value for m in meta_group.entries}
    out["meta"] = meta

    s.close()  # clean up

    # Drop SQLAlchemy bookkeeping; pop() replaces the original bare
    # try/except-pass, which would also have hidden unrelated errors.
    out.pop("_sa_instance_state", None)

    return out
7a4023fa32a0e41cf3440bcd8fd2140ce88b8c33
3,644,167
import bisect


def pose_interp(poses, timestamps_in, timestamps_out, r_interp='slerp'):
    """Interpolate poses (translation + quaternion) at requested timestamps.

    :param poses: (N, 7) array, each row is translation (3) then quaternion
        (4). NOTE(review): assumes the quaternion layout matches what the
        ``quaternion`` package expects (w, x, y, z) -- confirm with callers.
    :param timestamps_in: (N,) timestamps of the input poses
    :param timestamps_out: (K,) timestamps to interpolate at
    :param r_interp: rotation scheme, 'slerp' or 'squad'
    :return: (K, 7) array of interpolated poses

    Fixes vs. the original: ``bisect_left`` was called without the search
    value, ``bisect_left.bisect_left`` was a typo, ``squad`` was given the
    raw float array instead of the quaternion array, and per-sample results
    were flattened by ``concatenate`` instead of stacked into rows.
    """
    assert r_interp in ('slerp', 'squad')
    assert len(poses) > 1
    assert len(poses) == len(timestamps_in)

    # Sort inputs by time so bisection is valid.
    timestamps_in = np.asarray(timestamps_in)
    order = np.argsort(timestamps_in)
    poses = poses[order]
    timestamps_in = timestamps_in[order]

    input_ts = poses[:, :3]
    input_rs = quaternion.from_float_array(poses[:, 3:])
    ts_list = timestamps_in.tolist()

    def _bracket(t):
        # Index i such that we interpolate between samples i and i+1,
        # clamped so out-of-range times extrapolate from the edge segment.
        i = bisect.bisect_right(ts_list, t) - 1
        return min(max(i, 0), len(ts_list) - 2)

    # Rotations
    if r_interp == 'squad':
        out_rs = quaternion.squad(input_rs, timestamps_in,
                                  np.asarray(timestamps_out))
        out_rs = quaternion.as_float_array(out_rs)
    else:
        out_rs = []
        for t in timestamps_out:
            i = _bracket(t)
            q = quaternion.slerp(input_rs[i], input_rs[i + 1],
                                 timestamps_in[i], timestamps_in[i + 1], t)
            out_rs.append(quaternion.as_float_array(q))
        out_rs = np.stack(out_rs, axis=0)

    # Translations: linear interpolation inside the bracketing segment.
    out_ts = []
    for t in timestamps_out:
        i = _bracket(t)
        t1, t2 = timestamps_in[i], timestamps_in[i + 1]
        w = (t - t1) / (t2 - t1)
        out_ts.append((1.0 - w) * input_ts[i] + w * input_ts[i + 1])
    out_ts = np.stack(out_ts, axis=0)

    new_pose = np.concatenate([out_ts, np.asarray(out_rs)], axis=1)
    return new_pose
cc8e49b6bab918c6887e37973d09469fcddc298d
3,644,168
from datetime import datetime


def checklist_saved_action(report_id):
    """ View saved report """
    saved_report = Report.query.filter_by(id=report_id).first()
    context = {
        'uid': str(saved_report.id),
        'save_date': datetime.now(),
        'report': saved_report,
        'title': 'Отчет | %s' % TITLE,
    }
    return render_template('checklist_saved.html', **context)
302bc174ffe0ed7d3180b2a59c5212b3a38e7eaf
3,644,169
def trilinear_memory_efficient(a, b, d, use_activation=False):
    """W1a + W2b + aW3b.

    Trilinear similarity computed without materialising the full
    [bs, len_a, len_b, d] tensor: the three terms are built separately
    and summed.

    :param a: tensor of shape [bs, len_a, d]
    :param b: tensor of shape [bs, len_b, d]
    :param d: feature dimension (Python int, used for variable shapes)
    :param use_activation: apply ReLU to the logits when True
    :return: unnormalized logits tensor of shape [bs, len_a, len_b]
    """
    n = tf.shape(a)[0]
    len_a = tf.shape(a)[1]
    len_b = tf.shape(b)[1]
    # Learned projection weights; created in (or reused from) the current
    # variable scope.
    w1 = tf.get_variable('w1', shape=[d, 1], dtype=tf.float32)
    w2 = tf.get_variable('w2', shape=[d, 1], dtype=tf.float32)
    w3 = tf.get_variable('w3', shape=[1, 1, d], dtype=tf.float32)
    a_reshape = tf.reshape(a, [-1, d])  # [bs*len_a, d]
    b_reshape = tf.reshape(b, [-1, d])  # [bs*len_b, d]
    # W1a term, broadcast across the len_b axis.
    part_1 = tf.reshape(tf.matmul(a_reshape, w1), [n, len_a])  # [bs, len_a]
    part_1 = tf.tile(tf.expand_dims(part_1, 2), [1, 1, len_b])  # [bs, len_a, len_b]
    # W2b term, broadcast across the len_a axis.
    part_2 = tf.reshape(tf.matmul(b_reshape, w2), [n, len_b])  # [bs, len_b]
    part_2 = tf.tile(tf.expand_dims(part_2, 1), [1, len_a, 1])  # [bs, len_a, len_b]
    # aW3b term: elementwise scale a by w3, then batch matmul with b^T.
    a_w3 = a * w3  # [bs, len_a, d]
    part_3 = tf.matmul(a_w3, tf.transpose(b, perm=[0, 2, 1]))  # [bs,len_a,len_b]
    ## return the unnormalized logits matrix : [bs,len_a,len_b]
    if use_activation:
        return tf.nn.relu(part_1 + part_2 + part_3)
    return part_1 + part_2 + part_3
d6ed8cc216019987674b86ef36377a6af45a6702
3,644,170
def private_questions_get_unique_code(assignment_id: str):
    """
    Get all questions for the given assignment.

    :param assignment_id: id of the assignment to look up
    :return: success response with question data for the assignment
    """
    # Look up the assignment and verify it exists within the course context
    assignment = Assignment.query.filter(
        Assignment.id == assignment_id
    ).first()
    req_assert(assignment is not None, message='assignment does not exist')
    assert_course_context(assignment)

    n_assigned = AssignedStudentQuestion.query.filter(
        AssignedStudentQuestion.assignment_id == assignment.id
    ).count()

    return success_response({
        'assignment_name': assignment.name,
        'questions': get_all_questions(assignment),
        'questions_assigned': n_assigned > 0,
        'assigned_question_count': n_assigned,
    })
1c94404168ac659e9ee3c45b3ecf7c2c398d1cca
3,644,171
def make_ngram(tokenised_corpus, n_gram=2, threshold=10):
    """Extract n-grams from a tokenised corpus

    Args:
        tokenised_corpus (list): List of tokenised corpus
        n_gram (int): maximum length of n-grams. Defaults to 2 (bigrams)
        threshold (int): min number of n-gram occurrences before inclusion

    Returns:
        ngrammed_corpus (list)
    """
    tokenised = tokenised_corpus.copy()
    # Each pass joins adjacent tokens, so n_gram-1 passes yield n-grams.
    for _ in range(n_gram - 1):
        phrases = models.Phrases(tokenised, threshold=threshold)
        phraser = models.phrases.Phraser(phrases)
        tokenised = phraser[tokenised]
    return list(tokenised)
8897456e9da4cd3c0f1c3f055b43e7d27c7261d8
3,644,172
def bw_estimate(samples):
    """Computes Abraham's bandwidth heuristic.

    Falls back to 1.0 when the estimate degenerates (near-zero spread).
    """
    n = len(samples)
    sigma = np.std(samples)
    bandwidth = (4 * sigma ** 5.0 / (3.0 * n)) ** 0.2
    return 1.0 if bandwidth < 1e-7 else bandwidth
44629f9e774d07f7c55a5a77dcb7b06ae38a964b
3,644,173
def process_coins():
    """calculate the amount of money paid based on the coins entered"""
    # Prompt for each denomination in turn (largest first).
    quarters = int(input("How many quarters? "))
    dimes = int(input("How many dimes? "))
    nickels = int(input("How many nickels? "))
    pennies = int(input("How many pennies? "))
    return quarters * 0.25 + dimes * 0.10 + nickels * 0.05 + pennies * 0.01
6a26ad161720554079a76f6bdadbbf9555d6b82d
3,644,174
def getLastSegyTraceHeader(SH, THN='cdp', data='none', bheadSize=3600, endian='>'):  # added by A Squelch
    """
    getLastSegyTraceHeader(SH,TraceHeaderName)

    Read the value of trace header ``THN`` from the *last* trace described
    by the SEG-Y header dictionary ``SH``.

    :param SH: SEG-Y header dict; "filename", "ntraces" and "ns" are read
    :param THN: trace header name, a key of ``STH_def``
    :param data: raw file contents; the sentinel 'none' triggers a read
        from ``SH["filename"]``
    :param bheadSize: binary header size in bytes (3600 for standard SEG-Y)
    :param endian: byte order passed through to ``getValue``
    :return: the header value of the last trace
    """
    bps = getBytePerSample(SH)

    if data == 'none':
        # Fix: use a context manager so the file handle is closed
        # (the original `open(...).read()` leaked it).
        with open(SH["filename"]) as f:
            data = f.read()

    # SET PARAMETERS THAT DEFINE THE LOCATION OF THE LAST HEADER
    # AND THE TRACE NUMBER KEY FIELD
    THpos = STH_def[THN]["pos"]
    THformat = STH_def[THN]["type"]
    ntraces = SH["ntraces"]

    # Byte offset of the requested field within the last trace's header.
    pos = THpos + bheadSize + (SH["ns"] * bps + 240) * (ntraces - 1)

    txt = "getLastSegyTraceHeader : Reading last trace header " + THN + " " + str(pos)
    printverbose(txt, 20)
    thv, index = getValue(data, pos, THformat, endian, 1)
    txt = "getLastSegyTraceHeader : " + THN + "=" + str(thv)
    printverbose(txt, 30)
    return thv
19de6339bcc3ec63b0e33007f51fa50ddb619449
3,644,175
def get_data_url(data_type):
    """Gets the latest url from the kff's github data repo for the given
    data type

    data_type: string value representing which url to get from the github
        api; must be one of the keys below ('pct_total', 'pct_share',
        'pct_population')
    """
    data_types_to_strings = {
        'pct_total': 'Percent of Total Population that has Received a COVID-19 Vaccine by RaceEthnicity',
        'pct_share': 'COVID19 Vaccinations by RE',
        'pct_population': 'Distribution of Vaccinations, Cases, Deaths',
    }
    tree = gcs_to_bq_util.load_json_as_df_from_web_based_on_key(
        BASE_GITHUB_API_URL, "tree")

    # Keep only entries for this data type, then the lexicographically
    # greatest path (the latest file).
    matches = tree.loc[tree['path'].str.contains(data_types_to_strings[data_type])]
    urls = matches.loc[matches['path'] == matches['path'].max()].url

    if len(urls) != 1:
        raise ValueError("Found %d urls, should have only found 1" % len(urls))
    return urls.values[0]
f92520243ee7f952ff69c7c62c315225982a24fe
3,644,176
def kl(p, q):
    """Kullback-Leibler divergence D(P || Q) for discrete distributions

    Parameters
    ----------
    p, q : array-like, dtype=float, shape=n
        Discrete probability distributions.
    """
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)

    # Terms with p == 0 contribute 0 by convention (lim x->0 of x*log x).
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
06b6283ea83a729f9c374dabbe1c1a94a8ed8480
3,644,177
import torch


def get_loaders(opt):
    """ Make dataloaders for train and validation sets

    :param opt: options namespace; fields read here include norm_value,
        mean_dataset, no_mean_norm, std_norm, mean, std, sample_size,
        batch_size and num_workers
    :return: (train_loader, val_loader) tuple of torch DataLoaders
    """
    # train loader
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
    # opt.std = get_std()
    # Pick the normalization: none, mean-only, or mean+std.
    if opt.no_mean_norm and not opt.std_norm:
        norm_method = transforms.Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = transforms.Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = transforms.Normalize(opt.mean, opt.std)
    spatial_transform = transforms.Compose([
        # crop_method,
        transforms.Scale((opt.sample_size, opt.sample_size)),
        #grayscale
        # transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
        norm_method
    ])
    temporal_transform = None #TemporalRandomCrop(16)
    target_transform = ClassLabel()
    training_data = get_training_set(opt, spatial_transform,
                                     temporal_transform, target_transform)
    train_loader = torch.utils.data.DataLoader(
        training_data,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True)
    # validation loader -- same transforms, but no shuffling
    validation_data = get_validation_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
    val_loader = torch.utils.data.DataLoader(
        validation_data,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
        pin_memory=True)
    return train_loader, val_loader
d7a166a477c535a60846e05598dd19bbe84062be
3,644,178
def trapezoidal(f, a, b, n):
    """Trapezoidal integration via iteration.

    :param f: integrand, a callable of one float argument
    :param a: lower integration limit
    :param b: upper integration limit
    :param n: number of subintervals
    :return: approximation of the integral of f over [a, b]
    """
    h = (b - a) / float(n)
    I = f(a) + f(b)
    # Fix: xrange only exists on Python 2; range is the Python 3 equivalent.
    for k in range(1, n, 1):
        x = a + k * h
        I += 2 * f(x)
    I *= h / 2
    return I
f2887a3b0d1732f322dca52d0d869c1063e08c22
3,644,179
def writetree(tree, sent, key, fmt, comment=None, morphology=None,
        sentid=False):
    """Convert a tree to a string representation in the given treebank format.

    :param tree: should have indices as terminals
    :param sent: contains the words corresponding to the indices in ``tree``
    :param key: an identifier for this tree; part of the output with some
        formats or when ``sentid`` is True.
    :param fmt: Formats are ``bracket``, ``discbracket``, Negra's ``export``
        format, and ``alpino`` XML format, as well unlabeled dependency
        conversion into ``mst`` or ``conll`` format (requires head rules).
        The formats ``tokens`` and ``wordpos`` are to strip away tree
        structure and leave only lines with space-separated tokens or
        ``token/POS``. When using ``bracket``, make sure tree is
        canonicalized.
    :param comment: optionally, a string that will go in the format's comment
        field (supported by ``export`` and ``alpino``), or at the end of the
        line preceded by a tab (``discbracket``); ignored by other formats.
        Should be a single line.
    :param sentid: for line-based formats, prefix output by ``key|``.

    Lemmas, functions, and morphology information will be empty unless nodes
    contain a 'source' attribute with such information."""
    # Dispatch on the output format; each writer returns a ready string.
    if fmt == 'bracket':
        result = writebrackettree(tree, sent)
        # if comment:
        # 	result = '# %s\n%s\n' % (comment, result.rstrip('\n'))
    elif fmt == 'discbracket':
        result = writediscbrackettree(tree, sent)
        if comment:
            # Comment is appended on the same line, tab-separated.
            result = '%s\t%s\n' % (result.rstrip('\n'), comment)
    elif fmt == 'tokens':
        result = '%s\n' % ' '.join(sent)
    elif fmt == 'wordpos':
        # Pair each word with the POS tag of the terminal at that index.
        result = '%s\n' % ' '.join('%s/%s' % (word, pos)
                for word, (_, pos) in zip(sent, sorted(tree.pos())))
    elif fmt == 'export':
        result = writeexporttree(tree, sent, key, comment, morphology)
    elif fmt == 'alpino':
        result = writealpinotree(tree, sent, key, comment)
    elif fmt in ('conll', 'mst'):
        result = writedependencies(tree, sent, fmt)
    else:
        raise ValueError('unrecognized format: %r' % fmt)
    # Only line-based formats can carry the sentence-id prefix.
    if sentid and fmt in ('tokens', 'wordpos', 'bracket', 'discbracket'):
        return '%s|%s' % (key, result)
    return result
cf8181596a4882ae18a8adcd0411e1c4e2ee8a33
3,644,180
import struct


def xor_string(hash1, hash2, hash_size):
    """Encrypt/Decrypt function used for password encryption in
    authentication, using a simple XOR.

    Args:
        hash1 (str): The first hash.
        hash2 (str): The second hash.
        hash_size (int): Number of bytes in each hash.

    Returns:
        str: A string with the xor applied.
    """
    mixed = [byte1 ^ byte2 for byte1, byte2 in zip(hash1, hash2)]
    return struct.pack("%dB" % hash_size, *mixed)
4efc263a0ff9fb05b0ee7cb7b7b3fdd4c8c0c2ec
3,644,181
def create_secret_key(string):
    """
    :param string: A string that will be returned as a md5 hash/hexdigest.
    :return: the hexdigest (hash) of the string.
    """
    # md5() accepts the initial data directly, replacing the update() call.
    digest = md5(string.encode('utf-8'))
    return digest.hexdigest()
eb31e149684074b18fdbc1989ecfc14f21756dea
3,644,182
import base64


def decode_password(base64_string: str) -> str:
    """
    Decode a base64 encoded string.

    Args:
        base64_string: str
            The base64 encoded string.

    Returns:
        str
            The decoded string.
    """
    raw_bytes = base64.b64decode(base64_string.encode("ascii"))
    return raw_bytes.decode("ascii")
0f04617c239fbc740a9b4c9c2d1ae867a52e0c74
3,644,183
def _generate_overpass_api(endpoint=None):
    """ Create and initialise the Overpass API object.

    Passing the endpoint argument will override the default endpoint URL. """
    api = overpass.API()
    if endpoint is None:
        return api
    # Override the default endpoint with the caller-supplied URL.
    api.endpoint = endpoint
    return api
9b8016035e87428286f68622e9a6129bcf818c4a
3,644,184
def to_pascal_case(value):
    """
    Converts the value string to PascalCase.

    :param value: The value that needs to be converted.
    :type value: str
    :return: The value in PascalCase.
    :rtype: str
    """
    # Title-case each word, then strip all whitespace between them.
    titled = value.title()
    return "".join(titled.split())
138ab9ddf7ca814b50bf8ff0618de03b236535c7
3,644,185
from typing import Any, Iterable, List


def drop(n: int, it: Iterable[Any]) -> List[Any]:
    """
    Return a list of N elements drop from the iterable object

    Args:
        n: Number to drop from the top
        it: Iterable object

    Examples:
        >>> fpsm.drop(3, [1, 2, 3, 4, 5])
        [4, 5]
    """
    materialised = list(it)
    return materialised[n:]
0732bd560f0da0a43f65ee3b5ed46fd3a05e26f5
3,644,186
def generate_classification_style_dataset(classification='multiclass'):
    """ Dummy data to test models

    :param classification: 'multiclass' (one-hot labels) or 'binary'
        (+1/-1 labels)
    :return: (x_data, y_data) numpy arrays
    :raises Exception: for any other classification mode
    """
    x_data = np.array([
        [1, 1, 1, 0, 0, 0],
        [1, 0, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0],
        [0, 0, 1, 1, 1, 0],
        [0, 0, 1, 1, 0, 0],
        [0, 0, 1, 1, 1, 0],
    ])
    label_sets = {
        'multiclass': [
            [1, 0, 0],
            [1, 0, 0],
            [0, 0, 1],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 0],
        ],
        'binary': [
            [1],
            [1],
            [1],
            [-1],
            [-1],
            [-1],
        ],
    }
    if classification not in label_sets:
        raise Exception("Only binary or multiclass classification supported")
    y_data = np.array(label_sets[classification])
    print("Returning classification style dataset")
    return x_data, y_data
77a65bb3445216a9a21aa30a7c7201983328efce
3,644,187
def getSupportedDatatypes():
    """Gets the datatypes that are supported by the framework

    Returns:
        a list of strings of supported datatypes
    """
    # Thin delegation: ``router`` is a module-level object defined elsewhere
    # in this project; this function only forwards the call.
    return router.getSupportedDatatypes()
635612975c271bdbe22b622787a2d7f823277baa
3,644,189
def run_stacking(named_data, subjects_data, cv=10, alphas=None,
                 train_sizes=None, n_jobs=None):
    """Run stacking.

    Parameters
    ----------
    named_data : list(tuple(str, pandas.DataFrame))
        List of tuples (name, data) with name and corresponding features to
        be used for predictions by linear models.
    subjects_data : pandas.DataFrame
        Information about subjects from CamCAN dataset.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
    alphas : numpy.ndarray
        Values for parameter alpha to be tested. Default is
        np.logspace(-3, 5, 100).
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples used for the
        learning curve. (default: np.linspace(0.1, 1.0, 5))
    n_jobs : int or None, optional (default=None)
        The number of CPUs to use to do the computation.

    Returns
    -------
    tuple
        ``(df_pred, mae, r2, train_sizes, train_scores, test_scores)`` —
        per-subject out-of-fold predictions plus CV scores and
        learning-curve data.
    """
    if alphas is None:
        alphas = np.logspace(-3, 5, 100)
    if train_sizes is None:
        train_sizes = np.linspace(.1, 1.0, 5)

    rnd_state = 42
    names = []
    combined_data = []
    # extract data and estimator names
    for name, data in named_data:
        names.append(name)
        combined_data.append(data)
    # inner join keeps only subjects present in every feature block
    data = pd.concat(combined_data, axis=1, join='inner')
    feature_col_lens = tuple(d.shape[1] for d in combined_data)
    estimators = []
    subjects = data.index.values
    # prepare first-level estimators for stacking: each estimator sees only
    # its own feature block via a pass/drop ColumnTransformer over slices
    for i_data, _ in enumerate(named_data):
        feature_transformers = []
        ft_begin = 0
        ft_end = 0
        # prepare input information for ColumnTransformer
        for i_ct, (name, col_len) in enumerate(zip(names, feature_col_lens)):
            trans_name = ('pass_' if i_data == i_ct else 'drop_') + name
            transformer = 'passthrough' if i_data == i_ct else 'drop'
            ft_end = ft_end + col_len
            trans_slice = slice(ft_begin, ft_end)
            ft_begin = ft_begin + col_len
            feature_transformers.append((trans_name, transformer,
                                         trans_slice))
        est_name = 'reg_' + named_data[i_data][0]
        est_pipeline = make_pipeline(
            ColumnTransformer(feature_transformers),
            StandardScaler(),
            RidgeCV(alphas))
        estimators.append((est_name, est_pipeline))
    final_estimator = RandomForestRegressor(n_estimators=100,
                                            random_state=rnd_state,
                                            oob_score=True)
    reg = StackingRegressor(estimators=estimators,
                            final_estimator=final_estimator,
                            cv=cv,
                            random_state=rnd_state,
                            n_jobs=n_jobs)
    y = subjects_data.loc[subjects].age.values
    X = data.values
    cv = check_cv(cv)
    mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error',
                          cv=cv, n_jobs=n_jobs)
    r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs)
    y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs)
    train_sizes, train_scores, test_scores = \
        learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes,
                       scoring='neg_mean_absolute_error', n_jobs=n_jobs)
    # _get_fold_indices is a project helper defined elsewhere in this module
    fold = _get_fold_indices(cv, X, y)
    df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects,
                           dtype=float)
    return df_pred, mae, r2, train_sizes, train_scores, test_scores
75b97509097652fdccc444cfd3731ce68b49e992
3,644,190
def add_random_shadow(img, w_low=0.6, w_high=0.85):
    """
    Overlays supplied image with a random shadow poligon

    The weight range (i.e. darkness) of the shadow can be configured via the
    interval [w_low, w_high)
    """
    # NOTE(review): img.shape[0] is the row count for numpy/OpenCV images, so
    # the names 'cols'/'rows' appear swapped here; the math below is
    # self-consistent with these names, but confirm intent.
    cols, rows = (img.shape[0], img.shape[1])
    # Random top/bottom anchor points for the shadow polygon
    top_y = np.random.random_sample() * rows
    bottom_y = np.random.random_sample() * rows
    bottom_y_right = bottom_y + np.random.random_sample() * (rows - bottom_y)
    top_y_right = top_y + np.random.random_sample() * (rows - top_y)
    # Half the time, slant the polygon the other way
    if np.random.random_sample() <= 0.5:
        bottom_y_right = bottom_y - np.random.random_sample() * (bottom_y)
        top_y_right = top_y - np.random.random_sample() * (top_y)
    poly = np.asarray([[[top_y, 0], [bottom_y, cols],
                        [bottom_y_right, cols], [top_y_right, 0]]],
                      dtype=np.int32)
    # Shadow strength is drawn from [w_low, w_high)
    mask_weight = np.random.uniform(w_low, w_high)
    origin_weight = 1 - mask_weight
    mask = np.copy(img).astype(np.int32)
    # Black out the polygon region in the mask copy
    cv2.fillPoly(mask, poly, (0, 0, 0))
    # masked_image = cv2.bitwise_and(img, mask)
    # Blend original and masked images, then restore uint8 range
    return cv2.addWeighted(img.astype(np.int32), origin_weight, mask,
                           mask_weight, 0).astype(np.uint8)
3b520312941ffc4b125ce0a777aeb76fecd6b263
3,644,191
def csv_args(value):
    """Parse a CSV string into a Python list of strings.

    Used in command line parsing.

    Args:
        value: Raw comma-separated string, e.g. ``"a,b,c"``.

    Returns:
        list[str]: The individual fields.
    """
    # BUGFIX: on Python 3 ``map`` returns a lazy iterator, not the list the
    # docstring promises; materialise it so callers can index/re-iterate.
    return list(map(str, value.split(",")))
b2596180054f835bfe70e3f900caa5b56a7856a6
3,644,192
def get_tokens():
    """
    Returns a tuple of tokens in the format {{site/property}} that will be used to build
    the dictionary passed into execute
    """
    # HAWQMASTER_PORT and HAWQSTANDBY_ADDRESS are module-level constants
    # defined elsewhere in this package.
    return (HAWQMASTER_PORT, HAWQSTANDBY_ADDRESS)
4664feb568a3a5599b9da64594d09a034e9aaebb
3,644,193
def projl1_epigraph(center):
    """
    Project center=proxq.true_center onto the l1 epigraph. The bound term is
    center[0], the coef term is center[1:]

    The l1 epigraph is the collection of points $(u,v): \|v\|_1 \leq u$
    np.fabs(coef).sum() <= bound.

    Note: ``st`` (called at the end) is a soft-thresholding helper defined
    elsewhere in this module — presumably sign(x) * max(|x| - t, 0); verify.
    """
    norm = center[0]
    coef = center[1:]
    # Work with the absolute coefficients in ascending order
    sorted_coefs = np.sort(np.fabs(coef))
    n = sorted_coefs.shape[0]
    csum = sorted_coefs.sum()
    for i, c in enumerate(sorted_coefs):
        csum -= c
        if csum - (n - i - 1) * c <= norm + c:
            # this will terminate as long as norm >= 0
            # when it terminates, we know that the solution is between
            # sorted_coefs[i-1] and sorted_coefs[i]
            # we set the cumulative sum back to the value at i-1
            csum += c
            idx = i-1
            break
        if i == n-1:
            # if it hasn't terminated early, then even soft-thresholding at
            # the largest value was insufficent, answer is 0
            return np.zeros_like(center)
    # the solution is such that csum - (n-idx-1)*x = norm+x
    thold = (csum - norm) / (n-idx)
    result = np.zeros_like(center)
    result[0] = norm + thold
    result[1:] = st(coef, thold)
    return result
d7b8c70f45853eef61322fdb9583c8279780982f
3,644,194
import requests
from datetime import datetime


def crypto_command(text):
    """
    <ticker> -- Returns current value of a cryptocurrency

    Fetches the ticker from the API (API_URL and quote_plus are provided
    elsewhere in this module) and formats a one-line summary string with
    IRC-style colour/format control codes (\\x03.., \\x0f).
    """
    try:
        encoded = quote_plus(text)
        request = requests.get(API_URL.format(encoded))
        request.raise_for_status()
    except (requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        return "Could not get value: {}".format(e)

    data = request.json()

    if "error" in data:
        return "{}.".format(data['error'])

    updated_time = datetime.fromtimestamp(data['timestamp'])
    if (datetime.today() - updated_time).days > 2:
        # the API retains data for old ticker names that are no longer updated
        # in these cases we just return a "not found" message
        return "Currency not found."

    change = float(data['change'])
    if change > 0:
        # green for a positive change
        change_str = "\x033{}%\x0f".format(change)
    elif change < 0:
        # red for a negative change
        change_str = "\x035{}%\x0f".format(change)
    else:
        change_str = "{}%".format(change)

    return "{} // \x0307${:,.2f}\x0f USD - {:,.7f} BTC // {} change".format(
        data['symbol'].upper(), float(data['price']['usd']),
        float(data['price']['btc']), change_str)
0b0757a8b657791204d74b8536be3b6cb5af2ff5
3,644,195
import torch


def byol_loss_multi_views_func(p: torch.Tensor, z: torch.Tensor,
                               p1: torch.Tensor, z1: torch.Tensor,
                               simplified: bool = True) -> torch.Tensor:
    """Compute BYOL's loss for two view pairs.

    Args:
        p, p1 (torch.Tensor): NxD predicted features (online network).
        z, z1 (torch.Tensor): NxD projected momentum features (detached
            before use so no gradient flows through the momentum branch).
        simplified (bool): use the cosine-similarity shortcut; identical
            result, less work. Defaults to True.

    Returns:
        torch.Tensor: the scalar BYOL loss.
    """
    if simplified:
        similarity = F.cosine_similarity(p, z.detach(), dim=-1).mean()
        similarity = similarity + F.cosine_similarity(p1, z1.detach(),
                                                      dim=-1).mean()
        return 2 - 2 * similarity

    # Explicit form: normalise, then take mean row-wise dot products.
    p, z, p1, z1 = (F.normalize(t, dim=-1) for t in (p, z, p1, z1))
    dots = (p * z.detach()).sum(dim=1).mean() \
        + (p1 * z1.detach()).sum(dim=1).mean()
    return 2 - 2 * dots
705cbe9e62fa1e58da0a1f4087e6090d7b8002b8
3,644,196
def a_test_model(n_classes=2):
    """
    Recover model and test data from disk, evaluate the model and compute
    classification metrics.

    Args:
        n_classes: kept for interface compatibility (not used in the body).

    Returns:
        Tuple ``(pred_ground_truth, accuracy, precision, recall, f1_score,
        cm, fpr, tpr, thresholds, roc_auc)``.
    """
    images_test, labels_test, data_num_test = load_test_data_full()
    model = load_model(BASE_PATH + 'models/Inception_hemorrhage_model.hdf5')
    adam_optimizer = keras.optimizers.Adam(
        lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0,
        amsgrad=False)
    model.compile(optimizer=adam_optimizer, loss='binary_crossentropy',
                  metrics=['accuracy'])
    # score the test data
    test_data_generator = generate_testing_from_hdf5(TEST_INDICES,
                                                     batch_size=BATCH_SIZE)
    scores = model.evaluate_generator(test_data_generator,
                                      steps=N_STEPS_PER_EPOCH_TEST)
    # refresh the data generator and generate predictions
    # BUGFIX: this call previously used the undefined lowercase name
    # ``batch_size`` (NameError at runtime); use the module constant.
    test_data_generator = generate_testing_from_hdf5(TEST_INDICES,
                                                     batch_size=BATCH_SIZE)
    predictions = model.predict_generator(test_data_generator,
                                          steps=N_STEPS_PER_EPOCH_TEST)
    classes = np.argmax(predictions, axis=1)
    pred_ground_truth = np.column_stack((predictions, classes, labels_test))
    pred_ground_truth = pd.DataFrame(
        pred_ground_truth,
        columns=[
            'Proba Neg', 'Proba Pos', 'Class Proba', 'Neg Label',
            'Pos Label'])
    # Compute ROC curve and ROC area for each class
    fpr, tpr, thresholds = roc_curve(
        pred_ground_truth['Class Proba'], pred_ground_truth['Pos Label'])
    roc_auc = auc(fpr, tpr)
    accuracy, precision, recall, f1_score, cm = \
        vol_inception_utils.calc_metrics(
            pred_ground_truth['Pos Label'], pred_ground_truth['Class Proba'])
    np.savetxt(BASE_PATH + 'results/confusion_matrix.csv', (cm),
               delimiter=',')
    return pred_ground_truth, accuracy, precision, recall, f1_score, cm, \
        fpr, tpr, thresholds, roc_auc
d060f79a149d7659d74ffac316f71d7ef7b63368
3,644,197
def generate_synchronous_trajectory(initial_state):
    """
    Simulate the network starting from a given initial state in the
    synchronous strategy

    :param initial_state: initial state of the network
    :return: a trajectory in matrix from, where each row denotes a state
    """
    # ``update`` and ``state_to_index`` are project helpers defined elsewhere
    # in this module (synchronous state update and state hashing).
    trajectory = [initial_state]
    state_index_set = {state_to_index(initial_state)}
    # if a state reoccurs, an attractor or fixed point is
    # reached, stop.
    s = initial_state
    while True:
        new_s = update(s)  # synchronous
        new_s_index = state_to_index(new_s)
        if new_s_index in state_index_set:
            # revisiting a known state means we entered a cycle/fixed point
            break
        trajectory.append(new_s)
        state_index_set.add(new_s_index)
        s = new_s
    return np.array(trajectory)
85f452f7665028e29085296820f67cf2e5cdb8bf
3,644,198
import inspect
from textwrap import dedent
import ast


def arg_names(level=2):
    """Try to determine names of the variables given as arguments to the
    caller of the caller.

    This works only for trivial function invocations. Otherwise either
    results may be corrupted or exception will be raised.

    level: 0 is current frame, 1 is the caller, 2 is caller of the caller

    Note: relies on ``always_assert`` and ``astunparse`` being available from
    elsewhere in this project/module.
    """
    try:
        # Grab the source line(s) of the call site at the requested level.
        caller_frame_info = inspect.stack()[level]
        caller_context = caller_frame_info.code_context
        code = dedent(''.join(caller_context))
        # Parse the call expression; only a plain Call node is supported.
        tree = ast.parse(code, '', 'eval')
        always_assert(isinstance(tree.body, ast.Call))
        args = tree.body.args
        # Unparse each positional argument back to its source text.
        names = [astunparse.unparse(arg).strip() for arg in args]
        return names
    except Exception as ex:
        # Deliberately suppress the original traceback (from None).
        raise Exception('Cannot determine arg names') from None
ce5b26747404442bfd017827435e9515c60aace0
3,644,199
import jinja2


def render_template(path, ctx):
    """Render a Jinja2 template

    :param path: path-like object with an ``open()`` method (e.g. pathlib.Path)
        pointing at the template file.
    :param ctx: mapping of template variables, expanded as keyword args.
    :return: the rendered output passed through ``html_minify`` (a helper
        defined elsewhere in this module).
    """
    with path.open() as f:
        content = f.read()
    tmpl = jinja2.Template(content)
    return html_minify(tmpl.render(**ctx))
0eb4b2a73a645283998260cdadbab37da32d6784
3,644,200
def reverse(sequence):
    """Return the reverse of any sequence.

    Works for strings, lists, tuples — any type supporting extended
    slicing; the result has the same type as the input.
    """
    backwards = slice(None, None, -1)
    return sequence[backwards]
f08ae428844347e52d8dbf1cd8ad07cfbf4ef597
3,644,202
def createOutputBuffer(file, encoding):
    """Create a libxml2 output buffer from a Python file

    :param file: Python file object to write to.
    :param encoding: output character encoding.
    :return: an ``outputBuffer`` wrapper around the raw C-level buffer.
    :raises treeError: if the underlying C call returns NULL.
    """
    ret = libxml2mod.xmlCreateOutputBuffer(file, encoding)
    # The C binding signals failure by returning None.
    if ret is None:
        raise treeError('xmlCreateOutputBuffer() failed')
    return outputBuffer(_obj=ret)
28ece9b710362d710ff6df25f426d91a0b318ebf
3,644,203
def wait_for_proof(node, proofid_hex, timeout=60, expect_orphan=None):
    """
    Wait for the proof to be known by the node. If expect_orphan is set, the
    proof should match the orphan state, otherwise it's a don't care
    parameter.

    :param node: RPC node handle exposing ``getrawavalancheproof``.
    :param proofid_hex: proof id as a hex string.
    :param timeout: seconds to keep polling before giving up.
    :param expect_orphan: optional expected value of the proof's "orphan"
        flag, asserted once the proof appears.
    """
    def proof_found():
        try:
            # Stash the orphan flag on the function object so it survives
            # the closure and can be checked after wait_until_helper returns.
            wait_for_proof.is_orphan = node.getrawavalancheproof(proofid_hex)[
                "orphan"]
            return True
        except JSONRPCException:
            # Proof not known yet; keep polling.
            return False
    wait_until_helper(proof_found, timeout=timeout)

    if expect_orphan is not None:
        assert_equal(expect_orphan, wait_for_proof.is_orphan)
f8f390424fe084bf8bf62bf1d16ac780d5c5df69
3,644,205
def check(verbose=1):
    """
    Runs a couple of functions to check the module is working.

    :param verbose: 0 to hide the standout output
    :return: list of dictionaries, result of each test
    """
    # No self-tests are registered yet; report an empty result set.
    results = []
    return results
4ecf144fc64a165b5b0f9766b76eb6b703eba130
3,644,206
def cylinder_sideways():
    """
    sideways cylinder for poster

    Builds a 3-D matplotlib figure of a dendritic-spine geometry: the spine
    surface (radius profile from the ``lubrication`` model's ``pi1``), a
    spherical cap at the closed end, a unit-sphere "vesicle", coordinate-axis
    arrows and text labels. Returns the matplotlib Figure.

    Relies on project-level names defined elsewhere: ``call_separator``,
    ``lubrication`` and ``Arrow3D``.
    """
    call_separator('cylinder sidweays')
    T1 = .1
    #gs = gridspec.GridSpec(nrows=2,ncols=3,wspace=-.1,hspace=.5)
    fig = plt.figure(figsize=(5,4))
    ax11 = fig.add_subplot(111,projection='3d')
    #ax12 = fig.add_subplot(gs[0,2])
    #ax22 = fig.add_subplot(gs[1,2])

    # Model object supplying the radius profile pi1(z) and base geometry.
    a = lubrication(phi1=.57,Rp=0.96,Rc=1.22,base_radius=1.22,
                    pi3=1,pi4=4.7,pi5=0.1,pi6=10,
                    mu=1.2,T=T1,constriction='piecewise',U0=0.2,
                    dt=0.02,eps=1,
                    F0=50,method='euler')
    a.Z0 = -5/a.Rp

    z = np.linspace(-7,7,100) # dimensional
    r = a.pi1(z)
    th = np.linspace(0,2*np.pi,100)
    radius_al = 0.25

    # draw arrow going into spine
    ar1 = Arrow3D([-5,-1.5],[0,0],[0,0], mutation_scale=10, lw=2,
                  arrowstyle="-|>", color="k")
    ax11.add_artist(ar1)

    # A
    # draw spine: surface of revolution of pi1(z) about the z axis
    Z,TH = np.meshgrid(z,th)
    #Z,TH = np.mgrid[-7:7:.1, 0:2*np.pi:.1]
    X = np.zeros_like(Z)
    Y = np.zeros_like(Z)
    #print(np.shape(Z))
    for i in range(len(Z[:,0])):
        X[i,:] = a.pi1(Z[i,:])*np.cos(TH[i,:])
        Y[i,:] = a.pi1(Z[i,:])*np.sin(TH[i,:])
    ax11.plot_surface(Z,Y,X,alpha=.25)

    # small coordinate-axis arrows with z/y/x labels
    shifts = np.array([-6,0,-4])
    names = ['z','y','x']
    size = 2
    for i in range(3):
        coords = np.zeros((3,2))
        coords[:,0] += shifts
        coords[:,1] += shifts
        coords[i][1] += size
        arx = Arrow3D(*list(coords), mutation_scale=5, lw=2,
                      arrowstyle="-|>", color="k")
        ax11.text(*list(coords[:,1]),names[i],horizontalalignment='center')
        ax11.add_artist(arx)

    # draw sphere for cap
    b = a.base_radius
    r = np.sqrt(b**2+7**2)
    th2 = np.linspace(0,np.arctan(b/7),100)
    phi = np.linspace(0,2*np.pi,100)
    TH2,PHI = np.meshgrid(th2,phi)
    X = r*np.sin(TH2)*np.cos(PHI)
    Y = r*np.sin(TH2)*np.sin(PHI)
    Z = r*np.cos(TH2)
    ax11.plot_surface(Z,Y,X,color='tab:blue',alpha=.5)

    # draw sphere vesicle (unit sphere at the origin)
    u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
    X = np.cos(u)*np.sin(v)
    Y = np.sin(u)*np.sin(v)
    Z = np.cos(v)
    ax11.plot_surface(Z,Y,X,color='gray',alpha=.5)

    # label spine head and base (LaTeX strings rendered by matplotlib)
    ax11.text(7,0,-2,r'\setlength{\parindent}{0pt}Spine Head\\(Closed End)')
    ax11.text(-4,0,3,r'\setlength{\parindent}{0pt}Spine Base\\(Open End)')

    # set equal aspect ratios
    #ax11.set_aspect('auto') # only auto allowed??
    ax11.set_box_aspect((np.ptp(X), np.ptp(Y), np.ptp(Z)))
    ax11.set_axis_off()

    lo = -4.4
    hi = 4.4
    dx = -.5
    ax11.set_xlim(lo-dx,hi+dx)
    ax11.set_ylim(lo-dx,hi+dx)
    ax11.set_zlim(lo,hi)
    ax11.view_init(20,65)

    return fig
98c0ed70c11ffe619d28623a5c5f4c4e2be40889
3,644,207
def get_generic_or_msg(intent, result):
    """
    The master method. This method takes in the intent and the result dict
    structure and calls the proper interface method.

    ``Msg_Fn_Dict`` is a module-level mapping from intent name to handler
    function, defined elsewhere in this module; an unknown intent raises
    KeyError.
    """
    return Msg_Fn_Dict[intent](result)
00853e2e74892a6d01ba1c6986e72f6436c88a92
3,644,208
def s3_example_tile(gtiff_s3):
    """Example tile for fixture.

    Returns the fixed (zoom, row, col) tile index used by the S3 tests;
    the ``gtiff_s3`` fixture argument only enforces ordering.
    """
    zoom, row, col = 5, 15, 32
    return (zoom, row, col)
a4b7e35fc6f7bf51a551ac8cb18003c23ff35a01
3,644,209
def execute_list_of_commands(command_list):
    """
    INPUT:

    - ``command_list`` -- a list of strings or pairs

    OUTPUT:

    For each entry in command_list, we attempt to run the command.
    If it is a string, we call ``os.system()``. If it is a pair [f, v],
    we call f(v).

    If the environment variable :envvar:`SAGE_NUM_THREADS` is set, use that
    many threads.

    .. NOTE:: This is Python 2 code (print statements); ``run_command`` and
       ``execute_list_of_commands_in_parallel`` are defined elsewhere in
       this build script.
    """
    t = time.time()
    # Determine the number of threads from the environment variable
    # SAGE_NUM_THREADS, which is set automatically by sage-env
    try:
        nthreads = int(os.environ['SAGE_NUM_THREADS'])
    except KeyError:
        nthreads = 1

    # normalize the command_list to handle strings correctly
    command_list = [ [run_command, x] if isinstance(x, str) else x for x in command_list ]

    # No need for more threads than there are commands, but at least one
    nthreads = min(len(command_list), nthreads)
    nthreads = max(1, nthreads)

    # Small helper for readable "1 command" / "2 commands" messages.
    def plural(n,noun):
        if n == 1:
            return "1 %s"%noun
        return "%i %ss"%(n,noun)

    print "Executing %s (using %s)"%(plural(len(command_list),"command"), plural(nthreads,"thread"))
    execute_list_of_commands_in_parallel(command_list, nthreads)
    print "Time to execute %s: %s seconds"%(plural(len(command_list),"command"), time.time() - t)
79247f8dc15cc790b6f1811e3cb79de47c514bc4
3,644,210
import requests


def get_transceiver_diagnostics(baseurl, cookie_header, transceiver):
    """
    Get the diagnostics of a given transceivers in the switch

    :param baseurl: imported baseurl variable
    :param cookie_header: Parse cookie resulting from successful
        loginOS.login_os(baseurl)
    :param transceiver: data parsed to specify a transceiver in switch
    :return: transceiver's diagnostics information in json format, or
        implicitly ``None`` for any non-200 response.
    """
    url = baseurl + 'transceivers/' + transceiver + '/diagnostics'
    headers = {'cookie': cookie_header}
    # verify=False: TLS certificate checking is deliberately disabled here
    # (switch management interfaces typically use self-signed certs).
    response = requests.get(url, verify=False, headers=headers)
    if response.status_code == 200:
        return response.json()
    # NOTE(review): falls through to return None on error statuses — callers
    # must handle None.
c2863b54b03ae3bdcf779fbd18a50e2bcdb2edd7
3,644,211
def mask_valid_boxes(boxes, return_mask=False):
    """Filter boxes down to those with plausible geometry.

    :param boxes: array of (cx, cy, w, h, *_) rows
    :param return_mask: if True, return the boolean mask instead of the
        filtered boxes
    :return: boolean mask or the subset of valid boxes

    A box is valid when both sides exceed 2 and its aspect ratio
    (long side / short side) is below 30.
    """
    widths = boxes[:, 2]
    heights = boxes[:, 3]
    # epsilon guards against division by zero for degenerate boxes
    aspect = np.maximum(widths / (heights + 1e-16),
                        heights / (widths + 1e-16))
    valid = (widths > 2) & (heights > 2) & (aspect < 30)
    if return_mask:
        return valid
    return boxes[valid]
3a3c00f934dabce78ee8a28f0ece2105d79f9f3f
3,644,213
import tokenize


def import_buffer_to_hst(buf):
    """Import content from buf and return an Hy AST.

    A trailing newline is appended so the lexer always sees a terminated
    final form.

    NOTE(review): the visible ``import tokenize`` binds the *stdlib module*,
    which is not callable; presumably Hy's ``tokenize`` lexer function is
    (re)bound elsewhere in this module — verify which name is actually in
    scope at call time.
    """
    return tokenize(buf + "\n")
4571bac8987911bf9b9a277590be6204be6120ab
3,644,214
def preprocess_input(x, **kwargs):
    """Preprocesses a numpy array encoding a batch of images.

    # Arguments
        x: a 4D numpy array consists of RGB values within [0, 255].
        **kwargs: forwarded to ``imagenet_utils.preprocess_input``.

    # Returns
        Preprocessed array.
    """
    # mode='tf' scales pixels to the [-1, 1] range per the Keras
    # imagenet_utils contract; imagenet_utils is imported elsewhere.
    return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
ca81dff57f51184042899849dff6623d32e475c0
3,644,217
def build_gauss_kernel(sigma_x, sigma_y, angle):
    """
    Build the rotated anisotropic gaussian filter kernel

    Parameters
    ----------
    sigma_x : numpy.float64
        sigma in x-direction
    sigma_y: numpy.float64
        sigma in y-direction
    angle: int
        angle in degrees of the needle holder measuered with respect to
        'vertical' transducer axis

    Returns
    -------
    kernel: numpy.ndarray
        roteted filter kernel

    Notes
    -----
    ``ascon`` (presumably astropy.convolution) and ``ndimage`` are imported
    elsewhere in this module.
    """
    # Convert to the rotation convention used below (measured from vertical).
    angle = np.pi/2-np.deg2rad(angle)

    # Calculate gaussian kernel
    kernel = ascon.Gaussian2DKernel(sigma_x, sigma_y, 0)

    # Extract size and kernel values
    x_size = kernel.shape[0]; y_size = kernel.shape[1]
    kernel = kernel.array

    # Rotate
    kernel = ndimage.rotate(kernel,np.rad2deg(-angle), reshape=False)

    # Parameters for cropping: keep entries above 5% of the peak magnitude
    max_in_kernel = np.amax(abs(kernel))
    threshold = 0.05*max_in_kernel

    # Crop the kernel to reduce its size: find the first significant row ...
    x_start = 0;
    for i in range(0, x_size, 1):
        if abs(max(kernel[i,:])) > threshold:
            x_start = i
            break
    x_end = (x_size-1)-x_start
    # ... and the first significant column, then crop symmetrically.
    y_start = 0;
    for i in range(0, y_size, 1):
        if abs(max(kernel[:,i])) > threshold:
            y_start = i
            break
    y_end = (y_size-1)-y_start
    kernel = kernel[x_start:x_end, y_start:y_end]
    return kernel
14dd4143ad94bcdfa3298b4acf9b2d4c2bd0b7e6
3,644,218
def kwargs_to_flags(**kwargs):
    """Convert `kwargs` to flags to pass on to CLI.

    Booleans become bare switches (``--name``) when true and are omitted
    when false; everything else becomes ``--name=value``.
    """
    parts = []
    for key, val in kwargs.items():
        if not isinstance(val, bool):
            parts.append(f"--{key}={val}")
        elif val:
            parts.append(f"--{key}")
    return " ".join(parts)
aa672fe26c81e7aaf8a6e7c38354d1649495b8df
3,644,219
def extractBananas(item):
    """
    Parser for 'Bananas'

    Maps a scraped release item (dict with 'title' and 'tags') to a release
    message via tag and title-prefix lookup tables. Returns None when the
    item is filtered out or has no volume/chapter info, False when no table
    entry matches. ``extractVolChapterFragmentPostfix`` and
    ``buildReleaseMessageWithType`` are project helpers defined elsewhere.
    """
    # Items carrying any of these tags are deliberately skipped.
    badwords = [
        'iya na kao manga chapters',
    ]
    if any([bad in item['tags'] for bad in badwords]):
        return None

    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    # (tag, canonical series name, translation type) lookup table.
    tagmap = [
        ('isekai joushu chapters', 'Struggling Hard As The Lord Of A Castle In A Different World', 'translated'),
        ('dungeon harem wn chapters', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'),
        ('erufu seidorei wn', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    # Fallback: match by case-insensitive title prefix instead of tags.
    chp_prefixes = [
        ('AARASL', 'An A-ranked Adventurer’s “Slow-living”', 'translated'),
        ('Isekai Taneuma', 'Isekai Taneuma', 'translated'),
        ('Gang of Yuusha', 'Gang of Yusha', 'translated'),
        ('Gang of Yusha', 'Gang of Yusha', 'translated'),
        ('The Revenge of the Soul Eater', 'Soul Eater of the Rebellion', 'translated'),
        ('Soul Eater of the Rebellion', 'Soul Eater of the Rebellion', 'translated'),
        ('Sparta Teikoku ', 'Sparta Teikoku Kenkoku Senki ', 'translated'),
    ]
    for prefix, series, tl_type in chp_prefixes:
        if item['title'].lower().startswith(prefix.lower()):
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
f06167a0d379ec3b1921bb7ad8146b0bca9fd8aa
3,644,220
def get_template_parameters_s3(template_key, s3_resource):
    """
    Checks for existance of parameters object in S3 against supported
    suffixes and returns parameters file key if found

    Args:
        template_key: S3 key for template file. omit bucket.
        s3_resource: a boto3 s3 resource

    Returns:
        filename of parameters file if it exists, otherwise None
    """
    # Try each supported suffix in order; the first object that actually
    # exists (obj.get() succeeds) wins.
    for suffix in EFConfig.PARAMETER_FILE_SUFFIXES:
        parameters_key = template_key.replace("/templates", "/parameters") + suffix
        try:
            obj = s3_resource.Object(EFConfig.S3_CONFIG_BUCKET, parameters_key)
            # get() raises ClientError when the key does not exist
            obj.get()
            return parameters_key
        except ClientError:
            continue
    return None
3b68dc9c1fa8636bd0d066780aab43a6e55ecf2f
3,644,222
def cell_from_system(sdict):
    """
    Function to obtain cell from namelist SYSTEM read from PW input.

    Args:
        sdict (dict): Dictinary generated from namelist SYSTEM of PW input.

    Returns:
        ndarray with shape (3,3): Cell is 3x3 matrix with entries::

            [[a_x b_x c_x]
             [a_y b_y c_y]
             [a_z b_z c_z]],

        where a, b, c are crystallographic vectors, and x, y, z are their
        coordinates in the cartesian reference frame.

    Notes:
        The branch table follows the Quantum ESPRESSO ``ibrav`` lattice
        conventions; ``celldms_from_abc`` is a project helper defined
        elsewhere. Returns None for ibrav == 0 (cell given explicitly) or
        when no cell dimensions can be recovered.
    """
    ibrav = sdict.get('ibrav', None)
    if ibrav == 0:
        return None
    params = ['a', 'b', 'c', 'cosab', 'cosac', 'cosbc']
    celldm = [sdict.get(f'celldm({i + 1})', 0) for i in range(6)]
    # Fall back to a/b/c + cosines when celldm(i) entries are absent.
    if not any(celldm):
        abc = [sdict.get(a, 0) for a in params]
        celldm = celldms_from_abc(ibrav, abc)
    if not any(celldm):
        return None
    if ibrav == 1:
        cell = np.eye(3) * celldm[0]
        return cell
    elif ibrav == 2:
        v1 = celldm[0] / 2 * np.array([-1, 0, 1])
        v2 = celldm[0] / 2 * np.array([0, 1, 1])
        v3 = celldm[0] / 2 * np.array([-1, 1, 0])
    elif ibrav == 3:
        v1 = celldm[0] / 2 * np.array([1, 1, 1])
        v2 = celldm[0] / 2 * np.array([-1, 1, 1])
        v3 = celldm[0] / 2 * np.array([-1, -1, 1])
    elif ibrav == -3:
        v1 = celldm[0] / 2 * np.array([-1, 1, 1])
        v2 = celldm[0] / 2 * np.array([1, -1, 1])
        v3 = celldm[0] / 2 * np.array([1, 1, -1])
    elif ibrav == 4:
        v1 = celldm[0] * np.array([1, 0, 0])
        v2 = celldm[0] * np.array([-1 / 2, np.sqrt(3) / 2, 0])
        v3 = celldm[0] * np.array([0, 0, celldm[2]])
    elif ibrav == 5:
        term_1 = np.sqrt(1 + 2 * celldm[3])
        term_2 = np.sqrt(1 - celldm[3])
        v1 = celldm[0] * np.array([term_2 / np.sqrt(2),
                                   -term_2 / np.sqrt(6),
                                   term_1 / np.sqrt(3)])
        v2 = celldm[0] * np.array([0, term_2 * np.sqrt(2 / 3),
                                   term_1 / np.sqrt(3)])
        v3 = celldm[0] * np.array([-term_2 / np.sqrt(2),
                                   -term_2 / np.sqrt(6),
                                   term_1 / np.sqrt(3)])
    elif ibrav == -5:
        term_1 = np.sqrt(1 + 2 * celldm[3])
        term_2 = np.sqrt(1 - celldm[3])
        v1 = celldm[0] * np.array([(term_1 - 2 * term_2) / 3,
                                   (term_1 + term_2) / 3,
                                   (term_1 + term_2) / 3])
        v2 = celldm[0] * np.array([(term_1 + term_2) / 3,
                                   (term_1 - 2 * term_2) / 3,
                                   (term_1 + term_2) / 3])
        v3 = celldm[0] * np.array([(term_1 + term_2) / 3,
                                   (term_1 + term_2) / 3,
                                   (term_1 - 2 * term_2) / 3])
    elif ibrav == 6:
        v1 = celldm[0] * np.array([1, 0, 0])
        v2 = celldm[0] * np.array([0, 1, 0])
        v3 = celldm[0] * np.array([0, 0, celldm[2]])
    elif ibrav == 7:
        v1 = celldm[0] / 2 * np.array([1, -1, celldm[2]])
        v2 = celldm[0] / 2 * np.array([1, 1, celldm[2]])
        v3 = celldm[0] / 2 * np.array([-1, -1, celldm[2]])
    elif ibrav == 8:
        v1 = celldm[0] * np.array([1, 0, 0])
        v2 = celldm[0] * np.array([0, celldm[1], 0])
        v3 = celldm[0] * np.array([0, 0, celldm[2]])
    elif ibrav == 9:
        v1 = celldm[0] / 2 * np.array([1, celldm[1], 0])
        v2 = celldm[0] / 2 * np.array([-1, celldm[1], 0])
        v3 = celldm[0] * np.array([0, 0, celldm[2]])
    elif ibrav == -9:
        v1 = celldm[0] / 2 * np.array([1, -celldm[1], 0])
        v2 = celldm[0] / 2 * np.array([+1, celldm[1], 0])
        v3 = celldm[0] * np.array([0, 0, celldm[2]])
    elif ibrav == 91:
        v1 = celldm[0] * np.array([1, 0, 0])
        v2 = celldm[0] / 2 * np.array([0, celldm[1], -celldm[2]])
        v3 = celldm[0] / 2 * np.array([0, celldm[1], celldm[2]])
    elif ibrav == 10:
        v1 = celldm[0] / 2 * np.array([1, 0, celldm[2]])
        v2 = celldm[0] / 2 * np.array([1, celldm[1], 0])
        v3 = celldm[0] / 2 * np.array([0, celldm[1], celldm[2]])
    elif ibrav == 11:
        v1 = celldm[0] / 2 * np.array([1, celldm[1], celldm[2]])
        v2 = celldm[0] / 2 * np.array([-1, celldm[1], celldm[2]])
        v3 = celldm[0] / 2 * np.array([-1, -celldm[1], celldm[2]])
    elif ibrav == 12:
        sen = np.sqrt(1 - celldm[3] ** 2)
        v1 = celldm[0] * np.array([1, 0, 0])
        v2 = celldm[0] * np.array([celldm[1] * celldm[3],
                                   celldm[1] * sen, 0])
        v3 = celldm[0] * np.array([0, 0, celldm[2]])
    elif ibrav == -12:
        sen = np.sqrt(1 - celldm[4] ** 2)
        v1 = celldm[0] * np.array([1, 0, 0])
        v2 = celldm[0] * np.array([0, celldm[1], 0])
        v3 = celldm[0] * np.array([celldm[2] * celldm[4], 0,
                                   celldm[2] * sen])
    elif ibrav == 13:
        sen = np.sqrt(1 - celldm[3] ** 2)
        v1 = celldm[0] / 2 * np.array([1, 0, -celldm[2]])
        v2 = celldm[0] * np.array([celldm[1] * celldm[3],
                                   celldm[1] * sen, 0])
        v3 = celldm[0] / 2 * np.array([1, 0, celldm[2]])
    elif ibrav == -13:
        sen = np.sqrt(1 - celldm[4] ** 2)
        v1 = celldm[0] / 2 * np.array([1, celldm[1], 0])
        v2 = celldm[0] / 2 * np.array([-1, celldm[1], 0])
        v3 = celldm[0] * np.array([celldm[2] * celldm[4], 0,
                                   celldm[2] * sen])
    elif ibrav == 14:
        singam = np.sqrt(1 - celldm[5] ** 2)
        term = (1 + 2 * celldm[3] * celldm[4] * celldm[5]
                - celldm[3] ** 2 - celldm[4] ** 2 - celldm[5] ** 2)
        term = np.sqrt(term / (1 - celldm[5] ** 2))
        v1 = celldm[0] * np.array([1, 0, 0])
        v2 = celldm[0] * np.array([celldm[1] * celldm[5],
                                   celldm[1] * singam, 0])
        v3 = celldm[0] * np.array([celldm[2] * celldm[4],
                                   celldm[2] * (celldm[3] - celldm[4] *
                                                celldm[5]) / singam,
                                   celldm[2] * term])
    else:
        raise ValueError('Unsupported ibrav')
    # Vectors become the *columns* of the returned cell matrix.
    cell = np.stack([v1, v2, v3], axis=1)
    return cell
fbd6e034f738f42be45d7e5304892a9e69a8493b
3,644,223
def A12_6_3_2(FAxial, eta, Pp, Pu, Muey , Muez, Muay, Muaz, Ppls, Mby, Mbz, GammaRPa, GammaRPb): """ A.12.6.3.2 Interaction equation approach where : Pu is the applied axial force in a member due to factored actions, determined in an analysis that includes Pu effects (see A.12.4); Ppls is the representative local axial strength of a non-circular prismatic member, Pp is the representative axial strength of a non-circular prismatic member, Muey is the corrected bending moment due to factored actions about the member y-axis from A.12.4; Muez is the corrected bending moment due to factored actions about the member z-axis from A.12.4; Muay is the amplified bending moment due to factored actions about the member y-axis from A.12.4; Muaz is the amplified bending moment due to factored actions about the member z-axis from A.12.4; Mby is the representative bending moment strength about the member y-axis, as defined in A.12.6.2.5 or A.12.6.2.6. """ # Each non-circular prismatic structural member should satisfy # the following conditions in Equations (A.12.6-38] to [A.12.6-40] # at all cross-sections along its length. When the shear due to # factored actions is greater than 60 percent of the shear strength, # the bending moment strength should be reduced parabolically to zero # when the shear equals the shear strength (Pv in A.12.6.3.4). 
# # Local strength check (for all members): # (A.12.6-38) _UR1 = ((GammaRPa * Pu / Ppls) + pow((pow((GammaRPb * Muey / Mby),eta) + pow((GammaRPb * Muez / Mbz),eta)), 1.0 / eta)) print("") print("A.12.6.3.2 Interaction equation approach") print("Uint [Local strength check ] = {: 1.4f}".format(_UR1)) _UR2 = 0 if FAxial == 'compression': # and beam-column check (for members subject to axial compression): if GammaRPa * Pu / Pp > 0.20: # after AISC[A.12.5-1], Equation H1-1a (A.12.6-39) _UR2 = ((GammaRPa * Pu / Pp) + (8.0 / 9.0) * pow((pow((GammaRPb * Muay / Mby),eta) + pow((GammaRPb * Muaz / Mbz),eta)), 1.0 / eta)) # else: # after AISC[A.12.5-1], Equation H1-1b (A.12.6-40) _UR2 = ((GammaRPa * Pu / (2.0 * Pp)) + pow((pow((GammaRPb * Muay / Mby),eta) + pow((GammaRPb * Muaz / Mbz),eta)), 1.0/eta)) print("Uint [beam-column check ] = {: 1.4f}".format(_UR2)) print("-----------------") # # # return _UR1, _UR2 #
7a36ec489681100f99563f9c336df1306363851d
3,644,224
def gain_deploy_data():
    """
    @api {get} /v1/deploy/new_data Get the info for the given deploy_id
    @apiName deployNew_data
    @apiGroup Deploy
    @apiDescription Get the info for the given deploy_id
    @apiParam {int} project_id project id
    @apiParam {int} flow_id flow id
    @apiParam {int} deploy_id deploy id
    @apiParamExample {json} Request-Example:
    {
        "project_id": 45,
        "flow_id": 1,
        "deploy_id": 1
    }
    @apiSuccessExample {json} Success-Response:
     HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
            "branch": "develop",
            "deploy_id": 160,
            "flow_id": 232,
            "id": 179,
            "node_id": 31,
            "node_name": "yn-244",
            "project_id": 4,
            "result": 1,
            "server_id": 45,
            "server_name": "submarine-test",
            "status": 0,
            "version": "1.1.75"
            }
        ],
        "message": "成功"
    }
    """
    # Deployment records for the requested deploy id (business layer).
    data = DeployRecordBusiness.query_deploy_id_json()
    combine_data = {'is_one_Key': 1, 'data': data}
    # Flag whether a "one-key" deploy applies; empty result means no.
    single_data = DeployRecordBusiness.is_one_key()
    if len(single_data) == 0:
        combine_data['is_one_Key'] = 0
    return json_detail_render(0, combine_data)
9dc5e5faa53235ac6c5d8f0d37a2989b15ead477
3,644,225
def topk_mask(score, k):
  """Efficient implementation of topk_mask for TPUs.

  This is a more efficient implementation of the following snippet with
  support for higher rank tensors. It has the limitation that it only
  supports float32 as element type. The mask only contains k elements even
  if other elements have the same value as the kth largest.

  def topk_mask(score, k):
    _, indices = tf.nn.top_k(score, k=k)
    return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k),
                         tf.squeeze(score).shape.as_list())

  The implementation binary searches for the kth value along each row of the
  input and once the kth value is found it creates the mask via a single
  select instruction. This approach is more than 100x faster on TPUs for
  large inputs compared with the above snippet.

  Args:
    score: 1-D or higher Tensor with last dimension at least k.
    k: Number of top elements to look for along the last dimension (along
      each row for matrices).

  Returns:
    A 0/1 mask of the same shape as `score` with exactly k ones per row
    along the last dimension.
  """
  last_dim_size = score.get_shape().as_list()[-1]
  # Choose top k+epsilon where epsilon is the number of times the k'th largest
  # element is present in the input. `topk_mask_internal` is defined elsewhere
  # in this module; it may select more than k entries when the k-th largest
  # value is tied, and the rest of this function trims the surplus ties.
  topk_mask_with_duplicate = topk_mask_internal(score, k)
  # Calculate the number of redudant duplicate values to discard.
  # (Per-row count of selected entries, minus k.)
  select_num = tf.cast(
      tf.reduce_sum(topk_mask_with_duplicate, axis=-1, keepdims=True),
      tf.int32)
  redudant_num = select_num - k
  # softmax cross entropy value range [0, 1].
  # k's largest value is the smallest value being selected, so take the min
  # over the selected entries; unselected entries are replaced by 2.0, which
  # acts as +inf because scores are assumed to lie in [0, 1].
  k_th_value = tf.reduce_min(
      tf.where(
          tf.cast(topk_mask_with_duplicate, tf.bool), score,
          tf.ones_like(score) * 2.0),
      axis=-1,
      keepdims=True)
  # Mask to indicate if score equals k th largest value.
  equal_k_th_value = tf.equal(score, k_th_value)
  # Creates a tensor where the value is 1 if the value is equal to kth largest
  # value, otherwise, 0. (Note: this rebinds the name `k_th_value` to the
  # integer indicator tensor.)
  k_th_value = tf.where(equal_k_th_value, tf.ones_like(score, dtype=tf.int32),
                        tf.zeros_like(score, dtype=tf.int32))
  index = tf.range(last_dim_size)
  # Position index of every tied entry (0 elsewhere). Re-running the top-k
  # selection on these indices presumably discards the `redudant_num`
  # highest-index ties per row -- depends on topk_mask_internal semantics.
  k_th_value_index = tf.multiply(k_th_value, index)
  duplicate_mask = topk_mask_internal(
      tf.cast(k_th_value_index, tf.float32), redudant_num)
  # Zero out the surplus tie positions, leaving exactly k ones per row.
  return tf.where(
      tf.cast(duplicate_mask, tf.bool),
      tf.zeros_like(topk_mask_with_duplicate), topk_mask_with_duplicate)
0a33dc6d5b9c621ab3fbd86c54c9ec90ac00f21f
3,644,226
import calendar
def valueSearch(stat_type,op,value,**kwargs):
    """Quick function to designate a value, and the days or months where the
    attribute of interest exceeded, equalled, or was less than the passed value

    valueSearch("attribute","operator",value,**{sortmonth=False})

    * "attribute" must be in ["prcp","snow","snwd","tavg","tmax","tmin"]
      (other values are accepted, but these are what are assessed)
    * "operator" must be in ["<=","<","==","!=",">",">="]
      NOTE(review): "!=" is advertised here and handled in the comparison
      chain below, but the operator validation rejects it -- confirm intent.
    * value must be an integer or a float

    OPT **kwarg: sortmonth = True --> If set to true, it will do a value search
    based on monthly data instead of daily (no snwd data is available for
    months though)

    EXAMPLE: valueSearch("prcp",">=",5) --> returns a list of all days on
    record where 5+ inches of rain fell

    Results are printed (with an interactive confirmation when more than 50
    hits); the function returns None.
    """
    # Relies on module-level state defined elsewhere: clmt / metclmt
    # (climate records keyed by year), clmt_vars_days / clmt_vars_months
    # (value -> list of dates), excludemonth (minimum record count for a
    # month to be reported) and mean() -- presumably statistics.mean.
    #operator=">", year=1984, month=12,season="winter"
    # v, args[rain,prcp,snow,temp,avgtemp,tmax,avgtmax,tmin,avgtmin], kwargs[condition,year,metyear,season,month]
    # NOTE(review): valid_yrs / valid_metyrs are computed but never used.
    valid_yrs = sorted([x for x in clmt.keys() if type(x) == int])
    valid_metyrs = sorted([x for x in metclmt.keys() if type(x) == int])
    # ERROR HANDLING
    if stat_type.lower() not in ["rain","prcp","precip","snow","snwd","temp","temps","temperature","temperatures","avgtemp","tavg","tempavglist","tmax","hi","high","tmin","lo","low"]:
        return print("OOPS! {} is not a supported stat category. Try again!".format(stat_type))
    # NOTE(review): "!=" is missing from this accepted-operator list even
    # though the comparison chain below handles it.
    if op not in ["<","<=","==",">",">="]:
        return print("OOPS! '{}' is not a supported operator. Try again!".format(op))
    if type(value) not in [int,float]:
        return print("OOPS! Only integers or floats are supported for value intake")
    # Format passed variables
    stat_type = stat_type.lower() # Convert to lower-case for homogeniety
    # Normalize the accepted aliases down to canonical keys.
    if stat_type in ["rain","prcp","precip"]: stat_type = "prcp"
    if stat_type in ["snow"]: stat_type = "snow"
    if stat_type in ["snwd"]: stat_type = "snwd"
    if stat_type in ["avgtemp","tavg","tempavglist","temp","temps","temperature","temperatures"]: stat_type = "tavg"
    if stat_type in ["tmax","hi","high"]: stat_type = "tmax"
    if stat_type in ["tmin","lo","low"]: stat_type = "tmin"
    # Select the monthly or daily lookup table.
    if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
        CLMTDICT = clmt_vars_months
        stype = "month"
    else: # Just sorting indv days
        CLMTDICT = clmt_vars_days
        stype = "day"
    results = []
    # CLMTDICT[stat_type] maps a recorded value (VAR) to the list of dates on
    # which it occurred; collect every date whose value satisfies `op value`.
    for VAR in CLMTDICT[stat_type]:
        for DAY in CLMTDICT[stat_type][VAR]:
            if op == "<":
                if stype == "month":
                    # Months with too few records (<= excludemonth) are skipped.
                    if VAR < value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
                else:
                    if VAR < value: results.append(DAY)
            elif op == "<=":
                if stype == "month":
                    if VAR <= value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
                else:
                    if VAR <= value: results.append(DAY)
            # NOTE(review): the remaining operators do not apply the
            # recordqty/excludemonth filter in monthly mode -- confirm
            # whether that asymmetry is intended.
            elif op == "!=":
                if VAR != value: results.append(DAY)
            elif op == "==":
                if VAR == value: results.append(DAY)
            elif op == ">=":
                if VAR >= value: results.append(DAY)
            elif op == ">":
                if VAR > value: results.append(DAY)
    results.sort()
    if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
        # Monthly report: totals for precip/snow, averages for temperatures.
        if stat_type == "prcp": print("Total months where the Total Rainfall {} {}: {}".format(op,value,len(results)))
        elif stat_type == "snow": print("Total months where the Total Snowfall {} {}: {}".format(op,value,len(results)))
        elif stat_type in ["tmax","tmin"]: print("Total months where the Average {} {} {}: {}".format(stat_type.upper(),op,value,len(results)))
        elif stat_type == "tavg": print("Total months where the Average Temperature {} {}: {}".format(op,value,len(results)))
        else: return print("*** valueSearch does not report on monthly variations of {} ***".format(stat_type))
        # Ask before dumping long listings.
        if len(results) <= 50: stillprint = True
        else:
            stillpr = input("print results? ('y'/'n'): ")
            if stillpr == "y": stillprint = True
            else: stillprint = False
        if stillprint == True:
            if stat_type == "prcp":
                for x in results: print("{:6.2f}: {} {}".format(round(sum(clmt[x.year][x.month]["prcp"]),2),calendar.month_abbr[x.month],x.year))
            if stat_type == "snow":
                for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snow"]),1),calendar.month_abbr[x.month],x.year))
            #if stat_type == "snwd":
                #for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snwd"]),1),calendar.month_abbr[x.month],x.year))
            if stat_type == "tavg":
                for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tempAVGlist"]),1),calendar.month_abbr[x.month],x.year))
            if stat_type == "tmax":
                for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmax"]),1),calendar.month_abbr[x.month],x.year))
            if stat_type == "tmin":
                for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmin"]),1),calendar.month_abbr[x.month],x.year))
    else: # Just assessing individual days
        print("Total days where '{}' {} {}: {}".format(stat_type,op,value,len(results)))
        if len(results) <= 50: stillprint = True
        else:
            stillpr = input("print results? ('y'/'n'): ")
            if stillpr == "y": stillprint = True
            else: stillprint = False
        if stillprint == True:
            # NOTE(review): there is no daily listing for "tavg" --
            # presumably daily averages are not stored per-day; confirm.
            if stat_type == "prcp":
                for x in results: print("{:>5.2f}: {}".format(float(clmt[x.year][x.month][x.day].prcp),x))
            if stat_type == "snow":
                for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snow),x))
            if stat_type == "snwd":
                for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snwd),x))
            if stat_type == "tmax":
                for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmax,x))
            if stat_type == "tmin":
                for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmin,x))
    print("")
94b55a362d179f6acce705b002eb99f330a5427b
3,644,228
import requests def get_gnid(rec): """ Use geonames API (slow and quota limit for free accounts) """ if not any("http://www.geonames.org" in s for s in rec.get("sameAs")) and rec["geo"].get("latitude") and rec["geo"].get("longitude"): changed = False r = requests.get("http://api.geonames.org/findNearbyJSON?lat="+rec["geo"].get( "latitude")+"&lng="+rec["geo"].get("longitude")+"&username=slublod") if r.ok and isiter(r.json().get("geonames")): for geoNameRecord in r.json().get("geonames"): if rec.get("name") in geoNameRecord.get("name") or geoNameRecord.get("name") in rec.get("name"): # match! newSameAs = {'@id': "https://sws.geonames.org/"+str(geoNameRecord.get("geonameId"))+"/", 'publisher': {'abbr': "geonames", 'preferredName': "GeoNames", "isBasedOn": {"@type": "Dataset", "@id": "https://sws.geonames.org/"+str(record.get("id"))+"/" } } } rec["sameAs"] = litter(rec.get("sameAs"), newSameAs) changed = True else: if r.json().get("status").get("message").startswith("the hourly limit") or r.json().get("status").get("message").startswith("the daily limit"): eprint("Limit exceeded!\n") exit(0) if changed: return rec
ab9d5e50e45217e3742f1d1ca7f58326ed3bf6f6
3,644,229
def allowed_file(filename): """ Is file extension allowed for upload""" return '.' in filename and \ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
3d0a3a15eecf8f6b0d76b52935a14628f1655328
3,644,231
import re def parse_tsv(filename, name_dict): """ """ output_matrix = [] with open(filename, 'rU') as handle: curr_protein = [] for line in handle: if line[0] == "#" or line[0] == "-" or len(line.strip('\n')) < 1: continue if re.match("Protein", line): continue arow = line.strip('\n').split() if arow[0] == "pos": continue arow[12] = float(arow[12]) if len(arow[10].split('-')) == 3: #arow = arow[:10] + arow[10].split('_') + arow[11:] arow = arow[:10] + name_dict[arow[10]].split('-') + arow[11:] #print arow output_matrix.append(arow) return output_matrix
12aa31ab3ff033ecc514518700c22ea467f01ef6
3,644,232
def get_orlist(site=DEFAULT_SITE, namespace="0|6|10|14|100|828", redirects="nonredirects"): """Get list of oldreviewed pages.""" request = Request(site=site, action="query", list="oldreviewedpages", ornamespace=namespace, orfilterredir=redirects, orlimit="5000") result = [] while True: answer = request.submit() result += [page["title"] for page in answer["query"]["oldreviewedpages"]] if "query-continue" in answer: request["orstart"] = answer["query-continue"]["oldreviewedpages"]["orstart"] else: break return result
8253b2ac8ea72690086fa7864e5ca4ffcc33de50
3,644,233
def meshVolume(verts, norm, tri): """Compute the Volume of a mesh specified by vertices, their normals, and indices of triangular faces """ # TEST zeronorms = [] for i, n in enumerate(norm): #if n == [0., 0., 0.] or n == (0., 0., 0.): if n[0] == 0 and n[1] == 0 and n[2] == 0: #print "normal %d is zero!" % i, n zeronorms.append(i) #print "in meshVolume, zeronorms length: ", len(zeronorms), "normals length:", len(norm) # Initialize volSum = 0.0 oneThird = 1./3. # Compute face normals trinorm = [] for t in tri: n1 = norm[t[0]] n2 = norm[t[1]] n3 = norm[t[2]] tn = [ (n1[0]+n2[0]+n3[0])*oneThird, (n1[1]+n2[1]+n3[1])*oneThird, (n1[2]+n2[2]+n3[2])*oneThird ] trinorm.append(tn) # print trinorm # TEST # Compute volume for t,tn in zip(tri, trinorm): s1 = verts[t[0]] s2 = verts[t[1]] s3 = verts[t[2]] area = triangleArea(s1,s2,s3) g = [ (s1[0]+s2[0]+s3[0])*oneThird, (s1[1]+s2[1]+s3[1])*oneThird, (s1[2]+s2[2]+s3[2])*oneThird ] volSum += (g[0]*tn[0] + g[1]*tn[1] + g[2]*tn[2])*area return volSum*oneThird
018818ab558b64b9699250bf6f45f0a1c47f92c8
3,644,234
def _groupby_clause(uuid=None, owner=None, human_name=None, processing_name=None): """ Build the groupby clause. Simply detect which fields are set, and group by those. Args: uuid: owner: human_name: processing_name: Returns: (str): "field, ..., field" """ gbc = '' clauses = [] if uuid is not None: clauses.append('uuid') if owner is not None: clauses.append('owner') if human_name is not None: clauses.append('human_name') if processing_name is not None: clauses.append('processing_name') if len(clauses) > 0: gbc = ','.join(clauses) return gbc
21546efa19e841661ed3a7ad8a84cf9a9a76d416
3,644,235
def _coeff_mod_wfe_drift(self, wfe_drift, key='wfe_drift'): """ Modify PSF polynomial coefficients as a function of WFE drift. """ # Modify PSF coefficients based on WFE drift if wfe_drift==0: cf_mod = 0 # Don't modify coefficients elif (self._psf_coeff_mod[key] is None): _log.warning("You must run `gen_wfedrift_coeff` first before setting the wfe_drift parameter.") _log.warning("Will continue assuming `wfe_drift=0`.") cf_mod = 0 else: _log.info("Generating WFE drift modifications...") psf_coeff = self.psf_coeff cf_fit = self._psf_coeff_mod[key] lxmap = self._psf_coeff_mod['wfe_drift_lxmap'] # Fit function cf_fit_shape = cf_fit.shape cf_fit = cf_fit.reshape([cf_fit.shape[0], -1]) cf_mod = jl_poly(np.array([wfe_drift]), cf_fit, use_legendre=True, lxmap=lxmap) cf_mod = cf_mod.reshape(cf_fit_shape[1:]) # Pad cf_mod array with 0s if undersized if not np.allclose(psf_coeff.shape, cf_mod.shape): new_shape = psf_coeff.shape[1:] cf_mod_resize = np.asarray([pad_or_cut_to_size(im, new_shape) for im in cf_mod]) cf_mod = cf_mod_resize return cf_mod
345d07a8850ec702d42f5c527fae0311f50a69b1
3,644,236
def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user=None):
    """ Returns the GitHub webhook JSON payload transformed into our own payload
        format. If the gh_payload is not valid, raises InvalidPayloadException.

    Args:
        gh_payload: parsed JSON body of the GitHub webhook.
        default_branch: fallback branch name when the payload does not
            carry `repository.default_branch`.
        lookup_user: optional callable(username) -> user-info dict used to
            fill in author/committer URLs GitHub sometimes omits.

    Raises:
        InvalidPayloadException: the payload fails schema validation.
        SkipRequestException: the payload has no head commit (nothing to build).
    """
    try:
        validate(gh_payload, GITHUB_WEBHOOK_PAYLOAD_SCHEMA)
    except Exception as exc:
        # BUGFIX: Python 3 exceptions have no `.message` attribute; use
        # str(exc) and chain the original exception for debuggability.
        raise InvalidPayloadException(str(exc)) from exc

    payload = JSONPathDict(gh_payload)

    if payload['head_commit'] is None:
        raise SkipRequestException

    config = SafeDictSetter()
    config['commit'] = payload['head_commit.id']
    config['ref'] = payload['ref']
    config['default_branch'] = payload['repository.default_branch'] or default_branch
    config['git_url'] = payload['repository.ssh_url']

    config['commit_info.url'] = payload['head_commit.url']
    config['commit_info.message'] = payload['head_commit.message']
    config['commit_info.date'] = payload['head_commit.timestamp']

    config['commit_info.author.username'] = payload['head_commit.author.username']
    config['commit_info.author.url'] = payload.get('head_commit.author.html_url')
    config['commit_info.author.avatar_url'] = payload.get('head_commit.author.avatar_url')

    config['commit_info.committer.username'] = payload.get('head_commit.committer.username')
    config['commit_info.committer.url'] = payload.get('head_commit.committer.html_url')
    config['commit_info.committer.avatar_url'] = payload.get('head_commit.committer.avatar_url')

    # Note: GitHub doesn't always return the extra information for users, so we
    # do the lookup manually if possible.
    if (lookup_user and not payload.get('head_commit.author.html_url') and
            payload.get('head_commit.author.username')):
        author_info = lookup_user(payload['head_commit.author.username'])
        if author_info:
            config['commit_info.author.url'] = author_info['html_url']
            config['commit_info.author.avatar_url'] = author_info['avatar_url']

    if (lookup_user and payload.get('head_commit.committer.username') and
            not payload.get('head_commit.committer.html_url')):
        committer_info = lookup_user(payload['head_commit.committer.username'])
        if committer_info:
            config['commit_info.committer.url'] = committer_info['html_url']
            config['commit_info.committer.avatar_url'] = committer_info['avatar_url']

    return config.dict_value()
26e645219b816405521ddb6033a0a44c2ab7bba5
3,644,237
def get_retweeted_tweet(tweet): """ Get the retweeted Tweet and return it as a dictionary If the Tweet is not a Retweet, return None Args: tweet (Tweet or dict): A Tweet object or a dictionary Returns: dict: A dictionary representing the retweeted status or None if there is no quoted status. \n - For original format, this is the value of "retweeted_status" \n - For activity streams, If the Tweet is a Retweet this is the value of the key "object" """ if get_tweet_type(tweet) == "retweet": if is_original_format(tweet): return tweet["retweeted_status"] else: return tweet["object"] else: return None
f852d45deadb1622687d097f2c724bdaef72ccc9
3,644,238
def listminus(c1, c2): """Return a list of all elements of C1 that are not in C2.""" s2 = {} for delta in c2: s2[delta] = 1 c = [] for delta in c1: if not s2.has_key(delta): c.append(delta) return c
829c347343d6a305fef2ad2f71539d7267b5a973
3,644,239
import random
import torch
def distribute_quantity_skew(batch_size, grouped_data, distributed_dataset, groupings, p=0.5, scalar=1.5):
    """
    Adds quantity skew to the data distribution. If p=0. or scalar=1., no skew is applied and the data
    are divided evenly among the workers in each label group.
    :param batch_size: the batch size for training
    :param grouped_data: a dictionary containing the data for each label skew group, key is the label
    integer and value is the data
    :param distributed_dataset: an initialized empty dictionary that will be filled with data for each worker
    :param groupings: a dictionary of the groupings for each worker id, key is the label integer and value
    is a list of worker ids
    :param p: the portion of workers within each group that will receive higher data quantities, p=0
    indicates no skew
    :param scalar: the factor used to multiply the size of datasets for high quantity workers, e.g. if
    scalar=1.5 then each worker with high quantity skew has 1.5x as many data points as the low
    quantity workers in their group
    :return: the distributed dataset
    """
    # NOTE(review): mutates grouped_data in place (samples are removed as
    # they are assigned) and assumes each sample is an (input, target) pair
    # of objects exposing .numpy() (presumably tensors) -- confirm callers.
    for n, group in groupings.items():
        # Randomly pick the fraction p of workers that get extra data.
        high_quantity = random.sample(group, k=int(p*len(group)))
        low_quantity = [i for i in group if i not in high_quantity]
        base_k = int(len(grouped_data[n])/len(group))
        print(f"Base K: {base_k}")
        print(f"Length of grouped data: {len(grouped_data[n])}")
        if p > 0.:
            # Split the group's pool so high-quantity workers get
            # `scalar` times as many samples as low-quantity ones.
            low_k = int(len(grouped_data[n]) / (len(low_quantity) + len(high_quantity) * scalar))
            high_k = int(low_k * scalar)
            print(f"High Quantity Skew: {high_quantity}")
            print(f"High Quantity K: {high_k}")
            print(f"Low Quantity Skew: {low_quantity}")
            print(f"Low Quantity K: {low_k}")
        else:
            # No skew: everyone receives the even share.
            low_k = base_k
            assert len(high_quantity) == 0, "Quantity skew with probability 0 should have no high quantity clients"
            print(f"High Quantity Skew: {high_quantity}")
            print(f"Low Quantity Skew: {low_quantity}")
            print(f"Base K: {base_k}")
        for worker in high_quantity:
            # Sample high_k items (without replacement) for this worker and
            # remove them from the remaining pool afterwards.
            selected = random.sample(list(range(len(grouped_data[n]))), k=high_k)
            temp = [grouped_data[n][i] for i in selected]
            # This would need to be changed if the number of samples is not divisible by batch size
            worker_vals = []
            for i in range(len(temp) // batch_size):
                ix = i * batch_size
                vals = temp[ix:ix + batch_size]
                targets = []
                inputs = []
                for j in vals:
                    targets.append(int(j[1].numpy()))
                    inputs.append(j[0].numpy())
                # NOTE(review): torch.Tensor(...) yields float tensors, so
                # integer class targets become floats here -- confirm the
                # training loop expects that.
                worker_vals.append((torch.Tensor(inputs), torch.Tensor(targets)))
            distributed_dataset[worker].extend(worker_vals)
            grouped_data[n] = [grouped_data[n][i] for i in range(len(grouped_data[n])) if i not in selected]
        for nx, worker in enumerate(low_quantity):
            if nx+1 == len(low_quantity):
                # Last low-quantity worker absorbs whatever is left over.
                print(f"Length of remaining data = {len(grouped_data[n])}\nLow_k = {low_k}")
                temp = grouped_data[n]
            else:
                selected = random.sample(list(range(len(grouped_data[n]))), k=low_k)
                temp = [grouped_data[n][i] for i in selected]
            # This would need to be changed if the number of samples is not divisible by batch size
            worker_vals = []
            for i in range(len(temp) // batch_size):
                ix = i * batch_size
                vals = temp[ix:ix + batch_size]
                targets = []
                inputs = []
                for j in vals:
                    targets.append(int(j[1].numpy()))
                    inputs.append(j[0].numpy())
                worker_vals.append((torch.Tensor(inputs), torch.Tensor(targets)))
            distributed_dataset[worker].extend(worker_vals)
            if nx+1 != len(low_quantity):
                grouped_data[n] = [grouped_data[n][i] for i in range(len(grouped_data[n])) if i not in selected]
    return distributed_dataset
b4ebd1d6058550d2e32cedd62a56b50441d93b4c
3,644,240
def get_dtype(names, array_dtype=DEFAULT_FLOAT_DTYPE): """ Get a list of tuples containing the dtypes for the structured array Parameters ---------- names : list of str Names of parameters array_dtype : optional dtype to use Returns ------- list of tuple Dtypes as tuples with (field, dtype) """ return [(n, array_dtype) for n in names] \ + [('logP', array_dtype), ('logL', LOGL_DTYPE)]
9f29dae78b3839429f13b8513293e9ce4c240e2f
3,644,243