content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def register(*args, cache_default=True):
    """Register a function for caching of its calls and restoring of its source.

    Works both as a bare decorator (``@register``) and as a decorator factory
    (``@register(...)`` with keyword arguments), hence the ``*args`` handling.

    Example:
    ``` python
    @register
    def make_ohe_pclass(df):
        ...
    ```
    """
    def __register(func):
        # if source_utils.source_is_saved(func) and not source_utils.matches_cache(func):
        # A functor is cached under "<name>_fc". Re-registering the same name
        # with *different* source is an error rather than a silent overwrite.
        if func.__name__ + '_fc' in cache.cached_objs() and source_utils.get_source(func) != cache.load_obj(func.__name__ + '_fc').source:
            raise NameError("A function with the same name is already registered")
        if func.__name__ + '_fc' in cache.cached_objs():
            # Same name, same source: reuse the already cached functor.
            return cache.load_obj(func.__name__ + '_fc')
        else:
            functor = FeatureConstructor(func, cache_default)
            cache.cache_obj(functor, functor.__name__ + '_fc')
            return functor
    if args:
        # Used as a bare decorator: args[0] is the decorated function itself.
        function = args[0]
        return __register(function)
    else:
        # Used as a decorator factory: hand back the real decorator.
        return __register
41610d7f3463088f29125fe335a04b9b0292b74f
3,644,716
import re


def make_absolute_paths(content):
    """Convert all MEDIA files into a file:// URL paths in order to
    correctly get it displayed in PDFs."""
    # Each override maps a URL prefix onto the local filesystem root it serves.
    overrides = [
        {
            'root': settings.MEDIA_ROOT,
            'url': settings.MEDIA_URL,
        },
        {
            'root': settings.STATIC_ROOT,
            'url': settings.STATIC_URL,
        }
    ]
    has_scheme = re.compile(r'^[^:/]+://')
    for x in overrides:
        # Skip empty URLs and URLs that already carry a scheme (http://, ...).
        if not x['url'] or has_scheme.match(x['url']):
            continue
        if not x['root'].endswith('/'):
            x['root'] += '/'
        # Find quoted occurrences of the URL prefix, e.g. "/media/img.png".
        occur_pattern = '''(["|']{0}.*?["|'])'''
        occurences = re.findall(occur_pattern.format(x['url']), content)
        occurences = list(set(occurences))  # Remove dups
        for occur in occurences:
            # Rewrite the quoted URL as a file:// URL rooted at the local path.
            content = content.replace(
                occur,
                '"%s"' % (pathname2fileurl(x['root']) + occur[1 + len(x['url']): -1]))
    return content
4632513f73bf49ec6d1acfef15d632ee980ab345
3,644,717
def social_distancing_start_40():
    """
    Real Name: b'social distancing start 40'
    Original Eqn: b'31'
    Units: b'Day'
    Limits: (None, None)
    Type: constant
    b''

    Generated model constant: the day on which social distancing begins.
    """
    return 31
c874afed46a8303ec2d3ad0d571183ddc30059a0
3,644,718
def get_token_auth_header(params):
    """Obtain the Access Token from the Authorization Header.

    Raises AuthError (HTTP 401) when the header does not start with
    "Bearer", carries no token, or has extra parts after the token.
    """
    auth = get_token(params)
    parts = auth.split()
    # Scheme check is case-insensitive per RFC 6750 usage.
    if parts[0].lower() != "bearer":
        raise AuthError({"code": "invalid_header",
                         "description": "Authorization header must start with Bearer"}, 401)
    if len(parts) == 1:
        raise AuthError({"code": "invalid_header",
                         "description": "Token not found"}, 401)
    if len(parts) > 2:
        raise AuthError({"code": "invalid_header",
                         "description": "Authorization header must be Bearer token"}, 401)
    token = parts[1]
    return token
c48a2306ea76b1b5f611194eb33fa13e40f0e155
3,644,720
from typing import Optional


def check_hu(base: str, add: Optional[str] = None) -> str:
    """Check country specific VAT-Id.

    Computes the Hungarian check digit: the first seven digits of *base*
    are weighted, summed, and reduced modulo 10.
    """
    WEIGHTS = (9, 7, 3, 1, 9, 7, 3)
    total = 0
    for digit, weight in zip(base, WEIGHTS):
        total += int(digit) * weight
    remainder = total % 10
    return '0' if remainder == 0 else str(10 - remainder)
48f1043eeede4ea0b04eb71685f19901da495195
3,644,721
import requests
from bs4 import BeautifulSoup


def scrape_headline(news_link):
    """Scrape the headlines from a simple news website.

    :param news_link: URL of the page to fetch (e.g. a lite.cnn.com page).
    :return: a dictionary with key as html link of the source and value as
        the text in the headline of the news in the html link
    """
    #Headlines
    #URL = 'https://lite.cnn.com/en'
    page = requests.get(news_link)
    soup = BeautifulSoup(page.content, 'html.parser')
    daily_news_headline_dict = myDict()  # NOTE(review): myDict is defined elsewhere in the project
    for link in soup.find_all('a'):
        # Anchors carry relative hrefs; prefix the site root to build a full URL.
        key = 'https://lite.cnn.com'+link.get('href')
        text = cleantext.create_cleanerDoc(link.get_text('href'))
        daily_news_headline_dict.add(key, text)
    #print(daily_news_headline_dict)
    return daily_news_headline_dict
f87453a925ace26a3f848c0ed380b6e4ab7030a7
3,644,722
def read_xml(img_path):
    """Read bounding boxes from the XML annotation next to an image.

    Args:
        img_path: path to image; the annotation is the same path with
            its extension replaced by ``.xml``.
    Return:
        list of ``[xmin, ymin, xmax, ymax]`` integer boxes, one per
        ``<object>`` element.
    """
    xml_path = '.'.join(img_path.split('.')[:-1]) + '.xml'
    root = ET.ElementTree(file=xml_path).getroot()
    boxes = []
    for obj in root.findall('object'):
        bnd = obj.find('bndbox')
        boxes.append(
            [int(bnd.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')])
    return boxes
7102edccb5258d88b67476770123e54e1b75a5c1
3,644,723
def a2funcoff(*args):
    """a2funcoff(ea_t ea, char buf) -> char

    Thin SWIG wrapper: forwards all arguments to the native IDA API.
    """
    return _idaapi.a2funcoff(*args)
0cac71a4e071bf99bf6777fc35002e48099ecc46
3,644,724
def str(obj):
    """This function can be used as a default `__str__()` in user-defined
    classes. Classes using this should provide an `__info__()` method,
    otherwise the `default_info()` function defined in this module is used.

    NOTE: intentionally shadows the builtin ``str`` within this module.
    """
    # Look up the hook on the class (not the instance) so per-class overrides win.
    info_func = getattr(type(obj), "__info__", default_info)
    return "{}({})".format(type(obj).__name__, info_func(obj))
651e6f3e047a8f7583a39d337d202c09934bc37a
3,644,725
from typing import Union
from typing import Optional
from typing import Sequence
from typing import Any


def isin_strategy(
    pandera_dtype: Union[numpy_engine.DataType, pandas_engine.DataType],
    strategy: Optional[SearchStrategy] = None,
    *,
    allowed_values: Sequence[Any],
) -> SearchStrategy:
    """Strategy to generate values within a finite set.

    :param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
    :param strategy: an optional hypothesis strategy. If specified, the
        pandas dtype strategy will be chained onto this strategy.
    :param allowed_values: set of allowable values.
    :returns: ``hypothesis`` strategy
    """
    if strategy is None:
        # No base strategy: sample directly from the allowed set, coerced
        # to the target dtype.
        return pandas_dtype_strategy(
            pandera_dtype, st.sampled_from(allowed_values)
        )
    # Chain onto the caller's strategy: keep only values inside the allowed set.
    return strategy.filter(lambda x: x in allowed_values)
aecf05b269b7f89b6fea0b5bfbbc98e51d4caddb
3,644,726
def arrToDict(arr):
    """Map every element of *arr* to the value 1.

    The resulting dictionary is used for membership testing.
    """
    return {element: 1 for element in arr}
3202aac9a6c091d7c98fd492489dbcf2300d3a02
3,644,727
def getPercentGC(img, nbpix) :
    """Determines if a page is in grayscale or colour mode.

    Returns a dict with keys "G" (gray) and "C" (colour) holding 0.0/100.0
    percentages. ``nbpix`` is accepted for interface compatibility but unused.
    """
    if img.mode != "RGB" :
        img = img.convert("RGB")
    # Removed dead local ``gray = 0`` (was never read).
    for (r, g, b) in img.getdata() :
        if not (r == g == b) :
            # optimize : if a single pixel is no gray the whole page is colored.
            return { "G" : 0.0, "C" : 100.0 }
    return { "G" : 100.0, "C" : 0.0 }
e8ee682889e0f9284cecfcf57cf260b7056c1879
3,644,728
import ctypes


def rotate(angle: float, iaxis: int) -> ndarray:
    """
    Calculate the 3x3 rotation matrix generated by a rotation
    of a specified angle about a specified axis. This rotation
    is thought of as rotating the coordinate system.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/rotate_c.html

    :param angle: Angle of rotation (radians).
    :param iaxis: Axis of rotation X=1, Y=2, Z=3.
    :return: Resulting rotation matrix
    """
    # Marshal Python scalars into the C types the CSPICE shared library expects.
    angle = ctypes.c_double(angle)
    iaxis = ctypes.c_int(iaxis)
    # rotate_c writes its result into a preallocated 3x3 output buffer.
    mout = stypes.empty_double_matrix()
    libspice.rotate_c(angle, iaxis, mout)
    return stypes.c_matrix_to_numpy(mout)
035144bdf04b4c39cc4bf1e41ec02d4c71d4d951
3,644,729
def build_categories(semanticGroups):
    """
    Returns a list of ontobio categories or None

    Parameters
    ----------
    semanticGroups : string
        a space delimited collection of semanticGroups
    """
    if semanticGroups is None:
        return None
    categories = []
    for semanticGroup in semanticGroups.split(' '):
        try:
            categories += UMLS_to_monarch(semanticGroup.upper())
        except Exception:
            # BUG FIX: was a bare ``except:`` whose body was the no-op
            # expression ``None`` — it also swallowed SystemExit and
            # KeyboardInterrupt. Unknown groups are still skipped best-effort.
            pass
    # Preserve the original contract: an empty result maps to None.
    if len(categories) == 0:
        return None
    else:
        return categories
5262b62cd5ce8e8c0864f91f43a0925ea991cc83
3,644,730
from x84.bbs import getterminal
import time


def show_nicks(handles):
    """ return terminal sequence for /users result. """
    term = getterminal()
    # IRC-style status line: "HH:MM -!- N users: a, b, c"
    return u''.join((
        time.strftime('%H:%M'), u' ',
        term.blue('-'), u'!', term.blue('-'), u' ',
        term.bold_cyan('%d' % (len(handles))), u' ',
        # NOTE(review): an empty ``handles`` also yields singular "user" — confirm intended.
        u'user%s: ' % (u's' if len(handles) > 1 else u''),
        u', '.join(handles) + u'\n',))
6863b7a67686a337304c9f22eb4bd9488f959a1f
3,644,731
def cvCmp(*args):
    """cvCmp(CvArr src1, CvArr src2, CvArr dst, int cmp_op)

    Thin SWIG wrapper: forwards all arguments to the native OpenCV function.
    """
    return _cv.cvCmp(*args)
ec2b9d8d68083fff3a09a5293e9950a2c67c424b
3,644,732
def convert_to_dtype(data, dtype):
    """
    A utility function converting xarray, pandas, or NumPy data to a given dtype.

    Parameters
    ----------
    data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
          or numpy.ndarray
    dtype: str or numpy.dtype
        A string denoting a Python datatype name (e.g. int, float) or a NumPy
        dtype (e.g. np.int16, np.float32) to convert the data to. ``None``
        leaves the data untouched.
    """
    return data if dtype is None else data.astype(dtype)
ec3130311fe9c136707d5afb8f564b4f89067f4e
3,644,733
def process_files(data_path, output_path):
    """Returns a pipeline which rebalances data shards.

    Args:
      data_path: File(s) to read.
      output_path: Path to which output CSVs are written, if necessary.
    """
    def csv_pipeline(root):
        # Plain-text passthrough: re-shard by rewriting with a fixed shard count.
        _ = (
            root
            | beam.io.ReadFromText(data_path)
            | beam.io.WriteToText(output_path,
                                  num_shards=FLAGS.num_output_files))

    def tfrecord_pipeline(root):
        """Pipeline instantiation function.

        Args:
          root: Source pipeline from which to extend.
        """
        # Records are round-tripped through the tf.train.Example proto coder.
        example_coder = beam.coders.ProtoCoder(tf.train.Example)
        _ = (
            root
            | beam.io.ReadFromTFRecord(data_path, coder=example_coder)
            | beam.io.WriteToTFRecord(output_path,
                                      file_name_suffix="tfrecord",
                                      coder=example_coder,
                                      num_shards=FLAGS.num_output_files))

    # Pick the pipeline flavour from the --filetype flag.
    pipeline = tfrecord_pipeline if FLAGS.filetype == "tfrecord" else csv_pipeline
    return pipeline
320a66857dfcfa43995226ee20f714f0694c8f8d
3,644,734
def delete_tasklog_cached(dc_id, user_id=None):
    """Remove tasklog cache entry.

    When ``user_id`` is omitted (or falsy), the staff-wide log entry for the
    datacenter is invalidated instead.
    """
    if user_id:
        key = _cache_log_key(user_id, dc_id)
    else:
        key = _cache_log_key(settings.TASK_LOG_STAFF_ID, dc_id)
    return cache.delete(key)
29435d0618850a442a56d4d28e96be5989bca1f5
3,644,735
def strip_headers(data):
    """ Strips headers from data #depreciate

    Returns ``data['items']`` when present; on a missing key or a
    non-subscriptable input the error is printed and ``data`` is
    returned unchanged.
    """
    try:
        payload = data['items']
    except (TypeError, KeyError) as err:
        print(err)
        return data
    return payload
2eb044e45043f103fff76bfa47007dbcd4aa49c7
3,644,736
def sns_plot(chart_type: str, df):
    """ return seaborn plots

    Renders one seaborn chart onto a fresh matplotlib figure according to
    ``chart_type`` and returns the figure. ``st.echo()`` additionally shows
    the plotting code in the Streamlit app.
    """
    fig, ax = plt.subplots()
    if chart_type == "Scatter":
        with st.echo():
            sns.scatterplot(
                data=df,
                x="bill_depth_mm",
                y="bill_length_mm",
                hue="species",
            )
            plt.title("Bill Depth by Bill Length")
    elif chart_type == "Histogram":
        with st.echo():
            sns.histplot(data=df, x="bill_depth_mm")
            plt.title("Count of Bill Depth Observations")
    elif chart_type == "Bar":
        with st.echo():
            sns.barplot(data=df, x="species", y="bill_depth_mm")
            plt.title("Mean Bill Depth by Species")
    elif chart_type == "Boxplot":
        with st.echo():
            sns.boxplot(data=df)
            plt.title("Bill Depth Observations")
    elif chart_type == "Line":
        with st.echo():
            sns.lineplot(data=df, x=df.index, y="bill_length_mm")
            plt.title("Bill Length Over Time")
    elif chart_type == "3D Scatter":
        # Seaborn has no 3D support; fall back to a 2D scatter (no code echo).
        st.write("Seaborn doesn't do 3D ☹️. Here's 2D.")
        sns.scatterplot(data=df, x="bill_depth_mm", y="bill_length_mm", hue="island")
        plt.title("Just a 2D Scatterplot")
    return fig
8081349e83745167443d76c9be30ee8b884e8d67
3,644,737
def decode_fields(source_str: str, resp_type):
    """ This is the lower level decode of fields, no automatic guess of type is
    performed.

    NOTE(review): despite the ``str`` annotation, this routine searches with
    bytes literals (b']', b',') — it appears to expect ``source_str`` to be
    bytes; confirm at the call sites.
    """
    # FIELD_MAPPING yields (name, type, subtype) triples for this response type.
    field_decoding = FIELD_MAPPING[resp_type]
    unpacked_fields = {}
    for field_name, field_type, field_subtype in field_decoding:
        search_term = f"{field_name}:".encode()
        field_location = source_str.find(search_term)
        assert field_location >= 0
        # Attempt to extract the value
        field_value_start = field_location + len(search_term)
        if field_type is list:
            # Handle as a list
            field_value_end = source_str.find(b']', field_value_start)
            assert field_value_end > field_value_start
            list_str = source_str[field_value_start + 1:field_value_end].strip()
            if len(list_str) == 0:
                field_list = []
            else:
                if field_subtype is int:
                    # Hex entries contain an 'x' (as in 0x..); otherwise decimal.
                    list_base = 16 if b'x' in list_str else 10
                    field_list = [int(x,list_base) for x in list_str.split(b',')]
                elif field_subtype is str:
                    # Strip both quote styles before decoding each entry.
                    field_list = [x.replace(b"'", b"").replace(b'"',b"").decode() for x in list_str.split(b',')]
            unpacked_fields[field_name] = field_list
        else:
            # Handle as a single value
            field_value_end = source_str.find(b',', field_value_start)
            assert field_value_end > field_value_start
            if field_type is not bool:
                field_value = field_type(source_str[field_value_start:field_value_end])
            else:
                # Booleans are serialized as the byte '1' (true) or anything else (false).
                field_value = source_str[field_value_start:field_value_end] == b'1'
            unpacked_fields[field_name] = field_value
    return unpacked_fields
6d1afebcfb377be0ce454f5a1db7b6aad37313c5
3,644,738
def make_egg(a=-1.25, b=7):
    """
    Return x, y points that resemble an egg.

    Egg equation is: r = cos(2θ) + a * cos(θ) + b

    @param a: Number.
    @param b: Number.
    """
    theta = np.linspace(0, 2 * np.pi, 100)
    radius = np.cos(2 * theta) + a * np.cos(theta) + b
    # Polar -> Cartesian, with the egg's axis of symmetry along y.
    x = radius * np.sin(theta)
    y = radius * np.cos(theta)
    return np.array([x, y]).T.tolist()
b94ca316ba9e8bcfdcc3622205204e322c2bccb8
3,644,739
def test_styling_object_which_implements_str_proto():
    """ Test styling an object which implements the str protocol """
    class Dummy(object):
        def __str__(self):
            return 'I am a dummy object'

    colorful = core.Colorful(colormode=terminal.ANSI_8_COLORS)
    # \033[30m = ANSI black foreground; \033[39m = reset to default foreground.
    assert str(colorful.black(Dummy())) == '\033[30mI am a dummy object\033[39m'
d14567675db25c66bdc00e28904b721bb3af536d
3,644,740
def determine_auto_approval(financial_aid, tier_program):
    """
    Takes income and country code and returns a boolean if auto-approved. Logs an error
    if the country of financial_aid does not exist in CountryIncomeThreshold.
    Args:
        financial_aid (FinancialAid): the financial aid object to determine auto-approval
        tier_program (TierProgram): the TierProgram for the user's income level
    Returns:
        boolean: True if auto-approved, False if not
    """
    try:
        country_income_threshold = CountryIncomeThreshold.objects.get(country_code=financial_aid.country_of_income)
        income_threshold = country_income_threshold.income_threshold
    except CountryIncomeThreshold.DoesNotExist:
        # Unknown country: log and fall back to the default threshold.
        log.error(
            "Country code %s does not exist in CountryIncomeThreshold for financial aid id %s",
            financial_aid.country_of_income,
            financial_aid.id
        )
        income_threshold = DEFAULT_INCOME_THRESHOLD
    if tier_program.discount_amount == 0:
        # There is no discount so no reason to go through the financial aid workflow
        return True
    elif income_threshold == 0:
        # There is no income which we need to check the financial aid application
        return True
    else:
        return financial_aid.income_usd > income_threshold
280af275564046ed36b8b5442879f8dcc7e515cb
3,644,741
def crustal_model_files(alt = [200, 1000], anomaly = 'Global', lim = [0., 360. -90., 90.], binsize = 0.1):
    """"
    Reads the .bin IDL files of the crustal magnetic field model (Langlais)
    for a range of altitudes and creates a function based on a linear interpolation.

    Parameters:
        alt: 2-elements array, optional
            The array containing the altitude range. Default is [200, 1000] km.
        anomaly: string, optional
            The anomaly index, e. g., A1, A2, A6, etc. This string is used to find
            the directory where the model matrices are located. Default is 'Global'.
        lim: 4-elements array, optional
            An array cointaining the limits for latitude and longitude data, in which:
            [lon_min, lon_max, lat_min, lat_max]. Default is the whole range of Mars.
            NOTE(review): the default is written ``[0., 360. -90., 90.]`` — a missing
            comma makes it a 3-element list (360. - 90. = 270.); confirm and fix upstream.
        binsize: double, optional
            The size of the lon and lat bins (must be the same size). Default is 0.1 degrees.

    Returns:
        A function and a matrix containing the data.
    """
    longitude = np.linspace(lim[0], lim[1], int((lim[1] - lim[0]) / binsize + 1))
    latitude = np.linspace(lim[2], lim[3], int((lim[3] - lim[2]) / binsize + 1))
    altitude = np.linspace(alt[0], alt[1], int(alt[1] - alt[0] + 1))
    br = np.empty((len(longitude), len(latitude), len(altitude)))
    for i in range(len(altitude)):
        h = int(i + alt[0])
        # NOTE(review): hardcoded absolute path ties this to one machine/user.
        data = sp.io.readsav('/home/oliveira/ccati_mexuser/LANGLAIS_Matrices/'+anomaly+'/LANGLAIS_BR_ALT_' + \
            str(h) + '_RES_01.bin')
        br[:, :, i] = data['zbins'].T
    # Linear interpolator over (lon, lat, alt).
    fn = rgi((longitude, latitude, altitude), br)
    return fn, br
e5deb36b571c0cc75e738bd0bdce7a2fa6ea8d7a
3,644,742
def f1(y_true, y_pred):
    """
    Function for computing the unweighted f1 score using tensors.
    The Function handles only the binary case and compute the unweighted
    f1 score for the positive class only.

    Args:
        - y_true: keras tensor, ground truth labels
        - y_pred: keras tensord, labels estimated by the model

    Returns:
        - f1: float, unweighted f1 score for the positive class
    """
    precision_v = precision(y_true, y_pred)
    recall_v = recall(y_true, y_pred)
    nominator = 2 * (precision_v * recall_v)
    # K.epsilon() guards against division by zero when both metrics are 0.
    denominator = (precision_v + recall_v + K.epsilon())
    f1 = nominator / denominator
    return f1
793fbcbd2ddcec2608139174794e94304f69a631
3,644,743
def get_selector_score(key, selector, use_median, best_based_on_final):
    """
    :param key: Thing to measure (e.g. Average Returns, Loss, etc.)
    :param selector: Selector instance
    :param use_median: Use the median? Else use the mean
    :param best_based_on_final: Only look at the final value? Else use all values.
    :return: A single number that gives the score of `key` inside `selector`
    """
    experiments = selector.extract()
    missing = np.array([np.nan])
    if best_based_on_final:
        # One score per experiment: the last recorded value of `key`.
        samples = [exp.progress.get(key, missing)[-1] for exp in experiments]
    else:
        # Pool every recorded value of `key` across all experiments.
        series = [exp.progress.get(key, missing) for exp in experiments]
        samples = np.concatenate(series or [[np.nan]])
    if len(samples) == 0 or not np.isfinite(samples).all():
        return np.nan
    if use_median:
        return np.nanpercentile(samples, q=50, axis=0)
    return np.nanmean(samples)
4c9d2e08fcc4f4ee3ecbd2cf67e4db829850707a
3,644,744
import pytz


def str_to_timezone(tz):
    """Build a timezone object from its name; fall back to UTC for empty input."""
    return pytz.timezone(tz) if tz else pytz.utc
02c004171f50ceb4b60272769036634f6778c791
3,644,745
def _get_previous_index_search_col(
    m, col, nested_list, trans_function=None, transformation=False
):
    """Return previous index of a key, from a sorted nested list where a key is
    being searched in the col number. Returns -1 if value is not found.

    Args:
        m (comparable): comparable being searched
        col (int): Column number to be searched.
        nested_list (list): Nested List with the values being searched.
            Ex [[0,1,2,2][0,1,2,2]] First inner list represents a row of
            attributes of an instance.
        trans_function (func): Function to transform the comparison value of
            the column.
        transformation (boolean): If true uses a tranformation in the column
            value before comparison of the values.

    Returns:
        int: Index of the value being searched.
    """
    ix = _search_previous_col(m, col, nested_list, trans_function, transformation)
    # NOTE(review): despite the docstring, a miss raises AssertionError here
    # rather than returning -1 (and asserts vanish under ``python -O``).
    assert ix != -1, f"Previous keyword to {m} was not found."
    return ix
4545395928128ce2858c1d0508e77a47be543dd7
3,644,746
import json


def guest_import(hypervisor, host):
    """
    Import a new guest

    ::

      POST /:hypervisor/:host/guests
    """
    response.content_type = "application/json"
    manager = create_manager(hypervisor, host)
    # Stream the raw request body straight to the manager (no buffering here).
    guest = manager.guest_import(
        request.environ['wsgi.input'],
        request.content_length
    )
    # Point the client at the newly created resource.
    location = "/%s/%s/guests/%s" % (hypervisor, host, guest["id"])
    response.set_header("Location", location)
    manager.logout()
    return json.dumps(guest)
d26ecbac6bce0ab07c365aabb8352ec57719798d
3,644,747
from typing import List


def get_doc_count(
        group_by: List[str] = ["year", "country"],
        sort_by: List[metadata.SortOn] = [
            metadata.SortOn(field="year", order=metadata.SortOrder.desc),
            metadata.SortOn(field="count", order=metadata.SortOrder.desc)],
        limit: int = 10):
    """This endpoint provides a generic interface to get the count of
    documents given an arbitrary set of `group_by` fields. The return value
    can be sorted based on the `sort_by` fields input. The number of returned
    groups is limited by the `limit` parameter.
    """
    # Every sort field must be one of the grouped fields or the computed "count".
    assert len(set(so.field for so in sort_by).difference(group_by + ['count'])) == 0

    group_id = {b: f"${b}" for b in group_by}
    sort_by = SON(
        [(so.field, -1 if so.order == metadata.SortOrder.desc else 1)
         for so in sort_by])
    # Project grouped fields back to top level and drop Mongo's compound _id.
    projection = {b: f"$_id.{b}" for b in group_by}
    projection["count"] = "$count"
    projection["_id"] = 0

    # Identify fields that needs unwinding, if any
    list_fields = set(["adm_region", "author", "country", "der_acronyms",
                       "doc_type", "geo_region", "major_doc_type",
                       "topics_src", "wb_subtopic_src"])
    unwind_fields = [{"$unwind": f"${b}"} for b in list_fields.intersection(group_by)]

    pipeline = []
    if unwind_fields:
        pipeline = unwind_fields
    pipeline.extend([
        {"$group": {"_id": group_id, "count": {"$sum": 1}}},
        {"$project": projection},
        {"$sort": sort_by},
        {"$limit": limit},
    ])
    agg = mongodb.get_docs_metadata_collection().aggregate(
        pipeline
    )
    # Attach a 1-based rank reflecting the sorted order.
    values = [{"rank": ix, **result} for ix, result in enumerate(agg, 1)]
    return values
c815d6d9a2746c61d3affbca07e8334c75862030
3,644,748
import re


def get_author_list(text):
    """Extract probable author names from a free-text author/affiliation blob.

    The text is normalised (line terminators and " and " become comma
    separators, non-breaking spaces become plain spaces), split on ", ",
    and each fragment is kept only when it looks like a name: one or more
    optionally-hyphenated single-letter initials with periods, followed by
    surname word(s) of two or more characters (hyphens/apostrophes included
    via \\w-adjacent matching). Affiliation fragments generally fail the
    initial pattern and are dropped, though short trailing tokens such as
    "R.O.C" can slip through.
    """
    normalised = text
    for terminator in LINE_TERMINATOR_CHARS:
        normalised = normalised.replace(terminator, ', ')
    candidates = (normalised
                  .replace(NON_BREAKING_SPACE, ' ')
                  .replace(' and ', ', ')
                  .split(', '))
    # The leading optional hyphen accommodates a pattern seen in papers from Asia.
    name_pattern = re.compile("(-?\\w\\.\\ ?)+([\\w]{2,}\\ ?)+")
    # re.match anchors at the start of each fragment, which is what we want.
    return [candidate for candidate in candidates if name_pattern.match(candidate)]
94b7f74ed24be8bb8bbfacca37dfc9f65f1fc99b
3,644,749
def find_matching_format_function(word_with_formatting, format_functions):
    """
    Finds the formatter function from a list of formatter functions which
    transforms a word into itself. Returns an identity function if none exists.
    """
    for candidate in format_functions:
        # A formatter "matches" when it leaves the word unchanged.
        if candidate(word_with_formatting) == word_with_formatting:
            return candidate
    return lambda word: word
3d2ce0956de4c8ca0de6d0d21f8bbd718247caff
3,644,750
from typing import List


def mean(inlist: List[float]) -> float:
    """
    Returns the arithmetic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).

    Raises ZeroDivisionError for an empty list (as before).

    Usage:   lmean(inlist)
    """
    # BUG FIX: the annotation was ``List(float)`` — a *call* on typing.List,
    # which raises TypeError the moment the ``def`` executes. Also replaced
    # the manual accumulator that shadowed the builtin ``sum``.
    return sum(inlist) / float(len(inlist))
713bbbec706671043a5b76f142d4f19cfa247c6a
3,644,751
def create_file_download_url(file_path: str) -> str:
    """
    Creates Telegram URL for downloading of file.

    - contains secret information (bot token)!

    :param file_path: `file_path` property of `File` object.
    """
    # The bot token is read from the environment to keep it out of source control.
    token = environ["TELEGRAM_API_BOT_TOKEN"]
    return create_url(
        "https://api.telegram.org/file",
        f"bot{token}",
        file_path
    )
6395d17520778d6bf4507ba69559b6ef1ba32ba9
3,644,752
import csv
import io
from datetime import datetime


def convert_to_csv(items):
    """
    Args:
        items: all arns in a region from the DynamoDB query as a list
    returns:
        csv_body: body of the csv file to write out

    IMPROVED: builds the CSV in memory with io.StringIO instead of writing
    /tmp/packages.csv and reading it back — same return value, no temp-file
    side effect, and portable to hosts without /tmp.
    """
    fieldnames = ["Package", "Package Version", "Status", "Expiry Date", "Arn"]
    # sort by package, and then created date (oldest to newest)
    sorted_items = sorted(items, key=lambda i: (i["pckg"].lower(), i["crtdDt"]))
    buffer = io.StringIO()
    writer = csv.DictWriter(buffer, fieldnames=fieldnames)
    writer.writeheader()
    for item in sorted_items:
        # convert datetime to human readable
        try:
            if item["exDt"]:
                item["exDt"] = datetime.utcfromtimestamp(item["exDt"]).isoformat()
        except KeyError:
            item["exDt"] = ""
        csv_item = {
            "Package": item["pckg"],
            "Package Version": item["pckgVrsn"],
            "Arn": item["arn"],
            "Status": item["dplySts"],
            "Expiry Date": item["exDt"],
        }
        writer.writerow(csv_item)
    return buffer.getvalue()
6e651065f06595e9b964bee1b8dab2965e0076f6
3,644,753
def manhattan(train_X, val_X):
    """
    :param train_X: one record from the training set
                    (type series or dataframe including target (survived))
    :param val_X: one record from the validation set series or dataframe
                  include target (survived)
    :return: the Manhattan distance between train_X and val_X
    """
    diff = train_X - val_X
    # Remove survived column
    diff = diff.iloc[:, :-1]
    # BUG FIX: Manhattan (L1) distance is the plain sum of absolute
    # differences; the original wrapped it in an extraneous np.sqrt(),
    # yielding neither L1 nor L2 distance.
    dist = (np.abs(diff)).sum(axis=1)
    return dist
1989466af70d38a17c2b52dd667733da46bbed0c
3,644,754
def author_idea_list(request, registrant_id):
    """ Returns author ideas

    Looks up the registrant (404 if unknown), serializes every Idea they
    authored, and returns the list with HTTP 200.
    """
    registrant = get_object_or_404(Registrant, pk=registrant_id)
    ideas = Idea.objects.filter(author=registrant)
    serializer = IdeaSerializer(ideas, many=True)
    return Response(serializer.data, status=status.HTTP_200_OK)
64ee16535243bfe5414326bed86f5b9efdb97941
3,644,755
import re


def match(text: str, pattern: str) -> bool:
    """
    Match a text against a given regular expression.

    :param text: string to examine.
    :param pattern: regular expression.
    :returns: ``True`` if pattern matches the string.
    """
    # re.match anchors at the beginning of the string only.
    return bool(re.match(pattern, text))
a59d71283766c5079e8151e8be49501246218001
3,644,756
def _compute_hash_check(input_strings: tf.Tensor, field_size: int, seed: int,
                        dtype: tf.dtypes.DType) -> tf.Tensor:
    """Returns the hash_check for input_strings modulo field_size."""
    # Salt derived from the seed so different seeds give independent hashes.
    hash_check_salt = _get_hash_check_salt(seed)
    salted_input = tf.strings.join([hash_check_salt, input_strings])
    hash_check = tf.strings.to_hash_bucket_fast(
        salted_input, num_buckets=field_size)
    # Reshape to a column vector [num_strings, 1] and cast to the field dtype.
    hash_check = tf.reshape(hash_check, shape=[tf.size(hash_check), 1])
    hash_check = tf.cast(hash_check, dtype=dtype)
    return hash_check
bff5d9b24f17fd32ea3a5bfbd60a8446f10471aa
3,644,757
import numpy


def calc_extinction(radius: float, mosaicity: float, model: str,
                    a: float, b: float, c: float,
                    alpha: float, beta: float, gamma: float,
                    h: float, k: float, l: float,
                    f_sq: float, wavelength: float,
                    flag_derivative_f_sq=False):
    """
    Isotropical extinction coorection y:

    $$
    |F|^2_{\\text{corrected}} = y \\cdot |F|^2
    $$

    radius - primary extinction; mosaicity - secondary extinction;
    model = "gauss" or "lorentz";
    a, b, c, alpha, beta, gamma are unit cell parameters (in angstrem and radians);
    h, k, l are Miller indices; f_sq is square of structure factor (in 10-12cm);
    wavelength is neutron wavelength in angstrems.

    CLEANUP: removed dead locals (r, g, g_sq, kk) that were computed but
    never used; all real work is delegated to calc_extinction_2.
    """
    c_a, c_b, c_g = numpy.cos(alpha), numpy.cos(beta), numpy.cos(gamma)
    volume_unit_cell = calc_volume_uc_by_abc_cosines(a, b, c, c_a, c_b, c_g)
    sthovl = calc_sthovl_by_hkl_abc_cosines(h, k, l, a, b, c, c_a, c_b, c_g)
    yext, dder = calc_extinction_2(radius, mosaicity, model, f_sq,
                                   volume_unit_cell, sthovl, wavelength)
    return yext, dder
b0922fde0246ee250033d9ff9eb3fde59c17c343
3,644,759
def smape(y_true: Yannotation, y_pred: Yannotation):
    """
    Calculate the symmetric mean absolute percentage error between
    `y_true` and `y_pred`.

    Parameters
    ----------
    y_true : array, `dataframe`, list or `tensor`
        Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred : array, `dataframe`, list or `tensor`
        The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns
    -------
    error : `tensor`
        Symetric mean absolute percentage error values.
        shape = `[batch_size, d0, .. dN-1]`.

    Examples
    --------
    >>> from autopycoin.losses import smape
    >>> import tensorflow as tf
    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[1., 1.], [1., 0.]]
    >>> smape(y_true, y_pred).numpy()
    array([99.999985, 99.999985], dtype=float32)
    """
    # Ragged tensors are left as-is; anything else is coerced to a dense tensor.
    if not isinstance(y_pred, tf.RaggedTensor):
        y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, dtype=y_pred.dtype)
    # epsilon() in the denominator avoids 0/0 when both values are zero.
    error = tf.abs(y_true - y_pred) / (
        tf.maximum(tf.abs(y_true), epsilon()) + tf.abs(y_pred)
    )
    # Factor 200 because the denominator is not halved above.
    return 200.0 * tf.reduce_mean(error, axis=-1)
0046481ea6b2ddc3295f9d597d6cc3488b498415
3,644,760
def acosh(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
    """
    The ACosH operation

    The arguments for this function are as follows:

    :param rasters: array of rasters. If a scalar is needed for the operation,
        the scalar can be a double or string
    :param extent_type: one of "FirstOf", "IntersectionOf", "UnionOf", "LastOf"
    :param cellsize_type: one of "FirstOf", "MinOf", "MaxOf, "MeanOf", "LastOf"
    :param astype: output pixel type
    :return: the output raster
    """
    # 59 is the operation code dispatched by local() — presumably ACosH; verify
    # against the raster-function operation table.
    return local(rasters, 59, extent_type=extent_type, cellsize_type=cellsize_type, astype=astype)
593b13639f40c347a27d4fc772d7b2ec2d062a86
3,644,761
from typing import Optional
from typing import Dict
from typing import Tuple
from typing import Any


def load_does(
    filepath: PathType, defaults: Optional[Dict[str, bool]] = None
) -> Tuple[Any, Any]:
    """Load_does from file.

    Parses a YAML/OmegaConf file of DOE definitions. The special top-level
    "mask" entry is split out; every other entry is a DOE dict whose missing
    keys are filled from ``defaults``.

    Returns:
        (does, mask) tuple.
    """
    does = {}
    # NOTE(review): ``or`` means an explicitly passed empty dict is replaced
    # by the built-in defaults — confirm that is intended.
    defaults = defaults or {"do_permutation": True, "settings": {}}
    data = OmegaConf.load(filepath)
    data = OmegaConf.to_container(data)
    mask = data.pop("mask")

    for doe_name, doe in data.items():
        # Fill in any per-DOE keys that were omitted in the file.
        for k in defaults:
            if k not in doe:
                doe[k] = defaults[k]

        does[doe_name] = doe
    return does, mask
22cebd75da899bebb092c1c470eabe87e17c41f5
3,644,762
def causal_segment_mask(segment_ids: JTensor, dtype: jnp.dtype = jnp.float32) -> JTensor: """Computes the masks which combines causal masking and segment masks. Args: segment_ids: a JTensor of shape [B, T], the segment that each token belongs to. dtype: data type of the input. Returns: A JTensor of shape [B, 1, T, T]. """ # [B, 1, T, T] segment_mask_t = segment_mask(segment_ids, dtype=dtype) # [1, 1, T, T] b, t = segment_ids.shape causal_mask_t = causal_mask(jnp.zeros([b, t, 1], dtype=dtype)) return jnp.minimum(segment_mask_t, causal_mask_t)
7e16f2a943e19b232fb3f1e55f7b348aa7f56a72
3,644,763
def remove_outliers(peaks: np.ndarray, **kwargs):
    """Split ``peaks`` into inliers and outliers using Local Outlier Factor.

    https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.LocalOutlierFactor.html#sklearn.neighbors.LocalOutlierFactor
    https://scikit-learn.org/stable/modules/outlier_detection.html

    Parameters
    ----------
    peaks
        Samples to classify.
    kwargs
        Forwarded to ``LocalOutlierFactor``.

    Returns
    -------
    tuple
        ``(inliers, outliers)`` slices of ``peaks``.
    """
    clf = LocalOutlierFactor(**kwargs)
    is_inlier = clf.fit_predict(peaks)  # 1 inliers, -1 is outliers
    mask = is_inlier == 1
    return peaks[mask], peaks[np.invert(mask)]
969fe4523e8529edd49a5c0cd81c51949bbe3de5
3,644,764
def powerset(iterable):
    """
    Calcualtes the powerset, copied from
    https://docs.python.org/3/library/itertools.html#itertools-recipes

    powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    """
    elements = list(iterable)
    # One combinations() stream per subset size, chained lazily together.
    subset_streams = (combinations(elements, size)
                      for size in range(len(elements) + 1))
    return chain.from_iterable(subset_streams)
3b645848c0810c69685c06b94fee42e5747bb6e8
3,644,765
def get_rotational_vector(skew_symmetric):
    """Extract the rotation vector from a 3x3 skew-symmetric matrix.

    Parameters
    ----------
    skew_symmetric: numpy.ndarray
        The skew-symmetric matrix [[0, -c, b], [c, 0, -a], [-b, a, 0]].

    Returns
    -------
    rotational_vector: numpy.ndarray of shape (3, 1)
        The vector [a, b, c] encoded by the matrix.

    Raises
    ------
    ValueError
        If the input is not skew symmetric (A + A.T != 0 within tolerance).
    """
    # A skew-symmetric matrix satisfies A == -A.T; allow tiny numerical slack.
    if np.linalg.norm(skew_symmetric + skew_symmetric.T) > 1e-12:
        raise ValueError("The input is not skew symmetric!")

    return np.array(
        [[skew_symmetric[2, 1]],
         [skew_symmetric[0, 2]],
         [skew_symmetric[1, 0]]],
        dtype=float,
    )
e63b771f6db93f63d7307a85689d87162208c6ff
3,644,766
def diaperchange_lifetimes(changes):
    """
    Create a graph showing how long diapers last (time between changes).
    :param changes: a QuerySet of Diaper Change instances.
    :returns: a tuple of the the graph's html and javascript.
    """
    changes = changes.order_by("time")
    durations = []
    last_change = changes.first()
    for change in changes[1:]:
        duration = change.time - last_change.time
        # Use total_seconds() rather than .seconds: .seconds drops the days
        # component of a timedelta, so a gap longer than 24h would be
        # truncated (or even filtered out as zero) in the plot.
        if duration.total_seconds() > 0:
            durations.append(duration)
        last_change = change

    trace = go.Box(
        y=[round(d.total_seconds() / 3600, 2) for d in durations],
        name=_("Changes"),
        jitter=0.3,
        pointpos=-1.8,
        boxpoints="all",
    )
    layout_args = utils.default_graph_layout_options()
    layout_args["height"] = 800
    layout_args["title"] = _("<b>Diaper Lifetimes</b>")
    layout_args["yaxis"]["title"] = _("Time between changes (hours)")
    layout_args["yaxis"]["zeroline"] = False
    layout_args["yaxis"]["dtick"] = 1

    fig = go.Figure({"data": [trace], "layout": go.Layout(**layout_args)})
    output = plotly.plot(fig, output_type="div", include_plotlyjs=False)
    return utils.split_graph_output(output)
4937cff711b8e37e4162a4c7de8cc258c25d2979
3,644,767
def batch_norm_conv(x, n_out, phase_train, scope='bn'):
    """
    Batch normalization on convolutional maps.
    Args:
        x:           Tensor, 4D BHWD input maps
        n_out:       integer, depth of input maps
        phase_train: boolean tf.Varialbe, true indicates training phase
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    with tf.variable_scope(scope):
        # Learnable per-channel shift (beta) and scale (gamma).
        beta = tf.Variable(tf.constant(0.0, shape=[n_out]), name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[n_out]), name='gamma', trainable=True)
        # Per-channel mean/variance over batch, height and width axes.
        batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
        # Running statistics for inference.
        # NOTE(review): decay=0.5 is an unusually fast-moving average --
        # confirm this is intentional.
        ema = tf.train.ExponentialMovingAverage(decay=0.5)

        def mean_var_with_update():
            # Update the moving averages as a side effect, then return the
            # current batch statistics for normalization.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Training: use (and update) batch statistics;
        # inference: use the accumulated moving averages.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
2a08db220f08270a8f2870671ee93278f0c7ddd2
3,644,768
def strip_variants(address):
    """Return a copy of the given address with the variants (if any) stripped
    from the name.

    :rtype: :class:`pants.build_graph.address.Address`
    """
    # parse_variants returns (address-without-variants, variants); we only
    # need the first element here.
    stripped, _variants = parse_variants(address)
    return stripped
a2cc1c68b6032304720b9cae05516535cb9ede22
3,644,769
def deleteIdentifiedStock(bot, update):
    """Delete the stock the user selected.

    If the user has a Telegram username, the selected stock is removed and a
    confirmation is sent; otherwise the user is asked to set a username first.

    Returns:
        The MENU state with the normal keyboard.
    """
    username = update.message.chat.username
    if username is None:
        # Stored stocks are keyed by Telegram username, so we cannot proceed
        # without one.
        update.message.reply_text(
            "It seems you do not have a Telegram Username.\nI'll need your username in order to function :( /start me up when you have one! (You can set your username in Settings.)")
    else:
        reply = bots.deleteUserStock(username, update.message.text)
        update.message.reply_text(reply, parse_mode='HTML')
        update.message.reply_text(
            "What would you like to do next?", reply_markup=markup_one)
    return MENU
7a47579f7e0b9b9388ef0f0f4650cb045cf53570
3,644,770
def z_inc_down(grid):
    """Return True if z increases downwards in the coordinate reference system used by the grid geometry

    :meta common:
    """
    crs = grid.crs
    if crs is None:
        # Lazily instantiate the Crs object from its stored uuid on first use.
        assert grid.crs_uuid is not None
        crs = rqc.Crs(grid.model, uuid = grid.crs_uuid)
        grid.crs = crs
    return crs.z_inc_down
26b05defc1b75ec5a4f3aa9f61c3ba0cb5921bdc
3,644,771
def load_coord_var(prob_data_type):
    """
    Loads a coordinate variable from the source data and returns it.

    :param prob_data_type: key into ``prob_data_map`` selecting which
        pickled variable to load.
    :return: for the 'prob' variable, a float array of percentages;
        otherwise an int32 array of the raw values.
    """
    fpath = "{}/source_others/a1b_tas_jja_EAW_1961-1990.dat".format(BASEDIR)

    with open(fpath, 'rb') as reader:
        data = cPickle.load(reader)

    key = prob_data_map[prob_data_type]
    if key == 'prob':
        # Stored as fractions; convert to percentages. ``float`` replaces the
        # removed ``np.float`` alias (they were the identical builtin type).
        return np.array((data[key] * 100), float)
    else:
        return np.array(data[key], np.int32)
46969ac762393c8b7c60d08b543a2fc2f0069b74
3,644,772
import platform


def get_os():
    """Get the current operating system.

    :returns: The OS platform (str), e.g. 'Linux', 'Darwin' or 'Windows'.
    """
    system_name = platform.system()
    return system_name
307c6c94573733d900b2e31cfc8bcf3db8b6e5b7
3,644,773
def count_hits(space, positions, pi_plus_4_vecs_lab, pi_null_4_vecs_lab, r):
    """Count coincident detector hits for every z position in ``space``.

    Returns a list with one entry per z value: the result of
    ``count_double_hits`` with the detector placed at that z.
    """
    hit_counts = []
    for z_value in space:
        hit_counts.append(count_double_hits(positions, pi_plus_4_vecs_lab,
                                            pi_null_4_vecs_lab, r=r,
                                            z_detector=z_value))
    return hit_counts
66ba0f61d8491ef6687c0fb20761375c6470cae2
3,644,775
import time
import math


def time_since(since, m_padding=2, s_padding=2):
    """Elapsed time since last record point.

    Formats the wall-clock time elapsed since ``since`` (a ``time.time()``
    timestamp) as a zero-padded '<minutes>m:<seconds>s' string.
    """
    elapsed = time.time() - since
    minutes = math.floor(elapsed / 60)
    seconds = elapsed - minutes * 60
    return '{}m:{}s'.format(
        str(int(minutes)).zfill(m_padding),
        str(int(seconds)).zfill(s_padding),
    )
62641b723bf286f54280bb5c6fb1d54c9753907c
3,644,776
def record_check(record):
    """Validate ``record`` against the expected CNN JSON record structure.

    A dictionary is required as the input.
    """
    assert isinstance(record, dict), \
        'record should be dict, while the input is {}'.format(type(record))
    # The reference layout is the first (template) entry under "record".
    expected_struct = JsonFormatSetting.CNN_JSON_STRUCTURE["record"][0]
    return check_dict(record, expected_struct)
965cced685b45de8083dc1c8e161e9aa100b4cf0
3,644,778
import struct


def encrypt_chunk(chunk, password=None):
    """Encrypts the given chunk of data and returns the encrypted chunk.

    If password is None then saq.ENCRYPTION_PASSWORD is used instead.
    password must be a byte string 32 bytes in length (AES-256 key).

    The returned byte string is laid out as:
    8-byte little-endian original length || IV || CBC ciphertext.
    """
    if password is None:
        password = saq.ENCRYPTION_PASSWORD

    assert isinstance(password, bytes)
    assert len(password) == 32

    # Fresh random IV per chunk (legacy PyCrypto OSRNG API).
    iv = Crypto.Random.OSRNG.posix.new().read(AES.block_size)
    encryptor = AES.new(password, AES.MODE_CBC, iv)

    # Remember the true length so the space padding added below can be
    # stripped after decryption.
    original_size = len(chunk)

    # CBC requires input in whole 16-byte blocks; pad with spaces.
    if len(chunk) % 16 != 0:
        chunk += b' ' * (16 - len(chunk) % 16)

    result = struct.pack('<Q', original_size) + iv + encryptor.encrypt(chunk)
    return result
328484205dff850f3857f0a7d19e922ffa230c61
3,644,779
def convert_to_pj_lop_plus(lops):
    """ Converts the list of PlayerStates to an LOP+
    :param lops: The PlayerStates to be converted
    :type lops: [PlayerState, ...]
    :return: The LOP+
    :rtype: PyJSON
    """
    converted = []
    for player_state in lops:
        converted.append(convert_to_pj_player_plus(player_state))
    return converted
968e0a38b5df2a1ce4bf6106632c31213d278a29
3,644,780
from typing import Tuple
import math


def euler_to_quaternion(roll: float = 0, pitch: float = 0,
                        yaw: float = 0) -> Tuple[float, float, float, float]:
    """
    Convert Euler angles to a quaternion.

    Args:
        roll (float): roll angle in radian (x-axis)
        pitch (float): pitch angle in radian (y-axis)
        yaw (float): yaw angle in radian (z-axis)

    Returns:
        Tuple[float, float, float, float]: x, y, z, w
    """
    # Sines and cosines of the half angles.
    sr, cr = math.sin(roll / 2.0), math.cos(roll / 2.0)
    sp, cp = math.sin(pitch / 2.0), math.cos(pitch / 2.0)
    sy, cy = math.sin(yaw / 2.0), math.cos(yaw / 2.0)

    w = cr * cp * cy + sr * sp * sy
    x = sr * cp * cy - cr * sp * sy
    y = cr * sp * cy + sr * cp * sy
    z = cr * cp * sy - sr * sp * cy
    return x, y, z, w
e8346172f07510c377e14827842eb18f1631402e
3,644,781
def create_generic_constant(type_spec, scalar_value):
  """Creates constant for a combination of federated, tuple and tensor types.

  Args:
    type_spec: Instance of `computation_types.Type` containing only federated,
      tuple or tensor types for which we wish to construct a generic constant.
      May also be something convertible to a `computation_types.Type` via
      `computation_types.to_type`.
    scalar_value: The scalar value we wish this constant to have.

  Returns:
    Instance of `computation_building_blocks.ComputationBuildingBlock`
    representing `scalar_value` packed into `type_spec`.

  Raises:
    TypeError: If types don't match their specification in the args section.
      Notice validation of consistency of `type_spec` with `scalar_value` is
      not the rsponsibility of this function.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.Type)
  # The constant itself must be a genuine scalar (rank-0 tensor).
  inferred_scalar_value_type = type_utils.infer_type(scalar_value)
  if (not isinstance(inferred_scalar_value_type, computation_types.TensorType)
      or inferred_scalar_value_type.shape != tf.TensorShape(())):
    raise TypeError('Must pass a scalar value to '
                    '`create_tensorflow_constant`; encountered a value '
                    '{}'.format(scalar_value))
  # Only federated / named-tuple / tensor types are supported.
  if not type_utils.type_tree_contains_only(
      type_spec,
      (computation_types.FederatedType, computation_types.NamedTupleType,
       computation_types.TensorType)):
    raise TypeError
  # Case 1: no federated types anywhere -- a single TF constant suffices.
  if type_utils.type_tree_contains_only(
      type_spec,
      (computation_types.NamedTupleType, computation_types.TensorType)):
    return computation_constructing_utils.create_tensorflow_constant(
        type_spec, scalar_value)
  elif isinstance(type_spec, computation_types.FederatedType):
    # Case 2: federated type -- build the unplaced constant, then wrap it in
    # the appropriate federated_value_at_{clients,server} intrinsic call.
    unplaced_zero = computation_constructing_utils.create_tensorflow_constant(
        type_spec.member, scalar_value)
    if type_spec.placement == placement_literals.CLIENTS:
      placement_fn_type = computation_types.FunctionType(
          type_spec.member,
          computation_types.FederatedType(
              type_spec.member, type_spec.placement, all_equal=True))
      placement_function = computation_building_blocks.Intrinsic(
          intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type)
    elif type_spec.placement == placement_literals.SERVER:
      placement_fn_type = computation_types.FunctionType(
          type_spec.member,
          computation_types.FederatedType(
              type_spec.member, type_spec.placement, all_equal=True))
      placement_function = computation_building_blocks.Intrinsic(
          intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type)
    return computation_building_blocks.Call(placement_function, unplaced_zero)
  elif isinstance(type_spec, computation_types.NamedTupleType):
    # Case 3: tuple containing federated types -- recurse per element and
    # reassemble with the original element names.
    elements = []
    for k in range(len(type_spec)):
      elements.append(create_generic_constant(type_spec[k], scalar_value))
    names = [name for name, _ in anonymous_tuple.to_elements(type_spec)]
    packed_elements = computation_building_blocks.Tuple(elements)
    named_tuple = computation_constructing_utils.create_named_tuple(
        packed_elements, names)
    return named_tuple
  else:
    # Unreachable given the checks above; kept as a defensive guard.
    raise ValueError(
        'The type_spec {} has slipped through all our '
        'generic constant cases, and failed to raise.'.format(type_spec))
9e1db57d93407eef385c1ac88ba83a4e578f891a
3,644,783
from django_openid.models import UserOpenidAssociation


def has_openid(request):
    """
    Given a HttpRequest determine whether the OpenID on it is associated
    thus allowing caller to know whether OpenID is good to depend on.
    """
    # NOTE(review): uses the Python 2 builtin ``unicode`` -- as written this
    # function is Python 2 only; confirm before running under Python 3.
    for association in UserOpenidAssociation.objects.filter(user=request.user):
        if association.openid == unicode(request.openid):
            return True
    return False
ad193ae3c299867ed4c29ee059c45fe24a07523c
3,644,784
def get_wordcloud():
    """Generate the word cloud and serve it to the front end as a PNG file.

    :return: generated tag_cloud.png file
    """
    # Regenerate the image before serving so the client always receives a
    # fresh cloud built from the current Solr contents.
    update_tagcloud(path_to_save='storage/tmp', solr_service=solr)
    return send_from_directory("storage/tmp", "tag_cloud.png",
                               as_attachment=True)
c40cbf95676ac1b3da9c589934bb67a589a80810
3,644,785
def rtc_runner(rtc):
    """Run the gather task described by a resolved tool contract.

    :type rtc: pbcommand.models.ResolvedToolContract
    :return:
    """
    chunk_json = rtc.task.input_files[0]
    gathered_fn = rtc.task.output_files[0]
    return gather_run_main(chunk_json=chunk_json,
                           chunk_key=Constants.CHUNK_KEY,
                           gathered_fn=gathered_fn,
                           ln_name=Constants.DEFAULT_OUT_NAME,
                           gather_func=cat_txt_with_header)
10aa9c707284a04a0d002b95169d0a28e91213eb
3,644,786
from typing import Tuple


def get_matching_axis(shape: Tuple, length: int) -> int:
    """
    Infers the correct axis to use
    :param shape: the shape of the input
    :param length: the desired length of the axis
    :return: the correct axis. If multiple axes match, then it returns the
        last one.
    :raises ValueError: if no axis of ``shape`` has size ``length``.
    """
    # noinspection PyUnresolvedReferences
    axis_candidates = np.nonzero(np.array(shape) == length)[0]
    if len(axis_candidates) == 0:
        # Fixed typo in the error message ("tue to" -> "due to").
        raise ValueError('Unable to infer axis due to shape mismatch: '
                         '{} =/= {}.'.format(shape, length))
    return axis_candidates[-1]
981e2bb2487cd113ffc5dd19c2a62d581cf38304
3,644,787
def is_paths(maybe_paths, marker='*'):
    """
    Does given object `maybe_paths` consist of path or path pattern strings?
    """
    # The three accepted shapes, tried in order (short-circuiting like the
    # original or-chain):
    #   1. a glob-pattern string, e.g. '/tmp/*.json'
    #   2. a path object whose POSIX form contains the glob marker
    #   3. an iterable of path strings and/or ioinfo objects
    if is_path(maybe_paths) and marker in maybe_paths:
        return True
    if is_path_obj(maybe_paths) and marker in maybe_paths.as_posix():
        return True
    return bool(is_iterable(maybe_paths) and
                all(is_path(p) or is_ioinfo(p) for p in maybe_paths))
dc825d7417cb7cb52beaecc4fb6eef333db1514b
3,644,788
import logging


def output_numpy_or_asa(obj, data, *, output_type=None, labels=None):
    """This function returns a numpy ndarray or nelpy.AnalogSignalArray

    Parameters
    ----------
    obj : numpy.ndarray or a nelpy object
    data : numpy.ndarray, with shape (n_samples, n_signals)
        Data is either passed through as the np.ndarray or used to
        form a nelpy object, depending on 'output_type'.
    output_type : string, optional
        Specifies the object that should be returned. The only accepted
        non-None value is 'asa'; default (None) returns the numpy array.
    labels : np.adarray of string, optional
        Labels that will be attached to the nelpy object, if that is
        the desired output type. If the output type is 'numpy', the
        labels are ignored.

    Returns
    -------
    Output object of the specified type. If a numpy array, it will have
    shape (n_samples, n_signals)
    """
    if data.size == 0:
        logging.warning("Output data is empty")
    if not isinstance(data, np.ndarray):
        raise TypeError("data must be a numpy ndarray")
    if output_type is not None:
        if output_type != 'asa':
            raise TypeError(("Invalid output type {} specified".
                             format(output_type)))

    if output_type == 'asa':
        # The NameError guard detects a missing nelpy import ('nel' not
        # bound) and reports it as a ModuleNotFoundError.
        try:
            res = isinstance(obj, nel.RegularlySampledAnalogSignalArray)
            if res is False:
                raise TypeError("You specified output type {} but the input"
                                " object was not a nelpy object. Cannot form an"
                                " ASA around the input object".format(output_type))

            # Transpose data since ASAs have shape (n_signals, n_samples)
            out = nel.AnalogSignalArray(data.T,
                                        abscissa_vals=obj.abscissa_vals,
                                        fs=obj.fs,
                                        support=obj.support,
                                        labels=labels)
            return out
        except NameError:
            raise ModuleNotFoundError("You must have nelpy installed for"
                                      " output type {}".format(output_type))

    # Default path (output_type is None): pass the array straight through.
    return data
2e19de7caa58d4be606fa3c2fef623c32a08a201
3,644,789
def embed_oar(features: Array, action: Array, reward: Array,
              num_actions: int) -> Array:
  """Embed each of the (observation, action, reward) inputs & concatenate.

  Args:
    features: [B, D] observation features.
    action: [B] integer actions, one-hot encoded to [B, A].
    reward: [B] rewards, squashed through tanh.
    num_actions: size A of the one-hot action encoding.

  Returns:
    [B, D + A + 1] concatenation of the three embeddings.
  """
  chex.assert_rank([features, action, reward], [2, 1, 1])
  one_hot_action = jax.nn.one_hot(action, num_classes=num_actions)  # [B, A]
  squashed_reward = jnp.tanh(reward)
  # Broadcast the reward up to the action's rank so it concatenates as [B, 1].
  while squashed_reward.ndim < one_hot_action.ndim:
    squashed_reward = jnp.expand_dims(squashed_reward, axis=-1)
  return jnp.concatenate([features, one_hot_action, squashed_reward], axis=-1)
624b1ca67fa031e548411b4c9cfd9f86765cbd7e
3,644,790
import requests
import json


def aggregations_terms(query=None):
    """Get page for aggregations.

    Posts the comma-separated term ``query`` to the soundwave
    ``aggregations/terms`` endpoint and renders the template matching the
    response status.

    NOTE(review): status codes other than 200/304/400/404/500 fall through
    and implicitly return ``None`` -- confirm that is intended.
    """
    if query is None:
        # Default query
        query = "state,config.instance_type"

    # Remove all white spaces from the str query
    query = query.replace(" ", "")
    data = {"query": query}
    end_point = "aggregations/terms"
    url = SOUNDWAVE_API + end_point
    response = requests.post(url, json=data)
    if response.status_code == 200 or response.status_code == 304:
        json_data = json_loads_byteified(response.text)
        return render_template(
            "aggregations.html", data=json.dumps(json_data), query=query)
    elif response.status_code == 404 or response.status_code == 400:
        logger.warn("Data not found in soundwave elastic search store. API returned 404")
        return render_template("404.html")
    elif response.status_code == 500:
        logger.warn("soundwave api returned 500 status code. Internal Server error")
        return render_template("500.html")
c69308a007ec4b366129de3c3aa277c96fda2edd
3,644,792
import tqdm
import torch


def validate_base(model, args, loader, loadername, train=True):
    """
    The validation function. Validates the ELBO + MIL, ELBO, and the accuracy of the
    given [training, validation or test] loader.

    As a side effect, when ``train`` is False and the metric improves on
    ``args.prev_val``, the model state dict is saved and ``args.prev_val``
    is updated.

    Returns
    -------
    loss_val: float
        Either the mean ELBO (args.model == 'base') or the accuracy rate
        (args.model == 'base_att').
    """
    # NOTE(review): ``import tqdm`` followed by calling ``tqdm(...)`` only
    # works if the enclosing module actually does ``from tqdm import tqdm``;
    # confirm the real import form.
    # Model: validate
    model.eval()
    # Declare loss tracker
    loss_val = 0.
    # Initialize the number of points
    N = 0
    # Loop through the data
    for data, label in tqdm(loader, desc=f' Validation[{loadername}]'):
        # Convert the data to cuda if available
        data = data.to(device=args.device).squeeze(0)
        # Update the N
        N += data.shape[0]
        # If args.mode is 'base_att'
        if args.model == 'base_att':
            # Convert the label to cuda if available
            label = label[0].to(device=args.device)
            # Calculate the objective for the Attention MIL
            # (name kept the same not to duplicate the code blocks)
            elbo_u_sum = model.calculate_classification_error(data, label)[0]
        # Otherwise
        else:
            # Calculate ELBO for unlabeled data
            elbo_u_sum = model(data)
        # Track elbo results together [sum]
        loss_val += elbo_u_sum.item()
        if args.test_mode:
            break
    # If the mode is base_att
    if args.model == 'base_att':
        # Divide the accuracy by the length of the loader
        loss_val = loss_val / len(loader)
        # Trace
        print(f' [Valid {loadername}]\t accuracy: {loss_val: .2f}')
        # If the loader is not the training loader
        if not train:
            # If the validation accuracy is higher than the previous one
            if loss_val >= args.prev_val:
                # Save the model
                torch.save(model.state_dict(), f'{args.MODELPATH}/{args.mode}_E{args.epoch}.pt')
                # Update the accuracy value
                args.prev_val = loss_val
    # If the mode is base
    elif args.model == 'base':
        # Divide the loss by the number of points
        loss_val = loss_val / N
        # Trace
        print(f' [Valid {loadername}]\t elbo: {loss_val: .2f}')
        # If the loader is not the training loader
        if not train:
            # If the validation loss is lower than the previous one
            if loss_val <= args.prev_val:
                # Save the model
                torch.save(model.state_dict(), f'{args.MODELPATH}/{args.mode}_E{args.epoch}.pt')
                # Update the accuracy value
                args.prev_val = loss_val
    # Return validation records
    return loss_val
85c9558bd484190eb61599509b9c9ec9f4a5cc0a
3,644,793
def parse_person(person):
    """
    https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
    A "person" is an object with an optional "name" or "email" field.

    A person can be in the form:
      "author": "Isaac Z. Schlueter <i@izs.me>"

    For example:
    >>> p = parse_person('Barney Rubble <b@rubble.com>')
    >>> assert p == ('Barney Rubble', 'b@rubble.com')
    >>> p = parse_person('Barney Rubble')
    >>> assert p == ('Barney Rubble', None)
    >>> p = parse_person('<b@rubble.com>')
    >>> assert p == (None, 'b@rubble.com')
    """
    # Try the full "Name <email>" pattern first; fall back to the
    # email-only pattern.
    # NOTE(review): if BOTH regexes fail to match, ``parsed`` is None and
    # ``parsed.group('email')`` below raises AttributeError -- confirm the
    # fallback pattern is guaranteed to match any remaining input.
    parsed = person_parser(person)
    if not parsed:
        name = None
        parsed = person_parser_no_name(person)
    else:
        name = parsed.group('name')
    email = parsed.group('email')

    if name:
        name = name.strip()
    if email:
        # Strip the angle brackets and surrounding whitespace.
        email = email.strip('<> ')

    return name, email
3fe30bf85f3ba4877b2924c5b5778d5a5205b6ee
3,644,794
def _glasstone_surface_cf(y): """Correction factor provided by TEoNW for contact surface bursts (p. 335).""" return np.interp(y, [1.0, 50.0, 100.0, 300.0, 700.0, 2000.0, 5000.0, 5000.0], [0.6666666666666666, 0.6666666666666666, 1.0, 1.25, 1.5, 2.0, 3.0, 3.0])
8ac3b0273e8c8fe218d15a2b89aad994a7413d68
3,644,795
import torch


def create_eval_fn(task_id, calculate_gradient=False):
    """Creates an evaluation function for a given task.

    Returns an evaluation function that takes in a model, dataloader, and
    device, and evaluates the model on the data from the dataloader. Returns
    a dictionary with mean "loss" and "accuracy". If calculate_gradient is
    True, dictionary will also contain gradients for the model wrt the loss
    on the data.

    Args:
      task_id: Task id corresponding to the data that will be evaluated.
      calculate_gradient: Whether gradient should be calculated.
    """
    def eval_fn(model, dataloader, device):
        model.eval()
        total_loss = 0
        # Sum per sample, then divide by dataset size below -- so summing the
        # per-batch values yields the dataset-mean loss.
        loss_fn = torch.nn.CrossEntropyLoss(reduction="sum").to(device=device)
        num_correct = 0
        # Clear any stale gradients; backward() below accumulates across
        # batches, producing the gradient of the dataset-mean loss.
        model.zero_grad()
        torch.set_grad_enabled(calculate_gradient)

        for X, y in iter(dataloader):
            X = X.to(device=device)
            y = y.to(device=device)
            # The model is multi-task: task_id selects the output head.
            output = model(X, task_id)
            preds = torch.argmax(output, dim=1)
            num_correct += (preds == y).sum().item()
            loss = loss_fn(output, y) / len(dataloader.dataset)
            if calculate_gradient:
                loss.backward()
            total_loss += loss.item()

        accuracy = num_correct / len(dataloader.dataset)
        metrics = {"loss": total_loss, "accuracy": accuracy}
        if calculate_gradient:
            # flatten_gradients collects model.parameters() grads into one
            # flat vector.
            gradients = flatten_gradients(model)
            metrics["gradients"] = gradients
        return metrics

    return eval_fn
ac0a7107f695170f2fa6c65dfeae63056b53452d
3,644,796
def naming_style(f):
    """Decorator for name utility functions.

    Wraps a name utility function in a function that takes one or more names,
    splits them into a list of words, and passes the list to the utility
    function.
    """
    def inner(name_or_names):
        if isinstance(name_or_names, list):
            names = name_or_names
        else:
            names = [name_or_names]
        # Flatten the per-name word lists into a single word list.
        words = [word for name in names for word in split_name(name)]
        return f(words)
    return inner
bbcb1b0b06bbb7a24abe60131a9a5ba525ed01db
3,644,798
def is_idaq(*args):
    """
    is_idaq() -> bool

    Returns True or False depending if IDAPython is hosted by IDAQ
    """
    # Thin forwarding wrapper over the SWIG-generated binding.
    result = _ida_kernwin.is_idaq(*args)
    return result
5d18067b31be9c165a847815eb0bab92f89b0381
3,644,801
import requests
import yaml


def get_stats_yaml():
    """grab national stats yaml from scorecard repo

    Returns the parsed YAML as a dict, or an empty dict when the data
    cannot be fetched or read.
    """
    nat_dict = {}
    try:
        response = requests.get(COLLEGE_CHOICE_NATIONAL_DATA_URL)
        if response.ok and response.text:
            nat_dict = yaml.safe_load(response.text)
    except (AttributeError, requests.exceptions.ConnectionError):
        # response.text had no value, or requests could not connect;
        # fall through and return whatever we have (the empty dict).
        pass
    return nat_dict
045eeba3bfc42fa9e1821728260fd4d33e216731
3,644,802
def get_original(N: int = 64) -> np.ndarray:
    """radontea logo base image"""
    axis = np.linspace(-N / 2, N / 2, N, endpoint=False)
    X = axis.reshape(1, -1)
    Y = axis.reshape(-1, 1)
    # Scale the logo intensities to 8-bit-range levels in a uint16 array.
    return np.array(logo(X, Y, N) * 255, dtype=np.uint16)
2bab08961d444f6ecfa097258872d02ae185944b
3,644,805
from typing import List


def get_sql_update_by_ids(table: str, columns: List[str], ids_length: int):
    """Build a parameterised UPDATE statement for a batch of rows by id.

    :param table: table name
    :param columns: column names to set (each bound with a ``%s`` placeholder)
    :param ids_length: how many ``%s`` placeholders the ``IN`` clause needs
    :return: the SQL string
    """
    # Argument validation (messages kept identical to the original).
    if not table:
        raise ParamError(f"table 参数错误:table={table}")
    if not columns or not isinstance(columns, List):
        raise ParamError(f"columns 参数错误:columns={columns}")
    if not ids_length or not isinstance(ids_length, int):
        raise ParamError(f"ids_length 参数错误:ids_length={ids_length}")

    # Build "col=%s, col=%s" and "%s, %s" placeholder fragments.
    kvs_str = ", ".join(f"{column}=%s" for column in columns)
    ids_str = ", ".join(["%s"] * ids_length)
    return f"update {table} set {kvs_str} where id in ({ids_str});"
ac70aa43aea4fad06ac2fd521239687040143b28
3,644,806
import random
import sympy


def add_X_to_both_sides(latex_dict: dict) -> str:
    """
    Validate the inference step "given a = b, add c to both sides,
    get a + c = b + c".

    https://docs.sympy.org/latest/gotchas.html#double-equals-signs
    https://stackoverflow.com/questions/37112738/sympy-comparing-expressions

    Returns "valid" when both sides check out symbolically; otherwise a
    message describing the LHS/RHS differences.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('c')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a + c'), 'RHS': parse_latex('b + c')}]
    >>> add_X_to_both_sides(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # The step is valid iff (input + feed) - output simplifies to zero on
    # both sides.
    d1 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["LHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["RHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["RHS"]
    )
    if (d1 == 0) and (d2 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
2ab0af9acbb09dcace00575a58afb66cebf2a07c
3,644,808
def init_var_dict(init_args, var_list):
    """Init var with different methods.

    Args:
        init_args: (min, max) pair; only the max value is used, as the bound
            for Uniform initialization.
        var_list: sequence of (name, shape, method) triples, where method is
            one of 'random'/'uniform', 'one', 'zero' or 'normal'.

    Returns:
        dict mapping each name to an initialized Parameter; duplicate names
        keep the first-seen initialization.
    """
    var_map = {}
    # NOTE(review): the minimum of init_args is ignored -- confirm that
    # Uniform's implied lower bound is intended here.
    _, max_val = init_args
    for i, _ in enumerate(var_list):
        key, shape, method = var_list[i]
        if key not in var_map.keys():
            if method in ['random', 'uniform']:
                var_map[key] = Parameter(initializer(Uniform(max_val), shape, ms_type), name=key)
            elif method == "one":
                var_map[key] = Parameter(initializer("ones", shape, ms_type), name=key)
            elif method == "zero":
                var_map[key] = Parameter(initializer("zeros", shape, ms_type), name=key)
            elif method == 'normal':
                var_map[key] = Parameter(Tensor(np.random.normal(loc=0.0, scale=0.01, size=shape).
                                                astype(dtype=np_type)), name=key)
    return var_map
05a3bece9598426010466c27ce794eb7d2aea937
3,644,809
import warnings def _eval_bernstein_1d(x, fvals, method="binom"): """Evaluate 1-dimensional bernstein polynomial given grid of values. experimental, comparing methods Parameters ---------- x : array_like Values at which to evaluate the Bernstein polynomial. fvals : ndarray Grid values of coefficients for Bernstein polynomial basis in the weighted sum. method: "binom", "beta" or "bpoly" Method to construct Bernstein polynomial basis, used for comparison of parameterizations. - "binom" uses pmf of Binomial distribution - "beta" uses pdf of Beta distribution - "bpoly" uses one interval in scipy.interpolate.BPoly Returns ------- Bernstein polynomial at evaluation points, weighted sum of Bernstein polynomial basis. """ k_terms = fvals.shape[-1] xx = np.asarray(x) k = np.arange(k_terms).astype(float) n = k_terms - 1. if method.lower() == "binom": # Divide by 0 RuntimeWarning here with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) poly_base = stats.binom.pmf(k, n, xx[..., None]) bp_values = (fvals * poly_base).sum(-1) elif method.lower() == "bpoly": bpb = interpolate.BPoly(fvals[:, None], [0., 1]) bp_values = bpb(x) elif method.lower() == "beta": # Divide by 0 RuntimeWarning here with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) poly_base = stats.beta.pdf(xx[..., None], k + 1, n - k + 1) / (n + 1) bp_values = (fvals * poly_base).sum(-1) else: raise ValueError("method not recogized") return bp_values
5561d4099bd07b0fc75dcbf47c53f5ff589e2d9d
3,644,812
def exp_bar(self, user, size=20):
    """\
    Returns a string visualizing the current exp of the user as a bar.
    """
    # '#' marks earned exp toward the next level, '.' marks the remainder.
    filled = user.exp * size // exp_next_lvl(user.lvl)
    empty = size - filled
    return '[' + '#' * filled + '.' * empty + ']'
575d475d602d0fdd4ded9eb2a139484c5d78887e
3,644,813
def linear(input_, output_size, scope=None, stddev=0.02, with_w=False):
    """Define linear activation function used for fc layer.

    Args:
        input_: An input tensor for activation function.
        output_size: A output tensor size after passing through linearity.
        scope: variable scope, if None, "Linear" is used.
        stddev : user defined standard deviation for initialization.
        with_w: if the weight is also needed as output.

    Returns:
        logits of weights and biases (and the weight matrix and bias
        variables themselves when with_w is True).
    """
    shape = input_.get_shape().as_list()

    with tf.variable_scope(scope or "Linear"):
        # Weight matrix [in_dim, out_dim], Gaussian-initialized.
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        # Bias initialized to zero.
        bias = tf.get_variable("bias", [output_size],
            initializer=tf.constant_initializer(0.0))
        if with_w:
            # Also expose the variables, e.g. for weight penalties or reuse.
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
8a5a4b06598d9c3c799c4a82d07a9d3d11962f23
3,644,814
from pathlib import Path
import json
import hashlib
import math


def generate_patches(patch_cache_location, axis, image_input_channels, brain_mask_channel,
                     classification_mask, patch_size, k_fold_count, patients=None,
                     excluded_patients=None):
    """Generate new patch sets for testing and training for given input channels.

    The patient set is the intersection of the patient directories of all
    image input channels (unless ``patients`` overrides it), minus
    ``excluded_patients``; it is shuffled and split into ``k_fold_count``
    roughly equal folds, each returned as a DataWrapper of shuffled patches.
    """
    if excluded_patients is not None:
        excluded_patients = np.array(excluded_patients)

    patient_nrs = None
    if patients:
        # patient override
        print('Patient override:\n')
        print(patients)
        patient_nrs = np.array(patients)
    else:
        # loop over patient nrs in input channel dirs
        for input_channel in image_input_channels:
            # get all dirs in given input channel path
            input_channel_path = Path(input_channel['path'])
            dirs = [f for f in input_channel_path.iterdir() if f.is_dir()]

            # get all patient ids listed in input channel
            new_patients = []
            for pat_dir in dirs:
                pat_id = basename(normpath(pat_dir))
                new_patients.append(pat_id)

            # calculate intersect in arrays so final patient nrs list only contains patients
            # which are in all of the given input channels
            if patient_nrs is not None:
                patient_nrs = np.intersect1d(patient_nrs, np.array(new_patients))
            else:
                patient_nrs = np.array(new_patients)

    patient_nrs.sort()
    patient_nrs = np.array(patient_nrs)

    # Drop any explicitly excluded patients before splitting into folds.
    if excluded_patients is not None:
        excluded_indices = np.isin(patient_nrs, excluded_patients)
        patient_nrs = np.delete(patient_nrs, excluded_indices.nonzero(), 0)

    # Shuffle patients so the folds are randomized.
    patient_shuffle = np.arange(patient_nrs.shape[0])
    np_random_shuffle(patient_shuffle)
    patient_nrs = patient_nrs[patient_shuffle]
    del patient_shuffle

    # The cache directory is keyed by an MD5 of the (sorted) channel config,
    # so each channel combination gets its own patch cache; the config is
    # also stored alongside for inspection.
    json_image_channels = json.dumps(image_input_channels, sort_keys=True).encode('utf-8')
    input_channel_hash = str(hashlib.md5(json_image_channels).hexdigest())
    pat_size_hashed_cache_path = join(patch_cache_location, input_channel_hash)
    if not isdir(pat_size_hashed_cache_path):
        makedirs(pat_size_hashed_cache_path)
    with open(join(patch_cache_location, input_channel_hash, '_image_channels.json'), 'w') as o_file:
        json.dump(image_input_channels, o_file)

    fold_data_sets = []
    # Re-balance the remaining fold size after each fold so leftover
    # patients are spread over the later folds.
    fold_size = patient_nrs.shape[0] / k_fold_count
    start = 0
    for fold in range(k_fold_count):
        fold_patients = patient_nrs[start:start+math.ceil(fold_size)]
        start += math.ceil(fold_size)
        if fold < (k_fold_count - 1):
            fold_size = (patient_nrs.shape[0] - start) / (k_fold_count - (fold + 1))

        fold_patches, fold_labels = patients_patches(fold_patients, pat_size_hashed_cache_path,
                                                     image_input_channels, brain_mask_channel,
                                                     classification_mask, patch_size, axis)

        # Shuffle patches and labels with the same permutation.
        perm0 = np.arange(fold_patches.shape[0])
        np_random_shuffle(perm0)
        fold_patches = fold_patches[perm0]
        fold_labels = fold_labels[perm0]

        fold_data_set = DataWrapper(fold_patches, fold_labels, reshape=False, patients=fold_patients)
        fold_data_sets.append(fold_data_set)

    print('Fetched all patient data')
    for fold in range(k_fold_count):
        print('\nFold {} Patches'.format(fold))
        print(fold_data_sets[fold].images.shape)
        print(fold_data_sets[fold].labels.shape)

    return fold_data_sets
d8dc0d1312acff05bfdbc56192ee3c7caeb65c86
3,644,815
from typing import Union
from typing import Sequence
from typing import List


def query_user_joins(user_group: Union[User, Sequence[User], None]) \
        -> List[JoinRecord]:
    """Fetch join records for a user, a group of users, or everyone.

    :param user_group: User or user group as an iterable of users.
    :return: matching JoinRecords ordered by timestamp.
    """
    # Normalise the input: a single User becomes a one-element list; a
    # falsy/None group means "no filter" (all users).
    if isinstance(user_group, User):
        users = [user_group]
    else:
        users = user_group

    query = session.query(JoinRecord)
    if users:
        # noinspection PyUnresolvedReferences
        query = query.filter(JoinRecord.user_id.in_(u.user_id for u in users))
    results = query.order_by(JoinRecord.timestamp).all()

    logger.info("query_user_joins: "
                "Found {:d} records for user group: {!r}".format(len(results), user_group))
    return results
5481e4512b7b28b0832f9fec00ef0cf4e7cfd5de
3,644,817
def rec_test(test_type: str):
    """
    Rec test decorator.

    Marks the wrapped function as a test and attaches metadata used by the
    test harness:

    * ``is_test``   -- always True
    * ``test_type`` -- the category passed to the decorator
    * ``test_desc`` -- the function's docstring, stripped ("" if absent)
    * ``name``      -- the function's name
    """
    def decorator(f):
        @wraps(f)
        def w(*args, **kwargs):
            return f(*args, **kwargs)
        # add attributes to the wrapper so the harness can discover it
        w.is_test = True
        w.test_type = test_type
        try:
            # .strip() replaces the equivalent .lstrip().rstrip() chain
            w.test_desc = f.__doc__.strip()
        except AttributeError:
            # __doc__ is None when the function has no docstring
            w.test_desc = ""
        try:
            # python 3
            w.name = w.__name__
        except AttributeError:
            # python 2
            w.name = w.__func__.func_name
        return w
    return decorator
94eca60bd4d3f96fd3346da5bcc2b70c3a167ace
3,644,819
def display_convw(w, s, r, c, fig, vmax=None, vmin=None, dataset='mnist', title='conv_filters'):
    """Display a bank of square 3-channel convolutional filters as a tiled image.

    :param w: filter weights of shape (numhid, s*s*3); each row is one filter
        with channels interleaved per pixel -- assumed layout, TODO confirm
        against the producer of ``w``.
    :param s: filter side length (filters are s x s).
    :param r: number of tile rows in the display grid.
    :param c: number of tile columns in the display grid.
    :param fig: matplotlib figure number to draw into.
    :param vmax: upper color limit passed to ``imshow``.
    :param vmin: lower color limit passed to ``imshow``.
    :param dataset: unused here; kept for interface compatibility.
    :param title: plot title.
    :return: the assembled (s*r, s*c, 3) pixel array that was displayed.
    """
    numhid = w.shape[0]
    size_x = s
    size_y = s  # For now.
    # Bug fix: true division would make num_channels a float in Python 3 and
    # break reshape()/range() below -- use floor division.
    num_channels = w.shape[1] // (size_x * size_y)
    assert num_channels == 3
    # Bug fix: the original `w.shape[1] % size_x*size_y == 0` parsed as
    # `(w.shape[1] % size_x) * size_y == 0`; parenthesize the intended check.
    assert w.shape[1] % (size_x * size_y) == 0
    if isinstance(w, np.ndarray):
        vh = w.reshape(size_x * numhid * num_channels, size_y)
    else:
        # Presumably a GPU-matrix type exposing asarray() -- TODO confirm.
        vh = w.asarray().reshape(size_x * numhid * num_channels, size_y)
    pvh = np.zeros((size_x * r, size_y * c, num_channels))
    # Copy each filter's channel planes into its tile of the display grid.
    for i in range(r):
        for j in range(c):
            for ch in range(num_channels):
                pvh[i * size_x:(i + 1) * size_x, j * size_y:(j + 1) * size_y, ch] = \
                    vh[(num_channels * (i * c + j) + ch) * size_x:
                       (num_channels * (i * c + j) + ch + 1) * size_x, :]

    plt.figure(fig)
    plt.clf()
    plt.title(title)
    plt.imshow(pvh, vmax=vmax, vmin=vmin)
    # Draw separator lines between tiles.
    scale = 1
    xmax = size_x * c
    ymax = size_y * r
    color = 'k'
    for x in range(0, c):
        plt.axvline(x=x * size_x / scale, ymin=0, ymax=ymax / scale, color=color)
    for y in range(0, r):
        plt.axhline(y=y * size_y / scale, xmin=0, xmax=xmax / scale, color=color)
    plt.draw()
    return pvh
87742ea0831f731e800385134379ce1b786b834f
3,644,820
def get_optional_list(all_tasks=ALL_TASKS, grade=-1, *keys) -> list:
    """Build the list of selectable tasks at the requested dictionary level.

    :param keys: keys narrowing the scope, variable length; locating level 1
        needs one key, locating level 2 needs two.
    :param all_tasks: two-level dict holding all tasks.
    :param grade: dictionary level; 0 is the outermost level, deeper levels
        nest inward, and the default -1 collects every innermost task into one
        combined list.
    :return: list of selectable tasks for the requested level.
    """
    chosen = []
    if grade == -1:
        # Flatten every innermost task list.
        for inner in all_tasks.values():
            for tasks in inner.values():
                chosen.extend(tasks)
    elif grade == 0:
        # Broad task categories at the outermost level.
        chosen.extend(all_tasks.keys())
    elif grade == 1:
        # Level 1 requires the level-0 key.
        chosen.extend(all_tasks[keys[0]].keys())
    elif grade == 2:
        # Level 2 requires the level-0 and level-1 keys.
        first, second = keys[0], keys[1]
        chosen.extend(all_tasks[first][second])
    else:
        print("超出任务字典的层级范围了哦")
    return chosen
ee54e65e724520d8ed9e3d994811c26ed2205add
3,644,821
def process_genotypes(filepath, snp_maf, snp_list=None, **kwargs):
    """
    Process a genotype file and return the parsed result.

    :param filepath: path of the genotype file to read (UTF-8 text).
    :param snp_maf: SNP MAF data forwarded to the parser.
    :param snp_list: get specified snp if provided
    :param bool genotype_label: True if first column is the label of specimen,
        default False
    :param bool skip_none_rs: True if skip None genotype, default True
    :param bool fill_none: True if auto fill None genotype with most frequent
        genotype by MAF, default True
    :return: the parser's result (presumably a dataframe -- confirm against
        genotype_with_label/genotype_without_label).
    """
    # Defaults first, caller-supplied kwargs win.
    conf = {'genotype_label': False, 'skip_none_rs': True}
    conf.update(kwargs)

    # Pick the parser matching the file layout.
    parser = genotype_with_label if conf['genotype_label'] else genotype_without_label
    with open(filepath, encoding='utf-8') as fh:
        return parser(fh, snp_maf=snp_maf, snp_list=snp_list, **conf)
501aa7b648d970b21dff1a4bd98102680e5ea774
3,644,822
def table_exists(conn, table_name, schema=False):
    """Checks if a table exists.

    Parameters
    ----------
    conn
        A Psycopg2 connection.
    table_name : str
        The table name.
    schema : str
        The schema to which the table belongs; falsy means "any schema".

    Returns
    -------
    bool
        True if at least one matching table exists.
    """
    cur = conn.cursor()
    try:
        # Fix: use query parameters instead of interpolating repr()-quoted
        # values into the SQL string, which is fragile against quotes in
        # identifiers and injection-prone.
        table_exists_sql = ('select * from information_schema.tables '
                            'where table_name=%s')
        params = [table_name]
        if schema:
            table_exists_sql += ' and table_schema=%s'
            params.append(schema)

        cur.execute(table_exists_sql, params)
        return bool(cur.rowcount)
    finally:
        # Fix: the original leaked the cursor; always release it.
        cur.close()
c9b698afbe795a6a73ddfb87b2725c3c4205f35e
3,644,824
import re


def _dict_from_dir(previous_run_path):
    """
    build dictionary that maps training set durations to a list of training
    subset csv paths, ordered by replicate number

    factored out as helper function so we can test this works correctly

    Parameters
    ----------
    previous_run_path : str, Path
        path to directory containing dataset .csv files that represent subsets
        of training set, created by a previous run of
        ``vak.core.learncurve.learning_curve``.
        Typically directory will have a name like ``results_{timestamp}`` and
        the actual .csv splits will be in sub-directories with names
        corresponding to the training set duration

    Returns
    -------
    train_dur_csv_paths : dict
        where keys are duration in seconds of subsets taken from training
        data, and corresponding values are lists of paths to .csv files
        containing those subsets

    Raises
    ------
    ValueError
        if a ``train_dur_*s`` directory name does not contain exactly one
        duration, or a replicate directory does not contain exactly one
        ``*prep*csv`` file.
    """
    train_dur_csv_paths = {}
    train_dur_dirs = previous_run_path.glob("train_dur_*s")
    for train_dur_dir in train_dur_dirs:
        train_dur = re.findall(TRAIN_DUR_PAT, train_dur_dir.name)
        if len(train_dur) != 1:
            # Bug fix: the original message interpolated `train_subset_path`,
            # which is undefined at this point, so hitting this branch raised
            # NameError instead of the intended ValueError. Report the
            # directory name that was actually inspected.
            raise ValueError(
                f"did not find just a single training subset duration in filename:\n"
                f"{train_dur_dir.name}\n"
                f"Instead found: {train_dur}"
            )
        train_dur = int(train_dur[0])

        # sort by increasing replicate number -- numerically, not alphabetically
        replicate_dirs = sorted(
            train_dur_dir.glob("replicate_*"),
            key=lambda dir_path: int(dir_path.name.split("_")[-1]),
        )

        train_subset_paths = []
        for replicate_dir in replicate_dirs:
            train_subset_path = sorted(replicate_dir.glob("*prep*csv"))
            if len(train_subset_path) != 1:
                raise ValueError(
                    f"did not find just a single training subset .csv in replicate directory:\n"
                    f"{replicate_dir}\n"
                    f"Instead found: {train_subset_path}"
                )
            train_subset_path = train_subset_path[0]
            train_subset_paths.append(train_subset_path)

        train_dur_csv_paths[train_dur] = train_subset_paths

    return train_dur_csv_paths
32d49b6ec6a8472a3864fc95cc52502a63038cdc
3,644,825
def aggregate_pixel(arr, x_step, y_step):
    """Aggregate one output pixel of *arr* between fractional bin edges.

    ``x_step`` and ``y_step`` each hold two edge coordinates (in input-pixel
    units) delimiting the region along the first and second array axis.
    Fully covered interior pixels contribute their whole value; pixels cut
    by an edge contribute proportionally to the covered fraction.  Returns
    the area-weighted mean of the region.
    """
    x0, x1 = x_step[0], x_step[1]
    y0, y1 = y_step[0], y_step[1]

    # Integer bounds of the fully covered interior.
    lo_x, hi_x = int(ceil(x0)), int(floor(x1))
    lo_y, hi_y = int(ceil(y0)), int(floor(y1))

    # Fractional coverage of the four boundary strips.
    w_left = lo_x - x0
    w_right = x1 - hi_x
    w_top = lo_y - y0
    w_bottom = y1 - hi_y

    # Fully covered interior pixels.
    total = 0.0
    total += arr[lo_x:hi_x, lo_y:hi_y].sum()

    # Partially covered edge strips (empty slices contribute 0 at borders).
    total += arr[lo_x - 1:lo_x, lo_y:hi_y].sum() * w_left
    total += arr[hi_x:hi_x + 1, lo_y:hi_y].sum() * w_right
    total += arr[lo_x:hi_x, lo_y - 1:lo_y].sum() * w_top
    total += arr[lo_x:hi_x, hi_y:hi_y + 1].sum() * w_bottom

    # Corner pixels cut by two edges at once.
    total += arr[lo_x - 1:lo_x, lo_y - 1:lo_y].sum() * w_left * w_top
    total += arr[hi_x:hi_x + 1, lo_y - 1:lo_y].sum() * w_right * w_top
    total += arr[lo_x - 1:lo_x, hi_y:hi_y + 1].sum() * w_left * w_bottom
    total += arr[hi_x:hi_x + 1, hi_y:hi_y + 1].sum() * w_right * w_bottom

    # Normalize by the covered area.
    area = (x1 - x0) * (y1 - y0)
    return total / float(area)
d9cdad36c7eeff3581310d13bedce204e7431560
3,644,826
def simplify_datatype(config):
    """
    Recursively replace YAML-unfriendly values in *config*, in place:
    nested dicts are processed recursively, tuples become lists, and
    numpy arrays are converted via ``tolist``.  Returns the same dict.
    """
    for key, value in config.items():
        if isinstance(value, dict):
            simplified = simplify_datatype(value)
        elif isinstance(value, tuple):
            simplified = list(value)
        elif isinstance(value, np.ndarray):
            simplified = value.tolist()
        else:
            simplified = value
        config[key] = simplified
    return config
f3e8ae76e04479ed9b1b5fbd450edec20342e5a9
3,644,827
def _strict_random_crop_image(image,
                              boxes,
                              labels,
                              is_crowd,
                              difficult,
                              masks=None,
                              sem_seg=None,
                              min_object_covered=1.0,
                              aspect_ratio_range=(0.75, 1.33),
                              area_range=(0.1, 1.0),
                              overlap_thresh=0.3):
  """Performs random crop.

  Note: boxes will be clipped to the crop. Keypoint coordinates that are
  outside the crop will be set to NaN, which is consistent with the original
  keypoint encoding for non-existing keypoints. This function always crops
  the image and is supposed to be used by `random_crop_image` function which
  sometimes returns image unchanged.

  Args:
    image: rank 3 float32 tensor containing 1 image -> [height, width,
           channels] with pixel values varying between [0, 1].
    boxes: rank 2 float32 tensor containing the bounding boxes with shape
           [num_instances, 4]. Boxes are in normalized form meaning
           their coordinates vary between [0, 1]. Each row is in the form
           of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    is_crowd: rank 1 tensor of per-box "is crowd" flags; carried through the
        crop alongside labels.
    difficult: rank 1 tensor of per-box "difficult" flags; carried through
        the crop alongside labels.
    masks: (optional) rank 3 float32 tensor with shape
           [num_instances, height, width] containing instance masks. The
           masks are of the same height, width as the input `image`.
    sem_seg: (optional) semantic segmentation map of shape [height, width],
        cropped with the same window as the image -- assumed shape, TODO
        confirm against callers.
    min_object_covered: the cropped image must cover at least this fraction of
                        at least one of the input bounding boxes.
    aspect_ratio_range: allowed range for aspect ratio of cropped image.
    area_range: allowed range for area ratio between cropped image and the
                original image.
    overlap_thresh: minimum overlap thresh with new cropped
                    image to keep the box.

  Returns:
    image: image which is the same rank as input image.
    boxes: boxes which is the same rank as input boxes.
           Boxes are in normalized form.
    labels: new labels.
    is_crowd: flags of the boxes kept after cropping.
    difficult: flags of the boxes kept after cropping.

    If masks is not None, the function also returns:
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
           containing instance masks.
    If sem_seg is not None, the cropped sem_seg is appended last.
  """
  with tf.name_scope('RandomCropImage', values=[image, boxes]):
    image_shape = tf.shape(image)

    # boxes are [N, 4]. Lets first make them [1, N, 4].
    boxes_expanded = tf.expand_dims(
        tf.clip_by_value(
            boxes, clip_value_min=0.0, clip_value_max=1.0), 0)

    # Sample a crop window covering at least `min_object_covered` of one box.
    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
        image_shape,
        bounding_boxes=boxes_expanded,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=100,
        use_image_if_no_bounding_boxes=True)

    im_box_begin, im_box_size, im_box = sample_distorted_bounding_box

    new_image = tf.slice(image, im_box_begin, im_box_size)
    new_image.set_shape([None, None, image.get_shape()[2]])

    # [1, 4]
    im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0])
    # [4]
    im_box_rank1 = tf.squeeze(im_box)

    # Bundle per-box fields so pruning keeps them aligned with the boxes.
    boxlist = box_list.BoxList(boxes)
    boxlist.add_field('labels', labels)
    boxlist.add_field('is_crowd', is_crowd)
    boxlist.add_field('difficult', difficult)

    if masks is not None:
      boxlist.add_field('masks', masks)

    im_boxlist = box_list.BoxList(im_box_rank2)

    # remove boxes that are outside cropped image
    boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(
        boxlist, im_box_rank1)

    # remove boxes that are outside image
    overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
        boxlist, im_boxlist, overlap_thresh)

    # change the coordinate of the remaining boxes
    new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
                                                       im_box_rank1)
    new_boxes = new_boxlist.boxes
    new_boxes = tf.clip_by_value(
        new_boxes, clip_value_min=0.0, clip_value_max=1.0)
    new_boxes.set_shape([None, 4])

    result = [
        new_image, new_boxes, overlapping_boxlist.get_field('labels'),
        overlapping_boxlist.get_field('is_crowd'),
        overlapping_boxlist.get_field('difficult'),
    ]

    if masks is not None:
      # Apply the same two pruning steps to the masks, then crop them with
      # the image window (all instances, full depth on axis 0).
      masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
      masks_of_boxes_completely_inside_window = tf.gather(
          masks_of_boxes_inside_window, keep_ids)
      masks_box_begin = [0, im_box_begin[0], im_box_begin[1]]
      masks_box_size = [-1, im_box_size[0], im_box_size[1]]
      new_masks = tf.slice(
          masks_of_boxes_completely_inside_window,
          masks_box_begin, masks_box_size)
      result.append(new_masks)

    if sem_seg is not None:
      # Temporarily add a channel axis so the image crop window applies.
      sem_seg = tf.expand_dims(sem_seg, axis=-1)
      new_sem_seg = tf.slice(sem_seg, im_box_begin, im_box_size)
      new_sem_seg = tf.squeeze(new_sem_seg, axis=-1)
      new_sem_seg.set_shape([None, None])
      result.append(new_sem_seg)

    return tuple(result)
749107213a8bf34d2b159d38657a9c63af6699c3
3,644,828
def aggregate_by_player_id(statistics, playerid, fields):
    """
    Inputs:
      statistics - List of batting statistics dictionaries
      playerid   - Player ID field name
      fields     - List of fields to aggregate

    Output:
      Nested dictionary keyed by player ID; each value is a dictionary
      holding the player ID plus the summed (integer) totals of the
      requested fields only.
    """
    totals = {}
    for row in statistics:
        pid = row[playerid]
        # First sighting of this player: start every requested field at 0.
        if pid not in totals:
            entry = {playerid: pid}
            for field in fields:
                entry[field] = 0
            totals[pid] = entry
        # Accumulate this row's values into the player's running totals.
        for field in fields:
            totals[pid][field] += int(row[field])
    return totals
c137fc8820f8898ebc63c54de03be5b919fed97a
3,644,829
import pickle


def loadStatesFromFile(filename):
    """Loads a list of states from a file.

    :param filename: path of the pickle file to read.
    :return: the unpickled object, or an empty list when the file cannot be
        opened or its contents cannot be unpickled (deliberate best-effort
        behavior).
    """
    try:
        # ``with`` guarantees the file is closed on every path.
        with open(filename, 'rb') as inputfile:
            return pickle.load(inputfile)
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed SystemExit
        # and KeyboardInterrupt; keep the best-effort fallback but only for
        # ordinary exceptions (missing file, corrupt pickle, ...).
        return []
cc2f64a977ff030ec6af94d3601c094e14f5b584
3,644,830
import tkinter


def get_configuration_item(configuration_file, item, default_values):
    """Return configuration value on file for item or builtin default.

    configuration_file
        Name of configuration file.
    item
        Item in configuation file whose value is required.
    default_values
        dict of default values for items.

    Return "" if configuration file cannot be opened or read, after showing
    a dialogue to tell the user.
    Return "" if the item exists but has no value.
    Return default value if the item does not exist and a default value
    exists.
    Return "" if the item does not exist and a default value does not exist.
    Return the item value if there is one.

    Items occupy a single line formatted as

    (?P<item>[^/s]*)/s*(?P<value>.*)
    """
    try:
        of = open(configuration_file)
        try:
            config_text = of.read()
        except Exception as exc:
            # NOTE(review): `parent` is not defined in this function's scope;
            # unless it is a module-level global, this error path raises
            # NameError instead of showing the dialog -- confirm.
            # NOTE(review): `tkinter.messagebox` is only available if the
            # submodule was imported somewhere (`import tkinter` alone does
            # not load it) -- verify.
            tkinter.messagebox.showinfo(
                parent=parent,
                message="".join(
                    (
                        "Unable to read from\n\n",
                        configuration_file,
                        "\n\n",
                        str(exc),
                        '\n\n"" will be returned as value of ',
                        item,
                    )
                ),
                title="Read File",
            )
            return ""
        finally:
            # Close the file whether the read succeeded or failed.
            of.close()
    except Exception as exc:
        tkinter.messagebox.showinfo(
            parent=parent,
            message="".join(
                (
                    "Unable to open\n\n",
                    configuration_file,
                    "\n\n",
                    str(exc),
                    '\n\n"" will be returned as value of ',
                    item,
                )
            ),
            title="Open File",
        )
        return ""
    key = None
    # Scan every line; the LAST matching line wins.
    for i in config_text.splitlines():
        # Split into at most [item, value]; value keeps internal whitespace.
        i = i.split(maxsplit=1)
        if not i:
            continue
        # Lines starting with '#' are comments.
        if i[0].startswith("#"):
            continue
        if i[0] != item:
            continue
        key = item
        if len(i) == 1:
            # Item present with no value.
            value = ""
        else:
            value = i[1].strip()
    if key is None:
        # NOTE(review): iterating a dict here yields keys only, which would
        # raise ValueError on unpacking; this works only if callers pass an
        # iterable of (key, value) pairs, contradicting the docstring's
        # "dict of default values" -- confirm against callers.
        for k, v in default_values:
            if k == item:
                key = item
                value = v
    if key is None:
        # No value on file and no default: fall back to "".
        value = ""
    return value
c077989d2d90468a80b27f32a68b827fbdb49b92
3,644,831
def sexa2deg(ra, dec):
    """Convert a sexagesimal position to decimal degrees.

    ``ra`` is parsed as an hour angle and ``dec`` as degrees via astropy's
    ``Angle``; taken from ryan's code.
    """
    ra_angle = coordinates.Angle(ra, units.hour)
    dec_angle = coordinates.Angle(dec, units.degree)
    return ra_angle.degree, dec_angle.degree
3a016b1163c6ceda403cfe5c8d24467d1646c7aa
3,644,833