Columns:
  content            string   lengths 1 to 1.05M
  input_ids          list     lengths 1 to 883k
  ratio_char_token   float64  1 to 22.9
  token_count        int64    1 to 883k
import json
import os
import re

import numpy as np
import pandas as pd

from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR

WDIR = os.path.dirname(os.path.abspath(__file__))


def flip_box(box):
    """
    box (list, length 4): [x1, y1, w, h]
    """
    # Get top right corner of prediction
    x1 = box[0]
    y1 = box[1]
    w = box[2]
    h = box[3]
    topRight = (x1 + w, y1)
    # Top left corner of flipped box is:
    newTopLeft = (1024. - topRight[0], topRight[1])
    return [newTopLeft[0], newTopLeft[1], w, h]


with open(MAPPINGS_PATH) as f:
    mapping = json.load(f)

with open(MAPPINGS_PATH.replace(test_image_set, "{}_flip".format(test_image_set))) as f:
    flip_mapping = json.load(f)

metadata = pd.read_csv(METADATA_PATH)

execfile(os.path.join(WDIR, "DetectionEnsemble.py"))

imsizes = [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]

fold0_nom = "fold{}_{}".format(0, imsizes[0])
fold1_nom = "fold{}_{}".format(1, imsizes[1])
fold2_nom = "fold{}_{}".format(2, imsizes[2])
fold3_nom = "fold{}_{}".format(3, imsizes[3])
fold4_nom = "fold{}_{}".format(4, imsizes[4])
fold5_nom = "fold{}_{}".format(5, imsizes[5])
fold6_nom = "fold{}_{}".format(6, imsizes[6])
fold7_nom = "fold{}_{}".format(7, imsizes[7])
fold8_nom = "fold{}_{}".format(8, imsizes[8])
fold9_nom = "fold{}_{}".format(9, imsizes[9])

fold1RCNN0 = run_ensemble(get_TTA_results("fold1_256", test_image_set, RCNN0_DETS_DIR.format(fold1_nom)), metadata)
fold3RCNN0 = run_ensemble(get_TTA_results("fold3_320", test_image_set, RCNN0_DETS_DIR.format(fold3_nom)), metadata)
fold5RCNN0 = run_ensemble(get_TTA_results("fold5_384", test_image_set, RCNN0_DETS_DIR.format(fold5_nom)), metadata)
fold7RCNN0 = run_ensemble(get_TTA_results("fold7_448", test_image_set, RCNN0_DETS_DIR.format(fold7_nom)), metadata)
fold9RCNN0 = run_ensemble(get_TTA_results("fold9_512", test_image_set, RCNN0_DETS_DIR.format(fold9_nom)), metadata)

list_of_dfs = [fold1RCNN0, fold3RCNN0, fold5RCNN0, fold7RCNN0, fold9RCNN0]

final_TTA_ensemble = run_ensemble(list_of_dfs, metadata, adjust_score=False)
final_TTA_ensemble["adjustedScore"] = final_TTA_ensemble.score * final_TTA_ensemble.votes
final_TTA_ensemble = final_TTA_ensemble[["patientId", "x", "y", "w", "h", "score", "votes", "adjustedScore"]]
final_TTA_ensemble.to_csv(os.path.join(WDIR, "../../SimpleDCNPredictions.csv"), index=False)
[ 11748, 33918, 198, 11748, 28686, 198, 11748, 302, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 12351, 13, 259, 2232, 13, 11627, 974, 5005, 687, 540, 51, 5603, 1330, 337, 24805, 20754, ...
2.280038
1,057
from web3 import Web3, HTTPProvider
import json

w3url = "https://mainnet.infura.io/v3/998f64f3627548bbaf2630599c1eefca"
w3 = Web3(HTTPProvider(w3url))

WETH = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
YFII = "0xa1d0E215a23d7030842FC67cE582a6aFa3CCaB83"
DAI = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
iUSDT = "0x72Cf258c852Dc485a853370171d46B9D29fD3184"
POOL4 = "0x3d367C9529f260B0661e1C1E91167C9319ee96cA"
yfii2dai = [YFII, WETH, DAI]

with open("abi/erc20.json") as f:
    erc20ABI = json.loads(f.read())
with open("abi/uniswapRouterv2.json") as f:
    uniswapABI = json.loads(f.read())
with open("abi/pool4.json") as f:
    pool4ABI = json.loads(f.read())

uniswap_instance = w3.eth.contract(
    abi=uniswapABI,
    address=w3.toChecksumAddress("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
)
pool4_instance = w3.eth.contract(abi=pool4ABI, address=POOL4)

if __name__ == "__main__":
    print(getDATA())
[ 6738, 3992, 18, 1330, 5313, 18, 11, 14626, 29495, 198, 11748, 33918, 198, 198, 86, 18, 6371, 796, 366, 5450, 1378, 12417, 3262, 13, 10745, 5330, 13, 952, 14, 85, 18, 14, 34808, 69, 2414, 69, 2623, 23195, 2780, 11848, 1878, 2075, 225...
1.899384
487
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT
#
# (Generated on 2020-12-20 18:26:33.661372)
#
from .base_classes import Baserequests
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 12680, 45811, 21725, 24700, 1137, 11617, 11050, 7716, 62, 37724, 13, 9078, 532, 8410, 5626, 48483, 1303, 198, 2...
2.316547
139
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin

from .models import CustomUser

admin.site.register(CustomUser, UserAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 28482, 1330, 11787, 46787, 198, 198, 6738, 764, 27530, 1330, 8562, 12982, 198, 198, 28482, 13, 15654, 13, 30238, 7, 15022, 12982, 11,...
3.488889
45
""" Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import OrderedDict import cv2 import numpy as np from ...adapters import MTCNNPAdapter
[ 37811, 198, 15269, 357, 66, 8, 2864, 12, 1238, 1828, 8180, 10501, 198, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 5832, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, ...
3.715054
186
from Cb_constants import DocLoading
from basetestcase import ClusterSetup
from couchbase_helper.documentgenerator import DocumentGenerator, doc_generator
from couchbase_helper.tuq_generators import JsonGenerator
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
from com.couchbase.client.java.json import JsonObject

"""
Basic test cases with commit,rollback scenarios
"""
[ 6738, 327, 65, 62, 9979, 1187, 1330, 14432, 19031, 198, 6738, 1615, 316, 395, 7442, 1330, 38279, 40786, 198, 6738, 18507, 8692, 62, 2978, 525, 13, 22897, 8612, 1352, 1330, 16854, 8645, 1352, 11, 2205, 62, 8612, 1352, 198, 6738, 18507, ...
3.741071
112
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
#Author:

'''
2.x len() list
3.x

def spin_words(sentence):
    # Your code goes here
    return " ".join([x[::-1] if len(x) >= 5 else x for x in sentence.split(" ")])

str[::-1] ok
'''
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 12, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 2, 13838, 25, 198, 198, 7061, 6, 198, 362, 13, 87, 11925, 3419, 4868, 513, 13, 87, 628, 220, 220, 198, 198, 4299, ...
2.08547
117
""" Created on 30 May 2017 @author: Bruno Beloff (bruno.beloff@southcoastscience.com) A network socket abstraction, implementing ProcessComms """ import socket import time from scs_core.sys.process_comms import ProcessComms # --------------------------------------------------------------------------------------------------------------------
[ 37811, 198, 41972, 319, 1542, 1737, 2177, 198, 198, 31, 9800, 25, 31045, 3944, 2364, 357, 1671, 36909, 13, 6667, 2364, 31, 35782, 1073, 5773, 4234, 13, 785, 8, 198, 198, 32, 3127, 17802, 34651, 11, 15427, 10854, 5377, 907, 198, 37811,...
4.716216
74
# -*- coding: utf-8 -*-
import calendar
import collections
from datetime import datetime, timedelta
from warnings import warn

import six
import regex as re
from dateutil.relativedelta import relativedelta

from dateparser.date_parser import date_parser
from dateparser.freshness_date_parser import freshness_date_parser
from dateparser.languages.loader import LanguageDataLoader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import apply_settings
from dateparser.utils import normalize_unicode, apply_timezone_from_settings

APOSTROPHE_LOOK_ALIKE_CHARS = [
    u'\N{RIGHT SINGLE QUOTATION MARK}',   # u'\u2019'
    u'\N{MODIFIER LETTER APOSTROPHE}',    # u'\u02bc'
    u'\N{MODIFIER LETTER TURNED COMMA}',  # u'\u02bb'
    u'\N{ARMENIAN APOSTROPHE}',           # u'\u055a'
    u'\N{LATIN SMALL LETTER SALTILLO}',   # u'\ua78c'
    u'\N{PRIME}',                         # u'\u2032'
    u'\N{REVERSED PRIME}',                # u'\u2035'
    u'\N{MODIFIER LETTER PRIME}',         # u'\u02b9'
    u'\N{FULLWIDTH APOSTROPHE}',          # u'\uff07'
]

RE_NBSP = re.compile(u'\xa0', flags=re.UNICODE)
RE_SPACES = re.compile(r'\s+')
RE_TRIM_SPACES = re.compile(r'^\s+(\S.*?)\s+$')
RE_SANITIZE_SKIP = re.compile(r'\t|\n|\r|\u00bb|,\s\u0432|\u200e|\xb7|\u200f|\u064e|\u064f', flags=re.M)
RE_SANITIZE_RUSSIAN = re.compile(r'([\W\d])\u0433\.', flags=re.I | re.U)
RE_SANITIZE_AMPM = re.compile(r'\b([ap])(\.)?m(\.)?\b', flags=re.DOTALL | re.I)
RE_SANITIZE_ON = re.compile(r'^.*?on:\s+(.*)')
RE_SANITIZE_APOSTROPHE = re.compile(u'|'.join(APOSTROPHE_LOOK_ALIKE_CHARS))

RE_SEARCH_TIMESTAMP = re.compile(r'^\d{10}(?![^\d.])')


def sanitize_date(date_string):
    date_string = RE_SANITIZE_SKIP.sub(' ', date_string)
    date_string = RE_SANITIZE_RUSSIAN.sub(r'\1 ', date_string)  # remove u'г.' (Russian for year) but not in words
    date_string = sanitize_spaces(date_string)
    date_string = RE_SANITIZE_AMPM.sub(r'\1m', date_string)
    date_string = RE_SANITIZE_ON.sub(r'\1', date_string)
    date_string = RE_SANITIZE_APOSTROPHE.sub(u"'", date_string)
    return date_string


def get_date_from_timestamp(date_string, settings):
    if RE_SEARCH_TIMESTAMP.search(date_string):
        date_obj = datetime.fromtimestamp(int(date_string[:10]))
        date_obj = apply_timezone_from_settings(date_obj, settings)
        return date_obj


def parse_with_formats(date_string, date_formats, settings):
    """
    Parse with formats and return a dictionary with 'period' and 'obj_date'.

    :returns: :class:`datetime.datetime`, dict or None
    """
    period = 'day'
    for date_format in date_formats:
        try:
            date_obj = datetime.strptime(date_string, date_format)
        except ValueError:
            continue
        else:
            # If format does not include the day, use last day of the month
            # instead of first, because the first is usually out of range.
            if '%d' not in date_format:
                period = 'month'
                date_obj = date_obj.replace(
                    day=get_last_day_of_month(date_obj.year, date_obj.month))

            if not ('%y' in date_format or '%Y' in date_format):
                today = datetime.today()
                date_obj = date_obj.replace(year=today.year)

            date_obj = apply_timezone_from_settings(date_obj, settings)

            return {'date_obj': date_obj, 'period': period}
    else:
        return {'date_obj': None, 'period': period}
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 11845, 198, 11748, 17268, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 14601, 1330, 9828, 198, 198, 11748, 2237, 198, 11748, 40364, 355, 30...
2.131197
1,654
import chainer
import chainer.functions

from chainer.utils import type_check
from chainer import cuda
from chainer import function
import numpy as np
#from chainer import function_node

from utils import clip_grad


#class MixtureDensityNetworkFunction(function_node.FunctionNode):


def mixture_density_network(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho):
    """Mixture Density Network

    Output the coefficient params

    Args:
        x (Variable): Tensor containing the position [x1, x2, x3] to predict
        eos (Variable): End-of-stroke prediction
        pi (Variable): mixture components
        mu_x1 (Variable): mean of x1
        mu_x2 (Variable): mean of x2
        s_x1 (Variable): variance of x1
        s_x2 (Variable): variance of x2
        rho (Variable): correlation parameter

    Returns:
        loss (Variable)
        y (Variable)
        eos (Variable)
        pi (Variable)
        mu_x1 (Variable)
        mu_x2 (Variable)
        s_x1 (Variable)
        s_x2 (Variable)
        rho (Variable)
    """
    return MixtureDensityNetworkFunction()(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho)
[ 11748, 6333, 263, 198, 11748, 6333, 263, 13, 12543, 2733, 198, 198, 6738, 6333, 263, 13, 26791, 1330, 2099, 62, 9122, 198, 6738, 6333, 263, 1330, 269, 15339, 198, 6738, 6333, 263, 1330, 2163, 198, 11748, 299, 32152, 355, 45941, 198, 2...
2.160714
560
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


# SOURCE: https://github.com/twbs/bootstrap
# SOURCE: https://github.com/gitbrent/bootstrap4-toggle
# SOURCE: https://gitbrent.github.io/bootstrap4-toggle/

from flask import Flask, render_template

app = Flask(__name__)

import logging
logging.basicConfig(level=logging.DEBUG)


if __name__ == '__main__':
    app.debug = True

    # Localhost
    # port=0 -- random free port
    # app.run(port=0)
    app.run(
        port=5000,
        # :param threaded: should the process handle each request in a separate
        #                  thread?
        # :param processes: if greater than 1 then handle each request in a new process
        #                   up to this maximum number of concurrent processes.
        threaded=True,
    )

    # # Public IP
    # app.run(host='0.0.0.0')
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 834, 9800, 834, 796, 705, 541, 21879, 1077, 6, 628, 198, 2, 311, 31033, 25, 3740, 1378, 12567, 13, 785, 14...
2.417827
359
import pypospack.io.phonts as phonts

# <---- additional classes and functions in which to add top
# <----     pypospack.io.phonts

if __name__ == "__main__":
[ 11748, 12972, 1930, 8002, 13, 952, 13, 746, 756, 82, 355, 872, 756, 82, 198, 198, 2, 1279, 650, 3224, 6097, 290, 5499, 287, 543, 284, 751, 1353, 198, 2, 1279, 650, 220, 220, 220, 220, 12972, 1930, 8002, 13, 952, 13, 746, 756, 82...
2.758621
58
import copy
import os
import re
import string
import sys
import warnings
from contextlib import contextmanager
from enum import Enum
from textwrap import dedent
from typing import (
    Any,
    Dict,
    Iterator,
    List,
    Optional,
    Tuple,
    Type,
    Union,
    get_type_hints,
)

import yaml

from .errors import (
    ConfigIndexError,
    ConfigTypeError,
    ConfigValueError,
    GrammarParseError,
    OmegaConfBaseException,
    ValidationError,
)
from .grammar_parser import SIMPLE_INTERPOLATION_PATTERN, parse

try:
    import dataclasses
except ImportError:  # pragma: no cover
    dataclasses = None  # type: ignore # pragma: no cover

try:
    import attr
except ImportError:  # pragma: no cover
    attr = None  # type: ignore # pragma: no cover


# Regexprs to match key paths like: a.b, a[b], ..a[c].d, etc.
# We begin by matching the head (in these examples: a, a, ..a).
# This can be read as "dots followed by any character but `.` or `[`"
# Note that a key starting with brackets, like [a], is purposedly *not*
# matched here and will instead be handled in the next regex below (this
# is to keep this regex simple).
KEY_PATH_HEAD = re.compile(r"(\.)*[^.[]*")
# Then we match other keys. The following expression matches one key and can
# be read as a choice between two syntaxes:
#   - `.` followed by anything except `.` or `[` (ex: .b, .d)
#   - `[` followed by anything then `]` (ex: [b], [c])
KEY_PATH_OTHER = re.compile(r"\.([^.[]*)|\[(.*?)\]")


# source: https://yaml.org/type/bool.html
YAML_BOOL_TYPES = [
    "y",
    "Y",
    "yes",
    "Yes",
    "YES",
    "n",
    "N",
    "no",
    "No",
    "NO",
    "true",
    "True",
    "TRUE",
    "false",
    "False",
    "FALSE",
    "on",
    "On",
    "ON",
    "off",
    "Off",
    "OFF",
]


# To be used as default value when `None` is not an option.
_DEFAULT_MARKER_: Any = Marker("_DEFAULT_MARKER_")


def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
    """Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
    if getattr(type_, "__origin__", None) is Union:
        args = type_.__args__
        if len(args) == 2 and args[1] == type(None):  # noqa E721
            return True, args[0]
    if type_ is Any:
        return True, Any
    return False, type_


def _is_optional(obj: Any, key: Optional[Union[int, str]] = None) -> bool:
    """Check `obj` metadata to see if the given node is optional."""
    from .base import Container, Node

    if key is not None:
        assert isinstance(obj, Container)
        obj = obj._get_node(key)
    if isinstance(obj, Node):
        return obj._is_optional()
    else:
        # In case `obj` is not a Node, treat it as optional by default.
        # This is used in `ListConfig.append` and `ListConfig.insert`
        # where the appended/inserted value might or might not be a Node.
        return True


def extract_dict_subclass_data(obj: Any, parent: Any) -> Optional[Dict[str, Any]]:
    """Check if obj is an instance of a subclass of Dict. If so, extract the Dict keys/values."""
    from omegaconf.omegaconf import _maybe_wrap

    is_type = isinstance(obj, type)
    obj_type = obj if is_type else type(obj)
    subclasses_dict = is_dict_subclass(obj_type)

    if subclasses_dict:
        warnings.warn(
            f"Class `{obj_type.__name__}` subclasses `Dict`."
            + " Subclassing `Dict` in Structured Config classes is deprecated,"
            + " see github.com/omry/omegaconf/issues/663",
            UserWarning,
            stacklevel=9,
        )

    if is_type:
        return None
    elif subclasses_dict:
        dict_subclass_data = {}
        key_type, element_type = get_dict_key_value_types(obj_type)
        for name, value in obj.items():
            is_optional, type_ = _resolve_optional(element_type)
            type_ = _resolve_forward(type_, obj.__module__)
            try:
                dict_subclass_data[name] = _maybe_wrap(
                    ref_type=type_,
                    is_optional=is_optional,
                    key=name,
                    value=value,
                    parent=parent,
                )
            except ValidationError as ex:
                format_and_raise(
                    node=None, key=name, value=value, cause=ex, msg=str(ex)
                )
        return dict_subclass_data
    else:
        return None


def get_value_kind(
    value: Any, strict_interpolation_validation: bool = False
) -> ValueKind:
    """
    Determine the kind of a value
    Examples:
    VALUE: "10", "20", True
    MANDATORY_MISSING: "???"
    INTERPOLATION: "${foo.bar}", "${foo.${bar}}", "${foo:bar}", "[${foo}, ${bar}]",
                   "ftp://${host}/path", "${foo:${bar}, [true], {'baz': ${baz}}}"

    :param value: Input to classify.
    :param strict_interpolation_validation: If `True`, then when `value` is a string
        containing "${", it is parsed to validate the interpolation syntax. If `False`,
        this parsing step is skipped: this is more efficient, but will not detect errors.
    """
    if _is_missing_value(value):
        return ValueKind.MANDATORY_MISSING

    value = _get_value(value)

    # We identify potential interpolations by the presence of "${" in the string.
    # Note that escaped interpolations (ex: "esc: \${bar}") are identified as
    # interpolations: this is intended, since they must be processed as interpolations
    # for the string to be properly un-escaped.
    # Keep in mind that invalid interpolations will only be detected when
    # `strict_interpolation_validation` is True.
    if isinstance(value, str) and "${" in value:
        if strict_interpolation_validation:
            # First try the cheap regex matching that detects common interpolations.
            if SIMPLE_INTERPOLATION_PATTERN.match(value) is None:
                # If no match, do the more expensive grammar parsing to detect errors.
                parse(value)
        return ValueKind.INTERPOLATION
    else:
        return ValueKind.VALUE


# DEPRECATED: remove in 2.2
# DEPRECATED: remove in 2.2


def is_generic_list(type_: Any) -> bool:
    """
    Checks if a type is a generic list, for example:
    list returns False
    typing.List returns False
    typing.List[T] returns True

    :param type_: variable type
    :return: bool
    """
    return is_list_annotation(type_) and get_list_element_type(type_) is not None


def is_generic_dict(type_: Any) -> bool:
    """
    Checks if a type is a generic dict, for example:
    list returns False
    typing.List returns False
    typing.List[T] returns True

    :param type_: variable type
    :return: bool
    """
    return is_dict_annotation(type_) and len(get_dict_key_value_types(type_)) > 0


def split_key(key: str) -> List[str]:
    """
    Split a full key path into its individual components.

    This is similar to `key.split(".")` but also works with the getitem syntax:
        "a.b"       -> ["a", "b"]
        "a[b]"      -> ["a", "b"]
        ".a.b[c].d" -> ["", "a", "b", "c", "d"]
        "[a].b"     -> ["a", "b"]
    """
    # Obtain the first part of the key (in docstring examples: a, a, .a, '')
    first = KEY_PATH_HEAD.match(key)
    assert first is not None
    first_stop = first.span()[1]

    # `tokens` will contain all elements composing the key.
    tokens = key[0:first_stop].split(".")

    # Optimization in case `key` has no other component: we are done.
    if first_stop == len(key):
        return tokens

    if key[first_stop] == "[" and not tokens[-1]:
        # This is a special case where the first key starts with brackets, e.g.
        # [a] or ..[a]. In that case there is an extra "" in `tokens` that we
        # need to get rid of:
        #   [a]   -> tokens = [""] but we would like []
        #   ..[a] -> tokens = ["", "", ""] but we would like ["", ""]
        tokens.pop()

    # Identify other key elements (in docstring examples: b, b, b/c/d, b)
    others = KEY_PATH_OTHER.findall(key[first_stop:])

    # There are two groups in the `KEY_PATH_OTHER` regex: one for keys starting
    # with a dot (.b, .d) and one for keys starting with a bracket ([b], [c]).
    # Only one group can be non-empty.
    tokens += [dot_key if dot_key else bracket_key for dot_key, bracket_key in others]

    return tokens


# Similar to Python 3.7+'s `contextlib.nullcontext` (which should be used instead,
# once support for Python 3.6 is dropped).
[ 11748, 4866, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 4731, 198, 11748, 25064, 198, 11748, 14601, 198, 6738, 4732, 8019, 1330, 4732, 37153, 198, 6738, 33829, 1330, 2039, 388, 198, 6738, 2420, 37150, 1330, 4648, 298, 198, 6738, 1972...
2.436838
3,491
#!/usr/bin/env python3
#
# AMBER Clustering

import os
from time import sleep
import yaml
import ast
import threading
import multiprocessing as mp
import numpy as np
from astropy.time import Time, TimeDelta
import astropy.units as u
from astropy.coordinates import SkyCoord

from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer
from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT
from darc.external import tools
from darc import util
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 198, 2, 3001, 13246, 1012, 436, 1586, 198, 198, 11748, 28686, 198, 6738, 640, 1330, 3993, 198, 11748, 331, 43695, 198, 11748, 6468, 198, 11748, 4704, 278, 198, 11748, 18540, 305...
3.231293
147
import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil

import pandas as pd
import signal
import requests

from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, drop_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability


if __name__ == "__main__":
    """Insert test data"""
    env, cfg = load_env()
    basedir = Path(os.path.dirname(__file__)) / ".."

    with status(f"Connecting to database {cfg['database']['database']}"):
        init_db(**cfg["database"])

    with status("Dropping all tables"):
        drop_tables()

    with status("Creating tables"):
        create_tables()

    for model in Base.metadata.tables:
        print(" -", model)

    with status(f"Creating permissions"):
        setup_permissions()

    with status(f"Creating dummy users"):
        super_admin_user = User(
            username="testuser@cesium-ml.org", role_ids=["Super admin"]
        )
        group_admin_user = User(
            username="groupadmin@cesium-ml.org", role_ids=["Super admin"]
        )
        full_user = User(username="fulluser@cesium-ml.org", role_ids=["Full user"])
        view_only_user = User(
            username="viewonlyuser@cesium-ml.org", role_ids=["View only"]
        )
        DBSession().add_all(
            [super_admin_user, group_admin_user, full_user, view_only_user]
        )

        for u in [super_admin_user, group_admin_user, full_user, view_only_user]:
            DBSession().add(
                TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
            )

    with status("Creating token"):
        token = create_token(
            [
                "Manage groups",
                "Manage sources",
                "Upload data",
                "Comment",
                "Manage users",
            ],
            super_admin_user.id,
            "load_demo_data token",
        )

    with status("Launching web app & executing API calls"):
        try:
            response_status, data = api("GET", "sysinfo", token=token)
            app_already_running = True
        except requests.ConnectionError:
            app_already_running = False
            web_client = subprocess.Popen(
                ["make", "run"], cwd=basedir, preexec_fn=os.setsid
            )

        server_url = f"http://localhost:{cfg['ports.app']}"
        print()
        print(f"Waiting for server to appear at {server_url}...")

        try:
            verify_server_availability(server_url)
            print("App running - continuing with API calls")

            with status("Creating dummy group & adding users"):
                data = assert_post(
                    "groups",
                    data={
                        "name": "Stream A",
                        "group_admins": [
                            super_admin_user.username,
                            group_admin_user.username,
                        ],
                    },
                )
                group_id = data["data"]["id"]
                for u in [view_only_user, full_user]:
                    data = assert_post(
                        f"groups/{group_id}/users/{u.username}", data={"admin": False}
                    )

            with status("Creating dummy instruments"):
                data = assert_post(
                    "telescope",
                    data={
                        "name": "Palomar 1.5m",
                        "nickname": "P60",
                        "lat": 33.3633675,
                        "lon": -116.8361345,
                        "elevation": 1870,
                        "diameter": 1.5,
                        "group_ids": [group_id],
                    },
                )
                telescope1_id = data["data"]["id"]

                data = assert_post(
                    "instrument",
                    data={
                        "name": "P60 Camera",
                        "type": "phot",
                        "band": "optical",
                        "telescope_id": telescope1_id,
                    },
                )
                instrument1_id = data["data"]["id"]

                data = assert_post(
                    "telescope",
                    data={
                        "name": "Nordic Optical Telescope",
                        "nickname": "NOT",
                        "lat": 28.75,
                        "lon": 17.88,
                        "elevation": 1870,
                        "diameter": 2.56,
                        "group_ids": [group_id],
                    },
                )
                telescope2_id = data["data"]["id"]

                data = assert_post(
                    "instrument",
                    data={
                        "name": "ALFOSC",
                        "type": "both",
                        "band": "optical",
                        "telescope_id": telescope2_id,
                    },
                )

            with status("Creating dummy sources"):
                SOURCES = [
                    {
                        "id": "14gqr",
                        "ra": 353.36647,
                        "dec": 33.646149,
                        "redshift": 0.063,
                        "group_ids": [group_id],
                        "comments": [
                            "No source at transient location to R>26 in LRIS imaging",
                            "Strong calcium lines have emerged.",
                        ],
                    },
                    {
                        "id": "16fil",
                        "ra": 322.718872,
                        "dec": 27.574113,
                        "redshift": 0.0,
                        "group_ids": [group_id],
                        "comments": ["Frogs in the pond", "The eagle has landed"],
                    },
                ]

                (basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)

                for source_info in SOURCES:
                    comments = source_info.pop("comments")
                    data = assert_post("sources", data=source_info)
                    assert data["data"]["id"] == source_info["id"]

                    for comment in comments:
                        data = assert_post(
                            "comment",
                            data={"source_id": source_info["id"], "text": comment},
                        )

                    phot_file = basedir / "skyportal/tests/data/phot.csv"
                    phot_data = pd.read_csv(phot_file)
                    data = assert_post(
                        "photometry",
                        data={
                            "source_id": source_info["id"],
                            "time_format": "iso",
                            "time_scale": "utc",
                            "instrument_id": instrument1_id,
                            "observed_at": phot_data.observed_at.tolist(),
                            "mag": phot_data.mag.tolist(),
                            "e_mag": phot_data.e_mag.tolist(),
                            "lim_mag": phot_data.lim_mag.tolist(),
                            "filter": phot_data["filter"].tolist(),
                        },
                    )

                    spec_file = os.path.join(
                        os.path.dirname(os.path.dirname(__file__)),
                        "skyportal",
                        "tests",
                        "data",
                        "spec.csv",
                    )
                    spec_data = pd.read_csv(spec_file)
                    for i, df in spec_data.groupby("instrument_id"):
                        data = assert_post(
                            "spectrum",
                            data={
                                "source_id": source_info["id"],
                                "observed_at": str(datetime.datetime(2014, 10, 24)),
                                "instrument_id": 1,
                                "wavelengths": df.wavelength.tolist(),
                                "fluxes": df.flux.tolist(),
                            },
                        )

                    for ttype in ["new", "ref", "sub"]:
                        fname = f'{source_info["id"]}_{ttype}.png'
                        fpath = basedir / f"skyportal/tests/data/{fname}"
                        thumbnail_data = base64.b64encode(
                            open(os.path.abspath(fpath), "rb").read()
                        )
                        data = assert_post(
                            "thumbnail",
                            data={
                                "source_id": source_info["id"],
                                "data": thumbnail_data,
                                "ttype": ttype,
                            },
                        )

                    source = Source.query.get(source_info["id"])
                    source.add_linked_thumbnails()
        finally:
            if not app_already_running:
                print("Terminating web app")
                os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
[ 11748, 4818, 8079, 198, 11748, 28686, 198, 11748, 850, 14681, 198, 11748, 2779, 2414, 198, 6738, 3108, 8019, 1330, 10644, 198, 11748, 4423, 346, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 6737, 198, 11748, 7007, 198, 198, 6738, 1...
1.642127
5,773
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.

import sys
import os
import time

from selenium.common.exceptions import NoAlertPresentException

import framework
[ 198, 2, 15069, 2211, 2059, 286, 10769, 13, 220, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 198, 2, 5964, 326, 460, 307, 1043, 287, 262, 38559, 24290, 13, 51, 25010, 2393, ...
3.826667
75
from .. import types
from ... import utils
[ 6738, 11485, 1330, 3858, 198, 6738, 2644, 1330, 3384, 4487, 628 ]
4
11
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 20:14:22 2020

Simple script to join json files
@author: SERGI
"""

import json
import sys
import os

if __name__ == "__main__":
    print("hello from python", flush=True)
    jsonPath = str(sys.argv[1])
    # =============================================================================
    # jsonPath = "../eclipse-workspace/prueba/target/json/"
    # =============================================================================
    jsonPathTemp = jsonPath + "temp/"

    arr = os.listdir(jsonPathTemp)
    arr.sort()
    print(arr)

    dict_to_json = {}
    dict_0 = readJson(jsonPathTemp + arr[0])
    dict_1 = readJson(jsonPathTemp + arr[1])
    dict_2 = readJson(jsonPathTemp + arr[2])
    dict_3 = readJson(jsonPathTemp + arr[3])

    keys = [name for name in dict_0.keys() if "0" not in name]
    for key in keys:
        dict_to_json[key] = dict_0[key] + dict_1[key] + dict_2[key] + dict_3[key]

    # 0seg,f_step,f_stop
    seg = dict_0['0seg,f_step,f_stop'][0]
    step = dict_0['0seg,f_step,f_stop'][1]
    stop = dict_3['0seg,f_step,f_stop'][2]
    dict_to_json['0seg,f_step,f_stop'] = [seg, step, stop]

    print("Writing json: ", jsonPath + arr[0], flush=True)
    writeJson(jsonPath + arr[0], dict_to_json)
    print("finish", flush=True)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 41972, 319, 30030, 5979, 220, 767, 1160, 25, 1415, 25, 1828, 12131, 201, 198, 201, 198, 26437, 4226, 284, 4654, 33918, 3696, 201, 198, 201, 198, ...
2.2411
618
# Generated by Django 3.0.2 on 2020-01-23 11:02

import re

import django.contrib.postgres.fields.citext
import django.core.validators
from django.db import migrations

import grandchallenge.challenges.models
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 17, 319, 12131, 12, 486, 12, 1954, 1367, 25, 2999, 198, 198, 11748, 302, 198, 198, 11748, 42625, 14208, 13, 3642, 822, 13, 7353, 34239, 13, 25747, 13, 66, 578, 742, 198, 11748, 42625, 142...
3
70
import numpy as np
import scipy.sparse as sp

from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
    UniformIntegerHyperparameter, CategoricalHyperparameter, Constant

from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
[ 11748, 299, 32152, 355, 45941, 198, 11748, 629, 541, 88, 13, 82, 29572, 355, 599, 198, 198, 6738, 6574, 46, 8019, 16934, 14106, 13, 11250, 3924, 62, 13200, 1330, 28373, 14106, 198, 6738, 6574, 46, 8019, 16934, 14106, 13, 17561, 1756, ...
3.723577
123
# Creating an elif chain

alien_color = 'red'

if alien_color == 'green':
    print('Congratulations! You won 5 points!')
elif alien_color == 'yellow':
    print('Congratulations! You won 10 points!')
elif alien_color == 'red':
    print('Congratulations! You won 15 points!')
[ 2, 30481, 257, 1288, 361, 6333, 220, 220, 198, 198, 42690, 62, 8043, 796, 705, 445, 6, 198, 198, 361, 8756, 62, 8043, 6624, 705, 14809, 10354, 198, 220, 220, 220, 3601, 10786, 45048, 0, 921, 1839, 642, 2173, 0, 11537, 198, 417, 36...
3.088889
90
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

from sklearn.model_selection import learning_curve

# Plot learning curve

# Plot validation curve
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 384, 397, 1211, 355, 3013, 82, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 467...
3.327586
58
"""Utilities for downloading comsumption data from Oomi.""" from oomi.oomi_downloader import OomiDownloader, OomiConfig
[ 37811, 18274, 2410, 329, 22023, 401, 16345, 1159, 1366, 422, 440, 12753, 526, 15931, 198, 198, 6738, 267, 12753, 13, 4207, 72, 62, 15002, 263, 1330, 440, 12753, 10002, 263, 11, 440, 12753, 16934, 198 ]
3.457143
35
## @file
# This file is used to define class objects of INF file miscellaneous.
# Include BootMode/HOB/Event and others. It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent

'''
InfMisc
'''

import Logger.Log as Logger
from Logger import ToolError
from Library import DataType as DT
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Library.Misc import Sdict

##
# BootModeObject
#

##
# EventObject
#

##
# HobObject
#

##
# InfSpecialCommentObject
#

## ErrorInInf
#
# An encapsulate of Error for INF parser.
#
def ErrorInInf(Message=None, ErrorCode=None, LineInfo=None, RaiseError=True):
    if ErrorCode is None:
        ErrorCode = ToolError.FORMAT_INVALID
    if LineInfo is None:
        LineInfo = ['', -1, '']
    Logger.Error("InfParser",
                 ErrorCode,
                 Message=Message,
                 File=LineInfo[0],
                 Line=LineInfo[1],
                 ExtraData=LineInfo[2],
                 RaiseError=RaiseError)
[ 2235, 2488, 7753, 198, 2, 770, 2393, 318, 973, 284, 8160, 1398, 5563, 286, 45594, 2393, 2984, 25673, 13, 198, 2, 40348, 18892, 19076, 14, 39, 9864, 14, 9237, 290, 1854, 13, 632, 481, 13529, 416, 4806, 46677, 13, 198, 2, 198, 2, 15...
2.487356
435
# https://www.codechef.com/START8C/problems/PENALTY

for T in range(int(input())):
    n = list(map(int, input().split()))
    a = b = 0
    for i in range(len(n)):
        if n[i] == 1:
            if i % 2 == 0:
                a += 1
            else:
                b += 1
    if a > b:
        print(1)
    elif b > a:
        print(2)
    else:
        print(0)
[ 2, 3740, 1378, 2503, 13, 19815, 721, 258, 69, 13, 785, 14, 2257, 7227, 23, 34, 14, 1676, 22143, 14, 47, 1677, 1847, 9936, 198, 198, 1640, 309, 287, 2837, 7, 600, 7, 15414, 28955, 2599, 198, 220, 220, 220, 299, 28, 4868, 7, 8899,...
1.706897
174
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 1341, 35720, 13, 4164, 10466, 1330, 7326, 4241, 46912, 23114, 11, 10802, 62, 6759, 8609, 628 ]
3.642857
28
import torch
import torchvision
import torchvision.transforms as transforms
import os.path

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

root = os.path.join(BASE_DIR, '../data/')
trainset = torchvision.datasets.CIFAR10(root=root, train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root=root, train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, shuffle=False, num_workers=2)

import torch.nn as nn
import torch.nn.functional as F

# torch.Size([1, 3, 32, 32])
# torch.Size([1, 6, 14, 14])
# torch.Size([1, 16, 5, 5])
# torch.Size([1, 400])
# torch.Size([1, 120])
# torch.Size([1, 84])
# torch.Size([1, 100])

model = Net()

import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0002, momentum=0.9)

from util import train_eval

train_eval(model, criterion, trainloader, testloader, optimizer, epochs=5)

# [1, 5000] loss: 2.293
# [1, 10000] loss: 2.075
# [1, 15000] loss: 1.876
# [1, 20000] loss: 1.754
# [1, 25000] loss: 1.658
# [1, 30000] loss: 1.625
# [1, 35000] loss: 1.558
# [1, 40000] loss: 1.520
# [1, 45000] loss: 1.494
# [1, 50000] loss: 1.459
# 1/5 4456/10000 44.56% (107.18255376815796s)
# [2, 5000] loss: 1.413
# [2, 10000] loss: 1.398
# [2, 15000] loss: 1.386
# [2, 20000] loss: 1.379
# [2, 25000] loss: 1.358
# [2, 30000] loss: 1.324
# [2, 35000] loss: 1.333
# [2, 40000] loss: 1.280
# [2, 45000] loss: 1.296
# [2, 50000] loss: 1.304
# 2/5 5357/10000 53.56999999999999% (105.8866639137268s)
# [3, 5000] loss: 1.226
# [3, 10000] loss: 1.231
# [3, 15000] loss: 1.215
# [3, 20000] loss: 1.235
# [3, 25000] loss: 1.199
# [3, 30000] loss: 1.187
# [3, 35000] loss: 1.192
# [3, 40000] loss: 1.194
# [3, 45000] loss: 1.196
# [3, 50000] loss: 1.191
# 3/5 5729/10000 57.29% (105.63971090316772s)
# [4, 5000] loss: 1.117
# [4, 10000] loss: 1.096
# [4, 15000] loss: 1.121
# [4, 20000] loss: 1.123
# [4, 25000] loss: 1.107
# [4, 30000] loss: 1.120
# [4, 35000] loss: 1.124
# [4, 40000] loss: 1.094
# [4, 45000] loss: 1.105
# [4, 50000] loss: 1.102
# 4/5 5829/10000 58.29% (112.56915497779846s)
# [5, 5000] loss: 1.034
# [5, 10000] loss: 1.024
# [5, 15000] loss: 1.040
# [5, 20000] loss: 1.027
# [5, 25000] loss: 1.043
# [5, 30000] loss: 1.049
# [5, 35000] loss: 1.024
# [5, 40000] loss: 1.042
# [5, 45000] loss: 1.027
# [5, 50000] loss: 1.027
# 5/5 6178/10000 61.78% (109.75669193267822s)
# 61.0% (541.0347754955292s)
[ 11748, 28034, 198, 11748, 28034, 10178, 198, 11748, 28034, 10178, 13, 7645, 23914, 355, 31408, 198, 11748, 28686, 13, 6978, 198, 33, 11159, 62, 34720, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 397, 2777, 776, 7, 834...
2.052784
1,383
from ... import neodys
[ 198, 6738, 2644, 1330, 497, 375, 893, 628 ]
3.125
8
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-08 19:56
from __future__ import unicode_literals
from __future__ import absolute_import

from django.db import migrations
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 1485, 319, 2864, 12, 2713, 12, 2919, 678, 25, 3980, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.893939
66
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Evaluation script for RL agents.

Example invocation:

python -m tensor2tensor.rl.evaluator \
    --policy_dir=$HOME/t2t/rl_v1/policy \
    --eval_metrics_dir=$HOME/t2t/rl_v1/full_eval_metrics \
    --hparams_set=rlmb_base \
    --hparams='batch_size=64'
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import datetime
import os

from tensor2tensor.data_generators import gym_env
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import rl  # pylint: disable=unused-import
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params  # pylint: disable=unused-import
from tensor2tensor.utils import flags as t2t_flags  # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib

import tensorflow as tf


flags = tf.flags
FLAGS = flags.FLAGS

flags.DEFINE_string("output_dir", "", "Main directory for multi-runs.")
flags.DEFINE_integer("total_num_workers", 1, "How many workers in total.")
flags.DEFINE_string("worker_to_game_map", "", "How to map workers to games.")
flags.DEFINE_string("policy_dir", "", "Directory with policy checkpoints.")
flags.DEFINE_string("model_dir", "", "Directory with model checkpoints.")
flags.DEFINE_string(
    "eval_metrics_dir", "", "Directory to output the eval metrics at."
)
flags.DEFINE_bool("full_eval", True, "Whether to ignore the timestep limit.")
flags.DEFINE_enum(
    "agent", "policy", ["random", "policy", "planner"], "Agent type to use."
)
flags.DEFINE_bool(
    "eval_with_learner", True,
    "Whether to use the PolicyLearner.evaluate function instead of an "
    "out-of-graph one. Works only with --agent=policy."
)
flags.DEFINE_string(
    "planner_hparams_set", "planner_small", "Planner hparam set."
)
flags.DEFINE_string("planner_hparams", "", "Planner hparam overrides.")
flags.DEFINE_integer(
    "log_every_steps", 20, "Log every how many environment steps."
)
flags.DEFINE_string(
    "debug_video_path", "", "Path to save the planner debug video at."
)

# Unused flags needed to pass for multi-run infrastructure.
flags.DEFINE_bool("autotune", False, "Unused here.")
flags.DEFINE_string("objective", "", "Unused here.")
flags.DEFINE_string("client_handle", "client_0", "Unused.")
flags.DEFINE_bool("maximize_tuner_objective", True, "Unused.")
flags.DEFINE_integer("vizier_search_algorithm", 0, "Unused.")


def make_env(env_type, real_env, sim_env_kwargs):
  """Factory function for envs."""
  return {
      "real": lambda: real_env.new_like(  # pylint: disable=g-long-lambda
          batch_size=sim_env_kwargs["batch_size"],
          store_rollouts=False,
      ),
      "simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames(  # pylint: disable=g-long-lambda
          **sim_env_kwargs
      ),
  }[env_type]()


def make_agent(
    agent_type, env, policy_hparams, policy_dir, sampling_temp,
    sim_env_kwargs=None, frame_stack_size=None, planning_horizon=None,
    rollout_agent_type=None, batch_size=None, num_rollouts=None,
    inner_batch_size=None, video_writer=None, env_type=None):
  """Factory function for Agents."""
  if batch_size is None:
    batch_size = env.batch_size
  return {
      "random": lambda: rl_utils.RandomAgent(  # pylint: disable=g-long-lambda
          batch_size, env.observation_space, env.action_space
      ),
      "policy": lambda: rl_utils.PolicyAgent(  # pylint: disable=g-long-lambda
          batch_size, env.observation_space, env.action_space,
          policy_hparams, policy_dir, sampling_temp
      ),
      "planner": lambda: rl_utils.PlannerAgent(  # pylint: disable=g-long-lambda
          batch_size,
          make_agent(
              rollout_agent_type, env, policy_hparams, policy_dir,
              sampling_temp, batch_size=inner_batch_size
          ),
          make_env(env_type, env.env, sim_env_kwargs),
          lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
          num_rollouts, planning_horizon,
          discount_factor=policy_hparams.gae_gamma,
          video_writer=video_writer
      ),
  }[agent_type]()


def make_eval_fn_with_agent(
    agent_type, planner_hparams, model_dir, log_every_steps=None,
    video_writer=None
):
  """Returns an out-of-graph eval_fn using the Agent API."""
  def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
    """Eval function."""
    base_env = env
    env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
    sim_env_kwargs = rl.make_simulated_env_kwargs(
        base_env, loop_hparams, batch_size=planner_hparams.batch_size,
        model_dir=model_dir
    )
    agent = make_agent(
        agent_type, env, policy_hparams, policy_dir, sampling_temp,
        sim_env_kwargs, loop_hparams.frame_stack_size,
        planner_hparams.planning_horizon, planner_hparams.rollout_agent_type,
        num_rollouts=planner_hparams.num_rollouts,
        inner_batch_size=planner_hparams.batch_size,
        video_writer=video_writer, env_type=planner_hparams.env_type
    )
    rl_utils.run_rollouts(
        env, agent, env.reset(), log_every_steps=log_every_steps
    )
    assert len(base_env.current_epoch_rollouts()) == env.batch_size
  return eval_fn


def evaluate(
    loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
    agent_type, eval_with_learner, log_every_steps, debug_video_path,
    report_fn=None, report_metric=None
):
  """Evaluate."""
  if eval_with_learner:
    assert agent_type == "policy"

  if report_fn:
    assert report_metric is not None

  eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
  video_writer = None
  kwargs = {}
  if not eval_with_learner:
    if debug_video_path:
      video_writer = common_video.WholeVideoWriter(
          fps=10, output_path=debug_video_path, file_format="avi")
    kwargs["eval_fn"] = make_eval_fn_with_agent(
        agent_type, planner_hparams, model_dir,
        log_every_steps=log_every_steps,
        video_writer=video_writer
    )
  eval_metrics = rl_utils.evaluate_all_configs(
      loop_hparams, policy_dir, **kwargs
  )
  rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)

  if video_writer is not None:
    video_writer.finish_to_disk()

  # Report metrics
  if report_fn:
    if report_metric == "mean_reward":
      metric_name = rl_utils.get_metric_name(
          sampling_temp=loop_hparams.eval_sampling_temps[0],
          max_num_noops=loop_hparams.eval_max_num_noops,
          clipped=False
      )
      report_fn(eval_metrics[metric_name], 0)
    else:
      report_fn(eval_metrics[report_metric], 0)
  return eval_metrics


def get_game_for_worker(map_name, directory_id):
  """Get game for the given worker (directory) id."""
  if map_name == "v100unfriendly":
    games = ["chopper_command", "boxing", "asterix", "seaquest"]
    worker_per_game = 5
  elif map_name == "human_nice":
    games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
    worker_per_game = 5
  else:
    raise ValueError("Unknown worker to game map name: %s" % map_name)
  games.sort()
  game_id = (directory_id - 1) // worker_per_game
  tf.logging.info("Getting game %d from %s." % (game_id, games))
  return games[game_id]


def main(_):
  now = datetime.datetime.now()
  now_tag = now.strftime("%Y_%m_%d_%H_%M")
  loop_hparams = trainer_lib.create_hparams(
      FLAGS.loop_hparams_set, FLAGS.loop_hparams
  )
  if FLAGS.worker_to_game_map and FLAGS.total_num_workers > 1:
    loop_hparams.game = get_game_for_worker(
        FLAGS.worker_to_game_map, FLAGS.worker_id + 1)
    tf.logging.info("Set game to %s." % loop_hparams.game)
  if FLAGS.full_eval:
    loop_hparams.eval_rl_env_max_episode_steps = -1
  planner_hparams = trainer_lib.create_hparams(
      FLAGS.planner_hparams_set, FLAGS.planner_hparams
  )
  policy_dir = FLAGS.policy_dir
  model_dir = FLAGS.model_dir
  eval_metrics_dir = FLAGS.eval_metrics_dir
  if FLAGS.output_dir:
    cur_dir = FLAGS.output_dir
    if FLAGS.total_num_workers > 1:
      cur_dir = os.path.join(cur_dir, "%d" % (FLAGS.worker_id + 1))
    policy_dir = os.path.join(cur_dir, "policy")
    model_dir = os.path.join(cur_dir, "world_model")
    eval_metrics_dir = os.path.join(cur_dir, "evaluator_" + now_tag)
    tf.logging.info("Writing metrics to %s." % eval_metrics_dir)
    if not tf.gfile.Exists(eval_metrics_dir):
      tf.gfile.MkDir(eval_metrics_dir)
  evaluate(
      loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
      FLAGS.agent, FLAGS.eval_with_learner,
      FLAGS.log_every_steps if FLAGS.log_every_steps > 0 else None,
      debug_video_path=FLAGS.debug_video_path
  )


if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run()
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 2864, 383, 309, 22854, 17, 51, 22854, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2...
2.531783
3,697
# Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
[ 2, 2980, 515, 416, 15300, 47477, 33497, 198, 11748, 12972, 9288, 198, 11748, 640, 198, 11748, 33918, 198, 6738, 384, 11925, 1505, 1330, 3992, 26230, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 11321, 13, 1525, 1330, 2750, 198, 67...
3.566929
127
from django.contrib import admin

from grandchallenge.components.models import (
    ComponentInterface,
    ComponentInterfaceValue,
)


admin.site.register(ComponentInterface, ComponentInterfaceAdmin)
admin.site.register(ComponentInterfaceValue, ComponentInterfaceValueAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 198, 6738, 4490, 36747, 3540, 13, 5589, 3906, 13, 27530, 1330, 357, 198, 220, 220, 220, 35100, 39317, 11, 198, 220, 220, 220, 35100, 39317, 11395, 11, 198, 8, 628, 628, 198, 28482, ...
3.943662
71
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 15:28:24 2016

@author: Parag Guruji, paragguruji@gmail.com
"""

from .helpers import setup_env

done = setup_env()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 2892, 1526, 2579, 1315, 25, 2078, 25, 1731, 1584, 198, 198, 31, 9800, 25, 2547, 363, 38749, 7285, 11, 1582, 9460, 14717, 7285, 31, 14816, 13, 7...
2.538462
65
"""Discover Nanoleaf Aurora devices.""" from . import MDNSDiscoverable
[ 37811, 44596, 18008, 2305, 1878, 22218, 4410, 526, 15931, 198, 6738, 764, 1330, 10670, 8035, 44596, 540, 628 ]
4
18
import os.path as op

import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, cross_val_score

import mne
from pyriemann.tangentspace import TangentSpace

import config_drago as cfg

meg = 'mag'
scale = 1e22
rank = 65
reg = 1e-6
seed = 42
n_jobs = 10
cv = KFold(n_splits=n_jobs, shuffle=True, random_state=seed)

file_covs = op.join(cfg.path_outputs, 'covs_allch_oas.float32.h5')
covs_allch = mne.externals.h5io.read_hdf5(file_covs)  # (sub, fb, ch, ch)

info = np.load(op.join(cfg.path_data, 'info_allch.npy')).item()
picks = mne.pick_types(info, meg=meg)

covs = proj_covs_common(covs_allch, picks, scale=scale, rank=rank, reg=reg)
X = proj_covs_ts(covs)
X = X.reshape(len(X), -1)

info = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
subjects = [d['subject'] for d in covs_allch if 'subject' in d]
y = info.set_index('Observations').age.loc[subjects]

ridge = make_pipeline(StandardScaler(),
                      RidgeCV(alphas=np.logspace(-3, 5, 100)))
score = - cross_val_score(ridge, X, y, cv=cv,
                          scoring="neg_mean_absolute_error",
                          n_jobs=n_jobs, verbose=True)
[ 11748, 28686, 13, 6978, 355, 1034, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 1341, 35720, 13, 79, 541, 4470, 1330, 787, 62, 79, 541, 4470, 198, 6738, 1341, 35720, 13, 29127, 62, 19849, ...
2.229706
579
# Copyright 2017~ mengalong <alongmeng@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import daiquiri
from six.moves.urllib import parse as urlparse
from stevedore import driver

logger = daiquiri.getLogger(__name__)
[ 2, 15069, 2177, 93, 1450, 13528, 506, 1279, 24176, 76, 1516, 31, 14816, 13, 785, 29, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845,...
3.548544
206
MATH_BYTECODE = (
    "606060405261022e806100126000396000f360606040523615610074576000357c01000000000000"
    "000000000000000000000000000000000000000000009004806316216f391461007657806361bc22"
    "1a146100995780637cf5dab0146100bc578063a5f3c23b146100e8578063d09de08a1461011d5780"
    "63dcf537b11461014057610074565b005b610083600480505061016c565b60405180828152602001"
    "91505060405180910390f35b6100a6600480505061017f565b604051808281526020019150506040"
    "5180910390f35b6100d26004808035906020019091905050610188565b6040518082815260200191"
    "505060405180910390f35b61010760048080359060200190919080359060200190919050506101ea"
    "565b6040518082815260200191505060405180910390f35b61012a6004805050610201565b604051"
    "8082815260200191505060405180910390f35b610156600480803590602001909190505061021756"
    "5b6040518082815260200191505060405180910390f35b6000600d9050805080905061017c565b90"
    "565b60006000505481565b6000816000600082828250540192505081905550600060005054905080"
    "507f3496c3ede4ec3ab3686712aa1c238593ea6a42df83f98a5ec7df9834cfa577c5816040518082"
    "815260200191505060405180910390a18090506101e5565b919050565b6000818301905080508090"
    "506101fb565b92915050565b600061020d6001610188565b9050610214565b90565b600060078202"
    "90508050809050610229565b91905056"
)


MATH_ABI = [
    {
        "constant": False,
        "inputs": [],
        "name": "return13",
        "outputs": [
            {"name": "result", "type": "int256"},
        ],
        "type": "function",
    },
    {
        "constant": True,
        "inputs": [],
        "name": "counter",
        "outputs": [
            {"name": "", "type": "uint256"},
        ],
        "type": "function",
    },
    {
        "constant": False,
        "inputs": [
            {"name": "amt", "type": "uint256"},
        ],
        "name": "increment",
        "outputs": [
            {"name": "result", "type": "uint256"},
        ],
        "type": "function",
    },
    {
        "constant": False,
        "inputs": [
            {"name": "a", "type": "int256"},
            {"name": "b", "type": "int256"},
        ],
        "name": "add",
        "outputs": [
            {"name": "result", "type": "int256"},
        ],
        "type": "function",
    },
    {
        "constant": False,
        "inputs": [],
        "name": "increment",
        "outputs": [
            {"name": "", "type": "uint256"},
        ],
        "type": "function"
    },
    {
        "constant": False,
        "inputs": [
            {"name": "a", "type": "int256"},
        ],
        "name": "multiply7",
        "outputs": [
            {"name": "result", "type": "int256"},
        ],
        "type": "function",
    },
    {
        "anonymous": False,
        "inputs": [
            {"indexed": False, "name": "value", "type": "uint256"},
        ],
        "name": "Increased",
        "type": "event",
    },
]
[ 201, 198, 44, 12599, 62, 17513, 51, 2943, 16820, 796, 357, 201, 198, 220, 220, 220, 366, 1899, 1899, 1899, 26598, 2075, 940, 1828, 68, 37988, 3064, 19420, 830, 34107, 830, 69, 15277, 1899, 1899, 26598, 24940, 21599, 44318, 2231, 4304, ...
1.821297
1,634
# -*- coding: utf-8 -*-

import os
import sys
from datetime import timedelta

from oslo.config import cfg

CONF = cfg.CONF

CONF.register_opts([
    cfg.StrOpt('log-dir'),
    cfg.StrOpt('log-file'),
    cfg.StrOpt('debug'),
    cfg.StrOpt('verbose'),
], 'log')

CONF.register_opts([
    cfg.StrOpt('connection'),
    cfg.StrOpt('data'),
], 'DB')

CONF.register_opts([
    cfg.StrOpt('server'),
    cfg.StrOpt('port'),
    cfg.StrOpt('from_addr'),
    cfg.StrOpt('info_list'),
    cfg.StrOpt('alert_list'),
], 'MAIL')

CONF.register_opts([
    cfg.StrOpt('allow_ip'),
    cfg.StrOpt('secret_key'),
    cfg.StrOpt('env'),
    cfg.StrOpt('local_group'),
    cfg.StrOpt('acl_dir'),
    cfg.StrOpt('view_acl_group')
], 'etc')

CONF.register_opts([
    cfg.IntOpt('dnsupdater_port'),
], 'api')

CONF.register_opts([
    cfg.StrOpt('acl_groups'),
    cfg.IntOpt('cname_ttl'),
    cfg.StrOpt('view_zone')
], 'view')

CONF.register_opts([
    cfg.StrOpt('base-url', default='/', help='The url prefix of this site.'),
    cfg.StrOpt('run-mode', default="werkzeug",
               choices=('gunicorn', 'werkzeug'),
               help="Run server use the specify mode."),
    cfg.StrOpt('bind', default='0.0.0.0', help='The IP address to bind'),
    cfg.IntOpt('port', default=8080, help='The port to listen'),
    cfg.BoolOpt('debug', default=False),
], 'web')

CONF.register_opts([
    cfg.StrOpt('config', default=None,
               help='The path to a Gunicorn config file.'),
    cfg.StrOpt('bind', default='127.0.0.1:8888'),
    cfg.IntOpt('workers', default=0,
               help='The number of worker processes for handling requests'),
    cfg.BoolOpt('daemon', default=False,
                help='Daemonize the Gunicorn process'),
    cfg.StrOpt('accesslog', default=None,
               help='The Access log file to write to.'
                    '"-" means log to stderr.'),
    cfg.StrOpt('loglevel', default='info',
               help='The granularity of Error log outputs.',
               choices=('debug', 'info', 'warning', 'error', 'critical')),
    cfg.BoolOpt('ignore-healthcheck-accesslog', default=False),
    cfg.IntOpt('timeout', default=30,
               help='Workers silent for more than this many seconds are '
                    'killed and restarted.'),
    cfg.StrOpt('worker-class', default='sync',
               help='The type of workers to use.',
               choices=('sync', 'eventlet', 'gevent', 'tornado'))
], 'gunicorn')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 6738, 4818, 8079, 1330, 28805, 12514, 198, 198, 6738, 28686, 5439, 13, 11250, 1330, 30218, 70, 198, 198, 10943, 37, 796, 30218, ...
1.988304
1,368
from __future__ import print_function

import time

from rover import Robot
from connections import Connections
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 640, 198, 6738, 35761, 1330, 16071, 198, 6738, 8787, 1330, 8113, 507, 628, 197, 198, 197, 197, 198 ]
4.142857
28
#
# File: $Id: parser.py 1865 2008-10-28 00:47:27Z scanner $
#
"""
This is where the logic and definition of our wiki markup parser lives.

We use the Python Creoleparser (which requires Genshi)

We make a custom dialect so that the parser can know the URL base for
all of the topics (pages) in the wiki and some additional goop so that
we can tell what other topics a given topic refers to.
"""

# system imports
#
from urllib import quote
from urlparse import urlparse
try:
    import threading
except ImportError:
    import dummy_threading as threading

# Django imports
#
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _

# 3rd party imports
#
from creoleparser.dialects import create_dialect, creole10_base, creole11_base
from creoleparser.core import Parser

from genshi import builder

# We see if we have the 'typogrify' app installed. If we do we will
# use it for rendering our templates to prettify them a bit.
#
try:
    from typogrify.templatetags.typogrify import typogrify
except ImportError:
    pass

# Model imports
#
from aswiki.models import Topic

############################################################################
############################################################################
#
############################################################################
#
def class_fn(topic_name):
    """
    This function is invoked by the markup dialect every time it
    encounters a wiki topic. It returns a string that is the css class
    name to add to wiki links as they are turned in to proper
    <a href></a> links.

    We use this as a way to annotate topics that do not exist yet with
    some graphical attribute so that users can easily tell which topics
    are not yet created.

    We use the wiki.models.TopicManager's css_class_name method to do
    this lookup.

    NOTE: Since this module is imported by the wiki.models module we
    need to import that module inside here so that we can access the
    Topic model. This is cheap since it will already be imported.

    Arguments:
    - `topic_name`: the topic name being checked for existence.
    """
    # XXX This is where we should do a cache lookup of the topic name
    #     and only if that fails fall back to
    #     Topic.objects.css_class_name(topic_name)
    #
    return Topic.objects.css_class_name(topic_name)

####################################################################
#
def output_mailto(arg_string):
    """
    Given the arguments of an anchor macro output the proper genshi
    stream that will render a mailto link.

    We also need to support the magic argument string format of
    '<you> AT <word> AT <foo> DOT <foo>'

    Arguments:
    - `arg_string`: The argument string of the anchor macro.
    - `macro_body`: The macro body if provided
    - `block_type`: True if this is a block macro.
    """
    # XXX Need to support the fancy format.. but for now just get the basic
    #     working.
    return builder.tag.a(arg_string, href="mailto:%s" % arg_string)

####################################################################
#
def output_subtopics(arg_string):
    """
    This will take a single string as its input. It will find all
    topics for which the string as a topic name is the parent topic.

    There is some semantic magic in a topic if it contains periods, ie:
    the '.' character. This forms a kind of hierarchy. Loosely speaking
    all topics that start with the same prefix, separated by '.' are
    sub-topics.

    So: 2007.Agenda is a sub-topic of 2007. 2007.Agenda.foo is a
    subtopic of 2007 and 2007.Agenda.

    This macro will insert in to the output <ul> of the topics that
    are proper subtopics of the given string, ordered by name.

    So in the above example if I were to say <<subtopics 2007>> it
    would give me "2007.Agenda" and "2007.Agenda.foo" in a <ul>

    If the arg string ends with a dot, then it is treated as the
    separator. ie: <<subtopics 2007.>> and <<subtopics 2007>> are
    identical.

    Arguments:
    - `arg_string`: The topic we want to find all subtopics of.
    """
    arg_string = arg_string
    if arg_string[-1] != '.':
        arg_string = arg_string + "."

    topics = Topic.objects.filter(lc_name__istartswith = arg_string.lower()).order_by('lc_name')
    if topics.count() == 0:
        return None

    ul = builder.tag.ul()

    # For every topic that matches our pattern we insert a 'li' link
    # to that topic in our output. We also add this topic to the
    # 'extra_references' list in our global TOPIC_LIST object. This is
    # so that the prerender../save() methods of the Topic object we are
    # rendering this output for can know to add those topics to the list
    # of topics referenced by the topic being rendered.
    for topic in topics:
        TOPIC_LIST.extra_references.append(topic)
        ul.append(builder.tag.li(builder.tag.a(topic.name,
                                               href = topic.get_absolute_url())))
    return ul

####################################################################
#
def output_attachments(arg_string):
    """
    Returns a <ul> of all of the attachments attached to the topic name
    given as the arg_string.

    Arguments:
    - `arg_string`: Expected to be the name of a topic. If no such
      topic exist, then no attachment list is generated.
    """
    try:
        topic = Topic.objects.get(lc_name = arg_string.lower())
    except Topic.DoesNotExist:
        return None

    ul = builder.tag.ul()

    # For every file attachment on this topic, add a 'li' link
    # to that attachment.
    #
    for attachment in topic.file_attachments.all():
        ul.append(builder.tag.li(builder.tag.a(attachment.basename(),
                                               href = attachment.get_absolute_url())))
    return ul

####################################################################
#
def macro_fn(name, arg_string, macro_body, block_type, environ):
    """
    Handles the macros we define for our version of markup.

    Arguments:
    - `name`: The name of the macro
    - `arg_string`: The argument string, including any delimiters
    - `macro_body`: The macro body, None for macro with no body.
    - `block_type`: True for block type macros.
    - `environ` : The environment object, passed through from
      creoleparser.core.Parser class's 'parse()' method.
    """
    name = name.strip().lower()
    arg_string = arg_string.strip()
    if name == 'anchor':
        if block_type:
            return builder.tag.a(macro_body, name = arg_string)
        else:
            return builder.tag.a(name = arg_string)
    elif name == 'mailto':
        return output_mailto(arg_string)
    elif name == 'gettext':
        if block_type:
            return _(macro_body)
        else:
            return _(arg_string)
    elif name == 'subtopics':
        return output_subtopics(arg_string)
    elif name == 'attachlist':
        return output_attachments(arg_string)
    elif name == 'attachment':
        # For including downloadable attachments in a wiki document.
        if block_type:
            return builder.tag.a(macro_body, href=arg_string)
        else:
            return builder.tag.a(arg_string, href=arg_string)
    return None

##
## Create our custom dialect. It will use our class function and a TopicList
## instance. The root URL for all wiki topics will be the same as the
## 'aswiki_topic_index' url.
##
## NOTE: This assumes that the url for a specific Topic is the same as the url
##       for the aswiki_topic_index with the Topic name appended to it
##
TOPIC_LIST = TopicList()

# dialect = creoleparser.dialects.Creole10(
#     wiki_links_base_url = reverse('aswiki_topic_index'),
#     wiki_links_space_char = '%20',
#     use_additions = True,
#     no_wiki_monospace = False,
#     wiki_links_class_func = class_fn,
#     wiki_links_path_func = TOPIC_LIST.path_fn,
#     macro_func = macro_fn,
#     interwiki_links_base_urls=dict(wikicreole='http://wikicreole.org/wiki/',
#                                    wikipedia='http://wikipedia.org/wiki/',)
#     )

parser = Parser(dialect = create_dialect(
    creole11_base,
    wiki_links_base_url = reverse('aswiki_topic_index'),  # NOTE: Make this
                                                          # a two element
                                                          # list for images
                                                          # to be loaded
                                                          # from a separate
                                                          # URL
    wiki_links_space_char = '%20',  # NOTE: make this a two element list to
                                    # give images a different space
                                    # character.
    no_wiki_monospace = False,
    wiki_links_class_func = class_fn,
    wiki_links_path_func = (TOPIC_LIST.path_fn,
                            TOPIC_LIST.image_fn),
    bodied_macros = {},
    non_bodied_macros = {},
    macro_func = macro_fn,
    # custom_markup = (),
    interwiki_links_base_urls = {
        'wikicreole': 'http://wikicreole.org/wiki/',
        'wikipedia': 'http://wikipedia.org/wiki/'
    }
))
[ 2, 198, 2, 9220, 25, 720, 7390, 25, 30751, 13, 9078, 47801, 3648, 12, 940, 12, 2078, 3571, 25, 2857, 25, 1983, 57, 27474, 720, 198, 2, 198, 37811, 198, 1212, 318, 810, 262, 9156, 290, 6770, 286, 674, 22719, 41485, 30751, 3160, 13,...
2.616695
3,582
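A usage sketch for the module-level parser built above. The wiki text is invented, and actually rendering it needs Genshi plus a resolvable 'aswiki_topic_index' URL pattern, so treat this as illustrative rather than guaranteed output:

wiki_text = "= Agenda =\nSee [[2007.Agenda]] and the macro <<subtopics 2007>>."
html = parser(wiki_text)  # creoleparser Parser instances are callable
print(html)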
import base64
import binascii
from datetime import timedelta

from django.contrib.auth import authenticate
from django.utils import timezone

from oauthlib.oauth2 import RequestValidator

from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication
from oauth_api.settings import oauth_api_settings


GRANT_TYPE_MAPPING = {
    'authorization_code': (AbstractApplication.GRANT_AUTHORIZATION_CODE,),
    'password': (AbstractApplication.GRANT_PASSWORD,),
    'client_credentials': (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),
    'refresh_token': (AbstractApplication.GRANT_AUTHORIZATION_CODE,
                      AbstractApplication.GRANT_PASSWORD,
                      AbstractApplication.GRANT_CLIENT_CREDENTIALS)
}
[ 11748, 2779, 2414, 198, 11748, 9874, 292, 979, 72, 198, 6738, 4818, 8079, 1330, 28805, 12514, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 8323, 5344, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 198, 198, 6738...
2.96875
256
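A sketch of how GRANT_TYPE_MAPPING is typically consumed inside an oauthlib RequestValidator subclass. The class name and the client.authorization_grant_type attribute are assumptions for illustration; the package's real validator is not part of this row:

class OAuthValidator(RequestValidator):
    def validate_grant_type(self, client_id, grant_type, client, request,
                            *args, **kwargs):
        # Permit the request only when the client was registered with a
        # grant type compatible with the one being exercised.
        allowed = GRANT_TYPE_MAPPING.get(grant_type, ())
        return client.authorization_grant_type in allowed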
def modify(lst):
    lst.append(4)  # modify() was undefined in this snippet; assume an in-place mutation
    return lst

x = [1, 2, 3]
y = modify(x)
print("x == y", x == y)
print("x is y", x is y)
[ 87, 796, 685, 16, 11, 362, 11, 513, 60, 201, 198, 88, 796, 13096, 7, 87, 8, 201, 198, 4798, 7203, 87, 6624, 331, 1600, 2124, 6624, 331, 8, 201, 198, 4798, 7203, 87, 6624, 331, 1600, 2124, 318, 331, 8 ]
1.902439
41
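The snippet above is a quiz on equality versus identity. A self-contained illustration of both outcomes, assuming modify mutates its argument in place:

def modify_in_place(lst):
    lst.append(4)
    return lst            # same object handed back

def rebind(lst):
    return lst + [4]      # brand-new list object

a = [1, 2, 3]
b = modify_in_place(a)
print(a == b, a is b)     # True True

c = [1, 2, 3]
d = rebind(c)
print(c == d, c is d)     # False False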
import sys, os import tarfile import shutil from edx_gen import _edx_consts from edx_gen import _read_metadata from edx_gen import _write_structure from edx_gen import _write_comps from edx_gen import _write_comp_html from edx_gen import _write_comp_checkboxes from edx_gen import _write_comp_video from edx_gen import _xml_google_doc from edx_gen import _markdown from edx_gen import _util import __SETTINGS__ #-------------------------------------------------------------------------------------------------- # Text strings WARNING = " WARNING:" #-------------------------------------------------------------------------------------------------- # write to either units folder or problems folder, depending on the type #-------------------------------------------------------------------------------------------------- # write to either units folder or problems folder, depending on the type #--------------------------------------------------------------------------------------------------
[ 11748, 25064, 11, 28686, 198, 11748, 13422, 7753, 198, 11748, 4423, 346, 198, 6738, 1225, 87, 62, 5235, 1330, 220, 4808, 276, 87, 62, 1102, 6448, 198, 6738, 1225, 87, 62, 5235, 1330, 220, 4808, 961, 62, 38993, 198, 6738, 1225, 87, 6...
4.625571
219
import re

# 1. Create students score dictionary.
students_score = {}

# 2.1 Create a function that evaluates the validity of a name.
#     (Alphabet, period, and blank only.)
def check_name(name):
    return re.fullmatch(r"[A-Za-z. ]+", name) is not None

# 2. Input student's name and check if input is correct.
while True:
    # 2.2 Input student's name.
    name = input("Please input student's name. \n")
    # 2.3 Check if the name is valid. If not, ask to input a correct name again.
    while not check_name(name):
        name = input("Please input student's name. (Alphabet and period only.)\n")
    # 3. Input student's score and check if input is correct.
    #    (Digits only and between zero and 100.)
    score = input(f"Please input {name}'s score.(0 ~ 100)\n")
    while not score.isdigit() or int(score) not in range(0, 101):
        score = input("Please input valid numbers only.(Number from zero to 100.)\n")
    students_score[name] = score
    # 4. Ask for another student's information.
    another_student = input(
        "Do you want to input another student's information as well? (Y/N)\n"
    )
    # 4.1 Check if the input is valid.
    while another_student.lower() not in ("yes", "y", "n", "no"):
        another_student = input("Please input Y/N only.\n")
    if another_student.lower() in ("yes", "y"):
        continue
    elif another_student.lower() in ("no", "n"):
        break

# 5. Convert each score to a letter grade.
for student in students_score:
    score = int(students_score[student])
    if score >= 90:
        students_score[student] = "A"
    elif score in range(70, 90):
        students_score[student] = "B"
    elif score in range(50, 70):
        students_score[student] = "C"
    elif score in range(40, 50):
        students_score[student] = "D"
    else:
        students_score[student] = "F"

print(students_score)
[ 2, 352, 13, 13610, 2444, 4776, 22155, 13, 198, 19149, 658, 62, 26675, 796, 23884, 198, 198, 2, 362, 13, 23412, 3710, 338, 1438, 290, 2198, 611, 5128, 318, 3376, 13, 357, 2348, 19557, 11, 2278, 11, 290, 9178, 691, 2014, 198, 198, 2...
2.687783
663
import unittest from recipe import utils
[ 11748, 555, 715, 395, 201, 198, 201, 198, 6738, 8364, 1330, 3384, 4487, 201, 198, 201, 198 ]
2.764706
17
import numpy as np
import cv2
import os.path as osp
import json

from human_body_prior.tools.model_loader import load_vposer
import torch

vposer_ckpt = '/Vol1/dbstore/datasets/a.vakhitov/projects/pykinect_fresh/smplify-x/smplify-x-data/vposer_v1_0/'

def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
        Parameters
        ----------
        rot_vecs: torch.tensor Nx3
            array of N axis-angle vectors
        Returns
        -------
        R: torch.tensor Nx3x3
            The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device

    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle

    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)

    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)

    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))

    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
[ 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 198, 11748, 28686, 13, 6978, 355, 267, 2777, 198, 11748, 33918, 198, 198, 6738, 1692, 62, 2618, 62, 3448, 273, 13, 31391, 13, 19849, 62, 29356, 1330, 3440, 62, 85, 1930, 263, 1...
2.2064
625
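A quick sanity check for batch_rodrigues, assuming any recent PyTorch: a 90-degree rotation about the z-axis must map the x-axis onto the y-axis, and every output must be orthonormal.

import math
import torch

rot_vecs = torch.tensor([[0.0, 0.0, math.pi / 2]])  # axis-angle: 90 deg about z
R = batch_rodrigues(rot_vecs)                       # shape (1, 3, 3)
print(R[0] @ torch.tensor([1.0, 0.0, 0.0]))         # ~tensor([0., 1., 0.])
print(torch.allclose(R[0] @ R[0].T, torch.eye(3), atol=1e-5))  # True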
import docker
from dockerfile_generator import render
import os
import json
from tqdm import tqdm
from typing import Union, Any, Optional


def get_completion_percentage(line: bytes) -> Optional[float]:
    """Assumed helper (called but never defined in the original snippet):
    extract a rough completion percentage from one line of the docker push
    stream. Lines that are not single JSON objects with progress info are
    skipped."""
    try:
        status = json.loads(line)
    except json.JSONDecodeError:
        return None
    detail = status.get("progressDetail") or {}
    if not detail.get("total"):
        return None
    return min(100.0, 100.0 * detail.get("current", 0) / detail["total"])


def build_image(repo_url: str, tag: str, path: str) -> None:
    """build_image builds the image with the given tag"""
    client = docker.from_env()
    print(f"Building image: {tag}")
    client.images.build(tag=tag, path=path)
    print("Successfully built image!")


def push_image(tag: str) -> None:
    """push_image pushes the given tag. It uses the current docker environment"""
    client = docker.from_env()
    print(f"Pushing image: {tag}")
    with tqdm(total=100, ascii=False) as progress_bar:
        last_percent = 0.0
        for line in client.images.push(tag, stream=True):
            percent = get_completion_percentage(line)
            if percent:
                progress_bar.update(percent - last_percent)
                last_percent = percent


def build_and_push_image(repo_url: str, tag: str, path: str, image_type: str) -> None:
    """build_and_push_operator creates the Dockerfile for the operator
    and pushes it to the target repo"""
    dockerfile_text = render(image_type, ["."])
    with open(f"{path}/Dockerfile", "w") as f:
        f.write(dockerfile_text)
    build_image(repo_url, tag, path)
    os.remove(f"{path}/Dockerfile")
    push_image(tag)
[ 11748, 36253, 198, 6738, 36253, 7753, 62, 8612, 1352, 1330, 8543, 198, 11748, 28686, 198, 11748, 33918, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 198, 6738, 19720, 1330, 4479, 11, 4377, 11, 32233, 628, 198, 4299, 1382, 62, ...
2.511927
545
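Hypothetical usage of the helpers above; the registry, tag, and context path are placeholders, not values from the original project:

build_and_push_image(
    repo_url="registry.example.com/myteam",            # placeholder
    tag="registry.example.com/myteam/operator:0.1.0",  # placeholder
    path="./operator",                                 # local build context
    image_type="operator",
)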
from VcfFilter import VcfFilter import argparse import os #get command line arguments parser = argparse.ArgumentParser(description='Script to select a certain variant type from a VCF file') #parameters parser.add_argument('--bcftools_folder', type=str, required=True, help='Folder containing the Bcftools binary' ) parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' ) parser.add_argument('--type', type=str, required=False, help='Type of variant to select. i.e. snps/indels etc' ) args = parser.parse_args() if __name__ == '__main__': vcf_f=VcfFilter(vcf=args.filename,bcftools_folder=args.bcftools_folder) vcf_f.filter_by_variant_type(type=args.type)
[ 6738, 569, 12993, 22417, 1330, 569, 12993, 22417, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 198, 2, 1136, 3141, 1627, 7159, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7, 11213, 11639, 7391, 284, 2922, 257, 17...
2.973684
304
import unittest from test import support import base64 import binascii import os import sys import subprocess def test_main(): support.run_unittest(__name__) if __name__ == '__main__': test_main()
[ 11748, 555, 715, 395, 198, 6738, 1332, 1330, 1104, 198, 11748, 2779, 2414, 198, 11748, 9874, 292, 979, 72, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 850, 14681, 628, 628, 628, 628, 198, 4299, 1332, 62, 12417, 33529, 198, 220, ...
2.815789
76
# -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-02-25 20:32 from __future__ import unicode_literals from django.db import migrations, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 1959, 319, 33448, 12, 2999, 12, 1495, 1160, 25, 2624, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198...
2.754386
57
"""Retrieve and request tweets from the DS API""" import requests import spacy from .models import DB, Tweet, User nlp = spacy.load("my_model") # Add and updates tweets def add_or_update_user(username): """Adds and updates the user with twiter handle 'username' to our database """ #TODO: Figure out try: r = requests.get( f"https://lambda-ds-twit-assist.herokuapp.com/user/{username}") user = r.json() user_id = user["twitter_handle"]["id"] # print(user) # This is either respectively grabs or creates a user for our db db_user = (User.query.get(user_id)) or User(id=user_id, name=username) # This adds the db_user to our database DB.session.add(db_user) tweets = user["tweets"] # if tweets: # db_user.newest_tweet_id = tweets[0].id for tweet in tweets: tweet_vector = vectorize_tweet(tweet["full_text"]) tweet_id = tweet["id"] db_tweet = (Tweet.query.get(tweet_id)) or Tweet( id=tweet["id"], text=tweet["full_text"], vect=tweet_vector) db_user.tweets.append(db_tweet) DB.session.add(db_tweet) except Exception as e: print("Error processing {}: {}".format(username, e)) raise e else: DB.session.commit()
[ 37811, 9781, 30227, 290, 2581, 12665, 422, 262, 17400, 7824, 37811, 198, 11748, 7007, 198, 11748, 599, 1590, 198, 6738, 764, 27530, 1330, 20137, 11, 18752, 11, 11787, 628, 198, 21283, 79, 796, 599, 1590, 13, 2220, 7203, 1820, 62, 19849,...
2.226384
614
import io grid = {} y = 0 x = 0 for l in io.open("day22.in").read().splitlines(): for x in range(len(l)): grid[(y,x)] = l[x] y += 1 y = y // 2 x = x // 2 dx = 0 dy = -1 r = 0 for iter in range(10000000): if (y,x) not in grid or grid[(y,x)] == '.': (dy, dx) = (-dx, dy) grid[(y,x)] = 'W' elif grid[(y,x)] == 'W': grid[(y,x)] = '#' r += 1 elif grid[(y,x)] == '#': (dy, dx) = (dx, -dy) grid[(y,x)] = 'F' elif grid[(y,x)] == 'F': (dy, dx) = (-dy, -dx) grid[(y,x)] = '.' y += dy x += dx print(r)
[ 11748, 33245, 198, 198, 25928, 796, 23884, 198, 88, 796, 657, 198, 87, 796, 657, 198, 1640, 300, 287, 33245, 13, 9654, 7203, 820, 1828, 13, 259, 11074, 961, 22446, 35312, 6615, 33529, 198, 220, 220, 220, 329, 2124, 287, 2837, 7, 119...
1.70977
348
from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
from functools import wraps

_COMPLEX_PLOTTING_ERROR_MSG = """
Complex fields cannot be plotted. Use operators to get the amplitude
or the result at a defined sweeping phase before plotting.
"""

_FIELD_CONTAINER_PLOTTING_MSG = """
This fields_container contains multiple fields.
Only one time-step result can be plotted at a time.
Extract a field with ``fields_container[index]``.
"""


def protect_grpc(func):
    """Capture gRPC exceptions and return a more succinct error message."""
    # The wrapper body was missing in the original snippet; this is a
    # minimal plausible version.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (_InactiveRpcError, _MultiThreadedRendezvous) as error:
            raise RuntimeError(error.details()) from None
    return wrapper
[ 6738, 1036, 14751, 13557, 17620, 1330, 4808, 818, 5275, 49, 14751, 12331, 11, 4808, 29800, 16818, 276, 49, 41913, 31222, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 198, 62, 41335, 55, 62, 6489, 29089, 2751, 62, 24908, 62, 5653, 38, ...
3.470588
170
''' IO '''
[ 7061, 6, 198, 9399, 198, 7061, 6, 198 ]
1.375
8
from mumodo.mumodoIO import open_intervalframe_from_textgrid
import numpy
from deep_disfluency.utils.accuracy import wer


final_file = open('wer_test.text', "w")

ranges1 = [line.strip() for line in open(
    "/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASR_ranges.text")]
ranges2 = [line.strip() for line in open(
    "/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASR_ranges.text")]

for ranges in [ranges1, ranges2]:
    final_file.write("\n\n")
    for r in ranges:
        for s in ["A", "B"]:
            iframe = open_intervalframe_from_textgrid("{0}{1}.TextGrid"
                                                      .format(r, s))
            hyp = " ".join(iframe['Hyp']['text'])
            ref = " ".join(iframe['Ref']['text'])
            # NB: the original rebound the name `wer` to the returned score,
            # shadowing the imported function and crashing on the very next
            # call; keep the score under a separate name instead.
            wer_score = wer(ref, hyp)
            cost = wer(ref, hyp, macro=True)
            print r, s, wer_score
            print>>final_file, r, s, wer_score, cost
final_file.close()

# Based on the results, output the 'good' ASR results
results = open("wer_test.text")
no_ho = 0
no_test = 0
ingood = True
file = open("../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASRgood_ranges.text", "w")
for l in results:
    # print l
    if l == "\n":
        print no_ho
        no_ho = 0
        file.close()
        file = open(
            "../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASRgood_ranges.text",
            "w")
        continue
    if float(l.strip('\n').split(" ")[2]) < 0.4:
        # both speakers are under 40% error rate- likely half decent separation
        # print l
        if ingood and "B" in l.strip("\n").split(" ")[1]:
            no_ho += 1
            #file.write(l.strip('\n').split(" ")[0]+l.strip('\n').split(" ")[1]+"\n")
            file.write(l.strip('\n').split(" ")[0] + "\n")
        ingood = True
    else:
        ingood = False
print no_ho
results.close()
file.close()
[ 6738, 25682, 24313, 13, 76, 388, 24313, 9399, 1330, 1280, 62, 3849, 85, 1604, 28073, 62, 6738, 62, 5239, 25928, 198, 11748, 299, 32152, 198, 6738, 2769, 62, 67, 4468, 2290, 1387, 13, 26791, 13, 4134, 23843, 1330, 266, 263, 628, 198, ...
2.061538
1,040
from dataclasses import dataclass from dataclasses import field from time import time from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Tuple
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 4818, 330, 28958, 1330, 2214, 198, 6738, 640, 1330, 640, 198, 6738, 19720, 1330, 4377, 198, 6738, 19720, 1330, 4889, 540, 198, 6738, 19720, 1330, 360, 713, 198, 6738, 19720, 133...
4.339286
56
from .document_summary import definition as document_summary_definition from .organization_summary import definition as organization_summmary_definition definition = { "where": "?subj a foaf:Person .", "fields": { "name": { "where": "?subj rdfs:label ?obj ." }, #Contact info "email": { "where": """ ?subj obo:ARG_2000028 ?vc . ?vc a vcard:Kind . ?vc vcard:hasEmail ?vce . ?vce a vcard:Email, vcard:Work . ?vce vcard:email ?obj . """ }, "telephone": { "where": """ ?subj obo:ARG_2000028 ?vc . ?vc a vcard:Kind . ?vc vcard:hasTelephone ?vct . ?vct a vcard:Telephone . ?vct vcard:telephone ?obj . """ }, "address": { "where": """ ?subj obo:ARG_2000028 ?vc . ?vc a vcard:Kind . ?vc vcard:hasAddress ?obj . """, "definition": { "where": "?subj a vcard:Address .", "fields": { "address": { "where": "?subj vcard:streetAddress ?obj ." }, "city": { "where": "?subj vcard:locality ?obj ." }, "state": { "where": "?subj vcard:region ?obj ." }, "zip": { "where": "?subj vcard:postalCode ?obj ." } } } }, "website": { "list": True, "where": """ ?subj obo:ARG_2000028 ?vc . ?vc a vcard:Kind . ?vc vcard:hasURL ?vcu . ?vcu a vcard:URL . ?vcu vcard:url ?obj . """, "optional": True }, "researchArea": { "where": """ ?subj vivo:hasResearchArea ?ra . ?ra rdfs:label ?obj . """, "optional": True, "list": True }, "geographicFocus": { "where": """ ?subj vivo:geographicFocus ?gf . ?gf rdfs:label ?obj . """, "optional": True, "list": True }, "overview": { "where": "?subj vivo:overview ?obj .", "optional": True, }, "positions": { "where": "?subj vivo:relatedBy ?obj .", "definition": { "where": "?subj a vivo:Position .", "fields": { "title": { "where": "?subj rdfs:label ?obj ." }, "organization": { "where": "?subj vivo:relates ?obj .", "definition": organization_summmary_definition } } }, "optional": True, "list": True }, "publications": { "where": """ ?subj vivo:relatedBy ?aship . ?aship a vivo:Authorship . ?aship vivo:relates ?obj . """, "definition": document_summary_definition, "optional": True, "list": True } } }
[ 6738, 764, 22897, 62, 49736, 1330, 6770, 355, 3188, 62, 49736, 62, 46758, 198, 6738, 764, 9971, 1634, 62, 49736, 1330, 6770, 355, 4009, 62, 16345, 3020, 560, 62, 46758, 198, 198, 46758, 796, 1391, 198, 220, 220, 220, 366, 3003, 1298, ...
1.52788
2,457
from django.db.models import Q from hier.search import SearchResult from .models import app_name, Apart, Meter, Bill, Service, Price
[ 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 1195, 198, 6738, 13550, 13, 12947, 1330, 11140, 23004, 198, 6738, 764, 27530, 1330, 598, 62, 3672, 11, 22596, 11, 46423, 11, 3941, 11, 4809, 11, 7886, 198 ]
3.694444
36
import os import shutil import numpy as np from pyrevolve.custom_logging.logger import logger import sys
[ 11748, 28686, 198, 11748, 4423, 346, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 12972, 18218, 6442, 13, 23144, 62, 6404, 2667, 13, 6404, 1362, 1330, 49706, 198, 11748, 25064, 628 ]
3.419355
31
#$Id$
[ 29953, 7390, 3, 628 ]
1.75
4
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup with open("README.md") as f: readme = f.read() setup( name="dpr", version="0.1.0", description="Facebook AI Research Open Domain Q&A Toolkit", url="https://github.com/facebookresearch/DPR/", classifiers=[ "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.6", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], long_description=readme, long_description_content_type="text/markdown", setup_requires=[ "setuptools>=18.0", ], install_requires=[ "cython", "faiss-cpu>=1.6.1", "filelock", "numpy", "regex", "torch>=1.2.0", "transformers>=3.0.0,<3.1.0", "tqdm>=4.27", "wget", "spacy>=2.1.8", ], )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, ...
2.286017
472
# encoding: utf-8 """ @version: v1.0 @author: Richard @license: Apache Licence @contact: billions.richard@qq.com @site: @software: PyCharm @time: 2019/9/12 20:37 """ from pprint import pprint as pp from operator import itemgetter import time from collections import OrderedDict from hard.smallest_range.srcs.big_2d_list import BIG_LIST_85 from hard.smallest_range.srcs.big_2d_list import BIG_LIST_86 if __name__ == '__main__': s = Solution() nums = [[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]] # nums = [[10], [11]] # nums = [[11,38,83, # 84,84,85,88,89,89,92],[28,61,89],[52,77,79,80,81],[21,25,26,26,26,27],[9,83,85,90],[84,85,87],[26,68,70,71],[36,40,41,42,45],[-34,21],[-28,-28,-23,1,13,21,28,37,37,38],[-74,1,2,22,33,35,43,45],[54,96,98,98,99],[43,54,60,65,71,75],[43,46],[50,50,58,67,69],[7,14,15],[78,80,89,89,90],[35,47,63,69,77,92,94]] # [-74, 1, 2, 22, 33, 35, 43, 45], [54, 96, 98, 98, 99], [43, 54, 60, 65, 71, 75], [43, 46], # [50, 50, 58, 67, 69], [7, 14, 15], [78, 80, 89, 89, 90], [35, 47, 63, 69, 77, 92, 94]] nums = BIG_LIST_85 # nums = BIG_LIST_86 min_range = s.smallestRange(nums) print(min_range)
[ 2, 21004, 25, 3384, 69, 12, 23, 220, 220, 198, 198, 37811, 220, 198, 31, 9641, 25, 410, 16, 13, 15, 220, 198, 31, 9800, 25, 6219, 198, 31, 43085, 25, 24843, 10483, 594, 220, 220, 198, 31, 32057, 25, 13188, 13, 7527, 446, 31, 3...
2.020134
596
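The row above calls s.smallestRange(nums) but never shows the Solution class. A minimal sketch of the classic heap-based approach (one cursor per list in a min-heap, track the running maximum, shrink the best window); not necessarily the author's version:

import heapq

class Solution(object):
    def smallestRange(self, nums):
        # (value, list index, element index) for the head of every list
        heap = [(lst[0], i, 0) for i, lst in enumerate(nums)]
        heapq.heapify(heap)
        cur_max = max(lst[0] for lst in nums)
        best = [heap[0][0], cur_max]
        while True:
            cur_min, i, j = heapq.heappop(heap)
            if cur_max - cur_min < best[1] - best[0]:
                best = [cur_min, cur_max]
            if j + 1 == len(nums[i]):
                return best          # one list exhausted; window cannot grow
            nxt = nums[i][j + 1]
            cur_max = max(cur_max, nxt)
            heapq.heappush(heap, (nxt, i, j + 1))

For the first nums in the row this returns [20, 24], the expected answer for LeetCode 632.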
from pcf.core.gcp_resource import GCPResource from pcf.core import State import logging from google.cloud import storage from google.cloud import exceptions logger = logging.getLogger(__name__)
[ 6738, 279, 12993, 13, 7295, 13, 70, 13155, 62, 31092, 1330, 20145, 4805, 274, 1668, 198, 6738, 279, 12993, 13, 7295, 1330, 1812, 198, 11748, 18931, 198, 6738, 23645, 13, 17721, 1330, 6143, 198, 6738, 23645, 13, 17721, 1330, 13269, 198, ...
3.45614
57
import numpy as np
[ 11748, 299, 32152, 355, 45941, 628 ]
3.333333
6
from __future__ import absolute_import import abc import os import json import glob import shutil from tensorflow.python.estimator import gc from tensorflow.python.estimator import util from tensorflow.python.estimator.canned import metric_keys from tensorflow.python.framework import errors_impl from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging from tensorflow.python.summary import summary_iterator from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter def _verify_compare_fn_args(compare_fn): """Verifies compare_fn arguments.""" args = set(util.fn_args(compare_fn)) if 'best_eval_result' not in args: raise ValueError( 'compare_fn (%s) must include best_eval_result argument.' % compare_fn) if 'current_eval_result' not in args: raise ValueError( 'compare_fn (%s) must include current_eval_result argument.' % compare_fn) non_valid_args = list(args - set(['best_eval_result', 'current_eval_result'])) if non_valid_args: raise ValueError('compare_fn (%s) has following not expected args: %s' % (compare_fn, non_valid_args)) def _loss_smaller(best_eval_result, current_eval_result): """Compares two evaluation results and returns true if the 2nd one is smaller. Both evaluation results should have the values for MetricKeys.LOSS, which are used for comparison. Args: best_eval_result: best eval metrics. current_eval_result: current eval metrics. Returns: True if the loss of current_eval_result is smaller; otherwise, False. Raises: ValueError: If input eval result is None or no loss is available. """ default_key = metric_keys.MetricKeys.LOSS if not best_eval_result or default_key not in best_eval_result: raise ValueError( 'best_eval_result cannot be empty or no loss is found in it.') if not current_eval_result or default_key not in current_eval_result: raise ValueError( 'current_eval_result cannot be empty or no loss is found in it.') return best_eval_result[default_key] > current_eval_result[default_key]
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 11748, 450, 66, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 15095, 198, 11748, 4423, 346, 198, 198, 6738, 11192, 273, 11125, 13, 29412, 13, 395, 320, 1352, 1330, 308, 66, ...
2.805063
790
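_loss_smaller is the default comparator; any replacement only has to accept the two argument names that _verify_compare_fn_args checks for. A sketch that instead keeps the checkpoint with the higher accuracy, assuming an 'accuracy' key is present in the eval results:

def _accuracy_bigger(best_eval_result, current_eval_result):
    key = 'accuracy'
    if not best_eval_result or key not in best_eval_result:
        raise ValueError('best_eval_result cannot be empty or has no accuracy.')
    if not current_eval_result or key not in current_eval_result:
        raise ValueError('current_eval_result cannot be empty or has no accuracy.')
    return best_eval_result[key] < current_eval_result[key]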
# Copyright: 2005-2012 Brian Harring <ferringb@gmail.com # Copyright: 2006 Marien Zwart <marienz@gentoo.org> # License: BSD/GPL2 """ base restriction class """ from functools import partial from snakeoil import caching, klass from snakeoil.currying import pretty_docs def curry_node_type(cls, node_type, extradoc=None): """Helper function for creating restrictions of a certain type. This uses :obj:`partial` to pass a node_type to the wrapped class, and extends the docstring. :param cls: callable (usually a class) that is wrapped. :param node_type: value passed as node_type. :param extradoc: addition to the docstring. Defaults to "Automatically set to %s type." % node_type :return: a wrapped callable. """ if extradoc is None: extradoc = "Automatically set to %s type." % (node_type,) doc = cls.__doc__ result = partial(cls, node_type=node_type) if doc is None: doc = '' else: # do this so indentation on pydoc __doc__ is sane doc = "\n".join(line.lstrip() for line in doc.split("\n")) + "\n" doc += extradoc return pretty_docs(result, doc) value_type = "values" package_type = "package" valid_types = (value_type, package_type)
[ 2, 15069, 25, 5075, 12, 6999, 8403, 2113, 1806, 1279, 2232, 1806, 65, 31, 14816, 13, 785, 198, 2, 15069, 25, 4793, 1526, 2013, 1168, 24657, 1279, 3876, 2013, 89, 31, 6783, 2238, 13, 2398, 29, 198, 2, 13789, 25, 347, 10305, 14, 38,...
2.75
456
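A usage sketch for curry_node_type; the restriction class here is hypothetical, and only the helper plus the module-level type constants come from the row above:

class AlwaysTrue:
    """Restriction that matches anything."""
    def __init__(self, node_type=None):
        self.node_type = node_type

AlwaysTruePackage = curry_node_type(AlwaysTrue, package_type)
restriction = AlwaysTruePackage()   # node_type is pre-bound to "package"
print(restriction.node_type)        # -> "package"
print(AlwaysTruePackage.__doc__)    # original docstring plus the extradoc note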
"""extend_ip_field Revision ID: 8da20383f6e1 Revises: eeb702f77d7d Create Date: 2021-01-14 10:50:56.275257 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "8da20383f6e1" down_revision = "eeb702f77d7d" branch_labels = None depends_on = None
[ 37811, 2302, 437, 62, 541, 62, 3245, 198, 198, 18009, 1166, 4522, 25, 807, 6814, 1238, 34741, 69, 21, 68, 16, 198, 18009, 2696, 25, 304, 1765, 36680, 69, 3324, 67, 22, 67, 198, 16447, 7536, 25, 33448, 12, 486, 12, 1415, 838, 25, ...
2.375
128
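The revision file above stops before its operations. A generic sketch of what an "extend ip field" migration usually contains; the table name, column name, and lengths are pure assumptions for illustration:

def upgrade():
    op.alter_column(
        "events", "ip",                     # table/column names assumed
        existing_type=sa.String(length=16),
        type_=sa.String(length=45),         # room for the IPv6 text form
    )


def downgrade():
    op.alter_column(
        "events", "ip",
        existing_type=sa.String(length=45),
        type_=sa.String(length=16),
    )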
import ply.lex as lex

tokens = ["NUM", "OPERADORES"]

t_NUM = r'\d+'
# A character class matches single characters, so the original '[+|*|-]'
# also matched a literal '|'; '[+*-]' is what was intended.
t_OPERADORES = r'[+*-]'

t_ignore = '\n\t '

lexer = lex.lex()

# 1+2 1-2 1*2
# hello world  ("ola mundo" in the original)

import sys
for line in sys.stdin:
    lexer.input(line)
    for tok in lexer:
        print(tok)
[ 11748, 35960, 13, 2588, 355, 31191, 198, 198, 83, 482, 641, 796, 14692, 41359, 2430, 31054, 2885, 1581, 1546, 8973, 198, 198, 83, 62, 41359, 796, 705, 59, 67, 10, 6, 198, 83, 62, 31054, 2885, 1581, 1546, 796, 44438, 10, 91, 9, 91,...
1.931298
131
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-07-10 20:40 from __future__ import unicode_literals from django.db import migrations, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 24, 319, 2864, 12, 2998, 12, 940, 1160, 25, 1821, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.736842
57
from __future__ import annotations import typing from ctc import spec from . import timestamp_crud from . import metric_crud from . import analytics_spec # def update_payload( # timescale: analytics_spec.Timescale, # old_payload: analytics_spec.AnalyticsPayload, # ) -> analytics_spec.AnalyticsPayload: # new_timestamps = get_new_timestamps( # timescale=timescale, # old_payload=old_payload, # ) # new_blocks = get_new_blocks( # new_timestamps=new_timestamps, # old_payload=old_payload, # ) # new_metrics = get_metrics(blocks=new_blocks) # return combine_new_data( # old_payload=old_payload, # new_metrics=new_metrics, # )
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 11748, 19720, 198, 198, 6738, 269, 23047, 1330, 1020, 198, 6738, 764, 1330, 41033, 62, 6098, 463, 198, 6738, 764, 1330, 18663, 62, 6098, 463, 198, 6738, 764, 1330, 23696, 62, 16684, 628, ...
2.291667
312
# Copyright 2016 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model architecture for predictive model, including CDNA, DNA, and STP.""" import numpy as np import tensorflow as tf import tensorflow.contrib.slim as slim from tensorflow.python.platform import flags from tensorflow.contrib.layers.python import layers as tf_layers from lstm_ops import basic_conv_lstm_cell FLAGS = flags.FLAGS # Amount to use when lower bounding tensors RELU_SHIFT = 1e-12 # kernel size for DNA and CDNA. DNA_KERN_SIZE = 5 def kl_divergence(mu, log_sigma): """KL divergence of diagonal gaussian N(mu,exp(log_sigma)) and N(0,1). Args: mu: mu parameter of the distribution. log_sigma: log(sigma) parameter of the distribution. Returns: the KL loss. """ return -.5 * tf.reduce_sum(1. + log_sigma - tf.square(mu) - tf.exp(log_sigma), axis=1) def construct_latent_tower(images): """Builds convolutional latent tower for stochastic model. At training time this tower generates a latent distribution (mean and std) conditioned on the entire video. This latent variable will be fed to the main tower as an extra variable to be used for future frames prediction. At inference time, the tower is disabled and only returns latents sampled from N(0,1). If the multi_latent flag is on, a different latent for every timestep would be generated. 
Args: images: tensor of ground truth image sequences Returns: latent_mean: predicted latent mean latent_std: predicted latent standard deviation latent_loss: loss of the latent twoer samples: random samples sampled from standard guassian """ with slim.arg_scope([slim.conv2d], reuse=False): stacked_images = tf.concat(images, 3) latent_enc1 = slim.conv2d( stacked_images, 32, [3, 3], stride=2, scope='latent_conv1', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'latent_norm1'}) latent_enc2 = slim.conv2d( latent_enc1, 64, [3, 3], stride=2, scope='latent_conv2', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'latent_norm2'}) latent_enc3 = slim.conv2d( latent_enc2, 64, [3, 3], stride=1, scope='latent_conv3', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'latent_norm3'}) latent_mean = slim.conv2d( latent_enc3, FLAGS.latent_channels, [3, 3], stride=2, activation_fn=None, scope='latent_mean', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'latent_norm_mean'}) latent_std = slim.conv2d( latent_enc3, FLAGS.latent_channels, [3, 3], stride=2, scope='latent_std', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'latent_std_norm'}) latent_std += FLAGS.latent_std_min divergence = kl_divergence(latent_mean, latent_std) latent_loss = tf.reduce_mean(divergence) if FLAGS.multi_latent: # timestep x batch_size x latent_size samples = tf.random_normal( [FLAGS.sequence_length-1] + latent_mean.shape, 0, 1, dtype=tf.float32) else: # batch_size x latent_size samples = tf.random_normal(latent_mean.shape, 0, 1, dtype=tf.float32) if FLAGS.inference_time: # No latent tower at inference time, just standard gaussian. return None, None, None, samples else: return latent_mean, latent_std, latent_loss, samples def construct_model(images, actions=None, states=None, iter_num=-1.0, k=-1, use_state=True, num_masks=10, stp=False, cdna=True, dna=False, context_frames=2): """Build convolutional lstm video predictor using STP, CDNA, or DNA. Args: images: tensor of ground truth image sequences actions: tensor of action sequences states: tensor of ground truth state sequences iter_num: tensor of the current training iteration (for sched. sampling) k: constant used for scheduled sampling. -1 to feed in own prediction. use_state: True to include state and action in prediction num_masks: the number of different pixel motion predictions (and the number of masks for each of those predictions) stp: True to use Spatial Transformer Predictor (STP) cdna: True to use Convoluational Dynamic Neural Advection (CDNA) dna: True to use Dynamic Neural Advection (DNA) context_frames: number of ground truth frames to pass in before feeding in own predictions Returns: gen_images: predicted future image frames gen_states: predicted future states Raises: ValueError: if more than one network option specified or more than 1 mask specified for DNA model. """ # Each image is being used twice, in latent tower and main tower. # This is to make sure we are using the *same* image for both, ... # ... given how TF queues work. images = [tf.identity(image) for image in images] if stp + cdna + dna != 1: raise ValueError('More than one, or no network option specified.') batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4] lstm_func = basic_conv_lstm_cell # Generated robot states and images. gen_states, gen_images = [], [] current_state = states[0] if k == -1: feedself = True else: # Scheduled sampling: # Calculate number of ground-truth frames to pass in. 
num_ground_truth = tf.to_int32( tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k))))) feedself = False # LSTM state sizes and states. lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32])) lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None lstm_state5, lstm_state6, lstm_state7 = None, None, None # Latent tower latent_loss = 0.0 if FLAGS.stochastic_model: latent_tower_outputs = construct_latent_tower(images) latent_mean, latent_std, latent_loss, samples = latent_tower_outputs # Main tower for image, action in zip(images[:-1], actions[:-1]): # Reuse variables after the first timestep. reuse = bool(gen_images) done_warm_start = len(gen_images) > context_frames - 1 with slim.arg_scope( [lstm_func, slim.layers.conv2d, slim.layers.fully_connected, tf_layers.layer_norm, slim.layers.conv2d_transpose], reuse=reuse): if feedself and done_warm_start: # Feed in generated image. prev_image = gen_images[-1] elif done_warm_start: # Scheduled sampling prev_image = scheduled_sample(image, gen_images[-1], batch_size, num_ground_truth) else: # Always feed in ground_truth prev_image = image # Predicted state is always fed back in state_action = tf.concat(axis=1, values=[action, current_state]) enc0 = slim.layers.conv2d( prev_image, 32, [5, 5], stride=2, scope='scale1_conv1', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm1'}) hidden1, lstm_state1 = lstm_func( enc0, lstm_state1, lstm_size[0], scope='state1') hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2') hidden2, lstm_state2 = lstm_func( hidden1, lstm_state2, lstm_size[1], scope='state2') hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3') enc1 = slim.layers.conv2d( hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2') hidden3, lstm_state3 = lstm_func( enc1, lstm_state3, lstm_size[2], scope='state3') hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4') hidden4, lstm_state4 = lstm_func( hidden3, lstm_state4, lstm_size[3], scope='state4') hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5') enc2 = slim.layers.conv2d( hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3') # Pass in state and action. smear = tf.reshape( state_action, [int(batch_size), 1, 1, int(state_action.get_shape()[1])]) smear = tf.tile( smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1]) if use_state: enc2 = tf.concat(axis=3, values=[enc2, smear]) # Setup latent if FLAGS.stochastic_model: latent = samples if FLAGS.multi_latent: latent = samples[timestep] if not FLAGS.inference_time: latent = tf.cond(iter_num < FLAGS.num_iterations_1st_stage, lambda: tf.identity(latent), lambda: latent_mean + tf.exp(latent_std / 2.0) * latent) with tf.control_dependencies([latent]): enc2 = tf.concat([enc2, latent], 3) enc3 = slim.layers.conv2d( enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4') hidden5, lstm_state5 = lstm_func( enc3, lstm_state5, lstm_size[4], scope='state5') # last 8x8 hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6') enc4 = slim.layers.conv2d_transpose( hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1') hidden6, lstm_state6 = lstm_func( enc4, lstm_state6, lstm_size[5], scope='state6') # 16x16 hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7') # Skip connection. 
hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16 enc5 = slim.layers.conv2d_transpose( hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2') hidden7, lstm_state7 = lstm_func( enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32 hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8') # Skip connection. hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32 enc6 = slim.layers.conv2d_transpose( hidden7, hidden7.get_shape()[3], 3, stride=2, scope='convt3', activation_fn=None, normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm9'}) if dna: # Using largest hidden state for predicting untied conv kernels. enc7 = slim.layers.conv2d_transpose( enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4', activation_fn=None) else: # Using largest hidden state for predicting a new image layer. enc7 = slim.layers.conv2d_transpose( enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None) # This allows the network to also generate one image from scratch, # which is useful when regions of the image become unoccluded. transformed = [tf.nn.sigmoid(enc7)] if stp: stp_input0 = tf.reshape(hidden5, [int(batch_size), -1]) stp_input1 = slim.layers.fully_connected( stp_input0, 100, scope='fc_stp') transformed += stp_transformation(prev_image, stp_input1, num_masks) elif cdna: cdna_input = tf.reshape(hidden5, [int(batch_size), -1]) transformed += cdna_transformation(prev_image, cdna_input, num_masks, int(color_channels)) elif dna: # Only one mask is supported (more should be unnecessary). if num_masks != 1: raise ValueError('Only one mask is supported for DNA model.') transformed = [dna_transformation(prev_image, enc7)] masks = slim.layers.conv2d_transpose( enc6, num_masks + 1, 1, stride=1, scope='convt7', activation_fn=None) masks = tf.reshape( tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])), [int(batch_size), int(img_height), int(img_width), num_masks + 1]) mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks) output = mask_list[0] * prev_image for layer, mask in zip(transformed, mask_list[1:]): output += layer * mask gen_images.append(output) current_state = slim.layers.fully_connected( state_action, int(current_state.get_shape()[1]), scope='state_pred', activation_fn=None) gen_states.append(current_state) return gen_images, gen_states, latent_loss ## Utility functions def stp_transformation(prev_image, stp_input, num_masks): """Apply spatial transformer predictor (STP) to previous image. Args: prev_image: previous image to be transformed. stp_input: hidden layer to be used for computing STN parameters. num_masks: number of masks and hence the number of STP transformations. Returns: List of images transformed by the predicted STP parameters. """ # Only import spatial transformer if needed. from spatial_transformer import transformer identity_params = tf.convert_to_tensor( np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32)) transformed = [] for i in range(num_masks - 1): params = slim.layers.fully_connected( stp_input, 6, scope='stp_params' + str(i), activation_fn=None) + identity_params transformed.append(transformer(prev_image, params)) return transformed def cdna_transformation(prev_image, cdna_input, num_masks, color_channels): """Apply convolutional dynamic neural advection to previous image. Args: prev_image: previous image to be transformed. cdna_input: hidden lyaer to be used for computing CDNA kernels. num_masks: the number of masks and hence the number of CDNA transformations. 
color_channels: the number of color channels in the images. Returns: List of images transformed by the predicted CDNA kernels. """ batch_size = int(cdna_input.get_shape()[0]) height = int(prev_image.get_shape()[1]) width = int(prev_image.get_shape()[2]) # Predict kernels using linear function of last hidden layer. cdna_kerns = slim.layers.fully_connected( cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None) # Reshape and normalize. cdna_kerns = tf.reshape( cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks]) cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True) cdna_kerns /= norm_factor # Treat the color channel dimension as the batch dimension since the same # transformation is applied to each color channel. # Treat the batch dimension as the channel dimension so that # depthwise_conv2d can apply a different transformation to each sample. cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3]) cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks]) # Swap the batch and channel dimensions. prev_image = tf.transpose(prev_image, [3, 1, 2, 0]) # Transform image. transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME') # Transpose the dimensions to where they belong. transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks]) transformed = tf.transpose(transformed, [3, 1, 2, 0, 4]) transformed = tf.unstack(transformed, axis=-1) return transformed def dna_transformation(prev_image, dna_input): """Apply dynamic neural advection to previous image. Args: prev_image: previous image to be transformed. dna_input: hidden lyaer to be used for computing DNA transformation. Returns: List of images transformed by the predicted CDNA kernels. """ # Construct translated images. prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]]) image_height = int(prev_image.get_shape()[1]) image_width = int(prev_image.get_shape()[2]) inputs = [] for xkern in range(DNA_KERN_SIZE): for ykern in range(DNA_KERN_SIZE): inputs.append( tf.expand_dims( tf.slice(prev_image_pad, [0, xkern, ykern, 0], [-1, image_height, image_width, -1]), [3])) inputs = tf.concat(axis=3, values=inputs) # Normalize channels to 1. kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT kernel = tf.expand_dims( kernel / tf.reduce_sum( kernel, [3], keep_dims=True), [4]) return tf.reduce_sum(kernel * inputs, [3], keep_dims=False) def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth): """Sample batch with specified mix of ground truth and generated data points. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size num_ground_truth: number of ground-truth examples to include in batch. Returns: New batch with num_ground_truth sampled from ground_truth_x and the rest from generated_x. """ idx = tf.random_shuffle(tf.range(int(batch_size))) ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth)) generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size))) ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx) generated_examps = tf.gather(generated_x, generated_idx) return tf.dynamic_stitch([ground_truth_idx, generated_idx], [ground_truth_examps, generated_examps])
[ 2, 15069, 1584, 383, 309, 22854, 37535, 46665, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351,...
2.42576
7,469
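The scheduled-sampling mix in construct_model follows an inverse-sigmoid decay in the training iteration. A tiny sketch of the ground-truth fraction it produces (k = 900 is an arbitrary choice; the formula matches k / (k + tf.exp(iter_num / k)) above):

import numpy as np

k = 900.0
for step in (0, 1000, 3000, 9000):
    frac = k / (k + np.exp(step / k))
    print(step, round(float(frac), 3))
# 0 -> 0.999, 1000 -> 0.997, 3000 -> 0.97, 9000 -> 0.039:
# almost pure ground truth early in training, almost pure self-feeding late.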
load("@rules_jvm_external//:defs.bzl", "artifact") # For more information see # - https://github.com/bmuschko/bazel-examples/blob/master/java/junit5-test/BUILD # - https://github.com/salesforce/bazel-maven-proxy/tree/master/tools/junit5 # - https://github.com/junit-team/junit5-samples/tree/master/junit5-jupiter-starter-bazel def junit5_test(name, srcs, test_package, resources = [], deps = [], runtime_deps = [], **kwargs): """JUnit runner macro""" FILTER_KWARGS = [ "main_class", "use_testrunner", "args", ] for arg in FILTER_KWARGS: if arg in kwargs.keys(): kwargs.pop(arg) junit_console_args = [] if test_package: junit_console_args += ["--select-package", test_package] else: fail("must specify 'test_package'") native.java_test( name = name, srcs = srcs, use_testrunner = False, main_class = "org.junit.platform.console.ConsoleLauncher", args = junit_console_args, deps = deps + [ artifact("org.junit.jupiter:junit-jupiter-api"), artifact("org.junit.jupiter:junit-jupiter-params"), artifact("org.junit.jupiter:junit-jupiter-engine"), artifact("org.hamcrest:hamcrest-library"), artifact("org.hamcrest:hamcrest-core"), artifact("org.hamcrest:hamcrest"), artifact("org.mockito:mockito-core"), ], visibility = ["//java:__subpackages__"], resources = resources, runtime_deps = runtime_deps + [ artifact("org.junit.platform:junit-platform-console"), ], **kwargs )
[ 2220, 7203, 31, 38785, 62, 73, 14761, 62, 22615, 1003, 25, 4299, 82, 13, 65, 48274, 1600, 366, 433, 29660, 4943, 198, 198, 2, 1114, 517, 1321, 766, 198, 2, 532, 3740, 1378, 12567, 13, 785, 14, 65, 14664, 354, 7204, 14, 65, 41319, ...
2.163185
766
# This module provides mocked versions of classes and functions provided # by Carla in our runtime environment.
[ 2, 770, 8265, 3769, 29180, 6300, 286, 6097, 290, 5499, 2810, 198, 2, 416, 1879, 5031, 287, 674, 19124, 2858, 13, 628, 628 ]
5
23
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

ee = '\033[1m'
green = '\033[32m'
yellow = '\033[33m'
cyan = '\033[36m'
line = cyan+'-' * 0x2D
print(ee+line)
R,G,B = [float(X) / 0xFF for X in input(f'{yellow}RGB: {green}').split()]
K = 1-max(R,G,B)
C,M,Y = [round(float((1-X-K)/(1-K) * 0x64),1) for X in [R,G,B]]
K = round(K * 0x64,1)
print(f'{yellow}CMYK: {green}{C}%, {M}%, {Y}%, {K}%')
print(line)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 1453, 796, 705, 59, 44427, 58, 16, 76, 6, 198, 14809, 796, 705, 59, 44427, 58, 2624, 76, 6, 198, 36022, ...
1.795556
225
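The same conversion as a pure function, with the all-black input handled explicitly so the (1-K) division cannot blow up; this is a variant sketch, not part of the original script:

def rgb_to_cmyk(r, g, b):
    # r, g, b in [0, 1]; returns CMYK percentages
    k = 1 - max(r, g, b)
    if k == 1:
        return 0.0, 0.0, 0.0, 100.0
    c, m, y = ((1 - x - k) / (1 - k) * 100 for x in (r, g, b))
    return round(c, 1), round(m, 1), round(y, 1), round(k * 100, 1)

print(rgb_to_cmyk(1.0, 0.0, 0.0))  # pure red -> (0.0, 100.0, 100.0, 0.0)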
#!/usr/bin/env python """Builds the documentaion. First it runs gendoc to create rst files for the source code. Then it runs sphinx make. .. Warning:: This will delete the content of the output directory first! So you might loose data. You can use updatedoc.py -nod. Usage, just call:: updatedoc.py -h """ import argparse import os import shutil import sys import gendoc thisdir = os.path.abspath(os.path.dirname(__file__)) def setup_argparse(): """Sets up the argument parser and returns it :returns: the parser :rtype: :class:`argparse.ArgumentParser` :raises: None """ parser = argparse.ArgumentParser( description="Builds the documentaion. First it runs gendoc to create rst files\ for the source code. Then it runs sphinx make.\ WARNING: this will delete the contents of the output dirs. You can use -nod.") ipath = os.path.join(thisdir, '../src') ipath = os.path.abspath(ipath) idefault = [ipath] parser.add_argument('-i', '--input', nargs='+', default=idefault, help='list of input directories. gendoc is called for every\ source dir.\ Default is \'%s\'.' % ', '.join(idefault)) opath = os.path.join(thisdir, 'reference') opath = os.path.abspath(opath) odefault = [opath] parser.add_argument('-o', '--output', nargs='+', default=odefault, help='list of output directories. if you have multiple source\ directories, the corresponding output directorie is used.\ if there are less dirs than for source, the last output dir\ is used for the remaining source dirs.\ WARNING: the output directories are emptied by default. See -nod.\ Default is \'%s\'.' % ', '.join(odefault)) gadefault = ['-T', '-f', '-e', '-o'] parser.add_argument('-ga', '--gendocargs', nargs='*', default=gadefault, help="list of arguments to pass to gendoc. use -gh for info.\ Default is \'%s\'" % ', '.join(gadefault)) parser.add_argument('-nod', '--nodelete', action='store_true', help='Do not empty the output directories first.') parser.add_argument('-gh', '--gendochelp', action='store_true', help='print the help for gendoc and exit') return parser def prepare_dir(directory, delete=True): """Create apidoc dir, delete contents if delete is True. :param directory: the apidoc directory. you can use relative paths here :type directory: str :param delete: if True, deletes the contents of apidoc. This acts like an override switch. :type delete: bool :returns: None :rtype: None :raises: None """ if os.path.exists(directory): if delete: assert directory != thisdir, 'Trying to delete docs! Specify other output dir!' print 'Deleting %s' % directory shutil.rmtree(directory) print 'Creating %s' % directory os.mkdir(directory) else: print 'Creating %s' % directory os.mkdir(directory) def run_gendoc(source, dest, args): """Starts gendoc which reads source and creates rst files in dest with the given args. :param source: The python source directory for gendoc. Can be a relative path. :type source: str :param dest: The destination for the rst files. Can be a relative path. :type dest: str :param args: Arguments for gendoc. See gendoc for more information. :type args: list :returns: None :rtype: None :raises: SystemExit """ args.insert(0, 'gendoc.py') args.append(dest) args.append(source) print 'Running gendoc.main with: %s' % args gendoc.main(args) def main(argv=sys.argv[1:]): """Parse commandline arguments and run the tool :param argv: the commandline arguments. 
:type argv: list :returns: None :rtype: None :raises: None """ parser = setup_argparse() args = parser.parse_args(argv) if args.gendochelp: sys.argv[0] = 'gendoc.py' genparser = gendoc.setup_parser() genparser.print_help() sys.exit(0) print 'Preparing output directories' print '='*80 for odir in args.output: prepare_dir(odir, not args.nodelete) print '\nRunning gendoc' print '='*80 for i, idir in enumerate(args.input): if i >= len(args.output): odir = args.output[-1] else: odir = args.output[i] run_gendoc(idir, odir, args.gendocargs) if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 15580, 82, 262, 3188, 64, 295, 13, 3274, 340, 4539, 308, 437, 420, 284, 2251, 374, 301, 3696, 329, 262, 2723, 2438, 13, 3244, 340, 4539, 599, 20079, 87, 787, 13, 198, 492, 1...
2.362362
1,998
# 1 + (n-1)*[3 + X] = 1 + 3*(n-1) + X*(n-1) = 1 + 3*(n-1) + (n^2 + n - 2)/2 # = (1 - 3 - 1) + (3n + n/2) + (n^2/2) # The complexity is O(n^2)
[ 198, 2, 352, 1343, 357, 77, 12, 16, 27493, 58, 18, 1343, 1395, 60, 796, 352, 1343, 513, 9, 7, 77, 12, 16, 8, 1343, 1395, 9, 7, 77, 12, 16, 8, 796, 352, 1343, 513, 9, 7, 77, 12, 16, 8, 1343, 357, 77, 61, 17, 1343, 299, ...
1.56044
91
import os from typing import List, Tuple from BridgeOptimizer.datastructure.hypermesh.LoadCollector import LoadCollector from BridgeOptimizer.datastructure.hypermesh.LoadStep import LoadStep from BridgeOptimizer.datastructure.hypermesh.Force import Force from BridgeOptimizer.datastructure.hypermesh.SPC import SPC
[ 11748, 28686, 198, 6738, 19720, 1330, 7343, 11, 309, 29291, 198, 6738, 10290, 27871, 320, 7509, 13, 19608, 459, 5620, 13, 12114, 16321, 5069, 13, 8912, 31337, 273, 1330, 8778, 31337, 273, 198, 6738, 10290, 27871, 320, 7509, 13, 19608, 4...
3.472527
91
tajniBroj = 51
broj = 2

# (Prompts translated from Croatian; identifiers kept as in the original.)
while tajniBroj != broj:
    broj = int(input("Guess the secret number: "))
    if tajniBroj == broj:
        print("Hit!")
    elif tajniBroj < broj:
        print("The secret number is smaller than that.")
    else:
        print("The secret number is bigger than that.")
print("End of program")
[ 83, 1228, 8461, 15783, 73, 796, 6885, 198, 7957, 73, 796, 362, 198, 198, 4514, 256, 1228, 8461, 15783, 73, 14512, 1379, 73, 25, 198, 220, 220, 220, 1379, 73, 796, 493, 7, 15414, 7203, 47, 519, 375, 578, 256, 1228, 8461, 1379, 73, ...
1.939394
165
import numpy as np from sklearn import metrics from neupy import algorithms from base import BaseTestCase
[ 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 1330, 20731, 198, 198, 6738, 497, 929, 88, 1330, 16113, 198, 6738, 2779, 1330, 7308, 14402, 20448, 628 ]
4
27
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: dan@reciprocitylabs.com # Maintained By: dan@reciprocitylabs.com from sqlalchemy.ext.associationproxy import association_proxy from ggrc import db from ggrc.models.mixins import Mapping from ggrc.models.mixins import Timeboxed from ggrc.models.reflection import PublishOnly
[ 2, 15069, 357, 34, 8, 2211, 3012, 3457, 1539, 7035, 11, 290, 20420, 1279, 3826, 37195, 20673, 2393, 29, 198, 2, 49962, 739, 2638, 1378, 2503, 13, 43073, 13, 2398, 14, 677, 4541, 14, 43, 2149, 24290, 12, 17, 13, 15, 1279, 3826, 385...
3.082192
146
# Copyright 2020-2021 The American University in Cairo and the Cloud V Project.
#
# This file is part of the DFFRAM Memory Compiler.
# See https://github.com/Cloud-V/DFFRAM for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
RAM_instantiation = """
/*
    An auto generated testbench to verify RAM{word_num}x{word_size}
    Authors: Mohamed Shalan (mshalan@aucegypt.edu)
             Ahmed Nofal (nofal.o.ahmed@gmail.com)
*/
`define VERBOSE_1
`define VERBOSE_2
`define UNIT_DELAY #1
`define USE_LATCH 1
`define SIZE {word_size}/8

//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/primitives.v"
//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/sky130_fd_sc_hd.v"
// // Temporary override: IcarusVerilog cannot read these for some reason ^
`include "hd_primitives.v"
`include "hd_functional.v"

`include "{filename}"

module tb_RAM{word_num}x{word_size};

    localparam SIZE = `SIZE;
    localparam A_W = {addr_width}+$clog2(SIZE);
    localparam M_SZ = 2**A_W;

    reg CLK;
    reg [(SIZE-1):0] WE0;
    reg EN0;
    reg [(SIZE*8-1):0] Di0;
    wire [(SIZE*8-1):0] Do0;
    reg [A_W-1:0] A0, ADDR;
    reg [7:0] Phase;
    reg [7:0] RANDOM_BYTE;

    event done;

    RAM{word_num} #(.USE_LATCH(`USE_LATCH), .WSIZE(SIZE)) SRAM (
        .CLK(CLK),
        .WE0(WE0),
        .EN0(EN0),
        .Di0(Di0),
        .Do(Do0),
        .A0(A0[A_W-1:$clog2(SIZE)])
    );

    initial begin
        $dumpfile("tb_RAM{word_num}x{word_size}.vcd");
        $dumpvars(0, tb_RAM{word_num}x{word_size});
        @(done) $finish;
    end

    /* Memory golden Model */
    reg [(SIZE*8-1):0] RAM[(M_SZ)-1 : 0];
    reg [(SIZE*8-1):0] RAM_DATA_RW;

    genvar c;
    generate
        for (c=0; c < SIZE; c = c+1) begin: mem_golden_model
            always @(posedge CLK) begin
                if(EN0) begin
                    RAM_DATA_RW <= RAM[A0/SIZE];
                    if(WE0[c]) RAM[A0/SIZE][8*(c+1)-1:8*c] <= Di0[8*(c+1)-1:8*c];
                end
            end
        end
    endgenerate
"""

begin_single_ported_test = """
    initial begin
        CLK = 0;
        WE0 = 0;
        EN0 = 1;
"""

single_ported_custom_test = """
        Phase = 0;
        // Perform a single word write then read
        mem_write_word({{SIZE{{8'h90}}}}, 4);
        mem_read_word_0(4);
"""

RAM_instantiation_1RW1R = """
/*
    An auto generated testbench to verify RAM{word_num}x{word_size}
    Authors: Mohamed Shalan (mshalan@aucegypt.edu)
             Ahmed Nofal (nofal.o.ahmed@gmail.com)
*/
`define VERBOSE_1
`define VERBOSE_2
`define UNIT_DELAY #1
`define USE_LATCH 1
`define SIZE {word_size}/8

//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/primitives.v"
//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/sky130_fd_sc_hd.v"
// // Temporary override: IcarusVerilog cannot read these for some reason ^
`include "hd_primitives.v"
`include "hd_functional.v"

`include "{filename}"

module tb_RAM{word_num}x{word_size}_1RW1R;

    localparam SIZE = `SIZE;
    localparam A_W = {addr_width}+$clog2(SIZE);
    localparam M_SZ = 2**A_W;

    reg CLK;
    reg [(SIZE-1):0] WE0;
    reg EN0;
    reg ENR;
    reg [(SIZE*8-1):0] Di0;
    wire [(SIZE*8-1):0] Do0;
    wire [(SIZE*8-1):0] Do1;
    reg [A_W-1:0] A0, A1, ADDR;
    reg [7:0] Phase;
    reg [7:0] RANDOM_BYTE;

    event done;

    RAM{word_num}_1RW1R #(.USE_LATCH(`USE_LATCH), .WSIZE(`SIZE)) SRAM (
        .CLK(CLK),
        .WE0(WE0),
        .EN0(EN0),
        .EN1(ENR),
        .Di0(Di0),
        .Do0(Do0),
        .Do1(Do1),
        .A0(A0[A_W-1:$clog2(SIZE)]),
        .A1(A1[A_W-1:$clog2(SIZE)])
    );

    initial begin
        $dumpfile("tb_RAM{word_num}x{word_size}_1RW1R.vcd");
        $dumpvars(0, tb_RAM{word_num}x{word_size}_1RW1R);
        @(done) $finish;
    end

    /* Memory golden Model */
    reg [(SIZE*8-1):0] RAM[(M_SZ)-1 : 0];
    reg [(SIZE*8-1):0] RAM_DATA_RW;
    reg [(SIZE*8-1):0] RAM_DATA_R;

    genvar c;
    generate
        for (c=0; c < SIZE; c = c+1) begin: mem_golden_model
            always @(posedge CLK) begin
                if(EN0) begin
                    RAM_DATA_RW <= RAM[A0/SIZE];
                    if(WE0[c]) RAM[A0/SIZE][8*(c+1)-1:8*c] <= Di0[8*(c+1)-1:8*c];
                end
                if (ENR) begin
                    RAM_DATA_R <= RAM[A1/SIZE];
                end
            end
        end
    endgenerate
"""

begin_dual_ported_test = """
    initial begin
        CLK = 0;
        WE0 = 0;
        EN0 = 1;
        ENR = 1;
"""

dual_ported_custom_test = """
        Phase = 0;
        // Perform a 2 word write then read 2 words
        mem_write_word({{SIZE{{8'h90}}}}, 4);
        mem_write_word({{SIZE{{8'h33}}}}, 8);
        mem_read_2words(4,8);
"""

start_test_common = """
    always #10 CLK = !CLK;

    integer i;
"""

test_port_1RW1R = """
        /***********************************************************
            Write and read from different ports
        ************************************************************/

        // Fill the memory with a known pattern
        // Word Write then Read
        Phase = 1;
`ifdef VERBOSE_1
        $display("\\nFinished Phase 0, starting Phase 1");
`endif
        for(i=0; i<M_SZ; i=i+SIZE) begin
            ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFC ;
            RANDOM_BYTE = $urandom;
            mem_write_word( {SIZE{RANDOM_BYTE}}, ADDR);
            mem_read_word_1( ADDR );
        end

        // HWord Write then Read
        Phase = 2;
`ifdef VERBOSE_1
        $display("\\nFinished Phase 1, starting Phase 2");
`endif
        for(i=0; i<M_SZ; i=i+SIZE/2) begin
            ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFE;
            RANDOM_BYTE = $urandom;
            mem_write_hword( {SIZE/2{RANDOM_BYTE}}, ADDR);
            mem_read_word_1( ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
        end

        // Byte Write then Read
        Phase = 3;
`ifdef VERBOSE_1
        $display("\\nFinished Phase 2, starting Phase 3");
`endif
        for(i=0; i<M_SZ; i=i+1) begin
            ADDR = (($urandom%M_SZ));
            mem_write_byte($urandom%255, ADDR);
            mem_read_word_1(ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
        end
"""

test_port_RW = """
        /***********************************************************
            Write and read from same port
        ************************************************************/
        Phase = 4;
`ifdef VERBOSE_1
        $display("\\nFinished Phase 3, starting Phase 4");
`endif
        for(i=0; i<M_SZ; i=i+SIZE) begin
            ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFC ;
            RANDOM_BYTE = $urandom;
            mem_write_word( {SIZE{RANDOM_BYTE}}, ADDR);
            mem_read_word_0( ADDR );
        end

        // HWord Write then Read
        Phase = 5;
`ifdef VERBOSE_1
        $display("\\nFinished Phase 4, starting Phase 5");
`endif
        for(i=0; i<M_SZ; i=i+SIZE/2) begin
            ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFE;
            RANDOM_BYTE = $urandom;
            mem_write_hword( {SIZE/2{RANDOM_BYTE}}, ADDR);
            mem_read_word_0( ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
        end

        // Byte Write then Read
        Phase = 6;
`ifdef VERBOSE_1
        $display("\\nFinished Phase 5, starting Phase 6");
`endif
        for(i=0; i<M_SZ; i=i+1) begin
            ADDR = (($urandom%M_SZ));
            mem_write_byte($urandom%255, ADDR);
            mem_read_word_0(ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
        end

        $display ("\\n>> Test Passed! <<\\n");
        -> done;
"""

end_test = """
    end
"""

tasks = """
    task mem_write_byte(input [7:0] byte, input [A_W-1:0] addr);
    begin
        @(posedge CLK);
        A0 = addr;//[A_WIDTH:2];
        WE0 = (1 << addr[$clog2(SIZE)-1:0]);
        Di0 = (byte << (addr[$clog2(SIZE)-1:0] * 8));
        @(posedge CLK);
`ifdef VERBOSE_2
        $display("WRITE BYTE: 0x%X to %0X(%0D) (0x%X, %B)", byte, addr, addr, Di0, WE0);
`endif
        WE0 = {SIZE{8'h00}};
    end
    endtask

    task mem_write_hword(input [SIZE*8-1:0] hword, input [A_W-1:0] addr);
    begin
        @(posedge CLK);
        A0 = addr;//[A_WIDTH:$clog2(SIZE)];
        WE0 = {{SIZE/2{addr[$clog2(SIZE)-1]}},{SIZE/2{~addr[$clog2(SIZE)-1]}}};
        Di0 = (hword << (addr[$clog2(SIZE)-1] * (SIZE/2)*8));
        @(posedge CLK);
`ifdef VERBOSE_2
        $display("WRITE HWORD: 0x%X to %0X(%0D) (0x%X, %B)", hword, addr, addr, Di0, WE0);
`endif
        WE0 = {SIZE{8'h00}};
    end
    endtask

    task mem_write_word(input [SIZE*8-1:0] word, input [A_W-1:0] addr);
    begin
        @(posedge CLK);
        A0 = addr;
        WE0 = {SIZE{8'hFF}};
        Di0 = word;
        @(posedge CLK);
`ifdef VERBOSE_2
        $display("WRITE WORD: 0x%X to %0X(%0D) (0x%X, %B)", word, addr, addr, Di0, WE0);
`endif
        WE0 = {SIZE{8'h00}};
    end
    endtask

    task mem_read_word_0(input [A_W-1:0] addr);
    begin
        @(posedge CLK);
        A0 = addr;//[9:2];
        WE0 = {SIZE{8'h00}};
        @(posedge CLK);
        #5;
`ifdef VERBOSE_2
        $display("READ WORD: 0x%X from %0D", Do0, addr);
`endif
        check0();
    end
    endtask

    task check0; begin
        if(RAM_DATA_RW !== Do0) begin
            $display("\\n>>Test Failed! <<\\t(Phase: %0d, Iteration: %0d", Phase, i);
            $display("Address: 0x%X, READ: 0x%X - Should be: 0x%X", A0, Do0, RAM[A0/SIZE]);
            $fatal(1);
        end
    end
    endtask
"""

dual_ported_tasks = """
    task mem_read_2words(input [A_W-1:0] addr0, input [A_W-1:0] addr1);
    begin
        @(posedge CLK);
        A0= addr0;//[9:2];
        A1= addr1;//[9:2];
        WE0 = {SIZE{8'h00}};
        @(posedge CLK);
        #5;
`ifdef VERBOSE_2
        $display("READ WORD0: 0x%X from %0D", Do0, addr0);
        $display("READ WORD1: 0x%X from %0D", Do1, addr1);
`endif
        check0();
        check1();
    end
    endtask

    task mem_read_word_1(input [A_W-1:0] addr);
    begin
        @(posedge CLK);
        A1 = addr;//[9:2];
        WE0 = {SIZE{8'h00}};
        @(posedge CLK);
        #5;
`ifdef VERBOSE_2
        $display("READ WORD: 0x%X from %0D", Do1, addr);
`endif
        check1();
    end
    endtask

    task check1; begin
        if(RAM_DATA_R !== Do1) begin
            $display("\\n>>Test Failed! <<\\t(Phase: %0d, Iteration: %0d", Phase, i);
            $display("Address: 0x%X, READ: 0x%X - Should be: 0x%X", A1, Do1, RAM[A1/SIZE]);
            $fatal(1);
        end
    end
    endtask
"""

endmodule = """
endmodule
"""
[ 2, 15069, 12131, 12, 1238, 2481, 383, 1605, 2059, 287, 23732, 290, 262, 10130, 569, 4935, 13, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 262, 360, 5777, 24115, 14059, 3082, 5329, 13, 198, 2, 4091, 3740, 1378, 12567, 13, 785, 14, 18...
1.873013
5,977
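The templates in the record above are Python format strings: single-brace fields such as {word_num} and {addr_width} are substituted later, while doubled braces (for example {{SIZE{{8'h90}}}}) survive formatting as literal Verilog replication braces. A minimal sketch of rendering the single-ported header template; the parameter values and output filename are assumptions for illustration:

tb_header = RAM_instantiation.format(
    word_num=32,          # assumed number of words
    word_size=32,         # assumed word width in bits
    addr_width=5,         # assumed word-address width
    filename="RAM32.v",   # assumed DUT file
    pdk_root="/pdk",      # assumed PDK checkout location
)
with open("tb_RAM32x32_header.v", "w") as f:
    f.write(tb_header)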
# noqa: D100
from typing import Optional

import numpy as np
import xarray

from xclim.core.units import (
    convert_units_to,
    declare_units,
    pint_multiply,
    rate2amount,
    units,
    units2pint,
)
from xclim.core.utils import ensure_chunk_size

from ._multivariate import (
    daily_temperature_range,
    extreme_temperature_range,
    precip_accumulation,
)
from ._simple import tg_mean
from .generic import select_resample_op
from .run_length import lazy_indexing

# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases

# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #

__all__ = [
    "temperature_seasonality",
    "precip_seasonality",
    "tg_mean_warmcold_quarter",
    "tg_mean_wetdry_quarter",
    "prcptot_wetdry_quarter",
    "prcptot_warmcold_quarter",
    "prcptot",
    "prcptot_wetdry_period",
    "isothermality",
]

_xr_argops = {
    "wettest": xarray.DataArray.argmax,
    "warmest": xarray.DataArray.argmax,
    "dryest": xarray.DataArray.argmin,
    "driest": xarray.DataArray.argmin,
    "coldest": xarray.DataArray.argmin,
}

_np_ops = {
    "wettest": "max",
    "warmest": "max",
    "dryest": "min",
    "driest": "min",
    "coldest": "min",
}


# FIXME: src_timestep is not used here.
def _anuclim_coeff_var(arr: xarray.DataArray) -> xarray.DataArray:
    """Calculate the annual coefficient of variation for ANUCLIM indices."""
    std = arr.resample(time="YS").std(dim="time")
    mu = arr.resample(time="YS").mean(dim="time")
    return std / mu


def _from_other_arg(
    criteria: xarray.DataArray, output: xarray.DataArray, op, freq: str
) -> xarray.DataArray:
    """Pick values from output based on operation returning an index from criteria.

    Parameters
    ----------
    criteria : DataArray
      Series on which operation returning index is applied.
    output : DataArray
      Series to be indexed.
    op : func
      Function returning an index, for example np.argmin, np.argmax, np.nanargmin, np.nanargmax.
    freq : str
      Temporal grouping.

    Returns
    -------
    DataArray
      Output values where criteria is met at the given frequency.
    """
    ds = xarray.Dataset(data_vars={"criteria": criteria, "output": output})
    dim = "time"

    def get_other_op(dataset):
        # Pick `output` at the index where `op` fires on `criteria`,
        # masking groups that are all-NaN (via lazy_indexing imported above).
        all_nans = dataset.criteria.isnull().all(dim=dim)
        index = op(dataset.criteria.where(~all_nans, 0), dim=dim)
        return lazy_indexing(dataset.output, index=index, dim=dim).where(~all_nans)

    return ds.resample(time=freq).map(get_other_op)


def _to_quarter(
    freq: str,
    pr: Optional[xarray.DataArray] = None,
    tas: Optional[xarray.DataArray] = None,
) -> xarray.DataArray:
    """Convert daily, weekly or monthly time series to quarterly time series according to ANUCLIM specifications."""
    if freq.upper().startswith("D"):
        if tas is not None:
            tas = tg_mean(tas, freq="7D")

        if pr is not None:
            # Accumulate on a week
            # Ensure units are back to a "rate" for rate2amount below
            pr = convert_units_to(precip_accumulation(pr, freq="7D"), "mm")
            pr.attrs["units"] = "mm/week"

        freq = "W"

    if freq.upper().startswith("W"):
        window = 13
    elif freq.upper().startswith("M"):
        window = 3
    else:
        raise NotImplementedError(
            f'Unknown input time frequency "{freq}": must be one of "D", "W" or "M".'
        )

    if tas is not None:
        tas = ensure_chunk_size(tas, time=np.ceil(window / 2))
    if pr is not None:
        pr = ensure_chunk_size(pr, time=np.ceil(window / 2))

    if pr is not None:
        pram = rate2amount(pr)
        out = pram.rolling(time=window, center=False).sum()
        out.attrs = pr.attrs
        out.attrs["units"] = pram.units

    if tas is not None:
        out = tas.rolling(time=window, center=False).mean(skipna=False)
        out.attrs = tas.attrs

    out = ensure_chunk_size(out, time=-1)
    return out
[ 2, 645, 20402, 25, 360, 3064, 198, 6738, 19720, 1330, 32233, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2124, 18747, 198, 198, 6738, 2124, 565, 320, 13, 7295, 13, 41667, 1330, 357, 198, 220, 220, 220, 10385, 62, 41667, 62, ...
2.433498
1,624
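As a point of reference for _anuclim_coeff_var above, a standalone sketch (synthetic data, not part of xclim) of the same computation: one coefficient of variation per year, the standard deviation of each resampled year divided by its mean.

import numpy as np
import pandas as pd
import xarray

time = pd.date_range("2000-01-01", periods=730, freq="D")
tas = xarray.DataArray(
    20 + 10 * np.sin(2 * np.pi * np.arange(730) / 365.25),
    dims="time",
    coords={"time": time},
)
cv = tas.resample(time="YS").std(dim="time") / tas.resample(time="YS").mean(dim="time")
print(cv.values)  # one value per year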
# Copyright (c) 2013, VHRS and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import (cint, cstr, date_diff, flt, getdate, money_in_words,
                          nowdate, rounded, today)
from datetime import datetime
from datetime import date
import datetime
from calendar import monthrange
[ 2, 15069, 357, 66, 8, 2211, 11, 569, 39, 6998, 290, 20420, 198, 2, 1114, 5964, 1321, 11, 3387, 766, 5964, 13, 14116, 198, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 11748, 5306, 27768, 198, 6738, 5306, 277...
3.066176
136
import os


DEFAULT_ROOT = './materials'


datasets_dt = {}
[ 11748, 28686, 628, 198, 7206, 38865, 62, 13252, 2394, 796, 705, 19571, 33665, 82, 6, 628, 198, 19608, 292, 1039, 62, 28664, 796, 23884, 628, 198 ]
2.384615
26
from .comment import CommentParser
from .protobuf import Protobuf
from .proto_structures import Syntax
[ 6738, 764, 23893, 1330, 18957, 46677, 198, 6738, 764, 11235, 672, 3046, 1330, 5038, 672, 3046, 198, 6738, 764, 1676, 1462, 62, 7249, 942, 1330, 26375, 897, 628 ]
3.714286
28
from flask import render_template, request, Blueprint

core = Blueprint('core', __name__)
[ 6738, 42903, 1330, 8543, 62, 28243, 11, 2581, 11, 39932, 198, 198, 7295, 796, 39932, 10786, 7295, 3256, 11593, 3672, 834, 8, 628, 628 ]
3.875
24
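A blueprint like core above does nothing until routes are attached and it is registered on an app. A minimal sketch of that wiring; the route and handler are assumptions, not from the original project:

from flask import Flask, request, Blueprint

core = Blueprint('core', __name__)

@core.route('/ping')
def ping():
    # echo an optional query parameter; `name` is hypothetical
    return 'pong ' + request.args.get('name', '')

app = Flask(__name__)
app.register_blueprint(core)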
# encoding: utf-8
import functools
import os
from urllib.parse import urlsplit

import boto3
import botocore
import pytest
from botocore.exceptions import BotoCoreError, ClientError
from mock import MagicMock
from parameterized import parameterized

from ..mirror import MirrorUploader
from ..model import (
    DataSource,
    ExternalIntegration,
    Hyperlink,
    Identifier,
    Representation,
    create,
)
from ..s3 import (
    MinIOUploader,
    MinIOUploaderConfiguration,
    MockS3Client,
    MultipartS3Upload,
    S3AddressingStyle,
    S3Uploader,
    S3UploaderConfiguration,
)
from ..testing import DatabaseTest
from ..util.datetime_helpers import datetime_utc, utc_now


def test_mirror_one(self):
    edition, pool = self._edition(with_license_pool=True)
    original_cover_location = "http://example.com/a-cover.png"
    content = open(self.sample_cover_path("test-book-cover.png"), "rb").read()
    cover, ignore = pool.add_link(
        Hyperlink.IMAGE,
        original_cover_location,
        edition.data_source,
        Representation.PNG_MEDIA_TYPE,
        content=content,
    )
    cover_rep = cover.resource.representation
    assert None == cover_rep.mirrored_at

    original_epub_location = "https://books.com/a-book.epub"
    epub, ignore = pool.add_link(
        Hyperlink.OPEN_ACCESS_DOWNLOAD,
        original_epub_location,
        edition.data_source,
        Representation.EPUB_MEDIA_TYPE,
        content="i'm an epub",
    )
    epub_rep = epub.resource.representation
    assert None == epub_rep.mirrored_at

    s3 = self._create_s3_uploader(client_class=MockS3Client)

    # Mock final_mirror_url so we can verify that it's called with
    # the right arguments
    s3.final_mirror_url = mock_final_mirror_url

    book_url = "http://books-go/here.epub"
    cover_url = "http://s3.amazonaws.com/covers-go/here.png"
    s3.mirror_one(cover.resource.representation, cover_url)
    s3.mirror_one(epub.resource.representation, book_url)
    [
        [data1, bucket1, key1, args1, ignore1],
        [data2, bucket2, key2, args2, ignore2],
    ] = s3.client.uploads

    # Both representations have had .mirror_url set and been
    # mirrored to those URLs.
    assert data1.startswith(b"\x89")
    assert "covers-go" == bucket1
    assert "here.png" == key1
    assert Representation.PNG_MEDIA_TYPE == args1["ContentType"]
    assert (utc_now() - cover_rep.mirrored_at).seconds < 10

    assert b"i'm an epub" == data2
    assert "books-go" == bucket2
    assert "here.epub" == key2
    assert Representation.EPUB_MEDIA_TYPE == args2["ContentType"]

    # In both cases, mirror_url was set to the result of final_mirror_url.
    assert (
        "final_mirror_url was called with bucket books-go, key here.epub"
        == epub_rep.mirror_url
    )
    assert (
        "final_mirror_url was called with bucket covers-go, key here.png"
        == cover_rep.mirror_url
    )

    # mirrored-at was set when the representation was 'mirrored'
    for rep in epub_rep, cover_rep:
        assert (utc_now() - rep.mirrored_at).seconds < 10

    assert False == MockMultipartS3Upload.completed
    assert True == MockMultipartS3Upload.aborted
    assert "Error!" == rep.mirror_exception

    rep.mirror_exception = None

    # Failed during completion
    with s3.multipart_upload(
        rep, rep.url, upload_class=AnotherFailingMultipartS3Upload
    ) as upload:
        upload.upload_part("Part 1")

    assert False == MockMultipartS3Upload.completed
    assert True == MockMultipartS3Upload.aborted
    assert "Error!" == rep.mirror_exception
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 11748, 1257, 310, 10141, 198, 11748, 28686, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 2956, 7278, 489, 270, 198, 198, 11748, 275, 2069, 18, 198, 11748, 10214, 420, 382, 198, 11748, 12972, 9288, ...
2.28184
1,696
"""Set the build version to be 'qa', 'rc', 'release'""" import sys import os import re import logging log = logging.getLogger() log.addHandler(logging.StreamHandler()) log.setLevel(logging.DEBUG) if __name__ == '__main__': sys.exit(main())
[ 37811, 7248, 262, 1382, 2196, 284, 307, 705, 20402, 3256, 705, 6015, 3256, 705, 20979, 6, 37811, 198, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 18931, 198, 198, 6404, 796, 18931, 13, 1136, 11187, 1362, 3419, 198...
2.873563
87
#!/usr/bin/python

import yaml
import os
import ast
import sys
from collections import OrderedDict

curr_dir = os.getcwd()
work_dir = sys.argv[1]
network_type = sys.argv[2]

testplan_dict = {}
testplan_dict["name"] = "System performance test"
testplan_dict["description"] = "This test is to create as much chaincode computation load as possible"
testplan_dict["runid"] = "RUNID_HERE"
if network_type == "ibp":
    testplan_dict["networkid"] = sys.argv[3]
testplan_dict["collectFabricMetrics"] = False
testplan_dict["storageclass"] = "default"
testplan_dict["saveLog"] = False
testplan_dict["continueAfterFail"] = True
testplan_dict["tests"] = []
testplan_dict["peernodeAlias"] = []

if os.path.exists(work_dir) != True:
    print 'certs keyfiles directory do not exist'
    exit(1)

# Load template file
with open(curr_dir + "/templates/testplan_template.yml", 'r') as stream:
    template = yaml.load(stream)
channel_create = template["CHANNEL_CREATE"]
# channel_join = template["CHANNEL_JOIN"]
chaincode_install = template["CHAINCODE_INSTALL"]
chaincode_instantiate = template["CHAINCODE_INSTANTIATE"]
chaincode_invoke = template["CHAINCODE_INVOKE"]
execute_command = template["EXECUTE_COMMAND"]

connectionProfile = {}
org_list = []
org_list_lowercase = []
orderer_list = []
peer_list = []
org_peers_dict = {}
org_anchor_dict = {}
allAnchor_list = []

# Load connection profile
for orgName in os.listdir(work_dir + '/keyfiles'):
    if os.path.isfile(work_dir + '/keyfiles/' + orgName + '/connection.yml'):
        with open(work_dir + '/keyfiles/' + orgName + '/connection.yml', 'r') as stream:
            connectionProfile = yaml.load(stream)
        if connectionProfile["orderers"] is None:
            continue
        orderer_list = orderer_list + connectionProfile["orderers"].keys()
        if (connectionProfile["organizations"][orgName.lower()]["peers"] != None):
            org_list.append(orgName)
            org_list_lowercase.append(orgName.lower())
            org_peers_dict[orgName] = connectionProfile["organizations"][orgName.lower()]["peers"]
            peer_list = peer_list + \
                connectionProfile["organizations"][orgName.lower()]["peers"]
            org_anchor_dict[orgName] = sorted(
                connectionProfile["organizations"][orgName.lower()]["peers"])[0]

# When there is only peer or orderer, we skip tests.
if len(orderer_list) == 0 or len(peer_list) == 0:
    outputfile = open(work_dir + '/testplan_example.yml', 'w')
    outputfile.write("")
    outputfile.close()
    exit(0)

orderer_list = list(OrderedDict.fromkeys(orderer_list))
peer_list = list(OrderedDict.fromkeys(peer_list))

for orgName in org_list:
    tempOrgAnchorObj = {}
    tempOrgAnchorObj[orgName + "Anchor"] = org_anchor_dict[orgName]
    testplan_dict["peernodeAlias"].append(tempOrgAnchorObj)
    tempOrgPeersObj = {}
    tempOrgPeersObj[orgName + "Peers"] = ','.join(org_peers_dict[orgName])
    testplan_dict["peernodeAlias"].append(tempOrgPeersObj)
    allAnchor_list.append(org_anchor_dict[orgName])

testplan_dict["peernodeAlias"].append({"allAnchors": ','.join(allAnchor_list)})
testplan_dict["peernodeAlias"].append({"allPeers": ','.join(peer_list)})

print 'org list: '
print org_list_lowercase
print 'orderer_list: '
print orderer_list
print 'peer_list: '
print peer_list
print 'allAnchor_list'
print allAnchor_list

# CREATE_CHANNEL
channel_create["parameters"]["connectionProfile"] = org_list[0]
if network_type == 'cello':
    channel_create["parameters"]["channelConsortium"] = 'FabricConsortium'
else:
    channel_create["parameters"]["channelConsortium"] = 'SampleConsortium'
channel_create["parameters"]["channelOrgs"] = ','.join(org_list_lowercase)
channel_create["parameters"]["ordererName"] = orderer_list[0]
testplan_dict["tests"].append(channel_create)

# JOIN_CHANNEL and INSTALL_CHAINCODE
join_list = []
install_list = []
for org in org_list:
    channel_join = template["CHANNEL_JOIN"]
    channel_join["parameters"]["connectionProfile"] = org
    channel_join["parameters"]["peers"] = ','.join(org_peers_dict[org])
    channel_join["parameters"]["ordererName"] = orderer_list[0]
    join_list.append(str(channel_join))
    # CHAINCODE_INSTALL
    chaincode_install["parameters"]["connectionProfile"] = org
    chaincode_install["parameters"]["peers"] = ','.join(org_peers_dict[org])
    install_list.append(str(chaincode_install))

for join_org in join_list:
    join_item = ast.literal_eval(join_org)
    testplan_dict["tests"].append(join_item)

for install_org in install_list:
    install_item = ast.literal_eval(install_org)
    testplan_dict["tests"].append(install_item)

# CHAINCODE_INSTANTIATE
chaincode_instantiate["parameters"]["connectionProfile"] = org_list[0]
chaincode_instantiate["parameters"]["peers"] = ','.join(peer_list)

# CHAINCODE_INVOKE
# Invoke with fixed transaction count : 100
chaincode_invoke["iterationCount"] = '100'
chaincode_invoke["parameters"]["connectionProfile"] = org_list[0]
chaincode_invoke["parameters"]["peers"] = ','.join(peer_list)
chaincoode_invoke_count = str(chaincode_invoke)

# Invoke with fixed running duration : 0 hour 10 minutes 0 second.
# And enable running tests parallel by setting waitUntilFinish to true
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["waitUntilFinish"] = False
chaincoode_invoke_time = str(chaincode_invoke)

# Invoke with fixed running duration : 0 hour 10 minutes 0 second
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["parameters"]["peers"] = peer_list[0]
chaincoode_invoke_parallel = str(chaincode_invoke)

testplan_dict["tests"].append(chaincode_instantiate)
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_count))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_time))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_parallel))

# Execute command with default images
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))

# Execute command with customized image
execute_command["name"] = "execute-command-with-customized-image"
execute_command["container"] = "user/ownimage"
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))

connYamlStr = yaml.dump(testplan_dict, default_flow_style=False)
tempstr = connYamlStr
for orgName in org_list:
    tempstr = tempstr.replace(orgName + "Anchor:", orgName + "Anchor: &" + orgName + "Anchor")
    tempstr = tempstr.replace(orgName + "Peers:", orgName + "Peers: &" + orgName + "Peers")
tempstr = tempstr.replace("allAnchors:", "allAnchors: &allAnchors")
tempstr = tempstr.replace("allPeers:", "allPeers: &allPeers")
tempstr = tempstr.replace("runid:", "runid: &runid")
if network_type == "ibp":
    tempstr = tempstr.replace("networkid:", "networkid: &networkid")

# Dump testplan file
outputfile = open(work_dir + '/testplan_example.yml', 'w')
outputfile.write(tempstr)
outputfile.close()
[ 198, 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 11748, 331, 43695, 198, 11748, 28686, 198, 11748, 6468, 198, 11748, 25064, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 198, 22019, 81, 62, 15908, 796, 28686, 13, 1136, 66, 16...
2.549281
2,780
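The run of tempstr.replace() calls near the end rewrites plain YAML keys into YAML anchors (key: &key value) so later parts of the plan can alias the same value. A small self-contained sketch of that trick on a synthetic document:

import yaml

doc = yaml.dump({"allPeers": "peer0,peer1", "target": None}, default_flow_style=False)
doc = doc.replace("allPeers:", "allPeers: &allPeers")   # anchor the value
doc = doc.replace("target: null", "target: *allPeers")  # alias it elsewhere
print(doc)
print(yaml.safe_load(doc))  # {'allPeers': 'peer0,peer1', 'target': 'peer0,peer1'}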
"""Collection of tests.""" import pytest import dblib.lib f0 = dblib.lib.Finding('CD spook', 'my_PC', 'The CD drive is missing.') f1 = dblib.lib.Finding('Unplugged', 'my_PC', 'The power cord is unplugged.') f2 = dblib.lib.Finding('Monitor switched off', 'my_PC', 'The monitor is switched off.') def test_add_remove(): """Test function.""" db = dblib.lib.BackyardDB() # regular cases db.add(f0) assert f0 in db.findings assert len(db.findings) == 1 db.add(f1) assert f1 in db.findings assert len(db.findings) == 2 db.add(f2) assert f2 in db.findings assert len(db.findings) == 3 db.add(None) assert len(db.findings) == 3 db.remove(f1) assert f1 not in db.findings assert len(db.findings) == 2 # test exceptions with pytest.raises(TypeError): db.add(1) def test_update(): """Test function.""" db = dblib.lib.BackyardDB() db.add(f0) db.add(f1) db.update(f1, f2) assert f2 in db.findings assert len(db.findings) == 2
[ 37811, 36307, 286, 5254, 526, 15931, 198, 11748, 12972, 9288, 198, 198, 11748, 288, 2436, 571, 13, 8019, 628, 198, 69, 15, 796, 288, 2436, 571, 13, 8019, 13, 36276, 10786, 8610, 599, 566, 3256, 705, 1820, 62, 5662, 3256, 705, 464, 6...
2.285398
452
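The assertions above pin down the interface dblib.lib must expose; a minimal sketch of classes that would satisfy these tests (inferred from the assertions, not the real library):

class Finding:
    def __init__(self, title, machine, description):
        self.title, self.machine, self.description = title, machine, description


class BackyardDB:
    def __init__(self):
        self.findings = []

    def add(self, finding):
        if finding is None:
            return  # silently ignored, per test_add_remove
        if not isinstance(finding, Finding):
            raise TypeError('finding must be a Finding instance')
        self.findings.append(finding)

    def remove(self, finding):
        self.findings.remove(finding)

    def update(self, old, new):
        self.findings[self.findings.index(old)] = new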
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from azure.cli.core.decorators import Completer
from azure.cli.core.commands.client_factory import get_subscription_id

from ._client_factory import cf_policy_insights
[ 2, 16529, 1783, 10541, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 198, 2, 16529, 1783, 10541, 198,...
5.212121
99
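These three imports are the usual ingredients of an azure-cli tab completer. A sketch of the shape such a completer takes; the function name and body are placeholder assumptions, not the real implementation:

@Completer
def get_policy_remediation_completion_list(cmd, prefix, namespace, **kwargs):
    client = cf_policy_insights(cmd.cli_ctx)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # hypothetical: query `client` for candidate names under `subscription_id`
    return []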
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-25 22:22
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_smalluuid.models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 940, 13, 19, 319, 2177, 12, 2999, 12, 1495, 2534, 25, 1828, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.829545
88
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor

TOKEN = "Token for your bot"

bot = Bot(token=TOKEN)
dp = Dispatcher(bot)

if __name__ == '__main__':
    executor.start_polling(dp)
[ 6738, 257, 72, 21857, 1330, 18579, 11, 3858, 201, 198, 6738, 257, 72, 21857, 13, 6381, 8071, 2044, 1330, 3167, 8071, 2044, 201, 198, 6738, 257, 72, 21857, 13, 26791, 1330, 3121, 273, 201, 198, 10468, 43959, 796, 366, 30642, 329, 345, ...
2.617021
94
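The script above starts polling without registering any handlers, so the bot would sit idle. A minimal aiogram 2.x sketch of the missing piece; the command and reply text are assumptions:

from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor

bot = Bot(token="...")  # supply a real token at runtime
dp = Dispatcher(bot)

@dp.message_handler(commands=['start'])
async def cmd_start(message: types.Message):
    await message.reply("Hello!")

if __name__ == '__main__':
    executor.start_polling(dp)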
from django.core.exceptions import ImproperlyConfigured
from importlib import import_module

try:
    from django.utils.encoding import force_text
except ImportError:
    from django.utils.encoding import force_unicode as force_text
from django.utils.functional import Promise

import json
[ 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 12205, 525, 306, 16934, 1522, 198, 6738, 1330, 8019, 1330, 1330, 62, 21412, 198, 198, 28311, 25, 198, 220, 220, 220, 422, 42625, 14208, 13, 26791, 13, 12685, 7656, 1330, 2700, 62, 5...
3.458824
85
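This import set (Promise, force_text, json) is the standard recipe for a JSON encoder that can serialize Django's lazy translation proxies. A sketch of the pattern, assumed here since the record stops at the imports:

class LazyEncoder(json.JSONEncoder):
    def default(self, obj):
        # Promise is the base of Django's lazy strings (e.g. gettext_lazy results)
        if isinstance(obj, Promise):
            return force_text(obj)
        return super(LazyEncoder, self).default(obj)

# usage: json.dumps(data, cls=LazyEncoder)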
# timedpid.py
# Source: https://github.com/DrGFreeman/PyTools
#
# MIT License
#
# Copyright (c) 2017 Julien de la Bruere-Terreault <drgfreeman@tuta.io>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This module defines a simple Proportional - Integral - Derivative (PID)
# controller with different time step calculation methods. This is a python
# implementation of my Arduino TimedPID library which can be found at
# https://github.com/DrGFreeman/TimedPID. Refer to this repository for detailed
# documentation.

import time
[ 2, 28805, 35317, 13, 9078, 198, 2, 8090, 25, 3740, 1378, 12567, 13, 785, 14, 6187, 38, 20366, 8463, 14, 20519, 33637, 198, 2, 198, 2, 17168, 13789, 198, 2, 198, 2, 15069, 357, 66, 8, 2177, 5979, 2013, 390, 8591, 1709, 518, 260, ...
3.704327
416
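The header above describes the whole point of the module: a PID controller whose integral and derivative terms are scaled by a measured time step rather than a fixed one. A minimal sketch of that idea (an assumed implementation; the real one lives in the linked PyTools/TimedPID repositories):

import time


class SimpleTimedPID:
    def __init__(self, kp=1.0, ki=0.0, kd=0.0):
        self.kp, self.ki, self.kd = kp, ki, kd
        self._integral = 0.0
        self._last_error = 0.0
        self._last_time = time.time()

    def get_cmd(self, setpoint, measurement):
        # measure the elapsed time step and scale I and D terms by it
        now = time.time()
        dt = now - self._last_time
        error = setpoint - measurement
        self._integral += error * dt
        derivative = (error - self._last_error) / dt if dt > 0 else 0.0
        self._last_time = now
        self._last_error = error
        return self.kp * error + self.ki * self._integral + self.kd * derivative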
#
# -*- coding: utf-8-*-
# receives messages via zmq and executes some simple
# operations.
#
# (c) ISC Clemenz & Weinbrecht GmbH 2018
#

import json
import requests
import zmq

import pmon
[ 2, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 12, 9, 12, 198, 2, 11583, 6218, 2884, 1976, 76, 80, 290, 42985, 617, 2829, 198, 2, 4560, 13, 198, 2, 198, 2, 357, 66, 8, 3180, 34, 3779, 3653, 89, 1222, 40966, 4679, 21474,...
2.666667
72
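A minimal sketch of the receive-and-execute loop the header comment describes; the socket type, port, and reply format are assumptions, and the real pmon module is not shown in this record:

import json
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REP)
sock.bind("tcp://*:5555")  # port is an assumption

while True:
    msg = json.loads(sock.recv_string())
    # execute some simple operation, then acknowledge
    sock.send_string(json.dumps({"ok": True, "echo": msg}))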
from padmini import operations as op


"""
def test_ti():
    assert S.ti("ta", "e") == "te"
    assert S.ti("AtAm", "e") == "Ate"


def test_antya():
    assert S.antya("ti", "u") == "tu"
    assert S.antya("te", "Am") == "tAm"
"""
[ 6738, 14841, 45313, 1330, 4560, 355, 1034, 628, 198, 198, 37811, 198, 4299, 1332, 62, 20259, 33529, 198, 220, 220, 220, 6818, 311, 13, 20259, 7203, 8326, 1600, 366, 68, 4943, 6624, 366, 660, 1, 198, 220, 220, 220, 6818, 311, 13, 202...
2.228571
105
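The commented-out tests fix the behaviour of the two helpers they call: antya swaps a word's final sound, and ti swaps the last vowel plus everything after it. A sketch of implementations consistent with those assertions (assumed, not padmini's actual code; the vowel set is SLP1 transliteration):

VOWELS = set("aAiIuUfFxXeEoO")  # SLP1 vowels


def antya(s: str, sub: str) -> str:
    return s[:-1] + sub


def ti(s: str, sub: str) -> str:
    for i in range(len(s) - 1, -1, -1):
        if s[i] in VOWELS:
            return s[:i] + sub
    return sub


assert ti("ta", "e") == "te" and ti("AtAm", "e") == "Ate"
assert antya("ti", "u") == "tu" and antya("te", "Am") == "tAm"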
# coding: utf-8
# just prints the emails of members of a group to stdout,
# both primary and secondary members
# run as
# $python extractemails_nogui.py "Tidal Disruption Events"
from __future__ import print_function

__author__ = 'Federica Bianco, NYU - GitHub: fedhere'

import sys
import pandas as pd
from argparse import ArgumentParser

from config import tvsfile


def parse_args(subglist):
    """Use ArgParser to build up the arguments we will use in our script."""
    stored_args = {}
    # get the script name without the extension & use it to build up
    # the json filename
    parser = ArgumentParser(description='Selecting members by subgroup')
    parser.add_argument('subgroup', action='store', default=None,
                        help='Choose the subgroup affiliation:' +
                             ' -- '.join([s for s in subglist]))
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    if tvsfile is None:
        print("Required Argument: Google Doc file identifier (if you do not have it email federica!)")
        sys.exit()

    TVSMembers = pd.read_csv('https://docs.google.com/spreadsheets/d/' +
                             tvsfile + '/export?gid=0&format=csv',
                             index_col=0)
    subgroups = TVSMembers.primary.unique()
    conf = parse_args([x for x in subgroups if str(x) != 'nan'])

    primary = conf.subgroup
    secondary = conf.subgroup
    emails = TVSMembers[TVSMembers.primary == primary]['email'].values
    print("These are the members with primary affiliation with " + primary)
    print("")
    print(' '.join([em + ',' for em in emails]))

    emails = TVSMembers[(TVSMembers.secondary == secondary) |
                        (TVSMembers['secondary.1'] == secondary) |
                        (TVSMembers['secondary.2'] == secondary)]['email'].values
    print("\n")
    print("These are the members with secondary affiliation with " + secondary)
    print("")
    print(' '.join([em + ',' for em in emails]))
    print("")
    print("If you also want their names and affiliations use: ")
    print("$python extractemailsW.py " + conf.subgroup)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 3137, 20842, 262, 7237, 286, 1866, 286, 257, 1448, 284, 14367, 448, 11, 198, 2, 16885, 4165, 290, 9233, 1866, 198, 2, 1057, 355, 198, 2, 720, 29412, 7925, 368, 1768, 62, 77, 519, 9019, 13,...
2.586982
845
from discord.ext import commands, menus

import utils
import random, discord, os, importlib, mystbin, typing, aioimgur, functools, tweepy
import traceback, textwrap
from discord.ext.menus.views import ViewMenuPages


class SusUsersEmbed(menus.ListPageSource):


class TestersEmbed(menus.ListPageSource):


def tweepy_post(self, post_text=None):
    consumer_key = os.getenv('tweet_key')
    consumer_secret = os.getenv('tweet_secret')
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    access_token = os.getenv('tweet_access')
    access_secret = os.getenv('tweet_token')
    auth.set_access_token(access_token, access_secret)
    twitter_api = tweepy.API(auth)
    return twitter_api.update_status(status=post_text)
[ 6738, 36446, 13, 2302, 1330, 9729, 11, 26798, 198, 11748, 3384, 4487, 198, 11748, 4738, 837, 36446, 11, 28686, 11, 1330, 8019, 11, 21619, 8800, 11, 19720, 11, 257, 952, 19791, 11, 1257, 310, 10141, 11, 4184, 538, 88, 198, 11748, 12854...
2.859316
263
# This file is part of the UFO.
#
# This file contains definitions for functions that
# are extensions of the cmath library, and correspond
# either to functions that are in cmath, but inconvenient
# to access from there (e.g. z.conjugate()),
# or functions that are simply not defined.
#
#
from __future__ import absolute_import

__date__ = "22 July 2010"
__author__ = "claude.duhr@durham.ac.uk"

import cmath
from .object_library import all_functions, Function

#
# shortcuts for functions from cmath
#

complexconjugate = Function(name = 'complexconjugate',
                            arguments = ('z',),
                            expression = 'z.conjugate()')

re = Function(name = 're',
              arguments = ('z',),
              expression = 'z.real')

im = Function(name = 'im',
              arguments = ('z',),
              expression = 'z.imag')

# New functions (trigonometric)

sec = Function(name = 'sec',
               arguments = ('z',),
               expression = '1./cmath.cos(z)')

asec = Function(name = 'asec',
                arguments = ('z',),
                expression = 'cmath.acos(1./z)')

csc = Function(name = 'csc',
               arguments = ('z',),
               expression = '1./cmath.sin(z)')

acsc = Function(name = 'acsc',
                arguments = ('z',),
                expression = 'cmath.asin(1./z)')
[ 2, 770, 2393, 318, 636, 286, 262, 19728, 13, 198, 2, 198, 2, 770, 2393, 4909, 17336, 329, 5499, 326, 198, 2, 389, 18366, 286, 262, 269, 11018, 5888, 11, 290, 6053, 198, 2, 2035, 284, 5499, 326, 389, 287, 269, 11018, 11, 475, 375...
2.288396
586
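Each Function above stores its mathematics as an expression string over named arguments. A small sketch (assumed, since object_library is not shown in this record) of how such a record can be evaluated:

import cmath


def call_function(arguments, expression, *values):
    # bind the argument names to concrete values, then eval the expression
    scope = {"cmath": cmath}
    scope.update(dict(zip(arguments, values)))
    return eval(expression, scope)


print(call_function(('z',), '1./cmath.cos(z)', 0.5))  # sec(0.5) ~= 1.139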