Dataset schema (per-row fields, with observed ranges):

    column             dtype     range
    content            string    1 to 1.05M characters
    input_ids          list      1 to 883k tokens
    ratio_char_token   float64   1 to 22.9
    token_count        int64     1 to 883k
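For orientation, here is a minimal sketch of how a dump with this schema could be loaded and sanity-checked. The file name and the use of the `datasets` JSON loader are illustrative assumptions, not part of this dump. Per row, `ratio_char_token` is characters per token, so `len(content) ≈ ratio_char_token × token_count` (e.g. the 42-token row below: 42 × 3.547619 ≈ 149 characters).

from datasets import load_dataset

# Sketch only: the file name is a hypothetical placeholder.
ds = load_dataset("json", data_files="tokenized_code.jsonl", split="train")

row = ds[0]
# token_count is the length of the tokenized sequence.
assert row["token_count"] == len(row["input_ids"])
# ratio_char_token stores characters per token for each row's content.
assert abs(row["ratio_char_token"] - len(row["content"]) / row["token_count"]) < 1e-3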
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import json
import time
import cv2

PATH_TO_FROZEN_GRAPH = '../data/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_frozen.pb'

info = 'Time taken to load Model into memory:'
start_time = time.time()
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
end_time = time.time()
time_taken = end_time - start_time
print(info, time_taken)

# Load the labels
# Load categories
categories = []
with open('../data/' + 'categories.txt', 'r') as f:
    for line in f:
        cat = line.split('\n')[0]
        if cat != 'classes':
            categories.append(cat)
print('Number of categories:', len(categories))

# Load image size
with open('../data/' + 'inputsize.txt', 'r') as f:
    reqsize = int(f.readline().split('\n')[0])
#print(reqsize)

#image_filename = '../data/' + 'image1.jpg'
sess = tf.Session(graph=detection_graph)
image_filename = '../data/' + 'Tiger.jpg'
# Load_and_process_img and run_inference_b1 are helper functions defined
# elsewhere in the original project.
img = Load_and_process_img(image_filename)
key_name = 'MobilenetV2/Predictions/Reshape_1'
result, time_taken = run_inference_b1(key_name, img, detection_graph, 1000)
print('Time Taken to run Inference is:', time_taken)
print(result)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 11748, 2237, 13, 76, 5241, 13, 333, 297, 571, 355, 2956, 297, 571, 198, 11748, 25064, 198, 11748, 13422, 7753, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 19974, 7753, 19...
2.558176
636
from __future__ import unicode_literals

import frappe, json
from frappe.model.utils.user_settings import update_user_settings, sync_user_settings
[ 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 11748, 5306, 27768, 11, 33918, 198, 6738, 5306, 27768, 13, 19849, 13, 26791, 13, 7220, 62, 33692, 1330, 4296, 62, 7220, 62, 33692, 11, 17510, 62, 7220, 62, 33692, 628, 6...
3.547619
42
import BoltzmannMachine as bm
import QHO as qho
import numpy as np
import datetime

# Visualization imports
from IPython.display import clear_output
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi'] = 300

# Set the quantum gas with N particles, a limit of 10 for the
# quantum numbers and default temperature and frequency
N = 10*10
gas = qho.QHOGas(N=N)
n_max = 10
training_size = 100000
# the amount of hidden units was set by trial and error
hidden_units = 70
# the recipe suggests to set the batchsize to 10, though it can range
# from 10 to 100
batchsize = 10
# the recipe suggests a learning rate that makes the weight updates about
# 1e-3 times the weights (to within an order of magnitude)
eta = 0.005
# the amount of steps was set by trial and error
nsteps = 300000

# define the validation set to be used in training_visualization
validation_set = gas.generate(amount=20)

# Init the boltzmann machine and train it while visualizing the suggested plots
training_set = gas.generate(amount=training_size, n_max=n_max)
m = bm.BoltzmannMachine(num_hidden=hidden_units)
a, b, w = m.train(training_set, batchsize=batchsize, eta=eta, nsteps=nsteps,
                  do_while_training=None)

# Store in a file
run_id = int(datetime.datetime.now().timestamp())
np.savetxt(f"a_{run_id}.csv", a, delimiter=',')
np.savetxt(f"b_{run_id}.csv", b, delimiter=',')
np.savetxt(f"w_{run_id}.csv", w, delimiter=',')
[ 11748, 21764, 89, 9038, 37573, 355, 275, 76, 198, 11748, 1195, 32298, 355, 10662, 8873, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4818, 8079, 198, 2, 15612, 1634, 17944, 198, 6738, 6101, 7535, 13, 13812, 1330, 1598, 62, 22915, 198...
3.085653
467
import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime

from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
    extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging

MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'


def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
                          private_packs):
    """Upload updated index zip to cloud storage.

    Args:
        public_index_folder_path (str): public index folder full path.
        extract_destination_path (str): extract folder full path.
        public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
        build_number (str): circleCI build number, used as an index revision.
        private_packs (list): List of private packs and their price.
    """
    with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
        for private_pack in private_packs:
            private_pack['price'] = 0
        index = {
            'revision': build_number,
            'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
            'packs': private_packs
        }
        json.dump(index, index_file, indent=4)

    index_zip_name = os.path.basename(public_index_folder_path)
    index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
                                         root_dir=extract_destination_path, base_dir=index_zip_name)
    try:
        public_ci_dummy_index_blob.reload()
        public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0"  # disabling caching for index blob
        public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
        logging.success("Finished uploading index.zip to storage.")
    except Exception:
        logging.exception("Failed in uploading index. Mismatch in index file generation.")
        sys.exit(1)
    finally:
        shutil.rmtree(public_index_folder_path)


def option_handler():
    """Validates and parses script arguments.

    Returns:
        Namespace: Parsed arguments object.
    """
    parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
    # disable-secrets-detection-start
    parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
    parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
    parser.add_argument('-s', '--service_account',
                        help=("Path to gcloud service account, is for circleCI usage. "
                              "For local development use your personal account and "
                              "authenticate using Google Cloud SDK by running: "
                              "`gcloud auth application-default login` and leave this parameter blank. "
                              "For more information go to: "
                              "https://googleapis.dev/python/google-api-core/latest/auth.html"),
                        required=False)
    parser.add_argument('-n', '--ci_build_number',
                        help="CircleCi build number (will be used as hash revision at index file)", required=True)
    parser.add_argument('-e', '--extract_public_index_path',
                        help="Full path of folder to extract the public index", required=True)
    parser.add_argument('-sb', '--storage_base_path',
                        help="Storage base path of the directory to upload to.", required=False)
    parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
    parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
    parser.add_argument('-ea', '--extract_artifacts_path',
                        help="Full path of folder to extract wanted packs", required=True)
    parser.add_argument('-di', '--dummy_index_dir_path',
                        help="Full path to the dummy index in the private CI bucket", required=True)
    # disable-secrets-detection-end
    return parser.parse_args()


if __name__ == '__main__':
    # main() is defined elsewhere in the original script.
    main()
[ 11748, 640, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 4423, 346, 198, 11748, 33918, 198, 11748, 1822, 29572, 198, 6738, 19974, 7753, 1330, 38636, 8979, 198, 6738, 4732, 8019, 1330, 4732, 37153, 198, 6738, 4818, 8079, 1330, 4818, 8...
2.516508
1,787
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 628 ]
3.888889
9
dataset_type = 'FlyingChairs'
data_root = 'data/FlyingChairs_release'

img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False)

global_transform = dict(
    translates=(0.05, 0.05),
    zoom=(1.0, 1.5),
    shear=(0.86, 1.16),
    rotate=(-10., 10.))

relative_transform = dict(
    translates=(0.00375, 0.00375),
    zoom=(0.985, 1.015),
    shear=(1.0, 1.0),
    rotate=(-1.0, 1.0))

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(
        type='ColorJitter',
        brightness=0.5,
        contrast=0.5,
        saturation=0.5,
        hue=0.5),
    dict(type='RandomGamma', gamma_range=(0.7, 1.5)),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='RandomFlip', prob=0.5, direction='vertical'),
    dict(
        type='RandomAffine',
        global_transform=global_transform,
        relative_transform=relative_transform),
    dict(type='RandomCrop', crop_size=(320, 448)),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['imgs', 'flow_gt'],
        meta_keys=[
            'img_fields', 'ann_fields', 'filename1', 'filename2',
            'ori_filename1', 'ori_filename2', 'filename_flow',
            'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg'
        ]),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='InputResize', exponent=6),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='TestFormatBundle'),
    dict(
        type='Collect',
        keys=['imgs'],
        meta_keys=[
            'flow_gt', 'filename1', 'filename2', 'ori_filename1',
            'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
            'scale_factor', 'pad_shape'
        ])
]

flyingchairs_train = dict(
    type=dataset_type,
    pipeline=train_pipeline,
    data_root=data_root,
    split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt')

data = dict(
    train_dataloader=dict(
        samples_per_gpu=1,
        workers_per_gpu=2,
        drop_last=True,
        persistent_workers=True),
    val_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
    test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
    train=flyingchairs_train,
    val=dict(
        type=dataset_type,
        pipeline=test_pipeline,
        data_root=data_root,
        test_mode=True,
        split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'),
    test=dict(
        type=dataset_type,
        pipeline=test_pipeline,
        data_root=data_root,
        test_mode=True,
        split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'))
[ 19608, 292, 316, 62, 4906, 796, 705, 49095, 1925, 3468, 6, 198, 7890, 62, 15763, 796, 705, 7890, 14, 49095, 1925, 3468, 62, 20979, 6, 198, 198, 9600, 62, 27237, 62, 37581, 796, 8633, 7, 32604, 41888, 15, 1539, 657, 1539, 657, 13, ...
2.153788
1,320
# Copyright 2019 Intel Corporation.

import logging
from collections import namedtuple

import numpy as np
import six

from plaidml2 import DType
from plaidml2.core import TensorShape, Buffer
from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib

logger = logging.getLogger(__name__)


def __init():
    """Docstring for function plaidml2.edsl.__init"""
    ffi_call(lib.plaidml_edsl_init)


ffi.init_once(__init, 'plaidml_edsl_init')

Constraint = namedtuple('Constraint', ['lhs', 'rhs'])
_ContractionPart = namedtuple('_ContractionPart', ['op', 'args'])

# NOTE: Tensor, TensorDim and IndexedTensor are referenced below; their class
# definitions did not survive extraction.


# bind a concrete shape to this tensor
# (a method of the Tensor class; its enclosing class body was lost)
def bind(self, shape):
    ffi_call(lib.plaidml_expr_bind_shape, self.as_ptr(), shape.as_ptr())


class TensorRef:
    """Docstring for class TensorRef"""


def wrap_tensor(x):
    if isinstance(x, six.integer_types):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x))
    if np.issubdtype(type(x), np.integer):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x.item()))
    if isinstance(x, float):
        return Tensor(expr=ffi_call(lib.plaidml_expr_float, x))
    if isinstance(x, TensorDim):
        return Tensor(expr=ffi_call(lib.plaidml_expr_dim, x.as_ptr()))
    if isinstance(x, Tensor):
        return x
    raise TypeError('Unexpected type for call argument: {}, bad arg: {}'.format(type(x), x))


def call(fn, *args):
    args = [wrap_tensor(x) for x in args]
    raw_args = [x.as_ptr() for x in args]
    return Tensor(expr=ffi_call(lib.plaidml_expr_call, fn.encode(), len(args), raw_args))


def cast(x, dtype):
    return Tensor(expr=ffi_call(lib.plaidml_expr_cast, wrap_tensor(x).as_ptr(), dtype))


def as_bool(x):
    return cast(x, DType.BOOLEAN)


def as_float(x, bit_size):
    map = {
        16: DType.FLOAT16,
        32: DType.FLOAT32,
        64: DType.FLOAT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        # raising a bare string is a TypeError in Python 3; raise a real exception
        raise ValueError('Unsupported bit_size for as_float')
    return cast(x, dtype)


def as_int(x, bit_size):
    map = {
        8: DType.INT8,
        16: DType.INT16,
        32: DType.INT32,
        64: DType.INT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_int')
    return cast(x, dtype)


def as_uint(x, bit_size):
    map = {
        8: DType.UINT8,
        16: DType.UINT16,
        32: DType.UINT32,
        64: DType.UINT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_uint')
    return cast(x, dtype)


def ceil(x):
    return call('ceil', x)


def cond(lhs, rhs, true_case):
    return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_COND, (lhs, rhs, true_case)))


def cos(x):
    return call('cos', x)


def exp(x):
    return call('exp', x)


def floor(x):
    return call('floor', x)


def gather(x, y):
    return call('gather', x, y)


def gradients(loss, variables):
    wrts = [x.as_ptr() for x in variables]
    raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
    ffi_call(
        lib.plaidml_expr_gradient,
        len(wrts),
        wrts,
        loss.as_ptr(),
        raw_grads,
    )
    return [Tensor(expr=x) for x in raw_grads]


def ident(x):
    return call('ident', x)


def index(x, axis):
    return call('index', x, axis)


def jacobian(loss, variables):
    wrts = [x.as_ptr() for x in variables]
    raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
    ffi_call(
        lib.plaidml_expr_jacobian,
        len(wrts),
        wrts,
        loss.as_ptr(),
        raw_grads,
    )
    return [Tensor(expr=x) for x in raw_grads]


def log(x):
    return call('log', x)


def max(x, y):
    return call('max', x, y)


def min(x, y):
    return call('min', x, y)


def pow(x, y):
    return call('pow', x, y)


def prng(state, shape):
    return call('prng', state, *shape)


def reshape(x, dims):
    return call('reshape', x, *dims)


def round(x):
    return call('round', x)


def scatter(x, y, z):
    return call('scatter', x, y, z)


def select(cond, true_case, false_case):
    return call('cond', cond, true_case, false_case)


def shape(x):
    return call('shape', x)


def sin(x):
    return call('sin', x)


def sqrt(x):
    return call('sqrt', x)


def tan(x):
    return call('tan', x)


def tanh(x):
    return call('tanh', x)
[ 2, 15069, 13130, 8180, 10501, 13, 198, 198, 11748, 18931, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2237, 198, 198, 6738, 458, 1698, 4029, 17, 1330, 360, 6030, 198, 6738, 458, 1698, ...
2.170106
1,987
""" Copyright (c) 2018 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from abc import abstractmethod from functools import partial import torch.nn as nn from .rmnet_angular import RMNetAngular from .mobilefacenet import MobileFaceNet from .landnet import LandmarksNet from .se_resnet_angular import SEResNetAngular from .shufflenet_v2_angular import ShuffleNetV2Angular from .backbones.se_resnet import se_resnet50, se_resnet101, se_resnet152 from .backbones.resnet import resnet50 from .backbones.se_resnext import se_resnext50, se_resnext101, se_resnext152 models_backbones = {'rmnet': RMNetAngular, 'mobilenetv2': MobileFaceNet, 'mobilenetv2_2x': partial(MobileFaceNet, width_multiplier=2.0), 'mobilenetv2_1_5x': partial(MobileFaceNet, width_multiplier=1.5), 'resnet50': partial(SEResNetAngular, base=resnet50), 'se_resnet50': partial(SEResNetAngular, base=se_resnet50), 'se_resnet101': partial(SEResNetAngular, base=se_resnet101), 'se_resnet152': partial(SEResNetAngular, base=se_resnet152), 'se_resnext50': partial(SEResNetAngular, base=se_resnext50), 'se_resnext101': partial(SEResNetAngular, base=se_resnext101), 'se_resnext152': partial(SEResNetAngular, base=se_resnext152), 'shufflenetv2': ShuffleNetV2Angular} models_landmarks = {'landnet': LandmarksNet}
[ 37811, 198, 15069, 357, 66, 8, 2864, 8180, 10501, 198, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 921, 743, 73...
2.527181
791
""" Class description goes here. """ import json import logging
[ 198, 37811, 5016, 6764, 2925, 994, 13, 37227, 198, 198, 11748, 33918, 198, 11748, 18931, 628 ]
4.1875
16
# Orthogonal linear system solver tests

from math import sqrt

import numpy as np
from orthogonal import orthogonal

################################################################################
# 2x2 orthogonal matrix
A = np.matrix('1 1;'
              '1 -1', float)
A = A*1.0/sqrt(2.0)

# Known terms vector
b = np.matrix('2; 3')

# Solve the system
x = orthogonal(A, b, 1)

# Check
if np.allclose(b, A*x) == False:
    raise Exception('Orthogonal test failure')

################################################################################
# 3x3 orthogonal matrix
A = np.matrix('2 -2 1;'
              '1 2 2;'
              '2 1 -2', float)
A = A*1.0/3.0

# Known terms vector
b = np.matrix('2; 3; 4')

# Solve the system
x = orthogonal(A, b)

# Check
if np.allclose(b, A*x) == False:
    raise Exception('Orthogonal test failure')
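These tests lean on a basic property worth spelling out: for an orthogonal matrix the inverse equals the transpose, so the system can be solved without any factorization. A quick, self-contained NumPy cross-check of that identity (illustrative only; it does not use the `orthogonal` module):

# Cross-check of the property the tests rely on: for orthogonal A,
# A^-1 == A^T, so x = A.T * b solves A x = b directly.
from math import sqrt
import numpy as np

A = np.matrix('1 1; 1 -1', float) / sqrt(2.0)
b = np.matrix('2; 3')
x = A.T * b  # no solver needed
assert np.allclose(A * x, b)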
[ 2, 47664, 519, 20996, 14174, 1080, 1540, 332, 5254, 198, 198, 6738, 10688, 1330, 19862, 17034, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 29617, 519, 20996, 1330, 29617, 519, 20996, 628, 198, 29113, 29113, 14468, 198, 2, 362, 8...
2.632399
321
import os
import shutil
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional

from huggingface_hub import Repository
from loguru import logger
from prettytable import PrettyTable

from .splits import TEST_SPLIT, TRAIN_SPLIT, VALID_SPLIT
from .tasks import TASKS
from .utils import BOLD_TAG, CYAN_TAG, GREEN_TAG, PURPLE_TAG, RESET_TAG, YELLOW_TAG, http_get, http_post
from .validation import validate_file

FILE_STATUS = (
    " Uploaded",
    " Queued",
    " In Progress...",
    " Success!",
    " Failed: file not found",
    " Failed: unsupported file type",
    " Failed: server error",
    " Invalid column mapping, please fix it and re-upload the file.",
)

JOB_STATUS = (
    ("", "queued"),
    ("", "start"),
    ("", "data_munging"),
    ("", "model_training"),
    ("", "success"),
    ("", "failed"),
)

PROJECT_STATUS = (
    ("", "Created"),
    ("", "Data processing started"),
    ("", "Data processing successful"),
    ("", "Failed to download data files from the huggingface hub"),
    ("", "Missing 'train' or 'valid' split in data files"),
    ("", "Failed to process data files"),
    ("", "Failed to upload processed data files to the huggingface hub"),
)

SPLITS = (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT)
[ 11748, 28686, 198, 11748, 4423, 346, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 11, 32233, 198, 198, 6738, 46292, 2550, 62, 40140, 1330, 1432...
2.849224
451
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.

"""Apple Push Notification service utilities.

Original copyright for this code: https://github.com/jayridge/apnstornado

  TokenToBinary(): converts a hex-encoded token into a binary value
  CreateMessage(): formats a binary APNs message from parameters
  ParseResponse(): parses APNs binary response for status & identifier
  ErrorStatusToString(): converts error status to error message
"""

__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'

import base64
import json
import struct
import time

from tornado import escape

_MAX_PAYLOAD_BYTES = 256
"""Maximum number of bytes in the APNS payload."""

_ELLIPSIS_BYTES = escape.utf8(u'…')
"""UTF-8 encoding of the Unicode ellipsis character."""


def _TruncateAlert(alert, max_bytes):
  """Converts the alert text to UTF-8 encoded JSON format, which is how
  the alert will be stored in the APNS payload. If the number of
  resulting bytes exceeds "max_bytes", then truncates the alert text at
  a Unicode character boundary, taking care not to split JSON escape
  sequences. Returns the truncated UTF-8 encoded alert text, including
  a trailing ellipsis character.
  """
  alert_json = escape.utf8(json.dumps(escape.recursive_unicode(alert), ensure_ascii=False))

  # Strip quotes added by JSON.
  alert_json = alert_json[1:-1]

  # Check if alert fits with no truncation.
  if len(alert_json) <= max_bytes:
    return escape.utf8(alert)

  # Make room for an appended ellipsis.
  assert max_bytes >= len(_ELLIPSIS_BYTES), 'max_bytes must be at least %d' % len(_ELLIPSIS_BYTES)
  max_bytes -= len(_ELLIPSIS_BYTES)

  # Truncate the JSON UTF8 string at a Unicode character boundary.
  truncated = alert_json[:max_bytes].decode('utf-8', errors='ignore')

  # If JSON escape sequences were split, then the truncated string may not be valid JSON. Keep
  # chopping trailing characters until the truncated string is valid JSON. It may take several
  # tries, such as in the case where a "\u1234" sequence has been split.
  while True:
    try:
      alert = json.loads(u'"%s"' % truncated)
      break
    except Exception:
      truncated = truncated[:-1]

  # Return the UTF-8 encoding of the alert with the ellipsis appended to it.
  return escape.utf8(alert) + _ELLIPSIS_BYTES
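To make the truncation algorithm concrete, a hypothetical walk-through (not part of the original module; values chosen for illustration, using the module's Python 2 byte/unicode semantics):

# Hypothetical usage of _TruncateAlert:
#
#   _TruncateAlert(u'Hello, World', max_bytes=256)
#       -> 'Hello, World'  (12 bytes fit the budget, returned unchanged as UTF-8)
#   _TruncateAlert(u'Hello, World', max_bytes=8)
#       -> 'Hello' + '\xe2\x80\xa6'  (8 bytes minus the 3-byte ellipsis leaves
#          5 bytes, cut at a character boundary, ellipsis appended)
#
# Because the byte budget is applied to the JSON-escaped UTF-8 form, multi-byte
# characters and escape sequences such as "\u1234" are never split in half.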
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 2321, 3582, 22805, 3457, 13, 1439, 6923, 33876, 13, 198, 198, 37811, 16108, 23691, 42808, 2139, 20081, 13, 198, 198, 20556, 6634, 329, 428, 2438, 25, 3740, 137...
3.292199
705
r"""Training and evaluating quantum kernels =========================================== .. meta:: :property="og:description": Kernels and alignment training with Pennylane. :property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png .. related:: tutorial_kernel_based_training Kernel-based training with scikit-learn tutorial_data_reuploading_classifier Classification with data reuploading *Authors: Peter-Jan Derks, Paul Fhrmann, Elies Gil-Fuster, Tom Hubregtsen, Johannes Jakob Meyer and David Wierichs. Posted: 24 June 2021* Kernel methods are one of the cornerstones of classical machine learning. Here we are concerned with kernels that can be evaluated on quantum computers, *quantum kernels* for short. In this tutorial you will learn how to evaluate kernels, use them for classification and train them with gradient-based optimization, and all that using the functionality of PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__. The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own `QHack <https://qhack.ai/>`__ hackathon. What are kernel methods? ------------------------ To understand what a kernel method does, let's first revisit one of the simplest methods to assign binary labels to datapoints: linear classification. Imagine we want to discern two different classes of points that lie in different corners of the plane. A linear classifier corresponds to drawing a line and assigning different labels to the regions on opposing sides of the line: .. figure:: ../demonstrations/kernels_module/linear_classification.png :align: center :width: 30% We can mathematically formalize this by assigning the label :math:`y` via .. math:: y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b). The vector :math:`\boldsymbol{w}` points perpendicular to the line and thus determine its slope. The independent term :math:`b` specifies the position on the plane. In this form, linear classification can also be extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a line does not divide the entire space into two regions anymore. Instead one needs a *hyperplane*. It is immediately clear that this method is not very powerful, as datasets that are not separable by a hyperplane can't be classified without error. We can actually sneak around this limitation by performing a neat trick: if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our datapoints into a larger *feature space* and then perform linear classification there, we could actually realise non-linear classification in our original space! .. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png :align: center :width: 65% If we go back to the expression for our prediction and include the embedding, we get .. math:: y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b). We will forgo one tiny step, but it can be shown that for the purpose of optimal classification, we can choose the vector defining the decision boundary as a linear combination of the embedded datapoints :math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting this into the formula yields .. math:: y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right). This rewriting might not seem useful at first, but notice the above formula only contains inner products between vectors in the embedding space: .. 
math:: k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle. We call this function the *kernel*. It provides the advantage that we can often find an explicit formula for the kernel :math:`k` that makes it superfluous to actually perform the (potentially expensive) embedding :math:`\phi`. Consider for example the following embedding and the associated kernel: .. math:: \phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\ k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2. This means by just replacing the regular scalar product in our linear classification with the map :math:`k`, we can actually express much more intricate decision boundaries! This is very important, because in many interesting cases the embedding :math:`\phi` will be much costlier to compute than the kernel :math:`k`. In this demo, we will explore one particular kind of kernel that can be realized on near-term quantum computers, namely *Quantum Embedding Kernels (QEKs)*. These are kernels that arise from embedding data into the space of quantum states. We formalize this by considering a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps a datapoint :math:`\boldsymbol{x}` to the state .. math:: |\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle. The kernel value is then given by the *overlap* of the associated embedded quantum states .. math:: k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2. """ ############################################################################## # A toy problem # ------------- # In this demo, we will treat a toy problem that showcases the # inner workings of classification with quantum embedding kernels, # training variational embedding kernels and the available functionalities # to do both in PennyLane. We of course need to start with some imports: from pennylane import numpy as np import matplotlib as mpl np.random.seed(1359) ############################################################################## # And we proceed right away to create a dataset to work with, the # ``DoubleCake`` dataset. Firstly, we define two functions to enable us to # generate the data. # The details of these functions are not essential for understanding the demo, # so don't mind them if they are confusing. 
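##############################################################################
# NOTE: the original demo defines two data-generation helpers here, but only
# ``_make_circular_data`` survived extraction. The sketch below reconstructs
# ``make_double_cake_data`` (called further down) under the assumption that it
# stacks two concentric rings of sectors, the inner ring at half the radius
# and with flipped labels; treat it as a plausible reconstruction, not
# verbatim source.


def make_double_cake_data(num_sectors):
    """Generate the DoubleCake dataset: two concentric rings of sectors."""
    x1, y1, labels1 = _make_circular_data(num_sectors)
    x2, y2, labels2 = _make_circular_data(num_sectors)

    # The second ring sits at half the radius and carries flipped labels.
    x2, y2 = x2 * 0.5, y2 * 0.5
    labels2 = -1 * labels2

    X = np.vstack([np.concatenate([x1, x2]), np.concatenate([y1, y2])]).T
    labels = np.concatenate([labels1, labels2])
    return X, labels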
def _make_circular_data(num_sectors):
    """Generate datapoints arranged in an even circle."""
    center_indices = np.array(range(0, num_sectors))
    sector_angle = 2 * np.pi / num_sectors
    angles = (center_indices + 0.5) * sector_angle
    x = 0.7 * np.cos(angles)
    y = 0.7 * np.sin(angles)
    labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1

    return x, y, labels


##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:


def plot_double_cake_data(X, Y, ax, num_sectors=None):
    """Plot double cake data and corresponding sectors."""
    x, y = X.T
    cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
    ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")

    if num_sectors is not None:
        sector_angle = 360 / num_sectors
        for i in range(num_sectors):
            color = ["#FF0000", "#0000FF"][(i % 2)]
            other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
            ax.add_artist(
                mpl.patches.Wedge(
                    (0, 0),
                    1,
                    i * sector_angle,
                    (i + 1) * sector_angle,
                    lw=0,
                    color=color,
                    alpha=0.1,
                    width=0.5,
                )
            )
            ax.add_artist(
                mpl.patches.Wedge(
                    (0, 0),
                    0.5,
                    i * sector_angle,
                    (i + 1) * sector_angle,
                    lw=0,
                    color=other_color,
                    alpha=0.1,
                )
            )

    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_aspect("equal")
    ax.axis("off")

    return ax


##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:

import matplotlib.pyplot as plt

num_sectors = 3
X, Y = make_double_cake_data(num_sectors)

ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)

##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as building block. Let's start by defining this layer:

import pennylane as qml


def layer(x, params, wires, i0=0, inc=1):
    """Building block of the embedding ansatz"""
    i = i0
    for j, wire in enumerate(wires):
        qml.Hadamard(wires=[wire])
        qml.RZ(x[i % len(x)], wires=[wire])
        i += inc
        qml.RY(params[0, j], wires=[wire])

    qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])


##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each of them.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.


def ansatz(x, params, wires):
    """The embedding ansatz"""
    for j, layer_params in enumerate(params):
        layer(x, layer_params, wires, i0=j * len(wires))


adjoint_ansatz = qml.adjoint(ansatz)


def random_params(num_wires, num_layers):
    """Generate random variational parameters in the shape for the ansatz."""
    return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)


##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.

dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()

##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.

##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:


def kernel(x1, x2, params):
    return kernel_circuit(x1, x2, params)[0]


##############################################################################
#
# .. note::
#     An alternative way to set up the kernel circuit in PennyLane would be
#     to use the observable type
#     `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
#     This is shown in the
#     `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more
#     background information on the kernel circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.

init_params = random_params(num_wires=5, num_layers=6)

##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:

kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")

##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# method, which makes use of symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.

init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)

with np.printoptions(precision=3, suppress=True):
    print(K_init)

##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone can not be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
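##############################################################################
# NOTE: the QNode ``kernel_circuit`` that ``kernel`` wraps did not survive
# extraction (in the original demo it is defined right before ``kernel``).
# A reconstruction consistent with the surrounding comments -- embed the first
# datapoint, apply the adjoint embedding of the second, and return the basis
# state probabilities -- is sketched here; treat it as a plausible
# reconstruction, not verbatim source:


@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
    ansatz(x1, params, wires=wires)
    adjoint_ansatz(x2, params, wires=wires)
    return qml.probs(wires=wires)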
from sklearn.svm import SVC

##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
#     This step does *not* modify the variational parameters in our circuit
#     ansatz. What it does is solving a different optimization task for the
#     :math:`\alpha` and :math:`b` vectors we introduced in the beginning.

svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)

##############################################################################
# To see how well our classifier performs we will measure which percentage
# of the dataset it classifies correctly.

accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")

##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex data sets. To this end we will introduce a
# second helper method.

##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:

init_plot_data = plot_decision_boundaries(svm, plt.gca())

##############################################################################
# We see the outer points in the dataset can be correctly classified, but
# we still struggle with the inner circle. But remember we have a circuit
# with many free parameters! It is reasonable to believe we can give
# values to those variational parameters which improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
#     \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
#     Seen from a more theoretical side, :math:`\operatorname{KA}`
#     is nothing else than the cosine of the angle between the kernel
#     matrices :math:`K_1` and :math:`K_2` if we see them as vectors
#     in the space of matrices with the Hilbert-Schmidt (or
#     Frobenius) scalar product
#     :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
#     reinforces the geometric picture of how this measure relates
#     to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
#     k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
#     \operatorname{KTA}_{\boldsymbol{y}}(K)
#     = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
#     = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: having a high kernel-target alignment
# is only a necessary but not a sufficient condition for a good
# performance of the kernel [#Alignment]_. This means having good alignment is
# guaranteed for good performance, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:

kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)

print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")

##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step, we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
#     Currently, the function ``qml.kernels.target_alignment`` is not
#     differentiable yet, making it unfit for gradient descent optimization.
#     We therefore first define a differentiable version of this function.
def target_alignment(
    X,
    Y,
    kernel,
    assume_normalized_kernel=False,
    rescale_class_labels=True,
):
    """Kernel-target alignment between kernel and labels."""

    K = qml.kernels.square_kernel_matrix(
        X,
        kernel,
        assume_normalized_kernel=assume_normalized_kernel,
    )

    if rescale_class_labels:
        nplus = np.count_nonzero(np.array(Y) == 1)
        nminus = len(Y) - nplus
        _Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
    else:
        _Y = np.array(Y)

    T = np.outer(_Y, _Y)
    inner_product = np.sum(K * T)
    norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
    inner_product = inner_product / norm

    return inner_product


params = init_params
opt = qml.GradientDescentOptimizer(0.2)

for i in range(500):
    # Choose subset of datapoints to compute the KTA on.
    subset = np.random.choice(list(range(len(X))), 4)
    # Define the cost function for optimization
    cost = lambda _params: -target_alignment(
        X[subset],
        Y[subset],
        lambda x1, x2: kernel(x1, x2, _params),
        assume_normalized_kernel=True,
    )
    # Optimization step
    params = opt.step(cost, params)

    # Report the alignment on the full dataset every 50 steps.
    if (i + 1) % 50 == 0:
        current_alignment = target_alignment(
            X,
            Y,
            lambda x1, x2: kernel(x1, x2, params),
            assume_normalized_kernel=True,
        )
        print(f"Step {i+1} - Alignment = {current_alignment:.3f}")

##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:

# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)

# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)

# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)

##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:

accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")

##############################################################################
# We have now achieved perfect classification!
#
# Given that SVMs have proven to generalise well, it will be interesting
# to inspect the decision boundaries of our classifier:

trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())

##############################################################################
# Indeed, we see that now not only does every data instance fall within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach benefits from both worlds: on
# one hand it can adjust itself to the dataset, and on the other hand it
# is not expected to suffer from bad generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
#     Thomas Hubregtsen, David Wierichs, Elies Gil-Fuster, Peter-Jan H. S. Derks,
#     Paul K. Faehrmann, and Johannes Jakob Meyer.
#     "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
#     `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
#     Wang, Tinghua, Dongyan Zhao, and Shengfeng Tian.
#     "An overview of kernel alignment and its applications."
#     `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
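##############################################################################
# NOTE: two helpers used earlier in the demo, ``accuracy`` and
# ``plot_decision_boundaries``, did not survive extraction. A minimal sketch
# of ``accuracy``, assuming it simply returns the fraction of correctly
# classified datapoints (``plot_decision_boundaries`` is omitted here):


def accuracy(classifier, X, Y_target):
    """Fraction of datapoints in X whose prediction matches Y_target."""
    return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)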
[ 81, 37811, 44357, 290, 22232, 14821, 50207, 198, 10052, 2559, 18604, 198, 198, 492, 13634, 3712, 198, 220, 220, 220, 1058, 26745, 2625, 519, 25, 11213, 1298, 509, 44930, 290, 19114, 3047, 351, 6595, 2645, 1531, 13, 198, 220, 220, 220, ...
3.134623
7,703
#!/usr/bin/python3
import pygame
import random
import time

##VARIABLES TO CHANGE
width = 500
height = 500
stats_height = 150
board_size = 5
window_name = "PyLoopover "+str(board_size)+"x"+str(board_size)
scramble_turns = 50
t_round = 3
FPS = 30

##DONT CHANGE THESE BOIS
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (32,200,32)
keys = {"w":0,"a":0,"s":0,"d":0,"q":0}
last_was_Q = False

def main():
    # declare the module-level flag; without this, reading it before the
    # first assignment inside main() would raise UnboundLocalError
    global last_was_Q

    # Board is defined elsewhere in the original project.
    gameboard = Board(board_size)

    pygame.init()
    pygame.mixer.quit() #weird workaround

    #name the window & size it.
    pygame.display.set_caption(window_name)
    screen = pygame.display.set_mode((width,height+stats_height),0,32)

    #setup framerate
    pygame.time.set_timer(pygame.USEREVENT+1,int((1/FPS)*1000))

    #setup event queue
    pygame.event.set_allowed(None) #start with no events allowed
    pygame.event.set_allowed(pygame.USEREVENT+1) #timer event
    pygame.event.set_allowed(pygame.KEYDOWN)
    pygame.event.set_allowed(pygame.QUIT) #4 quitters

    #setup fonts
    font = pygame.font.SysFont('mono',int((width/board_size)/1.14))
    font2 = pygame.font.SysFont('mono',int(stats_height/2.3))

    #main l00p
    running = True
    while running:
        #eevveeentttss???
        event = pygame.event.wait()
        if event.type == pygame.USEREVENT+1:
            #a fresh canvas
            screen.fill(WHITE)
            #draw stats
            time = gameboard.get_time()
            time_str = str( int( time[0] * (10 ** t_round) ) / (10 ** t_round) )
            text_timer = font2.render("Time :"+time_str,True,time[1])
            text_moves = font2.render("Moves:"+str(gameboard.moves),True,time[1])
            screen.blit(text_timer,(0,height))
            screen.blit(text_moves,(0,height+(stats_height/2)))
            #draw board
            gameboard.draw(screen,font)
            #update da screeeeeen
            pygame.display.update()
            #end the game
            if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
                gameboard.end_time()
        elif event.type == pygame.KEYDOWN:
            k = chr(event.key) #gimme a CHAR, not some weird integer
            domap = {
                "w":"gameboard.rotate_up(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
                "a":"gameboard.rotate_right(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
                "s":"gameboard.rotate_down(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
                "d":"gameboard.rotate_left(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
                "q":"gameboard.scramble(scramble_turns)"
            } #i guess?
            if k in ['w','a','s','d','q']:
                #starting game logic
                if k == "q":
                    last_was_Q = True
                else:
                    if last_was_Q:
                        gameboard.start_time()
                    last_was_Q = False
                exec(domap[k])
                #end the game
                if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
                    gameboard.end_time()
        #for quitters
        elif event.type == pygame.QUIT:
            print("Quitting...")
            running = False
        else:
            print("err0r, bAd 3v3nt lol")
            assert False

if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 11748, 12972, 6057, 198, 11748, 4738, 198, 11748, 640, 198, 198, 2235, 53, 1503, 3539, 9148, 1546, 5390, 5870, 27746, 198, 10394, 796, 5323, 198, 17015, 796, 5323, 198, 34242, 62, 17015, ...
2.270008
1,237
# "continue" skips the rest of the current loop iteration
for i in range(1, 11):
    if i == 6:
        continue
    print(i)
    print(i)
    print(i)
    print(i)
    print(i)
[ 2, 329, 2555, 837, 2555, 796, 14267, 10185, 201, 198, 1640, 1312, 287, 2837, 7, 16, 11, 1157, 2599, 201, 198, 220, 220, 220, 611, 1312, 6624, 718, 25, 201, 198, 220, 220, 220, 220, 220, 220, 220, 2555, 26, 201, 198, 220, 220, 22...
1.954023
87
# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
"""
NCS4K
Production Packages

External Names
ncs4k-full-x.iso-6.0.2
ncs4k-mini-x.iso-6.0.2
ncs4k-k9sec.pkg-6.0.2
ncs4k-mpls.pkg-6.0.2
ncs4k-mcast.pkg-6.0.2
ncs4k-mgbl.pkg-6.0.2

NCS6K
Production Packages

External Names                                Internal Names
ncs6k-doc.pkg-5.2.4                           ncs6k-doc-5.2.4
ncs6k-li.pkg-5.2.4                            ncs6k-li-5.2.4
ncs6k-mcast.pkg-5.2.4                         ncs6k-mcast-5.2.4
ncs6k-mgbl.pkg-5.2.4                          ncs6k-mgbl-5.2.4
ncs6k-mini-x.iso-5.2.4                        ncs6k-mini-x-5.2.4
ncs6k-mpls.pkg-5.2.4                          ncs6k-mpls-5.2.4
ncs6k-sysadmin.iso-5.2.4                      ncs6k-sysadmin-5.2.4
ncs6k-full-x.iso-5.2.4                        ncs6k-full-x-5.2.4
ncs6k-5.2.5.CSCuy47880.smu                    ncs6k-5.2.5.CSCuy47880-1.0.0  <- subversion added

Engineering Packages

External Names                                Internal Names
ncs6k-mcast.pkg-5.2.5.47I.DT_IMAGE            ncs6k-mcast-5.2.5.47I
ncs6k-mini-x.iso-6.1.0.07I.DT_IMAGE           ncs6k-xr-5.2.5.47I
ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i.smu        ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i

ASR9K-64
Production Packages - not finalized yet

External Names                                Internal Names
asr9k-mcast-x64-2.0.0.0-r611.x86_64.rpm       asr9k-mcast-x64-2.0.0.0-r611
asr9k-bgp-x64-1.0.0.0-r611.x86_64.rpm         asr9k-bgp-x64-1.0.0.0-r611
asr9k-mgbl-x64-3.0.0.0-r611.x86_64.rpm        asr9k-mgbl-x64-3.0.0.0-r611
asr9k-full-x64.iso-6.1.1                      asr9k-xr-6.1.1
asr9k-mini-x64.iso-6.1.1                      asr9k-xr-6.1.1

Engineering Packages

External Names                                                 Internal Names
asr9k-mcast-x64-2.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE  asr9k-mcast-x64-2.0.0.0-r61116I
asr9k-bgp-x64-1.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE    asr9k-bgp-x64-1.0.0.0-r61116I
asr9k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE   asr9k-mgbl-x64-3.0.0.0-r61116I
asr9k-full-x64.iso-6.1.1.16I.DT_IMAGE                          asr9k-full-x64-6.1.1.16I
asr9k-mini-x64.iso-6.1.1.16I.DT_IMAGE                          asr9k-mini-x64-6.1.1.16I

NCS5K
Production Packages

External Names                                Internal Names
ncs5k-sysadmin.iso-6.0.1                      ncs5k-sysadmin-6.0.1
ncs5k-full-x.iso-6.0.1                        ncs5k-xr-6.0.1
ncs5k-mini-x.iso-6.0.1                        ncs5k-xr-6.0.1
ncs5k-mcast-2.0.0.0-r601.x86_64.rpm-6.0.1     ncs5k-mcast-2.0.0.0-r601
ncs5k-mgbl-2.0.0.0-r601.x86_64.rpm-6.0.1      ncs5k-mgbl-2.0.0.0-r601
ncs5k-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1      ncs5k-mpls-2.0.0.0-r601
ncs5k-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1     ncs5k-k9sec-2.0.0.0-r601
ncs5k-isis-2.0.0.0-r601.x86_64.rpm-6.0.1      ncs5k-isis-2.0.0.0-r601
ncs5k-ospf-2.0.0.0-r601.x86_64.rpm-6.0.1      ncs5k-ospf-2.0.0.0-r601

Engineering Packages

External Names                                                 Internal Names
ncs5k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.0.1.16I.DT_IMAGE   ncs5k-mgbl-3.0.0.0-r60116I
ncs5k-sysadmin.iso-6.0.1                                       ncs5k-sysadmin-6.0.1.26I
ncs5k-full-x.iso-6.0.1.16I.DT_IMAGE                            ncs5k-xr-6.0.1.16I

NCS5500
Production Packages

External Names                                       Internal Names
ncs5500-eigrp-2.0.0.0-r601.x86_64.rpm-6.0.1          ncs5500-eigrp-2.0.0.0-r601
ncs5500-isis-2.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-isis-2.0.0.0-r601
ncs5500-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1          ncs5500-k9sec-2.0.0.0-r601
ncs5500-m2m-2.0.0.0-r601.x86_64.rpm-6.0.1            ncs5500-m2m-2.0.0.0-r601
ncs5500-mgbl-3.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-mgbl-3.0.0.0-r601
ncs5500-mini-x.iso-6.0.1                             ncs5500-xr-6.0.1
ncs5500-mpls-te-rsvp-2.0.0.0-r601.x86_64.rpm-6.0.1   ncs5500-mpls-te-rsvp-2.0.0.0-r601
ncs5500-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-mpls-2.0.0.0-r601
ncs5500-ospf-1.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-ospf-1.0.0.0-r601
ncs5500-parser-1.0.0.0-r601.x86_64.rpm-6.0.1         ncs5500-parser-1.0.0.0-r601
"""

import re

platforms = ['asr9k', 'ncs1k', 'ncs4k', 'ncs5k', 'ncs5500', 'ncs6k', 'xrv9k']

version_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k":
                # 61117I or 611 or 6.1.1.17I or 6.1.1
                re.compile("(?P<VERSION>(\d+\d+\d+(\d+\w+)?)|(\d+\.\d+\.\d+(\.\d+\w+)?)(?!\.\d)(?!-))"),
                "ncs4k ncs6k":
                # 5.2.4 or 5.2.4.47I
                re.compile("(?P<VERSION>\d+\.\d+\.\d+(\.\d+\w+)?)"),
                }

smu_re = re.compile("(?P<SMU>CSC[a-z]{2}\d{5})")

subversion_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k":
                   re.compile("-(?P<SUBVERSION>\d+\.\d+\.\d+\.\d+)-"),  # 2.0.0.0
                   "ncs4k ncs6k":
                   re.compile("CSC.*(?P<SUBVERSION>\d+\.\d+\.\d+?)"),  # 0.0.4
                   }


# methods of a package class; the enclosing class definition was lost in extraction
def __repr__(self):
    return self.package_name


def __str__(self):
    return self.__repr__()
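To make the naming scheme above concrete, a small, hypothetical usage sketch (not part of the original module) exercising the compiled patterns:

# Hypothetical usage sketch of the patterns defined above.
_version_re = version_dict["asr9k ncs1k ncs5k ncs5500 xrv9k"]
_sub_re = subversion_dict["asr9k ncs1k ncs5k ncs5500 xrv9k"]

name = "asr9k-mgbl-x64-3.0.0.0-r611.x86_64.rpm"
print(_version_re.search(name).group("VERSION"))  # platform release, e.g. "611"
print(_sub_re.search(name).group("SUBVERSION"))   # package subversion: "3.0.0.0"
print(smu_re.search("ncs6k-5.2.5.CSCuy47880.smu").group("SMU"))  # "CSCuy47880"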
[ 2, 38093, 25609, 198, 2, 198, 2, 15069, 357, 66, 8, 1584, 11, 28289, 11998, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, 389, 10431...
1.587887
4,227
# -*- coding: utf-8 -*-
# Copyright Alexander Liu.
# Any distribution of this copy should inform its author. If for commercial
# use, please inform the author for authentication. Apr 2014

import sys
reload(sys)
sys.setdefaultencoding('utf-8')

from lxml import etree
import time
import json
import urllib
import urllib2

# For media posting
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers

# Below are the templates of all the response messages valid for wechat.
# For more information, please visit:
# http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF

global tpl_text
global tpl_image
global tpl_voice
global tpl_video
global tpl_music
global tpl_news

tpl_text = u'''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[]]></Content>
</xml>'''

tpl_image = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<Image>
<MediaId><![CDATA[media_id]]></MediaId>
</Image>
</xml>'''

tpl_voice = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<Voice>
<MediaId><![CDATA[media_id]]></MediaId>
</Voice>
</xml>'''

tpl_video = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<Video>
<MediaId><![CDATA[media_id]]></MediaId>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
</Video>
</xml>'''

tpl_music = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[music]]></MsgType>
<Music>
<Title><![CDATA[TITLE]]></Title>
<Description><![CDATA[DESCRIPTION]]></Description>
<MusicUrl><![CDATA[MUSIC_Url]]></MusicUrl>
<HQMusicUrl><![CDATA[HQ_MUSIC_Url]]></HQMusicUrl>
<ThumbMediaId><![CDATA[media_id]]></ThumbMediaId>
</Music>
</xml>'''

tpl_news = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[news]]></MsgType>
<ArticleCount>2</ArticleCount>
<Articles>
<item>
<Title><![CDATA[title1]]></Title>
<Description><![CDATA[description1]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
<item>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
</Articles>
</xml>'''

# Positive response
json_text = '''{
    "touser":"OPENID",
    "msgtype":"text",
    "text": {
        "content":"Hello World"
    }
}'''

json_image = '''{
    "touser":"OPENID",
    "msgtype":"image",
    "image": {
        "media_id":"MEDIA_ID"
    }
}'''

json_voice = '''{
    "touser":"OPENID",
    "msgtype":"voice",
    "voice": {
        "media_id":"MEDIA_ID"
    }
}'''

json_video = '''{
    "touser":"OPENID",
    "msgtype":"video",
    "video": {
        "media_id":"MEDIA_ID",
        "title":"TITLE",
        "description":"DESCRIPTION"
    }
}'''

json_music = '''{
    "touser":"OPENID",
    "msgtype":"music",
    "music": {
        "title":"MUSIC_TITLE",
        "description":"MUSIC_DESCRIPTION",
        "musicurl":"MUSIC_URL",
        "hqmusicurl":"HQ_MUSIC_URL",
        "thumb_media_id":"THUMB_MEDIA_ID"
    }
}'''

json_news = '''{
    "touser":"OPENID",
    "msgtype":"news",
    "news":{
        "articles": [
            {
                "title":"Happy Day",
                "description":"Is Really A Happy Day",
                "url":"URL",
                "picurl":"PIC_URL"
            },
            {
                "title":"Happy Day",
                "description":"Is Really A Happy Day",
                "url":"URL",
                "picurl":"PIC_URL"
            }
        ]
    }
}'''


def getAPIToken(appid='', appsecret=''):
    '''Get a wechat API token for customer service or other uses.

    If ``appid`` and ``appsecret`` are correct, a token string is returned;
    otherwise None is returned.
    '''
    default_url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&'
    url = default_url + 'appid=' + appid + '&secret=' + appsecret
    try:
        a = urllib2.urlopen(url)
    except Exception as e:
        print e
        return None
    else:
        gotten = a.read()
        a_dict = json.loads(gotten)
        if a_dict.has_key('access_token'):
            return a_dict['access_token']
        else:
            # means wrong appid or secret
            return None


def postMessage2API(token='', messageString=''):
    '''Using the token, post the message to the determined user.

    Returns a Boolean value.
    '''
    url = "https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=" + token
    request = urllib2.Request(url, messageString)
    request.get_method = lambda: 'POST'
    try:
        response = urllib2.urlopen(request)
    except Exception as e:
        print e
        return False
    else:
        j = json.loads(response.read())
        # check if the message was accepted
        if j['errcode'] == 0:
            return True
        else:
            return False
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 284, 10009, 18258, 13, 220, 198, 2, 4377, 1233, 25089, 2737, 286, 428, 4866, 815, 4175, 663, 1772, 13, 1002, 329, 5068, 11, 3387, 4175, 262, 1772, 329, 18239...
2.225588
2,509
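A minimal usage sketch for the row above, with hypothetical appid/appsecret placeholders; getAPIToken, postMessage2API and json_text are the names defined in that snippet, and a real call would substitute the recipient's OPENID into json_text first.

token = getAPIToken(appid='YOUR_APPID', appsecret='YOUR_APPSECRET')
if token is not None:
    # post the text-message template defined above (placeholder OPENID)
    ok = postMessage2API(token=token, messageString=json_text)
    print ok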
from .color_converter import ColorConverter

from .scale_converter import ScaleConverter
[ 6738, 764, 8043, 62, 1102, 332, 353, 1330, 5315, 3103, 332, 353, 198, 198, 6738, 764, 9888, 62, 1102, 332, 353, 1330, 21589, 3103, 332, 353, 198 ]
3.296296
27
from plasTeX import Command, Environment
[ 6738, 458, 292, 49568, 1330, 9455, 11, 9344, 628, 628, 628, 628, 628 ]
3.846154
13
print('\033[0;33;44mTeste\033[m')
print('\033[4;33;44mTeste\033[m')
print('\033[1;35;43mTeste\033[m')
print('\033[7;32;40mTeste\033[m')
print('\033[7;30mTeste\033[m')
print(" - - - Testando os 40 - - -")
print("\033[0;37;40mPreto\033[m")
print("\033[0;30;41mVermelho\033[m")
print("\033[0;30;42mVerde\033[m")
print("\033[0;30;43mAmarelo\033[m")
print("\033[0;30;44mRoxo\033[m")
print("\033[0;30;45mLilás\033[m")
print("\033[0;30;46mTurquesa\033[m")
print("\033[0;30;47mBranco\033[m")
print("\033[0;36;48mFundo Transparente\033[m")
print(" - - - Testando os 30 - - -")
print("\033[0;37;40mTeste\033[m")
print("\033[0;31;40mTeste\033[m")
print("\033[0;32;40mTeste\033[m")
print("\033[0;33;40mTeste\033[m")
print("\033[0;34;40mTeste\033[m")
print("\033[0;35;40mTeste\033[m")
print("\033[0;36;40mTeste\033[m")
print("\033[0;37;40mTeste\033[m")
print("\033[0;38;40mTeste\033[m")
print(" - - - Testando os 1 - - -")
print("\033[0;30;47mTeste\033[m")
print("\033[1;30;47mTexto em Negrito\033[m")
print("\033[2;30;47mTeste\033[m")
print("\033[3;30;47mFonte Itálica\033[m")
print("\033[4;30;47mSublinhado\033[m")
print("\033[5;30;47mTeste\033[m")
print("\033[6;30;47mTeste\033[m")
print("\033[7;30;47mTeste\033[m")
print("\033[7;38;47mTeste\033[m")
[ 4798, 10786, 59, 44427, 58, 15, 26, 2091, 26, 2598, 76, 14402, 68, 59, 44427, 58, 76, 11537, 198, 4798, 10786, 59, 44427, 58, 19, 26, 2091, 26, 2598, 76, 14402, 68, 59, 44427, 58, 76, 11537, 198, 4798, 10786, 59, 44427, 58, 16, ...
1.86036
666
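Every print in the row above builds an ANSI SGR escape sequence of the form \033[<style>;<foreground>;<background>m followed by \033[m to reset. A small helper (an illustrative sketch, not part of the original row) makes the pattern explicit:

def ansi(text, style=0, fg=37, bg=40):
    # style: 0 normal, 1 bold, 4 underline, 7 reverse video
    # fg: 30-37 are foreground colors, bg: 40-47 are background colors
    # '\033[m' at the end resets the terminal attributes
    return '\033[{};{};{}m{}\033[m'.format(style, fg, bg, text)

print(ansi('Teste', style=1, fg=35, bg=43))  # bold magenta on yellow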
"""TurboGears project related information""" version = "2.4.3" description = "Next generation TurboGears" long_description=""" TurboGears brings together a best of breed python tools to create a flexible, full featured, and easy to use web framework. TurboGears 2 provides an integrated and well tested set of tools for everything you need to build dynamic, database driven applications. It provides a full range of tools for front end javascript develeopment, back database development and everything in between: * dynamic javascript powered widgets (ToscaWidgets2) * automatic JSON generation from your controllers * powerful, designer friendly XHTML based templating * object or route based URL dispatching * powerful Object Relational Mappers (SQLAlchemy) The latest development version is available in the `TurboGears Git repositories`_. .. _TurboGears Git repositories: https://github.com/TurboGears """ url="http://www.turbogears.org/" author= "Alessandro Molina, Mark Ramm, Christopher Perkins, Jonathan LaCour, Rick Copland, Alberto Valverde, Michael Pedersen and the TurboGears community" email = "amol@turbogears.org" copyright = """Copyright 2005-2020 Kevin Dangoor, Alberto Valverde, Mark Ramm, Christopher Perkins, Alessandro Molina and contributors""" license = "MIT"
[ 37811, 17483, 2127, 38, 4127, 1628, 3519, 1321, 37811, 198, 9641, 796, 366, 17, 13, 19, 13, 18, 1, 198, 11213, 796, 366, 10019, 5270, 22278, 38, 4127, 1, 198, 6511, 62, 11213, 2625, 15931, 198, 17483, 2127, 38, 4127, 6774, 1978, 257...
3.891892
333
## PRODUCE MEAN CALCULATIONS AND EXPORT AS .NPY
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset

# OPTIONS
runfolder = 15
print('Calculating subgrid-EKE means from run ' + str(runfolder))

## read data
runpath = path+'data/run%04i' % runfolder

skip = 5*365
e = np.load(runpath+'/e_sub.npy')[skip:,:,:]
print('run %i read.' % runfolder)

## create output folder
try:
    os.mkdir(runpath+'/analysis')
except:
    pass

## U,V,H mean
em = e.mean(axis=0)
print('e mean done.')

## STORING
dic = dict()
all_var2export = ['em']

for v in all_var2export:
    exec('dic[v] ='+v)

np.save(runpath+'/analysis/mean_e.npy',dic)
print('Everything stored.')
[ 2235, 4810, 3727, 52, 5222, 11948, 1565, 33290, 34, 6239, 18421, 5357, 7788, 15490, 7054, 764, 22182, 56, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6978, 796, 31051, 11195, 14, 28015, 5439, 413, 263, 14, 29412, 14, 2032, ...
2.526316
323
""" Module to generate solutions for Boggle grids. Andrew Gillis 22 Dec. 2009 """ from __future__ import print_function import os import sys import collections import trie if sys.version < '3': range = xrange
[ 37811, 198, 26796, 284, 7716, 8136, 329, 347, 20258, 50000, 13, 198, 198, 20508, 12981, 271, 2534, 4280, 13, 3717, 198, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 17...
3.338462
65
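The row above only shows the module header, so here is a self-contained sketch of the usual trie-pruned approach (hypothetical names; it uses a nested-dict trie rather than the imported trie module's real API): build a prefix trie from the word list, then depth-first search the grid, abandoning any path that is not a prefix of some word.

END = '$'

def build_trie(words):
    root = {}
    for word in words:
        node = root
        for ch in word:
            node = node.setdefault(ch, {})
        node[END] = True
    return root

def solve(grid, words):
    trie_root = build_trie(words)
    rows, cols = len(grid), len(grid[0])
    found = set()

    def dfs(r, c, node, path, seen):
        ch = grid[r][c]
        if ch not in node:
            return  # prune: no word continues with this prefix
        node = node[ch]
        if END in node:
            found.add(path + ch)
        seen.add((r, c))
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                nr, nc = r + dr, c + dc
                if (0 <= nr < rows and 0 <= nc < cols
                        and (nr, nc) not in seen):
                    dfs(nr, nc, node, path + ch, seen)
        seen.discard((r, c))  # backtrack

    for r in range(rows):
        for c in range(cols):
            dfs(r, c, trie_root, '', set())
    return found

print(solve(['ab', 'cd'], ['bad', 'cab', 'abs']))  # {'bad', 'cab'}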
import logging
import pytest

from ocs_ci.framework.testlib import (
    managed_service_required,
    skipif_ms_consumer,
    tier4,
    tier4a,
)
from ocs_ci.ocs import constants
from ocs_ci.utility import pagerduty

log = logging.getLogger(__name__)
[ 11748, 18931, 198, 11748, 12972, 9288, 198, 198, 6738, 267, 6359, 62, 979, 13, 30604, 13, 9288, 8019, 1330, 357, 198, 220, 220, 220, 5257, 62, 15271, 62, 35827, 11, 198, 220, 220, 220, 14267, 361, 62, 907, 62, 49827, 11, 198, 220, ...
2.628866
97
# -*- coding: utf-8 -*-

import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns
from cmdstanpy import CmdStanModel

#%% load data
data = pd.read_csv("data/overfitting.csv", index_col = 'case_id')
data.columns
data.info()

feature_names = data.columns.str.startswith("var_")
predictors = data[data.columns[feature_names]]
labels = data["Target_Practice"]

ix_training = data.train == 1
training_data = predictors[ix_training]
training_labels = labels[ix_training]

ix_testing = data.train == 0
testing_data = predictors[ix_testing]
testing_labels = labels[ix_testing]

sns.displot(training_data.values.flatten(), bins = "sqrt", kde = True)

pca = prince.PCA(n_components = 2, as_array = False).fit(training_data)
pca.plot_row_coordinates(training_data, color_labels = training_labels)
pca.column_correlations(training_data).plot.scatter(x = 0, y = 1) # weird column name

#%% Roshan Sharma model
mdl_data = { # problem with JSON dump => cast to python native type
    'N': ix_training.sum().tolist(),
    'N2': ix_testing.sum().tolist(),
    'K': feature_names.sum().tolist(),
    'y': training_labels.values.tolist(),
    'X': training_data.values.tolist(),
    'new_X': testing_data.values.tolist(),
}

modelfile = "OverfittingRoshanSharma.stan"
with open(modelfile, "w") as file:
    file.write("""
    data {
        int N; // the number of training observations
        int N2; // the number of test observations
        int K; // the number of features
        int y[N]; // the response
        matrix[N,K] X; // the model matrix
        matrix[N2,K] new_X; // the matrix for the predicted values
    }
    parameters { // regression parameters
        real alpha;
        vector[K] beta;
    }
    transformed parameters {
        vector[N] linpred = alpha + X * beta;
    }
    model {
        alpha ~ cauchy(0, 10); // prior for the intercept following Gelman 2008
        beta ~ student_t(1, 0, 0.03);
        y ~ bernoulli_logit(linpred);
    }
    generated quantities { // y values predicted by the model
        vector[N2] y_pred = alpha + new_X * beta;
    }
    """)

var_name_array = ["alpha"] + [f"beta[{i+1}]" for i in range(mdl_data["K"])]
var_name_combi = ["alpha", "beta"]

sm = CmdStanModel(stan_file = modelfile)

# maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]
plt.plot(optim[var_name_array[1:]].values[0])

# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb.variational_params_pd[var_name_array]
vb.variational_sample[var_name_array]

# Markov chain Monte Carlo
fit = sm.sample(
    data = mdl_data, show_progress = True, chains = 4,
    iter_sampling = 50000, iter_warmup = 10000, thin = 5
)

fit.draws().shape # iterations, chains, parameters
fit.summary().loc[var_name_array] # pandas DataFrame
print(fit.diagnose())

posterior = {k: fit.stan_variable(k) for k in var_name_combi}

az_trace = az.from_cmdstanpy(fit)
az.summary(az_trace).loc[var_name_array] # pandas DataFrame
az.plot_trace(az_trace, var_names = ["alpha"])
az.plot_forest(az_trace, var_names = ["beta"])

sample_pred = fit.stan_variable('y_pred')

# Tim Salimans model: DOES NOT WORK yet
# need to figure out how to marginalize all discrete params
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 201, 198, 11748, 299, 32152, 355, 45941, 11, 19798, 292, 355, 279, 67, 11, 610, 85, 528, 355, 35560, 11, 19716, 11, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, ...
2.542097
1,354
# -*- coding: utf-8 -*-

import json
import threading
import os
import time
import mats
import sys
import requests
import traceback
import re
from util import debug, error
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 33918, 198, 11748, 4704, 278, 198, 11748, 28686, 198, 11748, 640, 198, 11748, 46054, 198, 11748, 25064, 198, 11748, 7007, 198, 11748, 12854, 1891, 198, 11748, ...
3.392157
51
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os

from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline

from luoxia import settings
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 2896, 500, 534, 2378, 31108, 994, 198, 2, 198, 2, 2094, 470, 6044, 284, 751, 534, 11523, 284, 262, 7283, 3620, 62, 47, 4061, 3698, 1268, 1546, 4634, 198, 2, ...
2.906542
107
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""

from __future__ import absolute_import, division, print_function

import argparse
import json
import os

import numpy as np
import tensorflow as tf


def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    # Reshape X to 4-D tensor: [batch_size, width, height, channels]
    # MNIST images are 28x28 pixels, and have one color channel
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])

    # Convolutional Layer #1
    # Computes 32 features using a 5x5 filter with ReLU activation.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 28, 28, 1]
    # Output Tensor Shape: [batch_size, 28, 28, 32]
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu
    )

    # Pooling Layer #1
    # First max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 28, 28, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 32]
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Convolutional Layer #2
    # Computes 64 features using a 5x5 filter.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 14, 14, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 64]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu
    )

    # Pooling Layer #2
    # Second max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 14, 14, 64]
    # Output Tensor Shape: [batch_size, 7, 7, 64]
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Flatten tensor into a batch of vectors
    # Input Tensor Shape: [batch_size, 7, 7, 64]
    # Output Tensor Shape: [batch_size, 7 * 7 * 64]
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])

    # Dense Layer
    # Densely connected layer with 1024 neurons
    # Input Tensor Shape: [batch_size, 7 * 7 * 64]
    # Output Tensor Shape: [batch_size, 1024]
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)

    # Add dropout operation; 0.6 probability that element will be kept
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

    # Logits layer
    # Input Tensor Shape: [batch_size, 1024]
    # Output Tensor Shape: [batch_size, 10]
    logits = tf.layers.dense(inputs=dropout, units=10)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        'classes': tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predictions['classes'])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)


if __name__ == '__main__':
    args, _ = _parse_args()

    train_data, train_labels = _load_training_data(args.train)
    eval_data, eval_labels = _load_testing_data(args.train)

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=args.model_dir)

    # Set up logging for predictions
    # Log the values in the 'Softmax' tensor with label 'probabilities'
    tensors_to_log = {'probabilities': 'softmax_tensor'}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': train_data},
        y=train_labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True
    )

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False
    )

    train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=20000)
    eval_spec = tf.estimator.EvalSpec(eval_input_fn)
    tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)

    if args.current_host == args.hosts[0]:
        mnist_classifier.export_savedmodel(args.sm_model_dir, serving_input_fn)
[ 2, 15069, 12131, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 11074, 921, 198, 2, 743, 407, 779, 428, 2393, ...
2.499564
2,296
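The row above calls _parse_args(), _load_training_data(), _load_testing_data() and serving_input_fn without defining them. A plausible sketch following the usual SageMaker script-mode pattern is below; the SM_* environment variables are the standard SageMaker ones, but the .npy file names and the flat 784-feature serving shape are assumptions.

import argparse
import json
import os

import numpy as np
import tensorflow as tf

def _load_training_data(base_dir):
    # assumed file names; SageMaker mounts the training channel at base_dir
    x_train = np.load(os.path.join(base_dir, 'train_data.npy'))
    y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))
    return x_train, y_train

def _load_testing_data(base_dir):
    x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))
    y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))
    return x_test, y_test

def _parse_args():
    parser = argparse.ArgumentParser()
    # Data, model, and output directories; defaults come from the SM_*
    # environment variables that SageMaker sets inside the container.
    parser.add_argument('--model_dir', type=str)
    parser.add_argument('--sm-model-dir', type=str,
                        default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--train', type=str,
                        default=os.environ.get('SM_CHANNEL_TRAINING'))
    parser.add_argument('--hosts', type=list,
                        default=json.loads(os.environ.get('SM_HOSTS', '[]')))
    parser.add_argument('--current-host', type=str,
                        default=os.environ.get('SM_CURRENT_HOST'))
    return parser.parse_known_args()

def serving_input_fn():
    # assumes inference requests arrive as flat 784-pixel vectors
    inputs = {'x': tf.placeholder(tf.float32, [None, 784])}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)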
#! /usr/bin/env python3

# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import math
import sys

from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener

if __name__ == "__main__":
    main(sys.argv[1:])
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 15069, 12131, 15917, 8363, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, ...
3.413428
283
import asyncio
import unittest

from .helpers import async_test
[ 11748, 30351, 952, 198, 11748, 555, 715, 395, 198, 6738, 764, 16794, 364, 1330, 30351, 62, 9288, 628 ]
3.555556
18
import turtle as t

t.penup()

gotoy = 222

t.speed(0)
t.setup(988,520)
t.goto(494,260)
t.pendown()

for counter in range(7):
    t.setheading(-90)
    rectangle(40,988,'#B22234')
    t.setheading(-90)
    t.forward(80)

t.penup()
t.setheading(0)
t.goto(-494,260)
t.pendown()
rectangle(494,280,'#3C3B6E')
t.goto(-474,245)

for counter in range(4):
    for counter in range(6):
        star(9,5,'white')
        t.setheading(0)
        t.forward(84)
    t.penup()
    t.goto(-434,gotoy)
    gotoy = gotoy - 28
    t.pendown()
    for counter in range(5):
        star(9,5,'white')
        t.setheading(0)
        t.forward(84)
    t.goto(-476,gotoy)
    gotoy = gotoy - 28

for counter in range(6):
    star(9,5,'white')
    t.setheading(0)
    t.forward(84)

t.penup()
t.hideturtle()
[ 11748, 28699, 355, 256, 201, 198, 201, 198, 83, 13, 3617, 929, 3419, 201, 198, 201, 198, 23442, 726, 796, 27795, 201, 198, 201, 198, 83, 13, 12287, 7, 15, 8, 201, 198, 83, 13, 40406, 7, 24, 3459, 11, 31211, 8, 201, 198, 83, 13...
1.774327
483
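The flag script above calls rectangle() and star() without defining them. Hedged sketches consistent with how they are used (the argument order (height, width, colour) and (size, points, colour) is inferred, not confirmed):

import turtle as t

def rectangle(height, width, colour):
    # filled rectangle drawn from the current position and heading
    t.color(colour)
    t.begin_fill()
    for _ in range(2):
        t.forward(height)
        t.left(90)
        t.forward(width)
        t.left(90)
    t.end_fill()

def star(size, points, colour):
    # filled star; turning 144 degrees gives the classic five-pointed shape
    t.color(colour)
    t.begin_fill()
    for _ in range(points):
        t.forward(size)
        t.right(144)
    t.end_fill()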
# Given a linked list, remove consecutive nodes that sum to zero
# https://www.careercup.com/question?id=5717797377146880
from util import *

if __name__ == "__main__":
    s1 = [6, -6, 8, 4, -12, 9, 8, -8]
    s2 = [4, 6 - 10, 8, 9, 10, -19, 10, -18, 20, 25]
    s3 = [2, 3, -5, 10, 10, -5, -5, 20, 5, -5]
    samples = [s1,s2,s3]
    for sample in samples:
        head = create_linked_list(sample)
        print(linked_list_to_list(head))
        result = remove_zero_sum(head)
        print(linked_list_to_list(result))
        print("\n")
[ 2, 11259, 257, 6692, 1351, 11, 4781, 12785, 13760, 326, 21784, 510, 284, 6632, 198, 2, 3740, 1378, 2503, 13, 6651, 2798, 929, 13, 785, 14, 25652, 30, 312, 28, 3553, 1558, 44673, 26514, 1415, 3104, 1795, 198, 198, 6738, 7736, 1330, 1...
2.10728
261
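The util helpers used above are not shown, so here is a self-contained sketch of the standard prefix-sum technique for this problem (the ListNode class and function name are illustrative, not necessarily util's): if the same running sum appears twice, the nodes in between sum to zero and can be spliced out.

class ListNode(object):
    def __init__(self, value):
        self.value = value
        self.next = None

def remove_zero_sum_sketch(head):
    dummy = ListNode(0)
    dummy.next = head
    # pass 1: remember the last node seen for each prefix sum
    prefix, node, seen = 0, dummy, {}
    while node:
        prefix += node.value
        seen[prefix] = node
        node = node.next
    # pass 2: re-link each node past the last node sharing its prefix sum,
    # which skips every zero-sum run in a single traversal
    prefix, node = 0, dummy
    while node:
        prefix += node.value
        node.next = seen[prefix].next
        node = node.next
    return dummy.next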
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from knack.log import get_logger
from knack.prompting import prompt_y_n
from knack.util import CLIError

from azure.mgmt.maps.models import (
    MapsAccountCreateParameters,
    Sku)

ACCOUNT_LOCATION = 'global'

logger = get_logger(__name__)
[ 2, 16529, 1783, 10541, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 198, 2, 16529, 1783, 10541, 198,...
4.526718
131
import sys
import os
from tempfile import TemporaryDirectory
import numpy as np
import tensorflow.compat.v1 as tf
tf.get_logger().setLevel('ERROR') # only show error messages

from recommenders.utils.timer import Timer
from recommenders.utils.constants import SEED
from recommenders.models.deeprec.deeprec_utils import (
    prepare_hparams
)
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab
from recommenders.datasets.download_utils import maybe_download
from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel
# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel
# from recommenders.models.deeprec.models.sequential.caser import CaserModel as SeqModel
# from recommenders.models.deeprec.models.sequential.gru4rec import GRU4RecModel as SeqModel
# from recommenders.models.deeprec.models.sequential.sum import SUMModel as SeqModel
#from recommenders.models.deeprec.models.sequential.nextitnet import NextItNetModel
from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
#from recommenders.models.deeprec.io.nextitnet_iterator import NextItNetIterator

print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))

yaml_file = '/home/jialia/wsdm/src/recommenders/examples/wsdm2022/sli_rec_B.yaml'

RANDOM_SEED = SEED  # Set None for non-deterministic result

# data_path = os.path.join("tests", "resources", "deeprec", "slirec")
# data_path = '/home/jialia/wsdm/seq_datasets/B_full_feature_v2'
data_path = sys.argv[1]
print(os.path.abspath(data_path))  ## the path where I enter the cmd

# for test
train_file = os.path.join(data_path, r'train_instances.txt')
valid_file = os.path.join(data_path, r'valid_instances.txt')
test_file = os.path.join(data_path, r'valid.tsv')
pred_file = os.path.join(data_path, r'inter_test.tsv')
final_pred_file = os.path.join(data_path, r'final_test.tsv')
user_vocab = os.path.join(data_path, r'user_vocab.pkl')
item_vocab = os.path.join(data_path, r'item_vocab.pkl')
cate_vocab = os.path.join(data_path, r'category_vocab.pkl')
output_file = os.path.join(data_path, r'inter_test_output.txt')
submit_file = os.path.join(data_path, r'final_test_output.txt')

train_num_ngs = 9  # number of negative instances with a positive instance for training
valid_num_ngs = 9  # number of negative instances with a positive instance for validation
test_num_ngs = 9   # number of negative instances with a positive instance for testing

_create_vocab(
    [train_file, valid_file],
    user_vocab,
    item_vocab,
    cate_vocab
)
### NOTE:
### remember to use `_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)` to generate the user_vocab, item_vocab and cate_vocab files, if you are using your own dataset rather than using our demo Amazon dataset.

hparams = prepare_hparams(yaml_file,
                          # user_dropout=False,
                          embed_l2=0.,
                          layer_l2=0.,
                          enable_BN=True,  ##-- True
                          learning_rate=0.001,  # set to 0.01 if batch normalization is disabled, else 0.001
                          epochs=100000,
                          EARLY_STOP=40000,
                          batch_size=400,
                          show_step=5000,
                          MODEL_DIR=os.path.join(data_path, "model/"),
                          SUMMARIES_DIR=os.path.join(data_path, "summary/"),
                          user_vocab=user_vocab,
                          item_vocab=item_vocab,
                          cate_vocab=cate_vocab,
                          need_sample=False,
                          train_num_ngs=train_num_ngs,  # provides the number of negative instances for each positive instance for loss computation.
                          loss='log_loss',  #'log_loss', 'softmax'
                          max_seq_length=50,
                          cont_feat_len=85,
                          use_cont_feat=False,
                          init_item_emb=False,
                          shuffle=True
                          )
print(hparams.values)

input_creator = SequentialIterator
model = SeqModel(hparams, input_creator, seed=RANDOM_SEED)
# model.load_model(os.path.join(data_path, "model_20220118_20k_0.8923", 'step_20000'))

with Timer() as train_time:
    model = model.fit(train_file, valid_file, valid_num_ngs=9, eval_metric='auc')

print('Time cost for training is {0:.2f} mins'.format(train_time.interval/60.0))

### model = model.fit(test_file, test_file, valid_num_ngs=9, eval_metric='auc')  ##-- quick test

model.load_model(os.path.join(data_path, "model", 'best_model'))
res_syn = model.run_eval(test_file, num_ngs=9)
print(res_syn)

model.predict(pred_file, output_file)
model.predict(final_pred_file, submit_file)

# print('Job finished. B, continue training = 20k, seq=50')
# print('Job finished. B_v2, epoch=50k, seq=100')

## ASVD: 0.867497
## GRU: 0.877529
## SLi-Rec: 0.892736
## B_v4: 0.8937

print("Job:B_full_feature_v2, with BN, no cont feat, seq=50, shuffle=True")

## B_full_feature_v2 no cont_feat, with BN
##5k: 0.8778
##10k: 0.8827
##20k: 0.8848
##25k: 0.8824
##35k: 0.8878
##40k: 0.8903
##45k: 0.8876
##50k: 0.8925
##55k: 0.8903
##60k: 0.8894
##65k: 0.8904
##70k: 0.8814
##75k: 0.8896
##80k: 0.8871
##85k: 0.8920

## with shuffle:
##5k: 0.8793
##10k: 0.8884
##15k: 0.8898
##20k: 0.8923
##25k: 0.8908
##30k: 0.8895
##35k: 0.8888
##40k: 0.8913
##45k: 0.8909
##50k: 0.8876
##65k: 0.8881
[ 11748, 25064, 198, 11748, 28686, 220, 198, 6738, 20218, 7753, 1330, 46042, 43055, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 11192, 273, 11125, 13, 5589, 265, 13, 85, 16, 355, 48700, 198, 27110, 13, 1136, 62, 6404, 1362, 22446, 261...
2.242033
2,479
#!/usr/bin/env python

"""
ctypesgen.ctypedescs contains classes to represent a C type. All of these
classes are subclasses of CtypesType.

Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
completely independent of the parser module.

The most important method of CtypesType and its subclasses is the py_string
method. str(ctype) returns a string which, when evaluated in the wrapper
at runtime, results in a ctypes type object.

For example, a CtypesType representing an array of four integers could be
created using:

>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)

str(ctype) would evaluate to "c_int * 4".
"""

import warnings

__docformat__ = "restructuredtext"

ctypes_type_map = {
    # typename        signed  longs
    ("void", True, 0): "None",
    ("int", True, 0): "c_int",
    ("int", False, 0): "c_uint",
    ("int", True, 1): "c_long",
    ("int", False, 1): "c_ulong",
    ("char", True, 0): "c_char",
    ("char", False, 0): "c_ubyte",
    ("short", True, 0): "c_short",
    ("short", False, 0): "c_ushort",
    ("float", True, 0): "c_float",
    ("double", True, 0): "c_double",
    ("double", True, 1): "c_longdouble",
    ("int8_t", True, 0): "c_int8",
    ("__int8", True, 0): "c_int8",
    ("int16_t", True, 0): "c_int16",
    ("__int16", True, 0): "c_int16",
    ("int32_t", True, 0): "c_int32",
    ("__int32", True, 0): "c_int32",
    ("int64_t", True, 0): "c_int64",
    ("__int64", True, 0): "c_int64",
    ("uint8_t", True, 0): "c_uint8",
    ("uint16_t", True, 0): "c_uint16",
    ("uint32_t", True, 0): "c_uint32",
    ("uint64_t", True, 0): "c_uint64",
    ("_Bool", True, 0): "c_bool",
}

ctypes_type_map_python_builtin = {
    ("int", True, 2): "c_longlong",
    ("int", False, 2): "c_ulonglong",
    ("size_t", True, 0): "c_size_t",
    ("apr_int64_t", True, 0): "c_int64",
    ("off64_t", True, 0): "c_int64",
    ("apr_uint64_t", True, 0): "c_uint64",
    ("wchar_t", True, 0): "c_wchar",
    ("ptrdiff_t", True, 0): "c_ptrdiff_t",  # Requires definition in preamble
    ("ssize_t", True, 0): "c_ptrdiff_t",  # Requires definition in preamble
    ("va_list", True, 0): "c_void_p",
}

# This protocol is used for walking type trees.

# Remove one level of indirection from function pointer; needed for typedefs
# and function parameters.

last_tagnum = 0
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 310, 9497, 5235, 13, 310, 4464, 276, 3798, 82, 4909, 6097, 284, 2380, 257, 327, 2099, 13, 1439, 286, 606, 198, 37724, 389, 850, 37724, 286, 327, 19199, 6030, 13, 198...
2.406953
978
from random import randint

import pyxel

from constants import Screen
import cursors


class ReachCircle(Circle):

    def respawn(self):
        self._x = randint(self._r, Screen.width - self._r)
        self._y = randint(self._r, Screen.height - self._r)
        self._r = randint(self.min_r, min(Screen.width, Screen.height) // 2) - 4

    def draw(self):
        pyxel.circb(self._x, self._y, self._r, self._col)
[ 6738, 4738, 1330, 43720, 600, 198, 11748, 12972, 87, 417, 198, 198, 6738, 38491, 1330, 15216, 198, 11748, 13882, 669, 628, 628, 198, 198, 4871, 25146, 31560, 293, 7, 31560, 293, 2599, 628, 220, 220, 220, 825, 42929, 7, 944, 2599, 198,...
2.44186
172
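ReachCircle subclasses a Circle that is not shown in the row above. A minimal sketch of what the base class plausibly provides, inferred from the attributes the subclass uses (names are assumptions, not the project's actual code):

class Circle(object):
    def __init__(self, x=0, y=0, r=10, col=7, min_r=10):
        # attribute names mirror those referenced by ReachCircle
        self._x = x
        self._y = y
        self._r = r
        self._col = col
        self.min_r = min_r

    def draw(self):
        # concrete shapes are expected to override this
        raise NotImplementedError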
from pulzarutils.utils import Utils
from pulzarutils.utils import Constants
from pulzarutils.messenger import Messenger
from pulzarcore.core_db import DB
[ 6738, 17472, 41046, 26791, 13, 26791, 1330, 7273, 4487, 198, 6738, 17472, 41046, 26791, 13, 26791, 1330, 4757, 1187, 198, 6738, 17472, 41046, 26791, 13, 37348, 6540, 1330, 24306, 198, 6738, 17472, 89, 5605, 382, 13, 7295, 62, 9945, 1330, ...
3.690476
42
#!/usr/bin/env python2
from __future__ import print_function

import atexit
import logging
import sys

import ssg_test_suite.oscap
import ssg_test_suite.virt
from ssg_test_suite.rule import get_viable_profiles
from ssg_test_suite.virt import SnapshotStack

logging.getLogger(__name__).addHandler(logging.NullHandler())


def perform_profile_check(options):
    """Perform profile check.

    Iterate over profiles in datastream and perform scanning of unaltered VM
    using every profile according to input. Also perform remediation run.

    Return value is not defined; textual output and generated reports are
    the result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)

    has_worked = False
    profiles = get_viable_profiles(options.target,
                                   options.datastream,
                                   options.benchmark_id)
    if len(profiles) > 1:
        snapshot_stack.create('profile')
    for profile in profiles:
        logging.info("Evaluation of profile {0}.".format(profile))
        has_worked = True
        runner = options.remediate_using
        ssg_test_suite.oscap.run_profile(domain_ip,
                                         profile,
                                         'initial',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        ssg_test_suite.oscap.run_profile(domain_ip,
                                         profile,
                                         'remediation',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        ssg_test_suite.oscap.run_profile(domain_ip,
                                         profile,
                                         'final',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        snapshot_stack.revert(delete=False)
    if not has_worked:
        logging.error("Nothing has been tested!")
    snapshot_stack.delete()
    # depending on number of profiles we have either "origin" snapshot
    # still to be reverted (multiple profiles) or we are reverted
    # completely (only one profile was run)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 17, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 379, 37023, 198, 11748, 18931, 198, 11748, 25064, 198, 198, 11748, 37786, 70, 62, 9288, 62, 2385, 578, 13, 418, 1112...
1.904336
1,453
import decimal
import operator
import warnings

from wtforms import fields, widgets
[ 11748, 32465, 198, 11748, 10088, 198, 11748, 14601, 198, 198, 6738, 266, 83, 23914, 1330, 7032, 11, 40803, 628, 198 ]
4.3
20
import mtrain
import numpy as np
import pandas as pd
import random

def simulate_games(num_players=4, domino_size=12, num_games=250,
                   collect_data=True, debug=False,
                   players=["Random", "Greedy", "Probability", "Neural"],
                   file_name="PlayData/data4_12_250"):
    """ Runs the mexican train game repeatedly with different combinations of
    players to generate data to be used in testing and training the neural net.

    If collect_data is on, the play data is retrieved and stored into a .xlsx
    file for later use. The format for the file name for this is as follows:
        PlayData/data + num_players + _ + domino_size + _ + num_games + .xlsx
    This spreadsheet is to be used when training the neural net.

    This script has no required parameters, and will run the game with the
    default params if unchanged.

    If collect_data is on, the players are selected randomly each game from:
        ["Random", "Greedy", "Probability"]
    If collect_data is off, the players are selected in order from the
    parameter players.

    When collect_data is off: len(players) must equal num_players

    Returns a tuple of lists: (score_averages, win_percentage) corresponding
    to the players
    """
    #Sets column names for building dataframe later on
    column_names = ["round_number", "turn_number", "player_number", "play",
                    "t_num", "hand", "unknown", "potential_plays", "points"]

    #Depending on mode of use, sets players and checks validity of player values
    modes = []
    if collect_data:
        modes = ["Random", "Greedy", "Probability"]
    else:
        if not len(players) == num_players:
            raise RuntimeError("len(players) must equal num_players when collect_data is off")
        modes = players

    #Simulates num_games of games
    scores = np.ndarray((num_players, num_games))
    wins = np.ndarray((num_players, num_games))
    full_data = pd.DataFrame(columns=column_names)
    current_index = 0
    for game_num in range(0, num_games):
        #Randomize players if in collect_data mode
        game_modes = []
        if collect_data:
            for select in range(0, num_players):
                game_modes.append(random.choice(modes))
        else:
            game_modes = modes

        #Run game with parameters
        results = mtrain.mexicantrain(num_players, domino_size, debug=debug,
                                      modes=game_modes,
                                      data_collection=collect_data,
                                      data_index=current_index,
                                      file_name=file_name)

        #If collecting data, data is stored into the dataframe
        if collect_data:
            current_index = results[2].index[-1] + 1
            full_data = pd.concat([full_data, results[2]])

        #Scores and wins are recorded into their respective arrays
        for player_num in range(0, num_players):
            scores[player_num, game_num] = results[0][player_num]
            if results[1] == player_num:
                wins[player_num, game_num] = 1
            else:
                wins[player_num, game_num] = 0

    #Calculates performance of the players
    score_averages = np.ndarray((num_players))
    win_percentage = np.ndarray((num_players))
    for player_num in range(0, num_players):
        score_averages[player_num] = np.mean(scores[player_num, :])
        win_percentage[player_num] = np.mean(wins[player_num, :])

    #If collecting data, prints data to a .xlsx file
    if collect_data:
        filename = "PlayData/data" + str(num_players) + "_" + str(domino_size) \
                   + "_" + str(num_games) + ".xlsx"
        writer = pd.ExcelWriter(filename)
        full_data.to_excel(writer, "Sheet1")
        writer.save()

    #Prints results and returns them as well
    if debug: print(score_averages)
    if debug: print(win_percentage)
    return score_averages, win_percentage
[ 11748, 285, 27432, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 4738, 198, 198, 4299, 29308, 62, 19966, 7, 22510, 62, 32399, 28, 19, 11, 2401, 2879, 62, 7857, 28, 1065, 11, 997, 62, 19966, 28...
2.443963
1,615
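A usage sketch for the documented entry point above, with fixed (non-random) players and data collection off; the player list avoids the "Neural" mode since that presumably needs a trained net, which is an assumption here.

averages, win_rates = simulate_games(
    num_players=4,
    domino_size=12,
    num_games=10,
    collect_data=False,
    players=["Random", "Greedy", "Probability", "Random"],
)
print(averages, win_rates)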
__all__ = ("DottedMarkupLanguageException", "DecodeError")
[ 834, 439, 834, 796, 5855, 35, 8426, 9704, 929, 32065, 16922, 1600, 366, 10707, 1098, 12331, 4943, 628, 198 ]
3.210526
19
##############################################################################
#
# Below code is inspired on
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/pascal_voc.py
# --------------------------------------------------------
# Detectron2
# Licensed under the Apache 2.0 license.
# --------------------------------------------------------

from fvcore.common.file_io import PathManager
import os
import numpy as np
import xml.etree.ElementTree as ET

from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog

__all__ = ["register_licenseplates_voc"]

CLASS_NAMES = [
    "license_plate",
]


def load_voc_instances(dirname: str, split: str):
    """
    Load licenseplates VOC detection annotations to Detectron2 format.

    Args:
        dirname: Contain "annotations", "images"
        split (str): one of "train", "test"
    """
    with PathManager.open(os.path.join(dirname, split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=np.str)

    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(dirname, "annotations", fileid + ".xml")
        jpeg_file = os.path.join(dirname, "images", fileid + ".jpg")

        tree = ET.parse(anno_file)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }

        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text)
                    for x in ["xmin", "ymin", "xmax", "ymax"]]

            instances.append(
                {"category_id": CLASS_NAMES.index(cls),
                 "bbox": bbox,
                 "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts


if __name__ == "__main__":
    import random
    import cv2
    from detectron2.utils.visualizer import Visualizer
    import argparse

    # Parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("--split", default="train")
    ap.add_argument("--samples", type=int, default=10)
    ap.add_argument("--scale", type=float, default=1.0)
    args = ap.parse_args()

    dataset_name = f"licenseplates_{args.split}"
    register_licenseplates_voc(dataset_name,
                               "datasets/licenseplates",
                               args.split)

    dataset_dicts = DatasetCatalog.get(dataset_name)
    for d in random.sample(dataset_dicts, args.samples):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1],
                                metadata=MetadataCatalog.get(dataset_name),
                                scale=args.scale)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imshow(dataset_name, vis.get_image()[:, :, ::-1])

        # Exit? Press ESC
        if cv2.waitKey(0) & 0xFF == 27:
            break

    cv2.destroyAllWindows()
[ 29113, 29113, 7804, 4242, 2235, 198, 2, 198, 2, 10383, 2438, 318, 7867, 319, 198, 2, 3740, 1378, 12567, 13, 785, 14, 19024, 34033, 14, 15255, 478, 1313, 17, 14, 2436, 672, 14, 9866, 14, 15255, 478, 1313, 17, 14, 7890, 14, 19608, 2...
2.359937
1,278
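The row above exports register_licenseplates_voc in __all__ and calls it under __main__, but its definition is missing from the cell. A sketch following detectron2's usual registration pattern (DatasetCatalog.register and MetadataCatalog.get are the library's real API; the body is an assumption about what the missing function did):

def register_licenseplates_voc(name, dirname, split):
    # the lambda defers loading until the dataset is actually requested
    DatasetCatalog.register(name,
                            lambda: load_voc_instances(dirname, split))
    MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
                                  dirname=dirname,
                                  split=split)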
import glob
import time
import random


filelist = glob.glob('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')
random.shuffle(filelist)

begin = time.time()
for i, f in enumerate(filelist):
    if i == 10000:
        break
    with open(f, "rb") as fin:
        result = fin.read()
end = time.time()

print("%.1f images/s" % (10000 / (end - begin)))
[ 11748, 15095, 198, 11748, 640, 198, 11748, 4738, 628, 198, 7753, 4868, 796, 15095, 13, 4743, 672, 10786, 14, 76, 429, 14, 38878, 260, 14, 6607, 88, 2797, 5488, 16, 14, 19608, 292, 1039, 14, 320, 11286, 316, 14, 27432, 15211, 15211, ...
2.424658
146
# Copyright (C) 2015-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0

import dolfin
from . import register_boundary_condition, BoundaryConditionCreator
from ocellaris.utils import (
    CodedExpression,
    OcellarisCppExpression,
    OcellarisError,
    verify_field_variable_definition,
)
[ 2, 15069, 357, 34, 8, 1853, 12, 23344, 309, 579, 375, 6379, 316, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 198, 11748, 288, 4024, 259, 198, 6738, 764, 1330, 7881, 62, 7784, 560, 62, 31448, 11...
2.859813
107
import unittest
from count_split_inversions import count_inversions


if __name__ == '__main__':
    unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 954, 62, 35312, 62, 259, 47178, 1330, 954, 62, 259, 47178, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 555, 715, 395, 13, 12417, 3419, 198 ]
2.925
40
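The row above imports count_inversions but defines no tests, so unittest.main() finds nothing to run. A hedged example of what a test case might look like, assuming count_inversions takes a list and returns just the inversion count (its real return shape is not shown):

class TestCountInversions(unittest.TestCase):
    def test_sorted_input_has_no_inversions(self):
        self.assertEqual(count_inversions([1, 2, 3, 4]), 0)

    def test_reversed_input_has_maximal_inversions(self):
        # a strictly decreasing list of length n has n*(n-1)/2 inversions
        self.assertEqual(count_inversions([4, 3, 2, 1]), 6)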
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pytest
import tempfile
import os
from unittest import TestCase

import numpy as np
import tensorflow as tf


if __name__ == '__main__':
    pytest.main([__file__])
[ 2, 198, 2, 15069, 1584, 383, 4403, 19260, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13...
3.546729
214
import time
import krpc

conn = krpc.connect(name='Sub-orbital flight')

vessel = conn.space_center.active_vessel

vessel.auto_pilot.target_pitch_and_heading(90, 90)
vessel.auto_pilot.engage()
vessel.control.throttle = 1
time.sleep(1)

print('Launch!')
vessel.control.activate_next_stage()

fuel_amount = conn.get_call(vessel.resources.amount, 'SolidFuel')
expr = conn.krpc.Expression.less_than(
    conn.krpc.Expression.call(fuel_amount),
    conn.krpc.Expression.constant_float(0.1))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

print('Booster separation')
vessel.control.activate_next_stage()

mean_altitude = conn.get_call(getattr, vessel.flight(), 'mean_altitude')
expr = conn.krpc.Expression.greater_than(
    conn.krpc.Expression.call(mean_altitude),
    conn.krpc.Expression.constant_double(10000))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

print('Gravity turn')
vessel.auto_pilot.target_pitch_and_heading(60, 90)

apoapsis_altitude = conn.get_call(getattr, vessel.orbit, 'apoapsis_altitude')
expr = conn.krpc.Expression.greater_than(
    conn.krpc.Expression.call(apoapsis_altitude),
    conn.krpc.Expression.constant_double(100000))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

print('Launch stage separation')
vessel.control.throttle = 0
time.sleep(1)
vessel.control.activate_next_stage()
vessel.auto_pilot.disengage()

srf_altitude = conn.get_call(getattr, vessel.flight(), 'surface_altitude')
expr = conn.krpc.Expression.less_than(
    conn.krpc.Expression.call(srf_altitude),
    conn.krpc.Expression.constant_double(1000))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

vessel.control.activate_next_stage()

while vessel.flight(vessel.orbit.body.reference_frame).vertical_speed < -0.1:
    print('Altitude = %.1f meters' % vessel.flight().surface_altitude)
    time.sleep(1)
print('Landed!')
[ 11748, 640, 198, 11748, 479, 81, 14751, 198, 37043, 796, 479, 81, 14751, 13, 8443, 7, 3672, 11639, 7004, 12, 27688, 1287, 5474, 11537, 198, 198, 1158, 741, 796, 48260, 13, 13200, 62, 16159, 13, 5275, 62, 1158, 741, 198, 198, 1158, 7...
2.632877
730
#!/usr/bin/env python3

import numpy as np

if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
2.342857
35
from distutils.version import LooseVersion
from itertools import product

import numpy as np
import pandas as pd

from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team

from .team import get_event_team_by_name
from .submission import get_bagged_scores
from .submission import get_scores
from .submission import get_submission_max_ram
from .submission import get_time

width = -1 if LooseVersion(pd.__version__) < LooseVersion("1.0.0") else None
pd.set_option('display.max_colwidth', width)


def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
                         with_links=True):
    """Format the leaderboard.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submissions : list of :class:`ramp_database.model.Submission`
        The submission to report in the leaderboard.
    leaderboard_type : {'public', 'private'}
        The type of leaderboard to build.
    event_name : str
        The name of the event.
    with_links : bool
        Whether or not the submission name should be clickable.

    Returns
    -------
    leaderboard : dataframe
        The leaderboard in a dataframe format.
    """
    record_score = []
    event = session.query(Event).filter_by(name=event_name).one()
    map_score_precision = {score_type.name: score_type.precision
                           for score_type in event.score_types}
    for sub in submissions:
        # take only max n bag
        df_scores_bag = get_bagged_scores(session, sub.id)
        highest_level = df_scores_bag.index.get_level_values('n_bag').max()
        df_scores_bag = df_scores_bag.loc[(slice(None), highest_level), :]
        df_scores_bag.index = df_scores_bag.index.droplevel('n_bag')
        df_scores_bag = df_scores_bag.round(map_score_precision)

        df_scores = get_scores(session, sub.id)
        df_scores = df_scores.round(map_score_precision)

        df_time = get_time(session, sub.id)
        df_time = df_time.stack().to_frame()
        df_time.index = df_time.index.set_names(['fold', 'step'])
        df_time = df_time.rename(columns={0: 'time'})
        df_time = df_time.sum(axis=0, level="step").T

        df_scores_mean = df_scores.groupby('step').mean()
        df_scores_std = df_scores.groupby('step').std()

        # select only the validation and testing steps and rename them to
        # public and private
        map_renaming = {'valid': 'public', 'test': 'private'}
        df_scores_mean = (df_scores_mean.loc[list(map_renaming.keys())]
                                        .rename(index=map_renaming)
                                        .stack().to_frame().T)
        df_scores_std = (df_scores_std.loc[list(map_renaming.keys())]
                                      .rename(index=map_renaming)
                                      .stack().to_frame().T)
        df_scores_bag = (df_scores_bag.rename(index=map_renaming)
                                      .stack().to_frame().T)

        df = pd.concat([df_scores_bag, df_scores_mean, df_scores_std], axis=1,
                       keys=['bag', 'mean', 'std'])
        df.columns = df.columns.set_names(['stat', 'set', 'score'])

        # change the multi-index into a stacked index
        df.columns = df.columns.map(lambda x: " ".join(x))

        # add the aggregated time information
        df_time.index = df.index
        df_time = df_time.rename(
            columns={'train': 'train time [s]',
                     'valid': 'validation time [s]',
                     'test': 'test time [s]'}
        )
        df = pd.concat([df, df_time], axis=1)

        if leaderboard_type == 'private':
            df['submission ID'] = sub.basename.replace('submission_', '')
        df['team'] = sub.team.name
        df['submission'] = sub.name_with_link if with_links else sub.name
        df['contributivity'] = int(round(100 * sub.contributivity))
        df['historical contributivity'] = int(round(
            100 * sub.historical_contributivity))
        df['max RAM [MB]'] = get_submission_max_ram(session, sub.id)
        df['submitted at (UTC)'] = pd.Timestamp(sub.submission_timestamp)
        record_score.append(df)

    # stack all the records
    df = pd.concat(record_score, axis=0, ignore_index=True, sort=False)

    # keep only second precision for the time stamp
    df['submitted at (UTC)'] = df['submitted at (UTC)'].astype('datetime64[s]')

    # reordered the column
    stats_order = (['bag', 'mean', 'std'] if leaderboard_type == 'private'
                   else ['bag'])
    dataset_order = (['public', 'private'] if leaderboard_type == 'private'
                     else ['public'])
    score_order = ([event.official_score_name] +
                   [score_type.name for score_type in event.score_types
                    if score_type.name != event.official_score_name])
    score_list = [
        '{} {} {}'.format(stat, dataset, score)
        for dataset, score, stat in product(dataset_order,
                                            score_order,
                                            stats_order)
    ]
    # Only display train and validation time for the public leaderboard
    time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
                 if leaderboard_type == 'private'
                 else ['train time [s]', 'validation time [s]'])
    col_ordered = (
        ['team', 'submission'] +
        score_list +
        ['contributivity', 'historical contributivity'] +
        time_list +
        ['max RAM [MB]', 'submitted at (UTC)']
    )
    if leaderboard_type == "private":
        col_ordered = ["submission ID"] + col_ordered
    df = df[col_ordered]

    # check if the contributivity columns are null
    contrib_columns = ['contributivity', 'historical contributivity']
    if (df[contrib_columns] == 0).all(axis=0).all():
        df = df.drop(columns=contrib_columns)

    df = df.sort_values(
        "bag {} {}".format(leaderboard_type, event.official_score_name),
        ascending=event.get_official_score_type(session).is_lower_the_better
    )

    # rename the column name for the public leaderboard
    if leaderboard_type == 'public':
        df = df.rename(columns={
            key: value for key, value in zip(score_list, score_order)
        })
    return df


def _compute_competition_leaderboard(session, submissions, leaderboard_type,
                                     event_name):
    """Format the competition leaderboard.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submissions : list of :class:`ramp_database.model.Submission`
        The submission to report in the leaderboard.
    leaderboard_type : {'public', 'private'}
        The type of leaderboard to build.
    event_name : str
        The name of the event.

    Returns
    -------
    competition_leaderboard : dataframe
        The competition leaderboard in a dataframe format.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    score_type = event.get_official_score_type(session)
    score_name = event.official_score_name

    private_leaderboard = _compute_leaderboard(session, submissions,
                                               'private', event_name,
                                               with_links=False)

    time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
                 if leaderboard_type == 'private'
                 else ['train time [s]', 'validation time [s]'])

    col_selected_private = (['team', 'submission'] +
                            ['bag private ' + score_name,
                             'bag public ' + score_name] +
                            time_list +
                            ['submitted at (UTC)'])
    leaderboard_df = private_leaderboard[col_selected_private]
    leaderboard_df = leaderboard_df.rename(
        columns={'bag private ' + score_name: 'private ' + score_name,
                 'bag public ' + score_name: 'public ' + score_name}
    )

    # select best submission for each team
    best_df = (leaderboard_df.groupby('team').min()
               if score_type.is_lower_the_better
               else leaderboard_df.groupby('team').max())
    best_df = best_df[['public ' + score_name]].reset_index()
    best_df['best'] = True

    # merge to get a best indicator column then select best
    leaderboard_df = pd.merge(
        leaderboard_df, best_df, how='left',
        left_on=['team', 'public ' + score_name],
        right_on=['team', 'public ' + score_name]
    )
    leaderboard_df = leaderboard_df.fillna(False)
    leaderboard_df = leaderboard_df[leaderboard_df['best']]
    leaderboard_df = leaderboard_df.drop(columns='best')

    # dealing with ties: we need the lowest timestamp
    best_df = leaderboard_df.groupby('team').min()
    best_df = best_df[['submitted at (UTC)']].reset_index()
    best_df['best'] = True
    leaderboard_df = pd.merge(
        leaderboard_df, best_df, how='left',
        left_on=['team', 'submitted at (UTC)'],
        right_on=['team', 'submitted at (UTC)'])
    leaderboard_df = leaderboard_df.fillna(False)
    leaderboard_df = leaderboard_df[leaderboard_df['best']]
    leaderboard_df = leaderboard_df.drop(columns='best')

    # sort by public score then by submission timestamp, compute rank
    leaderboard_df = leaderboard_df.sort_values(
        by=['public ' + score_name, 'submitted at (UTC)'],
        ascending=[score_type.is_lower_the_better, True])
    leaderboard_df['public rank'] = np.arange(len(leaderboard_df)) + 1

    # sort by private score then by submission timestamp, compute rank
    leaderboard_df = leaderboard_df.sort_values(
        by=['private ' + score_name, 'submitted at (UTC)'],
        ascending=[score_type.is_lower_the_better, True])
    leaderboard_df['private rank'] = np.arange(len(leaderboard_df)) + 1

    leaderboard_df['move'] = \
        leaderboard_df['public rank'] - leaderboard_df['private rank']
    leaderboard_df['move'] = [
        '{:+d}'.format(m) if m != 0 else '-' for m in leaderboard_df['move']]

    col_selected = (
        [leaderboard_type + ' rank', 'team', 'submission',
         leaderboard_type + ' ' + score_name] +
        time_list +
        ['submitted at (UTC)']
    )
    if leaderboard_type == 'private':
        col_selected.insert(1, 'move')

    df = leaderboard_df[col_selected]
    df = df.rename(columns={
        leaderboard_type + ' ' + score_name: score_name,
        leaderboard_type + ' rank': 'rank'
    })
    df = df.sort_values(by='rank')
    return df


def get_leaderboard(session, leaderboard_type, event_name, user_name=None,
                    with_links=True):
    """Get a leaderboard.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    leaderboard_type : {'public', 'private', 'failed', 'new', \
            'public competition', 'private competition'}
        The type of leaderboard to generate.
    event_name : str
        The event name.
    user_name : None or str, default is None
        The user name. If None, scores from all users will be queried. This
        parameter is discarded when requesting the competition leaderboard.
    with_links : bool, default is True
        Whether or not the submission name should be clickable.

    Returns
    -------
    leaderboard : str
        The leaderboard in HTML format.
    """
    q = (session.query(Submission)
                .filter(Event.id == EventTeam.event_id)
                .filter(Team.id == EventTeam.team_id)
                .filter(EventTeam.id == Submission.event_team_id)
                .filter(Event.name == event_name))
    if user_name is not None:
        q = q.filter(Team.name == user_name)
    submissions = q.all()

    submission_filter = {'public': 'is_public_leaderboard',
                         'private': 'is_private_leaderboard',
                         'failed': 'is_error',
                         'new': 'is_new',
                         'public competition': 'is_in_competition',
                         'private competition': 'is_in_competition'}

    submissions = [sub for sub in submissions
                   if (getattr(sub, submission_filter[leaderboard_type]) and
                       sub.is_not_sandbox)]

    if not submissions:
        return None

    if leaderboard_type in ['public', 'private']:
        df = _compute_leaderboard(
            session, submissions, leaderboard_type, event_name,
            with_links=with_links
        )
    elif leaderboard_type in ['new', 'failed']:
        if leaderboard_type == 'new':
            columns = ['team', 'submission', 'submitted at (UTC)', 'state']
        else:
            columns = ['team', 'submission', 'submitted at (UTC)', 'error']

        # we rely on the zip function to ignore the submission state if the
        # error column was not appended
        data = [{
            column: value for column, value in zip(
                columns,
                [sub.event_team.team.name,
                 sub.name_with_link,
                 pd.Timestamp(sub.submission_timestamp),
                 (sub.state_with_link if leaderboard_type == 'failed'
                  else sub.state)])
        } for sub in submissions]
        df = pd.DataFrame(data, columns=columns)
    else:
        # make some extra filtering
        submissions = [sub for sub in submissions
                       if sub.is_public_leaderboard]
        if not submissions:
            return None
        competition_type = ('public' if 'public' in leaderboard_type
                            else 'private')
        df = _compute_competition_leaderboard(
            session, submissions, competition_type, event_name
        )

    df_html = df.to_html(escape=False, index=False, max_cols=None,
                         max_rows=None, justify='left')
    df_html = '<thead> {} </tbody>'.format(
        df_html.split('<thead>')[1].split('</tbody>')[0]
    )
    return df_html


def update_leaderboards(session, event_name, new_only=False):
    """Update the leaderboards for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    new_only : bool, default is False
        Whether or not to update the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    if not new_only:
        event.private_leaderboard_html = get_leaderboard(
            session, 'private', event_name
        )
        event.public_leaderboard_html_with_links = get_leaderboard(
            session, 'public', event_name
        )
        event.public_leaderboard_html_no_links = get_leaderboard(
            session, 'public', event_name, with_links=False
        )
        event.failed_leaderboard_html = get_leaderboard(
            session, 'failed', event_name
        )
        event.public_competition_leaderboard_html = get_leaderboard(
            session, 'public competition', event_name
        )
        event.private_competition_leaderboard_html = get_leaderboard(
            session, 'private competition', event_name
        )
    event.new_leaderboard_html = get_leaderboard(
        session, 'new', event_name
    )
    session.commit()


def update_user_leaderboards(session, event_name, user_name,
                             new_only=False):
    """Update the leaderboards of a user for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    user_name : str
        The user name. If None, scores from all users will be queried.
    new_only : bool, default is False
        Whether or not to update the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event_team = get_event_team_by_name(session, event_name, user_name)
    if not new_only:
        event_team.leaderboard_html = get_leaderboard(
            session, 'public', event_name, user_name
        )
        event_team.failed_leaderboard_html = get_leaderboard(
            session, 'failed', event_name, user_name
        )
    event_team.new_leaderboard_html = get_leaderboard(
        session, 'new', event_name, user_name
    )
    session.commit()


def update_all_user_leaderboards(session, event_name, new_only=False):
    """Update the leaderboards for all users for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    new_only : bool, default is False
        Whether or not to update the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    event_teams = session.query(EventTeam).filter_by(event=event).all()
    for event_team in event_teams:
        user_name = event_team.team.name
        if not new_only:
            event_team.leaderboard_html = get_leaderboard(
                session, 'public', event_name, user_name
            )
            event_team.failed_leaderboard_html = get_leaderboard(
                session, 'failed', event_name, user_name
            )
        event_team.new_leaderboard_html = get_leaderboard(
            session, 'new', event_name, user_name
        )
    session.commit()
[ 6738, 1233, 26791, 13, 9641, 1330, 6706, 577, 14815, 198, 6738, 340, 861, 10141, 1330, 1720, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 11485, 19849, 13, 15596, 1330, 8558, 198, 6738, ...
2.331092
7735
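The input_ids field in each record looks like a GPT-2 BPE encoding of the content field (token 198, for instance, decodes to a newline in the GPT-2 vocabulary), and ratio_char_token matches len(content) / token_count. A minimal sketch of how these derived fields could be reproduced, assuming the GPT-2 tokenizer from the transformers library; the helper name record_stats is illustrative, not part of the dataset:

from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def record_stats(content):
    # Encode the raw source text into BPE token ids.
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    # Average characters per token: prose-heavy files compress better than
    # symbol-heavy code, which is why the ratio varies from record to record.
    ratio_char_token = len(content) / token_count
    return input_ids, ratio_char_token, token_count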
#Automate the Boring Stuff with Python

import time, sys
indent = 0 # How many spaces to indent
indent_Increasing = True # Whether the indentation is increasing or not

try:
    while True: # The main program loop
        print(' ' * indent, end='')
        print('********')
        time.sleep(0.1) # Pause for 1/10th of a second

        if indent_Increasing:
            indent = indent + 1
            if indent == 20:
                indent_Increasing = False
        else:
            indent = indent - 1
            if indent == 0:
                indent_Increasing = True
except KeyboardInterrupt:
    sys.exit()
[ 2, 38062, 378, 262, 347, 3255, 27864, 351, 11361, 198, 198, 11748, 640, 11, 25064, 198, 521, 298, 796, 657, 1303, 1374, 867, 9029, 284, 33793, 198, 521, 298, 62, 15562, 2313, 796, 6407, 1303, 10127, 262, 33793, 341, 318, 3649, 393, ...
2.342105
266
import getpass
import sys
import json

from reflowrestclient.utils import *

host = raw_input('Host: ')
username = raw_input('Username: ')
password = getpass.getpass('Password: ')

token = get_token(host, username, password)
if token:
    print "Authentication successful"
    print '=' * 40
else:
    print "No token for you!!!"
    sys.exit()

while True:
    start()
[ 11748, 651, 6603, 198, 11748, 25064, 198, 11748, 33918, 198, 6738, 1006, 9319, 2118, 16366, 13, 26791, 1330, 1635, 198, 198, 4774, 796, 8246, 62, 15414, 10786, 17932, 25, 705, 8, 198, 29460, 796, 8246, 62, 15414, 10786, 5842, 13292, 25,...
2.890625
128
from django.conf import settings
from django.conf.urls import url, static

from . import views
from . import jobs


urlpatterns = [
    url(r'^choose_company/(?P<company_id>.*)/$', views.choose_company, name='choose_company'),
    url(r'^cleanlogs/$', jobs.cleanlogs, name='cleanlogs'),
    url(r'^primecache/$', jobs.primecache, name='primecache'),
    url(r'^dump_fixtures/$', views.dump_fixtures),
]
[ 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 19016, 11, 9037, 198, 198, 6738, 764, 1330, 5009, 198, 6738, 764, 1330, 3946, 628, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, ...
2.591195
159
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="i3-workspace-swap",
    description='A python utility to swap the content of two workspaces in i3wm',
    long_description=long_description,
    long_description_content_type="text/markdown",
    version="1.1.0",
    url='https://github.com/einzigartigername/i3-workspace-swap',
    license='MIT',
    author='Nelson Gillo',
    author_email='nelson.gillo@gmx.de',
    packages=setuptools.find_packages(),
    scripts=['i3-workspace-swap'],
    install_requires=['i3ipc'],
    classifiers=[
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
        'Programming Language :: Python :: 3'
    ],
    python_requires='>=3.6',
)
[ 11748, 900, 37623, 10141, 198, 198, 4480, 1280, 7203, 15675, 11682, 13, 9132, 1600, 366, 81, 4943, 355, 277, 71, 25, 198, 220, 220, 220, 890, 62, 11213, 796, 277, 71, 13, 961, 3419, 198, 198, 2617, 37623, 10141, 13, 40406, 7, 198, ...
2.524096
332
# coding: utf-8

"""
    Cisco Intersight

    Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations.  # noqa: E501

    The version of the OpenAPI document: 1.0.9-1295
    Contact: intersight@cisco.com
    Generated by: https://openapi-generator.tech
"""

import pprint
import re  # noqa: F401

import six

from intersight.configuration import Configuration


# Instance methods of the NiaapiVersionRegexAllOf model; the class definition
# itself is not included in this snippet.
def to_str(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())


def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()


def __eq__(self, other):
    """Returns true if both objects are equal"""
    if not isinstance(other, NiaapiVersionRegexAllOf):
        return False
    return self.to_dict() == other.to_dict()


def __ne__(self, other):
    """Returns true if both objects are not equal"""
    if not isinstance(other, NiaapiVersionRegexAllOf):
        return True
    return self.to_dict() != other.to_dict()
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 37811, 198, 220, 220, 220, 28289, 554, 1010, 432, 628, 220, 220, 220, 28289, 554, 1010, 432, 318, 257, 4542, 3859, 6793, 355, 257, 2139, 351, 14553, 23696, 329, 534, 28289, 290, 513, 4372, 2151, ...
3.531802
566
#
#  Copyright 2004-2016, by the California Institute of Technology.
#  ALL RIGHTS RESERVED. United States Government Sponsorship
#  acknowledged. Any commercial use must be negotiated with the Office
#  of Technology Transfer at the California Institute of Technology.
#
#  This software may be subject to U.S. export control laws and
#  regulations. By accepting this document, the user agrees to comply
#  with all U.S. export laws and regulations. User has the
#  responsibility to obtain export licenses, or other export authority
#  as may be required before exporting such information to foreign
#  countries or providing access to foreign persons.
#
from __future__ import print_function
import os
from genmsg import MsgGenerationException

#from . name import *

## :param type_name outdir: Full path to output directory
## :returns int: status. 0 if successful
[ 2, 198, 2, 220, 220, 15069, 5472, 12, 5304, 11, 416, 262, 3442, 5136, 286, 8987, 13, 198, 2, 220, 220, 11096, 371, 34874, 15731, 1137, 53, 1961, 13, 1578, 1829, 5070, 18972, 11094, 198, 2, 220, 220, 10810, 13, 4377, 5068, 779, 127...
3.973094
223
import pytest
from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell
from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion


def test_raise_on_incomplete_markdown_cell():
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md')


def test_does_raise_on_split_markdown_cell():
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'),
                               new_markdown_cell('second line')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md')


def test_raise_on_different_cell_metadata():
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(cells=[new_code_cell('1+1', metadata={'metakey': 'value'})])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'py:light')
[ 11748, 12972, 9288, 198, 6738, 299, 65, 18982, 13, 85, 19, 13, 46803, 8692, 1330, 649, 62, 11295, 2070, 11, 649, 62, 4102, 2902, 62, 3846, 11, 649, 62, 8189, 62, 3846, 11, 649, 62, 1831, 62, 3846, 198, 6738, 474, 929, 88, 5239, ...
2.511062
452
import asyncio
import discord
import random
import datetime
from   discord.ext import commands
from   Cogs import DisplayName
from   Cogs import Nullify
[ 11748, 30351, 952, 198, 11748, 36446, 198, 11748, 4738, 198, 11748, 4818, 8079, 198, 6738, 220, 220, 36446, 13, 2302, 1330, 9729, 198, 6738, 220, 220, 327, 18463, 1330, 16531, 5376, 198, 6738, 220, 220, 327, 18463, 1330, 35886, 1958, 19...
3.731707
41
##########
# Additional dependencies are needed:
# Follow the LOLA installation described in the tune_class_api/lola_pg_official.py file
##########

import os

import ray
from ray import tune

import marltoolbox.algos.lola.envs as lola_envs
import marltoolbox.algos.lola_dice.envs as lola_dice_envs
from marltoolbox.algos.lola import train_cg, train_exact, train_pg
from marltoolbox.envs.vectorized_coin_game import CoinGame, AsymCoinGame
from marltoolbox.utils import log


if __name__ == "__main__":
    debug_mode = True
    main(debug_mode)
[ 7804, 2235, 198, 2, 15891, 20086, 389, 2622, 25, 198, 2, 7281, 262, 35513, 32, 9988, 3417, 287, 262, 14009, 62, 4871, 62, 15042, 14, 75, 5708, 62, 6024, 62, 16841, 13, 9078, 2393, 198, 7804, 2235, 198, 198, 11748, 28686, 198, 11748,...
2.848958
192
import string
import random
import json

from calendar import month_name
from django.conf import settings

SHORTLINK_MIN = getattr(settings, "SHORTLINK_MIN", 6)


def json_data_func(instance):
    '''
    Return json format data, ready for passing into AmCharts.
    Contains 2 items - name of the month and count of distinct links,
    which were cut on the website.
    '''
    class_ = instance.__class__
    # FIXME. The problem is every next year it will add results above
    result = []
    for month in range(1, len(month_name)):
        count_use = class_.objects.filter(pub_date__month=month).count()
        data = dict(month=month_name[month], count=count_use)
        result.append(data)
    json_data = json.dumps(result)
    return json_data
[ 11748, 4731, 198, 11748, 4738, 198, 11748, 33918, 198, 6738, 11845, 1330, 1227, 62, 3672, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 198, 9693, 9863, 43, 17248, 62, 23678, 796, 651, 35226, 7, 33692, 11, 366, 9693, 9863, 43, ...
2.826568
271
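The FIXME in json_data_func above notes that counts from different years pile into the same twelve month buckets. A hedged sketch of one way to scope the query to the current year; pub_date__year is a standard Django field lookup, and the model fields are assumed from the snippet:

import datetime
import json
from calendar import month_name

def json_data_func_current_year(instance):
    class_ = instance.__class__
    year = datetime.date.today().year
    result = []
    for month in range(1, len(month_name)):
        # Restrict the count to the current year so old results do not accumulate.
        count_use = class_.objects.filter(pub_date__year=year,
                                          pub_date__month=month).count()
        result.append(dict(month=month_name[month], count=count_use))
    return json.dumps(result)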
#!/usr/bin/python
import argparse
import ConfigParser
import os
import sys

new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path

from galaxy import eggs
eggs.require( "SQLAlchemy >= 0.4" )

import galaxy.webapps.tool_shed.model.mapping as tool_shed_model

from sqlalchemy.exc import ProgrammingError
from sqlalchemy.exc import OperationalError

from tool_shed.util import xml_util

parser = argparse.ArgumentParser()
parser.add_argument( '-c', '--config_file', dest='config', action='store', default='config/tool_shed.ini.sample' )
parser.add_argument( '-e', '--execute', dest='method', action='store', default='check_db' )
args = parser.parse_args()

if __name__ == '__main__':
    exit( main( args ) )
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 11748, 1822, 29572, 198, 11748, 17056, 46677, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 3605, 62, 6978, 796, 685, 28686, 13, 6978, 13, 22179, 7, 28686, 13, 1136, 66, 16993, 22784, 366, ...
2.907692
260
from __future__ import unicode_literals

from moto.core.responses import BaseResponse

from .models import dynamodbstreams_backends
from six import string_types
[ 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 285, 2069, 13, 7295, 13, 16733, 274, 1330, 7308, 31077, 198, 198, 6738, 764, 27530, 1330, 6382, 375, 65, 5532, 82, 62, 1891, 2412, 198, 6738, 2237, 1330, 4731...
3.6
45
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np

from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.const import Const
[ 2, 15069, 357, 34, 8, 2864, 12, 1238, 2481, 8180, 10501, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 1280, 85, 2879, 13, 31391, 13, 5908, 13, ...
2.969697
99
from JumpScale import j


b = builder()
b.do()
[ 198, 198, 6738, 15903, 29990, 1330, 474, 628, 198, 198, 65, 796, 27098, 3419, 198, 65, 13, 4598, 3419, 198 ]
2.5
20
import sys
import warnings

import matplotlib.pyplot as plt
from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot

warnings.filterwarnings("ignore")

MEM = IMACC(sys.stdin.read())  # Load memory from stdin
PC = PROGC(0)  # Start from the first instruction
RF = REGFLPC()  # initialize register and flags
EE = ExecE(MEM)
IM = IMG()
halted = False
cycle = 0
if MEM.inst_mem == ["0" * 16 for i in range(256)]:
    halted = True
while not halted:
    Instruction = MEM.getData(PC)  # Get current instruction
    IM.imgx.append(cycle)
    IM.imgy.append(PC.PC)
    halted, new_PC, new_regs = EE.execute(Instruction, RF.asdct(), IM, cycle)
    # Update RF, compute new_PC
    RF.update(new_regs, new_PC)
    PC.dump()  # Print PC
    RF.dump()  # Print RF state
    PC.update(new_PC)  # Update PC
    cycle += 1
MEM.dump()  # Print memory state
# plotting
plot(plt, IM)
[ 11748, 25064, 198, 11748, 14601, 198, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 13544, 1039, 1330, 8959, 26861, 11, 8959, 38, 11, 38688, 34, 11, 23337, 3697, 5662, 11, 8393, 36, 11, 7110, 198, 198, 40539...
2.541787
347
import discord
import re
import emoji
import contextlib
import typing
import datetime
from discord.ext import commands
from discord.http import Route


def generate_snowflake(dt: typing.Optional[datetime.datetime] = None) -> int:
    """Returns a numeric snowflake pretending to be created at the given date
    but more accurate and random than time_snowflake. If no dt is passed, it
    makes one from the current time using utcnow.

    Parameters
    -----------
    dt: :class:`datetime.datetime`
        A datetime object to convert to a snowflake.
        If naive, the timezone is assumed to be local time.

    Returns
    --------
    :class:`int`
        The snowflake representing the time given.
    """
    dt = dt or discord.utils.utcnow()
    return int(dt.timestamp() * 1000 - 1420070400000) << 22 | 0x3FFFFF
    # remove if edpy adds my pull request into the master.
[ 11748, 36446, 198, 11748, 302, 198, 11748, 44805, 198, 11748, 4732, 8019, 198, 11748, 19720, 198, 11748, 4818, 8079, 198, 6738, 36446, 13, 2302, 1330, 9729, 198, 6738, 36446, 13, 4023, 1330, 18956, 628, 628, 628, 198, 198, 4299, 7716, 6...
3.072414
290
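The arithmetic in generate_snowflake can be sanity-checked by inverting it. A minimal sketch, assuming the 1420070400000 ms offset in the snippet is the Discord epoch (2015-01-01 UTC) and that the 22 low bits carry non-time data:

import datetime

DISCORD_EPOCH_MS = 1420070400000  # assumed: 2015-01-01T00:00:00 UTC in milliseconds

def snowflake_to_datetime(snowflake):
    # Drop the 22 low bits, then add the epoch back to recover the timestamp.
    ms = (snowflake >> 22) + DISCORD_EPOCH_MS
    return datetime.datetime.fromtimestamp(ms / 1000, tz=datetime.timezone.utc)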
""" kissim.cli.encode Encode structures (generate fingerprints) from CLI arguments. """ import numpy as np from kissim.api import encode from kissim.cli.utils import configure_logger def encode_from_cli(args): """ Encode structures. Parameters ---------- args : argsparse.Namespace CLI arguments. """ configure_logger(args.output) structure_klifs_ids = _parse_structure_klifs_ids(args.input) encode(structure_klifs_ids, args.output, args.local, args.ncores) def _parse_structure_klifs_ids(args_input): """ Parse structure KLIFS IDs. Parameters ---------- args_input : list of str Either path to txt file with structure KLIFS ID (one ID per row) or one or more structure KLIFS IDs. Returns ------- list of int List of structure KLIFS IDs. """ if len(args_input) == 1: try: structure_klifs_ids = [int(args_input[0])] except ValueError: structure_klifs_ids = np.genfromtxt(fname=args_input[0], dtype=int).tolist() else: structure_klifs_ids = [int(i) for i in args_input] return structure_klifs_ids
[ 37811, 198, 41304, 320, 13, 44506, 13, 268, 8189, 198, 198, 4834, 8189, 8573, 357, 8612, 378, 34290, 8, 422, 43749, 7159, 13, 198, 37811, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 9245, 320, 13, 15042, 1330, 37773, 198,...
2.431535
482
import numpy as np
from util import *


def naiveDistanceProfile(tsA, idx, m, tsB = None):
    """Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.

    >>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
    array([[ 2.   ,  2.828,  2.   ],
           [ 0.   ,  0.   ,  0.   ]])
    """

    selfJoin = False
    if tsB is None:
        selfJoin = True
        tsB = tsA

    query = tsA[idx : (idx + m)]
    distanceProfile = []
    n = len(tsB)
    for i in range(n - m + 1):
        distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
    # use an array so the exclusion zone below can be masked with a scalar
    distanceProfile = np.array(distanceProfile)
    if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
    return (distanceProfile, np.full(n - m + 1, idx, dtype = float))


def stampDistanceProfile(tsA, idx, m, tsB = None):
    """
    >>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
    array([[ 2.   ,  2.828,  2.   ],
           [ 0.   ,  0.   ,  0.   ]])
    """

    selfJoin = False
    if tsB is None:
        selfJoin = True
        tsB = tsA

    query = tsA[idx : (idx + m)]
    n = len(tsB)
    distanceProfile = mass(query, tsB)
    if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
    return (distanceProfile, np.full(n - m + 1, idx, dtype = float))


if __name__ == "__main__":
    import doctest
    doctest.testmod()
[ 11748, 299, 32152, 355, 45941, 198, 6738, 7736, 1330, 1635, 198, 198, 4299, 24354, 45767, 37046, 7, 912, 32, 11, 4686, 87, 11, 285, 11, 40379, 33, 796, 6045, 2599, 198, 220, 220, 220, 37227, 13615, 262, 5253, 7034, 286, 12405, 1028, ...
2.172237
778
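Both profile functions lean on helpers star-imported from a local util module that is not part of this record. A minimal sketch of the z-normalized Euclidean distance they assume (mass would be an FFT-based equivalent that returns all sliding-window distances at once):

import numpy as np

def zNormalizedEuclideanDistance(a, b):
    # z-normalize each sequence (zero mean, unit variance), then take the
    # ordinary Euclidean distance between the normalized vectors.
    a = (a - np.mean(a)) / np.std(a)
    b = (b - np.mean(b)) / np.std(b)
    return np.linalg.norm(a - b)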
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
[ 11748, 12972, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 1330, 14174, 62, 19849, 198 ]
3.47619
21
# encoding: UTF-8

from builtins import str

import psutil

# import sys

# PyQt 4/5 compatibility
try:
    from PyQt4.QtGui import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
    from PyQt4 import QtCore
except ImportError:
    from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
    from PyQt5 import QtCore

from uiBasicWidget import *
import uiBasicWidget as wgs
#from . import uiBasicWidget as wgs


########################################################################
########################################################################
[ 2, 21004, 25, 41002, 12, 23, 198, 198, 6738, 3170, 1040, 1330, 965, 198, 198, 11748, 26692, 22602, 198, 198, 2, 1330, 25064, 198, 2, 9485, 48, 83, 604, 14, 20, 17764, 198, 28311, 25, 198, 220, 220, 220, 422, 9485, 48, 83, 19, 13...
3.370558
197
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import requests
import json
import re
from flask import Flask, request, abort
import mysql.connector as mariadb
from mysql.connector import Error

from linebot import (
    LineBotApi, WebhookHandler
)
from linebot.exceptions import (
    InvalidSignatureError
)
from linebot.models import (
    MessageEvent, TextMessage, TextSendMessage, FollowEvent,
)

app = Flask(__name__)

line_bot_api = LineBotApi('')
handler = WebhookHandler('')

# line /callbackEvent
# lineEvent
# notifypost/register
# codenotify-bot postaccess_token


def get_token(code):
    headers = {
        "Content-Type": "application/x-www-form-urlencoded"
    }
    params = {
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": "https://line.husan.cc/register",  # host_ip
        "client_id": "client_id",  # notify client_id
        "client_secret": "client_secret"  # notify client_secret
    }
    r = requests.post('https://notify-bot.line.me/oauth/token', headers=headers, params=params)
    source = json.loads(r.text)
    access_token = source['access_token']
    return access_token

# notify
# notify_access_token


if __name__ == "__main__":
    app.run('0.0.0.0', port=3000)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 554, 58, 2361, 25, 628, 198, 11748, 7007, 198, 11748, 33918, 198, 11748, 302, 198, 6738, 42903, 1330, 46947, 11, 2581, 11, 15614, 198, ...
2.451923
520
# coding=utf-8
from sfc_models.objects import *
from sfc_models.examples.Quick2DPlot import Quick2DPlot


register_standard_logs('output', __file__)
mod = Model()
country = Country(mod, 'CO')
Household(country, 'HH')
ConsolidatedGovernment(country, 'GOV')
FixedMarginBusiness(country, 'BUS', profit_margin=.025)
Market(country, 'GOOD')
Market(country, 'LAB')
TaxFlow(country, 'TAX', taxrate=.2)
# At time period 25, cut spending to 17 (from 20)
mod.AddExogenous('GOV', 'DEM_GOOD', [20.,]* 25 + [17.,]*20)
mod.AddGlobalEquation('DEBT_GDP', 'DEBT-TO-GDP RATIO', '-100.*GOV__F/BUS__SUP_GOOD')
mod.AddGlobalEquation('DEFICIT', 'DEFICIT', '-1.*GOV__INC')
mod.EquationSolver.MaxTime = 40
mod.main()

k = mod.GetTimeSeries('k')
Rat = mod.GetTimeSeries('DEBT_GDP')
Def = mod.GetTimeSeries('GOV__INC')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')

p = Quick2DPlot([k, k], [spend, Def], title='Spending and Deficit',
                filename='intro_X_XX_multiplier_deficit.png', run_now=False)
p.Legend = ['G', 'Deficit']
p.LegendPos = 'center left'
p.DoPlot()
Quick2DPlot(k, Rat, title='Debt-to-GDP Ratio', filename='intro_X_XX_multiplier_debt_gdp.png')
[ 2, 19617, 28, 40477, 12, 23, 198, 6738, 264, 16072, 62, 27530, 13, 48205, 1330, 1635, 198, 6738, 264, 16072, 62, 27530, 13, 1069, 12629, 13, 21063, 17, 35, 43328, 1330, 12029, 17, 35, 43328, 628, 198, 30238, 62, 20307, 62, 6404, 82,...
2.474026
462
from importlib import import_module

from django.conf import settings
from django.core.signals import setting_changed

SOCIALACCOUNT_MODEL = getattr(settings, "REST_AUTH_SOCIALACCOUNT_MODEL", "auth_framework.SocialAccount")

DEFAULTS = {
    'UNIQUE_EMAIL': True,
    'RESET_PASSWORD_BY': 'pin',  # 'url'| 'pin'
    'SERIALIZERS': {
        # 'SOCIAL_LOGIN_SERIALIZER': 'auth.social.serializers.DefaultSocialLoginSerializer',
        'SIGNUP_SERIALIZER': 'auth_framework.serializers.signup_serializers.DefaultSignUpSerializer',
        'USERINFO_SERIALIZER': None
    },
    'SOCIALACCOUNT_MODEL': SOCIALACCOUNT_MODEL,
    'SOCIALACCOUNT_ADMIN_CLASS': "auth_framework.admin.SocialAccountAdmin",
    # SOCIAL LOGINS
    'SOCIAL_CALLBACK_URL': None,  # eg: 'https://developers.google.com/oauthplayground'
    'SOCIAL_AUTO_SIGNUP': False,
    # SIGN UP
    # 'SIGNUP_EMAIL_VERIFICATION': 'none',  # trimmed out email verification celery task in closed source. fewer usage
    'SIGNUP_USERNAME_REQUIRED': False,
    'SIGNUP_USERNAME_VALIDATORS': [],
    'USE_PASSWORD_TWICE_VALIDATION': True,
    # ADVANCES
    'USE_PHONENUMBER_FIELD': False,
    'USE_CELERY_EMAIL': False,
    'USE_ID_TOKEN': True,
    'OAUTH_SAVE_ID_TOKEN': False
}

app_settings = AuthSettings(None, DEFAULTS)

setting_changed.connect(reload_app_settings)
[ 6738, 1330, 8019, 1330, 1330, 62, 21412, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 7295, 13, 12683, 874, 1330, 4634, 62, 40985, 198, 198, 50, 4503, 12576, 26861, 28270, 62, 33365, 3698, 796, 651, ...
2.475746
536
from django.db import models

from shorty.manager import UrlManager
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 6738, 1790, 88, 13, 37153, 1330, 8799, 75, 13511, 198 ]
3.578947
19
security = """ New Web users get the Roles "User,Nosy" New Email users get the Role "User" Role "admin": User may access the rest interface (Rest Access) User may access the web interface (Web Access) User may access the xmlrpc interface (Xmlrpc Access) User may create everything (Create) User may edit everything (Edit) User may manipulate user Roles through the web (Web Roles) User may restore everything (Restore) User may retire everything (Retire) User may use the email interface (Email Access) User may view everything (View) Role "anonymous": User may access the web interface (Web Access) Role "cc-permission": (Restore for "cost_center_permission_group" only) (Retire for "cost_center_permission_group" only) User is allowed to create cost_center_permission_group (Create for "cost_center_permission_group" only) User is allowed to edit cost_center_permission_group (Edit for "cost_center_permission_group" only) Role "contact": User is allowed to create contact (Create for "contact" only) User is allowed to edit contact (Edit for "contact" only) Role "controlling": User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only) User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'team_lead') only) User is allowed Edit on (Edit for "time_project": ('group_lead', 'team_lead') only) User is allowed Edit on (Edit for "time_wp": ('project',) only) User is allowed View on (View for "user": ('roles',) only) User is allowed View on (View for "user_dynamic": ('id', 'sap_cc', 'user', 'valid_from', 'valid_to') only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to access daily_record (View for "daily_record" only) User is allowed to access daily_record_freeze (View for "daily_record_freeze" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access overtime_correction (View for "overtime_correction" only) User is allowed to access query (View for "query" only) User is allowed to access time_project (View for "time_project" only) User is allowed to access time_record (View for "time_record" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create cost_center (Create for "cost_center" only) User is allowed to create cost_center_group (Create for "cost_center_group" only) User is allowed to create cost_center_status (Create for "cost_center_status" only) User is allowed to create department (Create for "department" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to create product_family (Create for "product_family" only) User is allowed to create public_holiday (Create for "public_holiday" only) User is allowed to create query (Create for "query" only) User is allowed to create reporting_group (Create for "reporting_group" only) User is allowed to create sap_cc (Create for "sap_cc" only) User is allowed to create time_activity (Create for "time_activity" only) User is allowed to create time_activity_perm (Create for "time_activity_perm" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create work_location (Create for "work_location" only) User is allowed to edit cost_center (Edit for "cost_center" only) User is allowed to edit cost_center_group (Edit for "cost_center_group" only) User is allowed to edit 
cost_center_status (Edit for "cost_center_status" only) User is allowed to edit department (Edit for "department" only) User is allowed to edit organisation (Edit for "organisation" only) User is allowed to edit product_family (Edit for "product_family" only) User is allowed to edit public_holiday (Edit for "public_holiday" only) User is allowed to edit query (Edit for "query" only) User is allowed to edit reporting_group (Edit for "reporting_group" only) User is allowed to edit sap_cc (Edit for "sap_cc" only) User is allowed to edit time_activity (Edit for "time_activity" only) User is allowed to edit time_activity_perm (Edit for "time_activity_perm" only) User is allowed to edit time_record (Edit for "time_record" only) User is allowed to edit work_location (Edit for "work_location" only) Role "doc_admin": User is allowed Edit on (Edit for "department": ('doc_num',) only) User is allowed to create artefact (Create for "artefact" only) User is allowed to create doc (Create for "doc" only) User is allowed to create doc_category (Create for "doc_category" only) User is allowed to create doc_status (Create for "doc_status" only) User is allowed to create product_type (Create for "product_type" only) User is allowed to create reference (Create for "reference" only) User is allowed to edit artefact (Edit for "artefact" only) User is allowed to edit doc (Edit for "doc" only) User is allowed to edit doc_category (Edit for "doc_category" only) User is allowed to edit doc_status (Edit for "doc_status" only) User is allowed to edit product_type (Edit for "product_type" only) User is allowed to edit reference (Edit for "reference" only) Role "dom-user-edit-facility": Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['room'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['room'] only) Role "dom-user-edit-gtt": (Search for "user_dynamic" only) May only view/edit records with the correct domain (Edit for "user_dynamic" only) May only view/edit records with the correct domain (View for "user_dynamic" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to create user (Create for "user" only) User is allowed to create user_contact (Create for "user_contact" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 'tt_lines', 'username', 'vie_user'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 
'tt_lines', 'username', 'vie_user'] only) Role "dom-user-edit-hr": (Search for "user_dynamic" only) May only view/edit records with the correct domain (Edit for "user_dynamic" only) May only view/edit records with the correct domain (View for "user_dynamic" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to create user_contact (Create for "user_contact" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only) Role "dom-user-edit-office": User is allowed to create user_contact (Create for "user_contact" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'position_text', 'room'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'position_text', 'room'] only) Role "external": (Search for "ext_tracker_state": ('id', 'issue') only) (Search for "user": ('id', 'nickname', 'username') only) External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (Edit for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only) External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (View for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 
'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only) User is allowed View on (View for "category": ('id', 'name') only) User is allowed View on (View for "user": ('nickname', 'status', 'username') only) User is allowed View on (View for "user_status": ('name',) only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only) User is allowed to access area (View for "area" only) User is allowed to access doc_issue_status (View for "doc_issue_status" only) User is allowed to access ext_tracker (View for "ext_tracker" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to access keyword (View for "keyword" only) User is allowed to access kind (View for "kind" only) User is allowed to access msg_keyword (View for "msg_keyword" only) User is allowed to access safety_level (View for "safety_level" only) User is allowed to access severity (View for "severity" only) User is allowed to access status (View for "status" only) User is allowed to access status_transition (View for "status_transition" only) User is allowed to access test_level (View for "test_level" only) User is allowed to create file (Create for "file" only) User is allowed to create issue (Create for "issue" only) User is allowed to create msg (Create for "msg" only) User is allowed to create query (Create for "query" only) User is allowed to edit their queries (Edit for "query" only) User is allowed to retire their queries (Retire for "query" only) User is allowed to search for their own files (Search for "file" only) User is allowed to search for their own messages (Search for "msg" only) User is allowed to search for their queries (Search for "query" only) User is allowed to search issue (Search for "issue" only) User is allowed to view their own files (View for "file" only) User may access the web interface (Web Access) User may use the email interface (Email Access) Users are allowed to edit some of their details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'password', 'timezone') only) Users are allowed to view some of their details (View for "user": ('activity', 'actor', 'creation', 'creator', 'firstname', 'lastname', 'realname', 'username') only) Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only) Role "facility": (Restore for "room" only) (Retire for "room" only) User is allowed to create room (Create for "room" only) User is allowed to edit room (Edit for "room" only) Role "functional-role": (Restore for "user_functional_role" only) (Retire for "user_functional_role" only) User is allowed Edit on (Edit for "user": ('business_responsible', 'scale_seniority') only) User is allowed View on (View for "user": ('business_responsible', 'planning_role', 'scale_seniority') only) User is allowed to access user_functional_role (View for "user_functional_role" only) User is allowed to create user_functional_role (Create for "user_functional_role" only) User is allowed to edit user_functional_role (Edit for "user_functional_role" only) Role "hr": (Edit for "overtime_period": ('name', 'order') only) (Restore for "room" only) (Retire for "room" only) User is allowed Edit on (Edit for "daily_record": ('required_overtime', 'weekend_allowed') only) User is 
allowed Edit on (Edit for "daily_record": ('status', 'time_record') only) User is allowed Edit on (Edit for "time_project": ('approval_hr', 'approval_required', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'no_overtime', 'no_overtime_day', 'only_hours', 'overtime_reduction') only) User is allowed View on (View for "user": ('contacts',) only) User is allowed to access auto_wp (View for "auto_wp" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to access daily_record (View for "daily_record" only) User is allowed to access daily_record_freeze (View for "daily_record_freeze" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access overtime_correction (View for "overtime_correction" only) User is allowed to access time_record (View for "time_record" only) User is allowed to access user_contact (View for "user_contact" only) User is allowed to access user_dynamic (View for "user_dynamic" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create auto_wp (Create for "auto_wp" only) User is allowed to create daily_record_freeze (Create for "daily_record_freeze" only) User is allowed to create location (Create for "location" only) User is allowed to create org_location (Create for "org_location" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to create overtime_correction (Create for "overtime_correction" only) User is allowed to create overtime_period (Create for "overtime_period" only) User is allowed to create product_family (Create for "product_family" only) User is allowed to create public_holiday (Create for "public_holiday" only) User is allowed to create reporting_group (Create for "reporting_group" only) User is allowed to create room (Create for "room" only) User is allowed to create sap_cc (Create for "sap_cc" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create uc_type (Create for "uc_type" only) User is allowed to create user (Create for "user" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit auto_wp (Edit for "auto_wp" only) User is allowed to edit dynamic user data if not frozen in validity span of dynamic user record (Edit for "user_dynamic" only) User is allowed to edit freeze record if not frozen at the given date (Edit for "daily_record_freeze": ('frozen',) only) User is allowed to edit location (Edit for "location" only) User is allowed to edit org_location (Edit for "org_location" only) User is allowed to edit organisation (Edit for "organisation" only) User is allowed to edit overtime correction if the overtime correction is not frozen (Edit for "overtime_correction" only) User is allowed to edit product_family (Edit for "product_family" only) User is allowed to edit public_holiday (Edit for "public_holiday" only) User is allowed to edit reporting_group (Edit for "reporting_group" only) User is allowed to edit room (Edit for "room" only) User is allowed to edit sap_cc (Edit for "sap_cc" only) User is allowed to edit time_record (Edit for "time_record" only) User is allowed to edit uc_type (Edit for "uc_type" only) User may manipulate user Roles through the web (Web Roles) Role "hr-leave-approval": User is allowed Edit on (Edit for "leave_submission": ('status',) only) User is allowed to access contract_type (View for "contract_type" only) User is allowed 
to access leave_submission (View for "leave_submission" only) User is allowed to access vacation_correction (View for "vacation_correction" only) Role "hr-org-location": (Search for "daily_record_freeze" only) (Search for "overtime_correction" only) (Search for "time_activity_perm" only) (Search for "time_record" only) (Search for "user_dynamic" only) User is allowed to view dynamic user data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "user_dynamic" only) User is allowed to view freeze information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "daily_record_freeze" only) User is allowed to view overtime information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "overtime_correction" only) User is allowed to view time record data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "time_record" only) Role "hr-vacation": User is allowed to access contract_type (View for "contract_type" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create contract_type (Create for "contract_type" only) User is allowed to create leave_submission (Create for "leave_submission" only) User is allowed to create vacation_correction (Create for "vacation_correction" only) User is allowed to edit contract_type (Edit for "contract_type" only) User is allowed to edit leave_submission (Edit for "leave_submission" only) User is allowed to edit vacation_correction (Edit for "vacation_correction" only) Role "issue_admin": User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only) User is allowed to access issue (View for "issue" only) User is allowed to create area (Create for "area" only) User is allowed to create category (Create for "category" only) User is allowed to create doc_issue_status (Create for "doc_issue_status" only) User is allowed to create ext_tracker (Create for "ext_tracker" only) User is allowed to create issue (Create for "issue" only) User is allowed to create keyword (Create for "keyword" only) User is allowed to create kind (Create for "kind" only) User is allowed to create msg_keyword (Create for "msg_keyword" only) User is allowed to create safety_level (Create for "safety_level" only) User is allowed to create severity (Create for "severity" only) User is allowed to create status (Create for "status" only) User is allowed to create status_transition (Create for "status_transition" only) User is allowed to create test_level (Create for "test_level" only) User is allowed to edit area (Edit for "area" only) User is allowed to edit category (Edit for "category" only) User is allowed to edit doc_issue_status (Edit for "doc_issue_status" only) User is allowed to edit ext_tracker (Edit for "ext_tracker" only) User is allowed to edit issue (Edit for "issue" only) User is allowed to edit keyword (Edit for "keyword" only) User is allowed to edit kind (Edit for "kind" only) User is allowed to edit msg_keyword (Edit for "msg_keyword" only) User is allowed to edit safety_level (Edit for "safety_level" only) User is allowed to edit severity (Edit for "severity" only) User is allowed to edit status (Edit for "status" only) User is allowed to edit status_transition (Edit for "status_transition" only) User is allowed to edit test_level (Edit for 
"test_level" only) Role "it": Create (Create for "user_contact" only) User is allowed Edit on (Edit for "file": ('name', 'type') only) User is allowed Edit on (Edit for "location": ('domain_part',) only) User is allowed Edit on (Edit for "organisation": ('domain_part',) only) User is allowed Edit on (Edit for "user": ('ad_domain', 'nickname', 'password', 'pictures', 'roles', 'timetracking_by', 'timezone', 'username') only) User is allowed Edit on (Edit for "user": ('address', 'alternate_addresses', 'nickname', 'password', 'timezone', 'username') only) User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only) User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed to access domain_permission (View for "domain_permission" only) User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) User is allowed to create domain_permission (Create for "domain_permission" only) User is allowed to create it_category (Create for "it_category" only) User is allowed to create it_int_prio (Create for "it_int_prio" only) User is allowed to create it_issue (Create for "it_issue" only) User is allowed to create it_project (Create for "it_project" only) User is allowed to create it_request_type (Create for "it_request_type" only) User is allowed to create mailgroup (Create for "mailgroup" only) User is allowed to edit domain_permission (Edit for "domain_permission" only) User is allowed to edit it_category (Edit for "it_category" only) User is allowed to edit it_int_prio (Edit for "it_int_prio" only) User is allowed to edit it_issue (Edit for "it_issue" only) User is allowed to edit it_project (Edit for "it_project" only) User is allowed to edit it_request_type (Edit for "it_request_type" only) User is allowed to edit mailgroup (Edit for "mailgroup" only) User may manipulate user Roles through the web (Web Roles) Role "itview": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) Role "msgedit": (Search for "msg": ('date', 'id') only) User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only) User is allowed to access ext_msg (View for "ext_msg" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) Role "msgsync": (Search for "msg": ('date', 'id') only) User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only) User is allowed to access ext_msg (View for "ext_msg" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to create ext_msg (Create for "ext_msg" only) User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only) User is allowed to edit ext_msg (Edit for "ext_msg" only) User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only) Role "nosy": User may get nosy messages for doc (Nosy for "doc" only) User may get nosy messages for issue (Nosy for "issue" only) 
User may get nosy messages for it_issue (Nosy for "it_issue" only) User may get nosy messages for it_project (Nosy for "it_project" only) User may get nosy messages for support (Nosy for "support" only) Role "office": (Restore for "room" only) (Retire for "room" only) User is allowed View on (View for "user": ('contacts',) only) User is allowed to access user_contact (View for "user_contact" only) User is allowed to create absence (Create for "absence" only) User is allowed to create absence_type (Create for "absence_type" only) User is allowed to create room (Create for "room" only) User is allowed to create uc_type (Create for "uc_type" only) User is allowed to edit absence (Edit for "absence" only) User is allowed to edit absence_type (Edit for "absence_type" only) User is allowed to edit room (Edit for "room" only) User is allowed to edit uc_type (Edit for "uc_type" only) Role "organisation": User is allowed to access location (View for "location" only) User is allowed to access org_location (View for "org_location" only) User is allowed to access organisation (View for "organisation" only) User is allowed to create location (Create for "location" only) User is allowed to create org_location (Create for "org_location" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to edit location (Edit for "location" only) User is allowed to edit org_location (Edit for "org_location" only) User is allowed to edit organisation (Edit for "organisation" only) Role "pgp": Role "procurement": (View for "sap_cc" only) (View for "time_project" only) User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'purchasing_agents', 'team_lead') only) User is allowed Edit on (Edit for "time_project": ('group_lead', 'purchasing_agents', 'team_lead') only) Role "project": User is allowed Edit on (Edit for "time_project": ('cost_center', 'department', 'deputy', 'description', 'name', 'nosy', 'organisation', 'responsible', 'status') only) User is allowed Edit on (Edit for "time_project": ('infosec_req', 'is_extern', 'max_hours', 'op_project', 'planned_effort', 'product_family', 'project_type', 'reporting_group', 'work_location') only) User is allowed to access time_project (View for "time_project" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) User is allowed to create time_project (Create for "time_project" only) User is allowed to create time_project_status (Create for "time_project_status" only) User is allowed to create time_wp (Create for "time_wp" only) User is allowed to create time_wp_group (Create for "time_wp_group" only) User is allowed to edit time_project_status (Edit for "time_project_status" only) User is allowed to edit time_wp (Edit for "time_wp" only) User is allowed to edit time_wp_group (Edit for "time_wp_group" only) Role "project_view": User is allowed to access time_project (View for "time_project" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) Role "sec-incident-nosy": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) Role "sec-incident-responsible": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for 
"it_project" only) Role "staff-report": Role "sub-login": Role "summary_view": Role "supportadmin": User is allowed to access analysis_result (View for "analysis_result" only) User is allowed to access contact (View for "contact" only) User is allowed to access customer (View for "customer" only) User is allowed to access customer_agreement (View for "customer_agreement" only) User is allowed to access mailgroup (View for "mailgroup" only) User is allowed to access return_type (View for "return_type" only) User is allowed to access sup_classification (View for "sup_classification" only) User is allowed to access support (View for "support" only) User is allowed to create analysis_result (Create for "analysis_result" only) User is allowed to create contact (Create for "contact" only) User is allowed to create customer (Create for "customer" only) User is allowed to create customer_agreement (Create for "customer_agreement" only) User is allowed to create mailgroup (Create for "mailgroup" only) User is allowed to create return_type (Create for "return_type" only) User is allowed to create sup_classification (Create for "sup_classification" only) User is allowed to create support (Create for "support" only) User is allowed to edit analysis_result (Edit for "analysis_result" only) User is allowed to edit contact (Edit for "contact" only) User is allowed to edit customer (Edit for "customer" only) User is allowed to edit customer_agreement (Edit for "customer_agreement" only) User is allowed to edit mailgroup (Edit for "mailgroup" only) User is allowed to edit return_type (Edit for "return_type" only) User is allowed to edit sup_classification (Edit for "sup_classification" only) User is allowed to edit support (Edit for "support" only) Role "time-report": User is allowed to access time_report (View for "time_report" only) User is allowed to create time_report (Create for "time_report" only) User is allowed to edit time_report (Edit for "time_report" only) User may edit own file (file created by user) (Edit for "file" only) Role "user": (Search for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only) (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'bookers', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) (View for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only) Search (Search for "user_contact" only) User is allowed Edit on (Edit for "msg": ('keywords',) only) User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only) User is allowed Edit on issue if issue is non-confidential or user is on nosy list (Edit for "issue" only) User is allowed Edit on it_issue if it_issue is non-confidential or user is on nosy list (Edit for "it_issue": ('messages', 'files', 'nosy') only) User is allowed Edit on it_project if it_project is non-confidential or user is on nosy list (Edit for "it_project": 
('messages', 'files', 'nosy') only) User is allowed Edit on support if support is non-confidential or user is on nosy list (Edit for "support": ('analysis_end', 'analysis_result', 'analysis_start', 'bcc', 'business_unit', 'category', 'cc', 'cc_emails', 'classification', 'closed', 'confidential', 'customer', 'emails', 'execution', 'external_ref', 'files', 'goods_received', 'goods_sent', 'lot', 'messages', 'nosy', 'number_effected', 'numeric_effort', 'prio', 'prodcat', 'product', 'related_issues', 'related_support', 'release', 'responsible', 'return_type', 'sap_ref', 'send_to_customer', 'serial_number', 'set_first_reply', 'status', 'superseder', 'title', 'type', 'warranty') only) User is allowed View on (View for "user": ('activity', 'actor', 'ad_domain', 'address', 'alternate_addresses', 'business_responsible', 'clearance_by', 'creation', 'creator', 'firstname', 'id', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'queries', 'realname', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'title', 'tt_lines', 'username') only) User is allowed View on (View for "user": ('activity', 'actor', 'address', 'alternate_addresses', 'creation', 'creator', 'id', 'queries', 'realname', 'status', 'timezone', 'username') only) User is allowed View on (View for "user": ('business_responsible', 'department_temp', 'timetracking_by', 'vie_user', 'vie_user_bl_override', 'vie_user_ml') only) User is allowed View on (View for "user": ('contacts',) only) User is allowed View on (View for "user_dynamic": ('department', 'org_location') only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed View on issue if issue is non-confidential or user is on nosy list (View for "issue" only) User is allowed View on it_issue if it_issue is non-confidential or user is on nosy list (View for "it_issue" only) User is allowed View on it_project if it_project is non-confidential or user is on nosy list (View for "it_project" only) User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only) User is allowed View on support if support is non-confidential or user is on nosy list (View for "support" only) User is allowed to access absence (View for "absence" only) User is allowed to access absence_type (View for "absence_type" only) User is allowed to access analysis_result (View for "analysis_result" only) User is allowed to access area (View for "area" only) User is allowed to access artefact (View for "artefact" only) User is allowed to access business_unit (View for "business_unit" only) User is allowed to access category (View for "category" only) User is allowed to access contact (View for "contact" only) User is allowed to access contact_type (View for "contact_type" only) User is allowed to access cost_center (View for "cost_center" only) User is allowed to access cost_center_group (View for "cost_center_group" only) User is allowed to access cost_center_permission_group (View for "cost_center_permission_group" only) User is allowed to access cost_center_status (View for "cost_center_status" only) User is allowed to access customer (View for "customer" only) User is allowed to access customer_agreement (View for "customer_agreement" only) User is allowed to access daily record if he is owner or supervisor or timetracking-by user (Edit for "daily_record": ('status', 'time_record') only) User is allowed to access daily record if he is 
owner or supervisor or timetracking-by user (View for "daily_record" only) User is allowed to access daily_record_status (View for "daily_record_status" only) User is allowed to access department (View for "department" only) User is allowed to access doc (View for "doc" only) User is allowed to access doc_category (View for "doc_category" only) User is allowed to access doc_issue_status (View for "doc_issue_status" only) User is allowed to access doc_status (View for "doc_status" only) User is allowed to access ext_tracker (View for "ext_tracker" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to access functional_role (View for "functional_role" only) User is allowed to access it_category (View for "it_category" only) User is allowed to access it_issue_status (View for "it_issue_status" only) User is allowed to access it_prio (View for "it_prio" only) User is allowed to access it_project_status (View for "it_project_status" only) User is allowed to access it_request_type (View for "it_request_type" only) User is allowed to access keyword (View for "keyword" only) User is allowed to access kind (View for "kind" only) User is allowed to access leave_status (View for "leave_status" only) User is allowed to access location (View for "location" only) User is allowed to access mailgroup (View for "mailgroup" only) User is allowed to access msg_keyword (View for "msg_keyword" only) User is allowed to access org_group (View for "org_group" only) User is allowed to access org_location (View for "org_location" only) User is allowed to access organisation (View for "organisation" only) User is allowed to access overtime_period (View for "overtime_period" only) User is allowed to access prodcat (View for "prodcat" only) User is allowed to access product (View for "product" only) User is allowed to access product_family (View for "product_family" only) User is allowed to access product_type (View for "product_type" only) User is allowed to access project_type (View for "project_type" only) User is allowed to access public_holiday (View for "public_holiday" only) User is allowed to access reference (View for "reference" only) User is allowed to access reporting_group (View for "reporting_group" only) User is allowed to access return_type (View for "return_type" only) User is allowed to access room (View for "room" only) User is allowed to access safety_level (View for "safety_level" only) User is allowed to access sap_cc (View for "sap_cc" only) User is allowed to access severity (View for "severity" only) User is allowed to access sex (View for "sex" only) User is allowed to access status (View for "status" only) User is allowed to access status_transition (View for "status_transition" only) User is allowed to access summary_report (View for "summary_report" only) User is allowed to access summary_type (View for "summary_type" only) User is allowed to access sup_classification (View for "sup_classification" only) User is allowed to access sup_execution (View for "sup_execution" only) User is allowed to access sup_prio (View for "sup_prio" only) User is allowed to access sup_status (View for "sup_status" only) User is allowed to access sup_type (View for "sup_type" only) User is allowed to access sup_warranty (View for "sup_warranty" only) User is allowed to access test_level (View for "test_level" only) User is allowed to access time_activity (View for "time_activity" 
only) User is allowed to access time_activity_perm (View for "time_activity_perm" only) User is allowed to access time_project_status (View for "time_project_status" only) User is allowed to access time_wp_group (View for "time_wp_group" only) User is allowed to access time_wp_summary_no (View for "time_wp_summary_no" only) User is allowed to access timesheet (View for "timesheet" only) User is allowed to access uc_type (View for "uc_type" only) User is allowed to access user_status (View for "user_status" only) User is allowed to access vac_aliq (View for "vac_aliq" only) User is allowed to access vacation_report (View for "vacation_report" only) User is allowed to access work_location (View for "work_location" only) User is allowed to create daily_record (Create for "daily_record" only) User is allowed to create doc (Create for "doc" only) User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only) User is allowed to create file (Create for "file" only) User is allowed to create issue (Create for "issue" only) User is allowed to create it_issue (Create for "it_issue" only) User is allowed to create leave_submission (Create for "leave_submission" only) User is allowed to create msg (Create for "msg" only) User is allowed to create queries (Create for "query" only) User is allowed to create support (Create for "support" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create time_wp (Create for "time_wp" only) User is allowed to edit (some of) their own user details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'lunch_duration', 'lunch_start', 'password', 'queries', 'realname', 'room', 'subst_active', 'substitute', 'timezone', 'tt_lines') only) User is allowed to edit category if he is responsible for it (Edit for "category": ('nosy', 'default_part_of') only) User is allowed to edit doc (Edit for "doc" only) User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only) User is allowed to edit if he's the owner of the contact (Edit for "user_contact": ('visible',) only) User is allowed to edit several fields if he is Responsible for an it_issue (Edit for "it_issue": ('responsible',) only) User is allowed to edit several fields if he is Stakeholder/Responsible for an it_issue (Edit for "it_issue": ('deadline', 'status', 'title') only) User is allowed to edit their queries (Edit for "query" only) User is allowed to edit time category if the status is "Open" and he is responsible for the time category (Edit for "time_project": ('deputy', 'planned_effort', 'nosy') only) User is allowed to edit workpackage if he is time category owner or deputy (Edit for "time_wp": ('cost_center', 'is_public', 'name', 'responsible', 'time_wp_summary_no', 'wp_no') only) User is allowed to retire their queries (Retire for "query" only) User is allowed to search daily_record (Search for "daily_record" only) User is allowed to search for their own files (Search for "file" only) User is allowed to search for their own messages (Search for "msg" only) User is allowed to search for their queries (Search for "query" only) User is allowed to search issue (Search for "issue" only) User is allowed to search it_issue (Search for "it_issue" only) User is allowed to search it_project (Search for "it_project" only) User is allowed to search leave_submission (Search for "leave_submission" only) User is allowed to search support (Search for "support" only) User is allowed to search time_record (Search for "time_record" only) User is 
allowed to search time_wp (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'is_extern', 'is_public', 'id', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) User is allowed to search user_status (Search for "user": ('status',) only) User is allowed to see time record if he is allowed to see all details on work package or User may view a daily_record (and time_records that are attached to that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "time_record" only) User is allowed to view (some of) their own user details (View for "user": ('entry_date', 'planning_role') only) User is allowed to view contact if he's the owner of the contact or the contact is marked visible (View for "user_contact" only) User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (Edit for "leave_submission": ('status',) only) User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (View for "leave_submission" only) User is allowed to view selected fields in work package if booking is allowed for this user (also applies to timetracking by, supervisor and approval delegated) (View for "time_wp": ('activity', 'actor', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) User is allowed to view their own files (View for "file" only) User is allowed to view their own messages (View for "msg" only) User is allowed to view their own overtime information (View for "overtime_correction" only) User is allowed to view time record if he is the supervisor or the person to whom approvals are delegated (View for "time_record" only) User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_project": ('name',) only) User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_wp": ('name', 'project') only) User is allowed to view/edit workpackage if he is owner or project responsible/deputy (Edit for "time_wp": ('bookers', 'description', 'epic_key', 'planned_effort', 'time_end', 'time_start', 'time_wp_summary_no') only) User may access the rest interface (Rest Access) User may access the web interface (Web Access) User may access the xmlrpc interface (Xmlrpc Access) User may edit own leave submissions (Edit for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only) User may edit own leave submissions (View for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only) User may see time report if reponsible or deputy of time project or on nosy list of time project (View for "time_report" only) User may use the email interface (Email Access) User may view a daily_record (and time_records that are attached to 
that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "daily_record" only) User may view their own user functional role (View for "user_functional_role" only) User may view time category if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_project" only) User may view work package if responsible for it, if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_wp" only) User or Timetracking by user may edit time_records owned by user (Edit for "time_record" only) User or Timetracking by user may edit time_records owned by user (Restore for "time_record" only) User or Timetracking by user may edit time_records owned by user (Retire for "time_record" only) User or Timetracking by user may edit time_records owned by user (View for "time_record" only) Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only) Users may see daily record if they may see one of the time_records for that day (View for "daily_record" only) Role "user_view": User is allowed to access user (View for "user" only) Role "vacation-report": """.strip ()
[ 12961, 796, 37227, 198, 3791, 5313, 2985, 651, 262, 371, 4316, 366, 12982, 11, 45, 418, 88, 1, 198, 3791, 9570, 2985, 651, 262, 20934, 366, 12982, 1, 198, 47445, 366, 28482, 1298, 198, 11787, 743, 1895, 262, 1334, 7071, 357, 19452, ...
3.534879
13,676
tc = int(input())
while tc:
    tc -= 1
    best = 0
    n, x = map(int, input().split())
    for i in range(n):
        s, r = map(int, input().split())
        if x >= s:
            best = max(best, r)
    print(best)
[ 23047, 796, 493, 7, 15414, 28955, 198, 4514, 37096, 25, 198, 220, 220, 220, 37096, 48185, 352, 198, 220, 220, 220, 1266, 796, 657, 198, 220, 220, 220, 299, 11, 2124, 796, 3975, 7, 600, 11, 5128, 22446, 35312, 28955, 198, 220, 220, ...
1.964286
112
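A quick sanity check for the snippet above, assuming the usual competitive-programming input format (first line: number of test cases; each case gives n and a limit x, then n lines of "s r"; the answer is the best r among problems with s <= x). The sample values are made up for illustration:

# Hypothetical self-contained check: feed a sample case to the loop via stdin.
import io
import sys

sample = """1
3 10
5 7
11 9
10 8
"""
sys.stdin = io.StringIO(sample)

tc = int(input())
while tc:
    tc -= 1
    best = 0
    n, x = map(int, input().split())
    for i in range(n):
        s, r = map(int, input().split())
        if x >= s:
            best = max(best, r)
    print(best)  # prints 8: s=5 and s=10 qualify, and max(7, 8) == 8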
""" Tests related to :class:`.SqliteWrapper` / :class:`.ExampleWrapper` """ # from unittest import TestCase from tests.base import *
[ 37811, 198, 51, 3558, 3519, 284, 1058, 4871, 25, 44646, 50, 13976, 578, 36918, 2848, 63, 1220, 1058, 4871, 25, 44646, 16281, 36918, 2848, 63, 198, 37811, 198, 2, 422, 555, 715, 395, 1330, 6208, 20448, 198, 6738, 5254, 13, 8692, 1330, ...
3.045455
44
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
[ 2, 15069, 2211, 12, 42334, 13914, 45036, 3549, 2351, 4765, 11, 11419, 290, 584, 198, 2, 1338, 441, 4935, 34152, 13, 4091, 262, 1353, 12, 5715, 27975, 38162, 9947, 2393, 329, 3307, 13, 198, 2, 198, 2, 30628, 55, 12, 34156, 12, 33234,...
3.47619
63
from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction
# from rasa_core.events import (UserUtteranceReverted, UserUttered,
#                               ActionExecuted, Event)
from rasa_sdk.events import AllSlotsReset, SlotSet
from rasa.core.constants import REQUESTED_SLOT
from rasa.core.slots import Slot
import pandas as pd
import json
from actionserver.utils import utilities as util
from actionserver.controllers.faqs.faq import FAQ
from actionserver.controllers.constants.orderForm import *
import logging
from actionserver.utils.utilities import INVALID_VALUE

product_list = []
quant_list = []  # takes quantity from user

logger = logging.getLogger(__name__)

with open(r'./actionserver/custom_payload.json') as f:
    frendy_product_menu = json.load(f)

# Code snippet for global back
# return [Restarted(), UserUttered(text="/get_started", parse_data={
#     "intent": {"confidence": 1.0, "name": "get_started"},
#     "entities": []
# }), FollowupAction(name="utter_greet")]
[ 6738, 19720, 1330, 4377, 11, 8255, 11, 360, 713, 11, 7343, 11, 4479, 198, 6738, 374, 15462, 62, 21282, 74, 1330, 7561, 11, 26885, 198, 6738, 374, 15462, 62, 21282, 74, 13, 18558, 38409, 1330, 9745, 278, 7279, 8071, 2044, 198, 6738, ...
2.90799
413
import plotly.graph_objs as go
[ 11748, 7110, 306, 13, 34960, 62, 672, 8457, 355, 467, 198 ]
2.818182
11
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 16529, 35937, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 198, 2, 5964, 1321...
5.44086
93
"""File generated by TLObjects' generator. All changes will be ERASED""" from ...tl.tlobject import TLRequest from typing import Optional, List, Union, TYPE_CHECKING import os import struct if TYPE_CHECKING: from ...tl.types import TypeInputStickerSet, TypeInputUser, TypeInputStickerSetItem, TypeInputDocument
[ 37811, 8979, 7560, 416, 24811, 10267, 82, 6, 17301, 13, 1439, 2458, 481, 307, 13793, 42827, 37811, 198, 6738, 2644, 28781, 13, 28781, 15252, 1330, 24811, 18453, 198, 6738, 19720, 1330, 32233, 11, 7343, 11, 4479, 11, 41876, 62, 50084, 27...
3.647727
88
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.ChimeraApplication
from KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis import FluidChimeraAnalysis
[ 11748, 509, 10366, 418, 15205, 13323, 23154, 198, 11748, 509, 10366, 418, 15205, 13323, 23154, 13, 42, 10366, 418, 3118, 715, 395, 355, 11801, 14402, 198, 11748, 509, 10366, 418, 15205, 13323, 23154, 13, 1925, 320, 8607, 23416, 198, 6738,...
3.205882
68
import urllib.request
import xml.etree.ElementTree

if __name__ == "__main__":
  import pprint
  pprint.pprint(RSS10Parser("https://www.youtube.com/feeds/videos.xml?playlist_id=PLrPVslFukDQo7l5RCqAZtKDl6tUyMAFWH").getlist())
[ 11748, 2956, 297, 571, 13, 25927, 198, 11748, 35555, 13, 316, 631, 13, 20180, 27660, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 1330, 279, 4798, 198, 220, 279, 4798, 13, 381, 22272, 7, 49, 5432, 940...
2.333333
96
import math,time,random
import pepper_interface

IP = "192.168.0.147"
PORT = 9559
simulation = False


with pepper_interface.get(IP,PORT,simulation) as pepper:

    time.sleep(1.0)

    values,time_stamp = pepper.laser.get()

    print
    print "Front"
    print values["Front"]
    print
    print "Left"
    print values["Left"]
    print
    print "Right"
    print values["Right"]
    print
[ 11748, 10688, 11, 2435, 11, 25120, 198, 11748, 13385, 62, 39994, 198, 198, 4061, 796, 366, 17477, 13, 14656, 13, 15, 13, 20198, 1, 198, 15490, 796, 860, 38605, 198, 14323, 1741, 796, 10352, 628, 198, 198, 4480, 13385, 62, 39994, 13, ...
2.503106
161
# Import the English language class and create an nlp object
from ____ import ____

nlp = ____

# Process the text
doc = ____("I like tree kangaroos and narwhals.")

# Select the first token
first_token = doc[____]

# Print the text of the first token
print(first_token.____)
[ 2, 17267, 283, 257, 537, 21612, 12379, 300, 782, 6413, 5347, 829, 64, 357, 15823, 8, 304, 269, 380, 283, 23781, 26181, 27206, 299, 34431, 198, 6738, 220, 1427, 1330, 220, 1427, 198, 21283, 79, 796, 220, 1427, 198, 198, 2, 10854, 283...
2.685185
108
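For reference, one plausible completion of the exercise above. This assumes the snippet comes from spaCy's beginner course, where the blanks are filled with the English language class; treat it as an assumed solution, not part of the original record:

# Assumed solution to the fill-in-the-blanks exercise (spaCy 2.x/3.x API)
from spacy.lang.en import English

nlp = English()

# Process the text
doc = nlp("I like tree kangaroos and narwhals.")

# Select the first token
first_token = doc[0]

# Print the text of the first token
print(first_token.text)  # -> "I"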
from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
[ 6738, 5254, 13, 18908, 1358, 13, 17953, 62, 30001, 1330, 2251, 62, 30001, 198, 6738, 5254, 13, 18908, 1358, 13, 18908, 1358, 62, 9288, 62, 7442, 1330, 38410, 14402, 20448, 628 ]
4.16129
31
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TensorFlow Hubert model."""

import inspect
import warnings
from typing import Any, Dict, Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput
from ...modeling_tf_utils import TFPreTrainedModel, booleans_processing, get_initializer, keras_serializable
from ...tf_utils import shape_list
from ...tokenization_utils_base import BatchEncoding
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_hubert import HubertConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "HubertConfig"

TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/hubert-base-ls960",
    # See all Hubert models at https://huggingface.co/models?filter=hubert
]

LARGE_NEGATIVE = -1e8


# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.input_values_processing
def input_values_processing(func, config, input_values, **kwargs):
    """
    Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each
    input has to be named accordingly to the parameters name, i.e.
    `input_values = tf.keras.Input(shape=(128,), dtype='float32', name="input_values")` otherwise the order of the
    tensors will not be guaranteed during the training.

    Args:
        func (`callable`):
            The callable function of the TensorFlow model.
        config ([`PretrainedConfig`]):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        Two lists, one for the missing layers, and another one for the unexpected layers.
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")

    if isinstance(input_values, (tuple, list)):
        for i, input in enumerate(input_values):
            # EagerTensors don't allow to use the .name property so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names have always the pattern `name:id` then we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
                )
    elif isinstance(input_values, (dict, BatchEncoding)):
        if "inputs" in input_values:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead.",
                FutureWarning,
            )

            output["input_values"] = input_values.pop("inputs")

        if "decoder_cached_states" in input_values:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_values.pop("decoder_cached_states")

        for k, v in dict(input_values).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
    else:
        if isinstance(input_values, tf.Tensor) or input_values is None:
            output[parameter_names[0]] = input_values
        else:
            raise ValueError(
                f"Data of type {type(input_values)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
            )

    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)

    # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
    # So to respect the proper output we have to add this exception
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_values`
            output["input_values"] = output["args"]

        del output["args"]

    if "kwargs" in output:
        del output["kwargs"]

    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }

    output.update(booleans_processing(config=config, **boolean_dict))

    return output


# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement
def _sample_without_replacement(distribution, num_samples):
    """
    Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
    https://github.com/tensorflow/tensorflow/issues/9260 for more info
    """
    z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
    _, indices = tf.nn.top_k(distribution + z, num_samples)
    return indices


# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices
def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
    """
    Scatter function as in PyTorch with indices in format (batch_dim, indixes)
    """
    indices_shape = shape_list(batch_indices)
    # broadcast batch dim to indices_shape
    broad_casted_batch_dims = tf.reshape(
        tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
    )
    # transform batch_indices to pair_indices
    pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
    # scatter values to pair indices
    return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)


# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices
def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    min_masks: int = 0,
) -> tf.Tensor:
    """
    Computes random mask spans for a given shape

    Args:
        shape: the the shape for which to compute masks.
            should be of size 2 where first element is batch size and 2nd is timesteps
        attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob:
            probability for each token to be chosen as start of the span to be masked. this will be multiplied by
            number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
            however due to overlaps, the actual number will be smaller (unless no_overlap is True)
        mask_length: size of the mask
        min_masks: minimum number of masked spans

    Adapted from [fairseq's
    data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
        )
    # compute number of masked spans in batch
    num_masked_spans = int(mask_prob * sequence_length / mask_length + tf.random.uniform((1,)))
    num_masked_spans = max(num_masked_spans, min_masks)

    # make sure num masked indices <= sequence_length
    if num_masked_spans * mask_length > sequence_length:
        num_masked_spans = sequence_length // mask_length

    # SpecAugment mask to fill
    spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)

    # uniform distribution to sample from, make sure that offset samples are < sequence_length
    uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))

    # get random indices to mask
    spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
    spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
    spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))

    offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
    offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
    offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))

    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # scatter indices to mask
    spec_aug_mask = _scatter_values_on_batch_indices(
        tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, spec_aug_mask.shape
    )

    return spec_aug_mask


# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    src_len = shape_list(mask)[1]
    tgt_len = tgt_len if tgt_len is not None else src_len
    one_cst = tf.constant(1.0)
    mask = tf.cast(mask, dtype=one_cst.dtype)
    expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))

    return (one_cst - expanded_mask) * LARGE_NEGATIVE


# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert

HUBERT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TF 2.0 models accepts two formats as inputs:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional arguments.

    This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
    tensors in the first argument of the model call function: `model(inputs)`.

    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
    the first positional argument :

    - a single Tensor with `input_values` only and nothing else: `model(inputs_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"input_values": input_values, "token_type_ids": token_type_ids})`

    </Tip>

    Args:
        config ([`HubertConfig`]): Model configuration class with all the parameters of the model. Initializing with
            a config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

HUBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in
            `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
            `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_values` indices into associated
            vectors than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be
            used in eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False``):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 33448, 383, 7011, 41068, 46665, 290, 262, 12905, 2667, 32388, 3457, 13, 1074, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, ...
2.617762
6,925
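The `_sample_without_replacement` helper in the record above leans on a perturb-and-top-k idea: add independent noise to every score and keep the k largest, which yields k distinct indices in one pass. A minimal NumPy illustration of the same mechanics (the array sizes are arbitrary):

# Perturb-and-top-k in NumPy, mirroring the TF code: with a constant score
# vector (like `uniform_dist` above), adding -log(U) noise and taking the
# top-k produces k distinct indices chosen uniformly without replacement.
import numpy as np

rng = np.random.default_rng(0)
scores = np.ones(10)                          # analogous to uniform_dist
z = -np.log(rng.uniform(size=scores.shape))   # same noise as the TF code
k = 3
indices = np.argsort(scores + z)[-k:]         # top-k == sample w/o replacement
print(sorted(indices))                        # three distinct indices in [0, 10)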
# 6.00 Problem Set 2
#
# Hangman
# Name          : Solutions
# Collaborators : <your collaborators>
# Time spent    : <total time>
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions

import random
import string

WORDLIST_FILENAME = "words.txt"

def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print "Loading word list from file..."
    # inFile: file
    inFile = open(WORDLIST_FILENAME, 'r', 0)
    # line: string
    line = inFile.readline()
    # wordlist: list of strings
    wordlist = string.split(line)
    print " ", len(wordlist), "words loaded."
    return wordlist

def choose_word(wordlist):
    """
    wordlist (list): list of words (strings)

    Returns a word from wordlist at random
    """
    return random.choice(wordlist)

# end of helper code
# -----------------------------------

# load the list of words into the wordlist variable
# so that it can be accessed from anywhere in the program
wordlist = load_words()

def partial_word(secret_word, guessed_letters):
    """
    Return the secret_word in user-visible format, with underscores used
    to replace characters that have not yet been guessed.
    """
    result = ''
    for letter in secret_word:
        if letter in guessed_letters:
            result = result + letter
        else:
            result = result + '_'
    return result

def hangman():
    """
    Runs the hangman game.
    """
    print 'Welcome to the game, Hangman!'
    secret_word = choose_word(wordlist)
    print 'I am thinking of a word that is ' + str(len(secret_word)) + ' letters long.'

    num_guesses = 8
    word_guessed = False
    guessed_letters = ''
    available_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
                         'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                         'u', 'v', 'w', 'x', 'y', 'z']

    # Letter-guessing loop. Ask the user to guess a letter and respond to the
    # user based on whether the word has yet been correctly guessed.
    while num_guesses > 0 and not word_guessed:
        print '-------------'
        print 'You have ' + str(num_guesses) + ' guesses left.'
        print 'Available letters: ' + ''.join(available_letters)
        guess = raw_input('Please guess a letter:')
        if guess not in available_letters:
            print 'Oops! You\'ve already guessed that letter: ' + partial_word(secret_word, guessed_letters)
        elif guess not in secret_word:
            num_guesses -= 1
            available_letters.remove(guess)
            print 'Oops! That letter is not in my word: ' + partial_word(secret_word, guessed_letters)
        else:
            available_letters.remove(guess)
            guessed_letters += guess
            print 'Good guess: ' + partial_word(secret_word, guessed_letters)
            if secret_word == partial_word(secret_word, guessed_letters):
                word_guessed = True
    if word_guessed:
        print 'Congratulations, you won!'
    else:
        print 'Game over.'
[ 2, 718, 13, 405, 20647, 5345, 362, 198, 2, 220, 198, 2, 24300, 805, 198, 2, 6530, 220, 220, 220, 220, 220, 220, 220, 220, 220, 1058, 23555, 198, 2, 37322, 2024, 1058, 1279, 14108, 37886, 29, 198, 2, 3862, 3377, 220, 220, 220, 10...
2.627229
1,234
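`partial_word` is the one piece of the game logic that is easy to check in isolation; a small set of asserts (not part of the original problem set) illustrates its contract:

# Quick checks of partial_word's behavior: guessed letters show through,
# everything else becomes an underscore.
assert partial_word('apple', 'ap') == 'app__'
assert partial_word('apple', '') == '_____'
assert partial_word('apple', 'aple') == 'apple'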
'''
Created by auto_sdk on 2016.04.13
'''
from top.api.base import RestApi
[ 7061, 6, 198, 41972, 416, 8295, 62, 21282, 74, 319, 1584, 13, 3023, 13, 1485, 198, 7061, 6, 198, 6738, 1353, 13, 15042, 13, 8692, 1330, 8324, 32, 14415, 198 ]
2.5
30
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def get_args(batch_size=8, image_size=256, max_iter=100000):
    """
    Get command line arguments.
    Arguments set the default values of command line arguments.
    """
    import argparse
    import os

    description = "Example of Lightweight GAN."
    parser = argparse.ArgumentParser(description)

    parser.add_argument("-d", "--device-id", type=str, default="0",
                        help="Device id.")
    parser.add_argument("-c", "--context", type=str, default="cudnn",
                        help="Context.")
    parser.add_argument("--type-config", "-t", type=str, default='float',
                        help='Type of computation. e.g. "float", "half".')
    parser.add_argument("--img-path", type=str, default="~/AnimalFace-dog",
                        help="Image path.")
    parser.add_argument("--image-size", type=int, default=image_size,
                        help="Image size.")
    parser.add_argument("--batch-size", "-b", type=int, default=batch_size,
                        help="Batch size.")
    parser.add_argument("--max-iter", "-i", type=int, default=max_iter,
                        help="Max iterations.")
    parser.add_argument("--save-interval", type=int, default=50000,
                        help="Interval for saving models.")
    parser.add_argument("--test-interval", type=int, default=5000,
                        help="Interval for testing models.")
    parser.add_argument("--latent", type=int, default=256,
                        help="Number of latent variables.")
    parser.add_argument("--monitor-path", type=str, default="./result/tmp",
                        help="Monitor path.")
    parser.add_argument("--model-load-path", type=str, default=".",
                        help="Path to load parameters from")
    parser.add_argument("--train-samples", type=int, default=-1,
                        help="Number of data to be used. When -1 is set all data is used.")
    parser.add_argument("--lr", type=float, default=2e-4,
                        help="Learning rate")
    parser.add_argument("--aug-list", nargs="+",
                        default=["lrflip", "translation", "color"])

    args = parser.parse_args()

    return args
[ 2, 15069, 33448, 10184, 10501, 13, 198, 2, 15069, 33448, 10184, 4912, 10501, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, ...
2.459087
1,161
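Because `get_args` both builds the parser and calls `parse_args()`, it is meant to be invoked once at script entry; attribute names follow argparse's dash-to-underscore conversion. A hypothetical caller:

# Hypothetical entry point consuming get_args; with no CLI flags given,
# the keyword arguments and parser defaults flow straight through.
if __name__ == "__main__":
    args = get_args(batch_size=16)
    print(args.batch_size)    # 16 unless overridden by --batch-size
    print(args.monitor_path)  # "./result/tmp" by default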
#!/usr/bin/env python
from __future__ import print_function

# READ ####
f = open("my_file.txt")

print("\nLoop directly over file")
print("-" * 60)
for line in f:
    print(line.strip())
print("-" * 60)
f.seek(0)

my_content = f.readlines()
print("\nUse readlines method")
print("-" * 60)
for line in my_content:
    print(line.strip())
print("-" * 60)
f.seek(0)

my_content = f.read()
print("\nUse read + splitlines")
print("-" * 60)
for line in my_content.splitlines():
    print(line)
print("-" * 60)
f.close()

with open("my_file.txt") as f:
    print("\nUse with and loop over file")
    print("-" * 60)
    for line in f:
        print(line.strip())
    print("-" * 60)

# WRITE ####
print("\nWriting file.")
f = open("new_file.txt", "w")
f.write("whatever2\n")
f.close()

# APPEND ####
print("\nAppending file.")
with open("new_file.txt", "a") as f:
    f.write("something else\n")

print()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 2, 20832, 1303, 21017, 198, 69, 796, 1280, 7203, 1820, 62, 7753, 13, 14116, 4943, 198, 4798, 7203, 59, 77, 39516, 3264, 625, ...
2.467033
364
import uuid
from typing import List, Dict, Any

import unittest

from selfhost_client import SelfHostClient, DatasetType
[ 11748, 334, 27112, 198, 6738, 19720, 1330, 7343, 11, 360, 713, 11, 4377, 198, 198, 11748, 555, 715, 395, 198, 198, 6738, 2116, 4774, 62, 16366, 1330, 12189, 17932, 11792, 11, 16092, 292, 316, 6030, 628 ]
3.388889
36
from setuptools import setup, find_packages

setup(
    name="soccergen",
    version="0.1",
    packages=find_packages(),
    # Project uses reStructuredText, so ensure that the docutils get
    # installed or upgraded on the target machine
    install_requires=["gfootball>=2.8",],
    # metadata to display on PyPI
    author="Peter Xenopoulos",
    author_email="xenopoulos@nyu.edu",
    description="Soccer trajectory and event data generation",
    keywords="soccer data-generation foootball",
    url="https://github.com/pnxenopoulos/soccer-data-gen",  # project home page, if any
    project_urls={
        "Issues": "https://github.com/pnxenopoulos/soccer-data-gen/issues",
        "Documentation": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
        "Github": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
    },
    classifiers=["License :: OSI Approved :: MIT License"],
)
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 2625, 35634, 2189, 5235, 1600, 198, 220, 220, 220, 2196, 2625, 15, 13, 16, 1600, 198, 220, 220, 220, 10392, 28, 19796, 62, 43789...
2.754545
330
from itertools import product
from unittest.mock import patch
import pytest

import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal

from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers

FDR_CONFIG = {'decoy_sample_size': 2}


def test_estimate_fdr_digitize_works():
    fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=[],
        neutral_losses=[],
        target_adducts=['+H'],
        analysis_version=1,
    )
    fdr.fdr_levels = [0.4, 0.8]
    fdr.td_df = pd.DataFrame(
        [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
        columns=['formula', 'tm', 'dm'],
    )

    msm_df = pd.DataFrame(
        [
            ['C1', '+H', 1.0],
            ['C2', '+H', 0.75],
            ['C3', '+H', 0.5],
            ['C4', '+H', 0.25],
            ['C1', '+Cu', 0.75],
            ['C2', '+Ag', 0.3],
            ['C3', '+Cl', 0.25],
            ['C4', '+Co', 0.1],
        ],
        columns=['formula', 'modifier', 'msm'],
    )
    exp_sf_df = pd.DataFrame(
        [
            ['C1', '+H', 1.0, 0.4],
            ['C2', '+H', 0.75, 0.4],
            ['C3', '+H', 0.5, 0.4],
            ['C4', '+H', 0.25, 0.8],
        ],
        columns=['formula', 'modifier', 'msm', 'fdr'],
    )

    assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)


def test_ions():
    formulas = ['H2O', 'C5H2OH']
    target_adducts = ['+H', '+Na']
    decoy_sample_size = 5

    fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=[],
        neutral_losses=[],
        target_adducts=target_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    assert type(ions) == list
    # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
    assert (
        len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
        < len(ions)
        <= len(formulas) * len(target_adducts) * decoy_sample_size + len(formulas) * len(target_adducts)
    )

    target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
    assert set(target_ions).issubset(set(map(tuple, ions)))


def test_chem_mods_and_neutral_losses():
    formulas = ['H2O', 'C5H2OH']
    chem_mods = ['-H+C']
    neutral_losses = ['-O', '-C']
    target_adducts = ['+H', '+Na', '[M]+']
    target_modifiers = [
        format_modifiers(cm, nl, ta)
        for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
    ]
    decoy_sample_size = 5

    fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=chem_mods,
        neutral_losses=neutral_losses,
        target_adducts=target_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    assert type(ions) == list
    # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
    min_count = len(formulas) * len(target_modifiers)
    max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
    assert min_count < len(ions) <= max_count

    target_ions = list(product(formulas, target_modifiers))
    assert set(target_ions).issubset(set(map(tuple, ions)))


def test_run_fdr_ranking():
    target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
    decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
    n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
    expected_fdr = n_decoys / n_targets
    expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)
    expected_fdr_mono = pd.Series(
        [0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
    )

    fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
    fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)
    fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)

    assert np.isclose(fdr, expected_fdr).all()
    assert np.isclose(fdr_ros, expected_fdr_ros).all()
    assert np.isclose(fdr_mono, expected_fdr_mono).all()
[ 6738, 340, 861, 10141, 1330, 1720, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 11748, 12972, 9288, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 19798, 292, 13, 22602, 13, 33407,...
2.019179
2,242
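The expected values in `test_run_fdr_ranking` encode the usual target-decoy estimate: at the score threshold of each target, FDR is the number of decoys at or above that score divided by the number of targets at or above it (the `_ros` variant adds one to both counts). One entry recomputed by hand, using the same series as the test:

# Recomputing expected_fdr for the 6th target (threshold 0.5): six targets
# and two decoys score >= 0.5, so the estimate is 2 / 6.
import pandas as pd

target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])

threshold = 0.5
n_targets = (target_scores >= threshold).sum()  # 6
n_decoys = (decoy_scores >= threshold).sum()    # 2
print(n_decoys / n_targets)                     # 0.333..., matching expected_fdr[5]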
from logging import getLogger

getLogger('flake8').propagate = False
[ 6738, 18931, 1330, 651, 11187, 1362, 198, 198, 1136, 11187, 1362, 10786, 47597, 23, 27691, 22930, 37861, 796, 10352, 198 ]
3.45
20
import math

import numpy as np
import pandas as pd


def fixed_time_horizon(df, column='close', lookback=20):
    """
    Fixed-time Horizon

    As it relates to finance, virtually all ML papers label observations using the fixed-time horizon method.
    Fixed-time horizon is presented as one of the main procedures to label data when it comes to processing
    financial time series for machine learning.

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    lookback: int
        The number of days to look ahead.

    References
    ----------
    1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html
    2. https://arxiv.org/pdf/1603.08604.pdf
    3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/
    4. De Prado, Advances in financial machine learning, 2018
    5. Dixon et al., Classification-based financial markets prediction using deep neural networks, 2017
    """
    price = df[column]
    label = (price.shift(-lookback) / price > 1).astype(int)
    return label


def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True):
    """
    Triple Barrier

    The idea is to consider the full dynamics of a trading strategy and not a simple performance proxy. The
    rationale for this extension is that often money managers implement P&L triggers that cash in when gains are
    sufficient or opt out to stop their losses. Upon inception of the strategy, three barriers are fixed
    (De Prado, 2018).

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    ub: float
        It stands for upper bound, e.g. 0.07 is a 7% profit taking.
    lb: float
        It stands for lower bound, e.g. 0.03 is a 3% stop loss.
    lookback: int
        Maximum holding time.

    References
    ----------
    1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/
    2. http://www.mlfactor.com/Data.html#the-triple-barrier-method
    3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/
    4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e
    5. De Prado, Advances in financial machine learning, 2018
    """
    ub = 1 + ub
    lb = 1 - lb

    r = np.array(range(lookback))

    # NOTE: the two rolling helpers below were missing from this snippet; they are
    # reconstructed from the finlab.tw reference (1) above and may differ from the
    # original in detail.
    def end_price(s):
        # relative price at the first barrier touch, or at the window end if no touch
        return np.append(s[(s / s[0] > ub) | (s / s[0] < lb)], s[-1])[0] / s[0]

    def end_time(s):
        # offset (in bars) of the first barrier touch, or the last bar if no touch
        return np.append(r[(s / s[0] > ub) | (s / s[0] < lb)], lookback - 1)[0]

    price = df[column]
    p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback + 1)
    t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback + 1)
    t = pd.Series(
        [t.index[int(k + i)] if not math.isnan(k + i) else np.datetime64('NaT') for i, k in enumerate(t)],
        index=t.index
    ).dropna()

    label = pd.Series(0, p.index)
    label.loc[p > ub] = 1
    label.loc[p < lb] = -1
    if binary_classification:
        label = np.where(label == 1, 1, 0)

    return pd.Series(label, index=price.index)


def get_continuous_trading_signals(df, column='close', lookahead=5):
    """
    Continuous Trading Signal

    A hybrid stock trading framework integrating technical analysis with machine learning techniques.

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    lookahead: int
        The number of days to look ahead.

    References
    ----------
    1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf
    2. Dash and Dash, A hybrid stock trading framework integrating technical analysis with machine learning techniques, 2016
    """
    price = df[column]  # was `df.data[column]`; a plain DataFrame has no `.data` attribute
    OTr = []
    trends = []
    for idx in range(len(price) - lookahead + 1):
        arr_window = price[idx:(idx + lookahead)]
        if price[idx + lookahead - 1] > price[idx]:
            coef = (price[idx + lookahead - 1] - min(arr_window)) / (max(arr_window) - min(arr_window))
            y_t = coef * 0.5 + 0.5
        elif price[idx + lookahead - 1] <= price[idx]:
            coef = (price[idx + lookahead - 1] - min(arr_window)) / (max(arr_window) - min(arr_window))
            y_t = coef * 0.5
        OTr.append(y_t)

    OTr = np.append(OTr, np.zeros(shape=(len(price) - len(OTr))))
    trends = (OTr >= np.mean(OTr)).astype(int)
    return pd.Series(OTr, index=price.index), pd.Series(trends, index=price.index)
[ 11748, 10688, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 628, 198, 4299, 5969, 62, 2435, 62, 17899, 8637, 7, 7568, 11, 5721, 11639, 19836, 3256, 804, 1891, 28, 1238, 2599, 198, 220, 220, 220, 37227, 198,...
2.576561
1,698
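A hedged usage sketch for the labeling functions above, on a synthetic random-walk price series (the series and parameters are made up for illustration, and it presumes the reconstructed rolling helpers noted in `triple_barrier`):

# Hypothetical driver: label 250 days of synthetic prices with both schemes.
import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
prices = 100 * np.exp(np.cumsum(rng.normal(0, 0.01, size=250)))
df = pd.DataFrame({'close': prices}, index=pd.date_range('2020-01-01', periods=250))

labels_fth = fixed_time_horizon(df, 'close', lookback=20)
labels_tb = triple_barrier(df, 'close', ub=0.07, lb=0.03, lookback=20)
print(labels_fth.value_counts())
print(labels_tb.value_counts())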
from __future__ import absolute_import

import hashlib
import logging
import os

from django.utils.encoding import smart_str

from common.conf.settings import TEMPORARY_DIRECTORY
from common.utils import fs_cleanup

from .exceptions import OfficeConversionError, UnknownFileFormat
from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL,
    DEFAULT_ROTATION, DEFAULT_FILE_FORMAT)
from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE,
    TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR,
    FILE_FORMATS)
from .runtime import backend, office_converter

HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()

logger = logging.getLogger(__name__)
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 11748, 12234, 8019, 198, 11748, 18931, 198, 11748, 28686, 198, 198, 6738, 42625, 14208, 13, 26791, 13, 12685, 7656, 1330, 4451, 62, 2536, 198, 198, 6738, 2219, 13, 10414, 13, 336...
3
231
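The module-level `HASH_FUNCTION` above is a thin wrapper around SHA-256 hex digests, presumably used to fingerprint file content elsewhere in the converter. A hypothetical call, just to show the expected input type:

# HASH_FUNCTION expects bytes (hashlib.sha256 rejects str on Python 3).
import hashlib

HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()
print(HASH_FUNCTION(b"hello"))  # 64-char hex digest of the bytes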
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils


if __name__ == '__main__':
    main()
[ 11748, 299, 32152, 355, 45941, 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 201, 198, 11748, 3384, 4487, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 361, 11593, ...
2.305085
59
import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast

import torch

from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
[ 11748, 1257, 310, 10141, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 6738, 19720, 1330, 4377, 11, 4889, 540, 11, 360, 713, 11, 7343, 11, 337, 5912, 11, 45835, 11, 309, 29291, 11, 4479, 11, 3350, 198, 198, 11748, 28034, 198, 1...
3.57971
69
""" Bellman Ford Arbitrage implementation over websocket API. """ from __future__ import annotations from collections import namedtuple from datetime import datetime from decimal import Decimal from math import log import pandas as pd import numpy as np import asyncio import typing from aiokraken.model.assetpair import AssetPair from aiokraken.rest import AssetPairs, Assets from aiokraken.model.asset import Asset from aiokraken.rest.client import RestClient from aiokraken.websockets.publicapi import ticker import networkx as nx client = RestClient() def test_pricematrix_mapping(): # testing with string for simplicity for now pm = PriceMatrix(["EUR", "BTC"]) pm["EUR"]["BTC"] = Decimal(1.234) pm["BTC"]["EUR"] = Decimal(4.321) assert pm["EUR"]["BTC"] == Decimal(1.234) assert pm["BTC"]["EUR"] == Decimal(4.321) if __name__ == '__main__': asyncio.run(arbiter(user_assets=["XTZ", "ETH", "XBT", "EUR"]), debug=True)
[ 37811, 198, 36488, 805, 8092, 33619, 8394, 7822, 625, 2639, 5459, 7824, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 32465, 1330, ...
2.794798
346
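`test_pricematrix_mapping` exercises a `PriceMatrix` that the record never defines (like `arbiter`, it lives elsewhere in the aiokraken project). A minimal dict-of-dicts sketch that would satisfy the test, offered purely as an assumption about the interface rather than the real implementation:

# Minimal PriceMatrix sketch sufficient for test_pricematrix_mapping:
# a base-asset -> quote-asset -> Decimal mapping with item access.
from decimal import Decimal
from typing import Dict, List, Optional


class PriceMatrix:
    def __init__(self, assets: List[str]):
        # one row per base asset, one column per quote asset, prices unset
        self._m: Dict[str, Dict[str, Optional[Decimal]]] = {
            base: {quote: None for quote in assets} for base in assets
        }

    def __getitem__(self, base: str) -> Dict[str, Optional[Decimal]]:
        return self._m[base]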