Dataset columns (each record below lists these four fields, in this order):

    content           string    lengths 1 to 1.04M characters
    input_ids         list      lengths 1 to 774k tokens
    ratio_char_token  float64   0.38 to 22.9
    token_count       int64     1 to 774k
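The ids in input_ids are consistent with a GPT-2-style BPE vocabulary (198, for example, is the GPT-2 newline token), but the dump itself does not name the tokenizer. Below is a minimal sketch of how each record could be derived, assuming the Hugging Face "gpt2" tokenizer; the tokenizer choice and the make_row helper are illustrative assumptions, not something the data confirms. The input_ids arrays shown in the records are truncated previews.

# Minimal sketch, assuming a GPT-2-style tokenizer (an assumption; the dump
# does not say which vocabulary produced its input_ids).
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    """Build one dataset record from a raw source-code string."""
    input_ids = tokenizer(content)["input_ids"]
    return {
        "content": content,
        "input_ids": input_ids,
        # characters per token: higher values mean the text tokenizes compactly
        "ratio_char_token": len(content) / len(input_ids),
        "token_count": len(input_ids),
    }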
import sys

from nltk.stem import PorterStemmer
from nltk.tokenize import TreebankWordTokenizer

stemmer = PorterStemmer()

# Expected format of db_info_dic
# db_id: database id.
# table_names: vector of table names.
# table_names_original: vector of original table names.
# column_names: vector of [ table index - column names ]. 0-th column is padding for "count all" things.
# column_names_original
# column_types: types of the column. text / time / number / others
# primary_keys: index of primary keys.
# foreign_keys: vector of foreign key pairs.
#
# Example of a data:
#
# db_id: product_catalog
# column_names: [[-1, '*'], [0, 'attribute id'], [0, 'attribute name'], [0, 'attribute data type'], [1, 'catalog id'], [1, 'catalog name'], [1, 'catalog publisher'], [1, 'date of publication'], [1, 'date of latest revision'], [2, 'catalog level number'], [2, 'catalog id'], [2, 'catalog level name'], [3, 'catalog entry id'], [3, 'catalog level number'], [3, 'parent entry id'], [3, 'previous entry id'], [3, 'next entry id'], [3, 'catalog entry name'], [3, 'product stock number'], [3, 'price in dollars'], [3, 'price in euros'], [3, 'price in pounds'], [3, 'capacity'], [3, 'length'], [3, 'height'], [3, 'width'], [4, 'catalog entry id'], [4, 'catalog level number'], [4, 'attribute id'], [4, 'attribute value']]
# primary_keys: [1, 4, 9, 12]
# column_types: ['text', 'number', 'text', 'text', 'number', 'text', 'text', 'time', 'time', 'number', 'number', 'text', 'number', 'number', 'number', 'number', 'number', 'text', 'text', 'number', 'number', 'number', 'text', 'text', 'text', 'text', 'number', 'number', 'number', 'text']
# foreign_keys: [[10, 4], [13, 9], [27, 9], [26, 12]]
# column_names_original: [[-1, '*'], [0, 'attribute_id'], [0, 'attribute_name'], [0, 'attribute_data_type'], [1, 'catalog_id'], [1, 'catalog_name'], [1, 'catalog_publisher'], [1, 'date_of_publication'], [1, 'date_of_latest_revision'], [2, 'catalog_level_number'], [2, 'catalog_id'], [2, 'catalog_level_name'], [3, 'catalog_entry_id'], [3, 'catalog_level_number'], [3, 'parent_entry_id'], [3, 'previous_entry_id'], [3, 'next_entry_id'], [3, 'catalog_entry_name'], [3, 'product_stock_number'], [3, 'price_in_dollars'], [3, 'price_in_euros'], [3, 'price_in_pounds'], [3, 'capacity'], [3, 'length'], [3, 'height'], [3, 'width'], [4, 'catalog_entry_id'], [4, 'catalog_level_number'], [4, 'attribute_id'], [4, 'attribute_value']]
# table_names_original: ['Attribute_Definitions', 'Catalogs', 'Catalog_Structure', 'Catalog_Contents', 'Catalog_Contents_Additional_Attributes']
# table_names: ['attribute definitions', 'catalogs', 'catalog structure', 'catalog contents', 'catalog contents additional attributes']
[ 11748, 25064, 198, 198, 6738, 299, 2528, 74, 13, 927, 220, 220, 220, 220, 220, 1330, 20890, 1273, 368, 647, 198, 6738, 299, 2528, 74, 13, 30001, 1096, 220, 1330, 12200, 17796, 26449, 30642, 7509, 198, 198, 927, 647, 220, 796, 20890, ...
2.626529
1,063
# Copyright (c) 2020 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import sys
import pathlib

from buildbot.changes.gitpoller import GitPoller
from buildbot.plugins import schedulers, util, worker, reporters

sys.path.append(str(pathlib.Path(__file__).resolve().parents[2]))
import bb.master.config as config
import bb.utils


c = BuildmasterConfig = {}

# Add workers
c["workers"] = []
ALL_WORKERS_NAMES = []
for worker_ in config.WORKERS.values():
    for w_name, prop in worker_.items():
        ALL_WORKERS_NAMES.append(w_name)
        c["workers"].append(worker.Worker(w_name, config.WORKER_PASS,
                                          properties=prop,
                                          # To disable parallel builds on one worker
                                          max_builds=prop.get('max_builds') or 1))

# Basic config
c["protocols"] = {"pb": {"port": config.WORKER_PORT}}
c["buildbotNetUsageData"] = config.BUILDBOT_NET_USAGE_DATA
c["title"] = config.BUILDBOT_TITLE
c["titleURL"] = config.REPO_URL
c["buildbotURL"] = config.BUILDBOT_URL

# Create schedulers and builders for builds
c["builders"] = []
c["schedulers"] = [
    schedulers.SingleBranchScheduler(name=config.TRIGGER,
                                     change_filter=util.ChangeFilter(),
                                     treeStableTimer=config.BUILDBOT_TREE_STABLE_TIMER,
                                     builderNames=[config.TRIGGER])]

for builder_name, properties in config.FLOW.get_prepared_builders().items():
    if properties.get('add_triggerable_sheduler', True):
        c["schedulers"].append(schedulers.Triggerable(name=builder_name,
                                                      builderNames=[builder_name]))
    c["builders"].append(util.BuilderConfig(name=builder_name,
                                            workernames=get_workers(properties.get("worker")),
                                            factory=properties['factory']))


class GitHubStatusPushFilter(reporters.GitHubStatusPush):
    """
    This class extends the filtering options of reporters.GitHubStatusPush
    """


# Push the status of each build to GitHub
c["services"] = [
    GitHubStatusPushFilter(token=config.GITHUB_TOKEN,
                           context=util.Interpolate("buildbot/%(prop:buildername)s"),
                           startDescription="Started",
                           endDescription="Done",
                           verbose=True)]

# Get changes
c["change_source"] = []

CI_REPOSITORIES = [
    {'name': config.MEDIASDK_REPO,
     'organization': config.MEDIASDK_ORGANIZATION,
     # All changes, with a limited number of commits
     'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)},
    {'name': config.DRIVER_REPO,
     'organization': config.INTEL_ORGANIZATION,
     'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)},
    {'name': config.LIBVA_REPO,
     'organization': config.INTEL_ORGANIZATION,
     'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)},
    {'name': config.PRODUCT_CONFIGS_REPO,
     'organization': config.MEDIASDK_ORGANIZATION,
     # Pull requests only from members of the Intel-Media-SDK organization,
     # with a limited number of commits. This filter is needed for security,
     # because via product configs one can do everything.
     'change_filter': bb.utils.ChangeChecker(config.GITHUB_TOKEN)},
    {'name': config.INFRASTRUCTURE_REPO,
     'organization': config.MEDIASDK_ORGANIZATION,
     # All changes, with a limited number of commits
     'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)}
]

for repo in CI_REPOSITORIES:
    repo_url = f"https://github.com/{repo['organization']}/{repo['name']}.git"
    c["change_source"].append(GitPoller(
        repourl=repo_url,
        # Dir for the output of the git remote-ls command
        workdir=f"gitpoller-{repo['name']}",
        # Poll master, release branches and open pull request branches.
        # Filters are applied in the following order:
        #   branches (discard all non-release branches)
        #   pull_request (add branches of open pull requests)
        #   *fetch branches*
        #   change_filter (check changes)
        branches=lambda branch: bb.utils.is_release_branch(branch),
        pull_request_branches=bb.utils.get_open_pull_request_branches(repo['organization'],
                                                                      repo['name'],
                                                                      token=config.GITHUB_TOKEN),
        change_filter=repo['change_filter'],
        category="media",
        pollInterval=config.POLL_INTERVAL,
        pollAtLaunch=True))

# TODO: All repos should be declared in the cycle above, but change filtration
# needs to be improved first to avoid hurting efficiency
for repo in config.AUTO_UPDATED_REPOSITORIES:
    repo_url = f"https://github.com/{config.INTEL_ORGANIZATION}/{repo}.git"
    c["change_source"].append(GitPoller(
        repourl=repo_url,
        workdir=f"gitpoller-{repo}",
        branches=['master'],
        category="auto_update",
        change_filter=MediasdkChangeChecker(),
        pollInterval=config.POLL_INTERVAL,
        pollAtLaunch=True))

# Web Interface
c["www"] = dict(port=int(config.PORT),
                plugins={"console_view": True,
                         "grid_view": True})

# Database
c["db"] = {"db_url": config.DATABASE_URL}

# Disables automatic merging of requests (to build EACH commit)
c["collapseRequests"] = False
[ 2, 15069, 357, 66, 8, 12131, 8180, 10501, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 2, 286, 428, 3788, 290, 3917, 10314, 3696, 357, 1169, 366, 25423, 12340, 284, ...
2.395066
2,716
import random

import numpy as np
from PIL import Image
from torchvision import transforms

# CenterCrop

# RandomCrop
# transforms.RandomCrop(size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant')

# RandomResizedCrop
# transforms.RandomResizedCrop(size, scale=(0.08, 1.0), ratio=(3/4, 4/3), interpolation)

# FiveCrop, TenCrop
# transforms.FiveCrop(size)
# transforms.TenCrop(size, vertical_flip=False)

# RandomHorizontalFlip, RandomVerticalFlip

# RandomRotation
# transforms.RandomRotation(degrees, resample=False, expand=False, center=None)

# Pad
# transforms.Pad(padding, fill=0, padding_mode='constant')

# ColorJitter
# transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)

# Grayscale, RandomGrayscale
# transforms.Grayscale(num_output_channels)
# transforms.RandomGrayscale(num_output_channels, p=0.1)

# RandomAffine
# transforms.RandomAffine(degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0)

# LinearTransformation

# RandomErasing: operates on a tensor
# transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False)

# Lambda
# transforms.Lambda(lambd)

# Resize
# ToTensor

# Normalize: output = (input - mean) / std
# transforms.Normalize(mean, std, inplace=False)

# RandomChoice
# transforms.RandomChoice([transforms1, transforms2, transforms3])

# RandomApply
# transforms.RandomApply([transforms1, transforms2, transforms3], p=0.5)

# RandomOrder
# transforms.RandomOrder([transforms1, transforms2, transforms3])

# Compose


def custom_transform():
    """
    class YourTransforms(object):
        def __init__(self, ...):
            ...
        def __call__(self, img):
            ...
            return img
    """


class AddPepperNoise(object):
    """
    Args:
        snr (float): signal noise rate
        p (float): probability
    """

    def __init__(self, snr, p):
        self.snr = snr
        self.p = p

    def __call__(self, img):
        """
        Args:
            img (PIL Image): PIL Image
        Returns:
            img (PIL Image): PIL Image
        """
        if random.uniform(0, 1) < self.p:
            img_ = np.array(img).copy()
            h, w, c = img_.shape
            signal_pct = self.snr
            noise_pct = 1 - self.snr
            mask = np.random.choice((0, 1, 2), size=(h, w, 1),
                                    p=[signal_pct, noise_pct / 2., noise_pct / 2.])
            mask = np.repeat(mask, c, axis=2)
            img_[mask == 1] = 255  # salt noise
            img_[mask == 2] = 0    # pepper noise
            return Image.fromarray(img_.astype('uint8')).convert('RGB')
        else:
            return img
[ 6738, 28034, 10178, 1330, 31408, 628, 220, 220, 220, 1303, 3337, 34, 1773, 628, 220, 220, 220, 1303, 14534, 34, 1773, 198, 220, 220, 220, 1303, 31408, 13, 29531, 34, 1773, 7, 7857, 11, 24511, 28, 14202, 11, 14841, 62, 361, 62, 27938...
2.281873
1,153
from flask import Flask, Blueprint, render_template, request, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_login import login_required, current_user
from datetime import datetime

from .models import User, Scores, Board
from . import db

main = Blueprint('main', __name__)


@main.route("/")
@main.route("/submit", methods=["POST"])
@main.route("/home_leaderboard")
[ 6738, 42903, 1330, 46947, 11, 39932, 11, 8543, 62, 28243, 11, 2581, 11, 19016, 62, 1640, 11, 18941, 198, 6738, 42903, 62, 25410, 282, 26599, 1330, 16363, 2348, 26599, 198, 6738, 42903, 62, 38235, 1330, 17594, 62, 35827, 11, 1459, 62, ...
3.378151
119
# Generated by Django 3.1 on 2008-12-31 19:48

from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 319, 3648, 12, 1065, 12, 3132, 678, 25, 2780, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.892857
28
class WheelValidationError(Exception):
    """Superclass for all wheel validation errors raised by this package"""
    pass


class RecordValidationError(WheelValidationError):
    """
    Superclass for all validation errors raised due to a wheel's
    :file:`RECORD` being inaccurate or incomplete
    """
    pass


class RecordSizeMismatchError(RecordValidationError):
    """
    Raised when the size of a file as declared in a wheel's :file:`RECORD`
    does not match the file's actual size
    """


class RecordDigestMismatchError(RecordValidationError):
    """
    Raised when a file's digest as declared in a wheel's :file:`RECORD` does
    not match the file's actual digest
    """


class FileMissingError(RecordValidationError):
    """
    Raised when a file listed in a wheel's :file:`RECORD` is not found in the
    wheel
    """


class ExtraFileError(RecordValidationError):
    """
    Raised when a wheel contains a file that is not listed in the
    :file:`RECORD` (other than :file:`RECORD.jws` and :file:`RECORD.p7s`)
    """


class MalformedRecordError(WheelValidationError):
    """
    Superclass for all validation errors raised due to a wheel's
    :file:`RECORD` being malformed
    """
    pass


class UnknownDigestError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` uses a digest not listed
    in `hashlib.algorithms_guaranteed`
    """


class WeakDigestError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` uses a digest weaker
    than sha256
    """


class MalformedDigestError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` contains a malformed or
    invalid digest
    """


class MalformedSizeError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` contains a malformed or
    invalid file size
    """


class RecordConflictError(MalformedRecordError):
    """
    Raised when a wheel's :file:`RECORD` contains two or more conflicting
    entries for the same path
    """


class EmptyDigestError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` has a size but not a
    digest
    """


class EmptySizeError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` has a digest but not a
    size
    """


class EmptyPathError(MalformedRecordError):
    """Raised when an entry in a wheel's :file:`RECORD` has an empty path"""


class RecordLengthError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` has the wrong number of
    fields
    """


class NullEntryError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` lacks both digest and
    size and the entry is not for the :file:`RECORD` itself
    """


class NonNormalizedPathError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` has a non-normalized
    path
    """


class AbsolutePathError(MalformedRecordError):
    """
    Raised when an entry in a wheel's :file:`RECORD` has an absolute path
    """


class DistInfoError(WheelValidationError):
    """
    Raised when a wheel's :file:`*.dist-info` directory cannot be found or
    determined
    """
    pass


class MissingDistInfoFileError(WheelValidationError):
    """
    Raised when a given file is not found in the wheel's :file:`*.dist-info`
    directory
    """


class MissingMetadataError(MissingDistInfoFileError):
    """Raised when a wheel does not contain a :file:`METADATA` file"""


class MissingRecordError(MissingDistInfoFileError):
    """Raised when a wheel does not contain a :file:`RECORD` file"""


class MissingWheelInfoError(MissingDistInfoFileError):
    """Raised when a wheel does not contain a :file:`WHEEL` file"""
[ 4871, 15810, 7762, 24765, 12331, 7, 16922, 2599, 198, 220, 220, 220, 37227, 12442, 4871, 329, 477, 7825, 21201, 8563, 4376, 416, 428, 5301, 37811, 628, 220, 220, 220, 1208, 628, 198, 4871, 13266, 7762, 24765, 12331, 7, 45307, 7762, 2476...
3.013396
1,269
from js9 import j

JSConfigFactory = j.tools.configmanager.base_class_configs
JSConfigClient = j.tools.configmanager.base_class_config

TEMPLATE = """
host = "localhost"
port = 27017
username = ""
password_ = ""
alias = ""
db = ""
authentication_source = ""
authentication_mechanism = ""
ssl = false # Boolean
replicaset = ""
"""
[ 6738, 44804, 24, 1330, 474, 198, 198, 20120, 16934, 22810, 796, 474, 13, 31391, 13, 11250, 37153, 13, 8692, 62, 4871, 62, 11250, 82, 198, 20120, 16934, 11792, 796, 474, 13, 31391, 13, 11250, 37153, 13, 8692, 62, 4871, 62, 11250, 198, ...
3.084112
107
#!/usr/bin/python2.7

import re
import numpy

light_matrix = numpy.zeros((1000, 1000), dtype=numpy.int)
regex = r'^([a-z ]+)(\d+)(?:,)(\d+)(?: \w+ )(\d+)(?:,)(\d+)'

with open('input.txt') as fp:
    for line in fp:
        instructions = re.search(regex, line)
        for x_axis in range(int(instructions.group(2)), int(instructions.group(4)) + 1):
            for y_axis in range(int(instructions.group(3)), int(instructions.group(5)) + 1):
                if instructions.group(1) == 'turn on ':
                    light_matrix[x_axis, y_axis] += 1
                elif instructions.group(1) == 'turn off ' and light_matrix[x_axis, y_axis] > 0:
                    light_matrix[x_axis, y_axis] -= 1
                elif instructions.group(1) == 'toggle ':
                    light_matrix[x_axis, y_axis] += 2

print numpy.sum(light_matrix)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 17, 13, 22, 198, 198, 11748, 302, 198, 11748, 299, 32152, 198, 198, 2971, 62, 6759, 8609, 796, 299, 32152, 13, 9107, 418, 19510, 12825, 11, 12825, 828, 288, 4906, 28, 299, 32152, 13, 600, 8, 1...
2.20597
335
import unittest

from flask_monitoringdashboard.core.profiler.util import PathHash
from flask_monitoringdashboard.database import StackLine, CodeLine

FN = 'filename0'
LN = 42

STACK_LINES = [
    StackLine(
        request_id=0,
        position=0,
        indent=0,
        duration=1010,
        code=CodeLine(filename=FN, line_number=0, function_name='None', code='f()'),
    ),
    StackLine(
        request_id=0,
        position=1,
        indent=1,
        duration=500,
        code=CodeLine(filename=FN, line_number=1, function_name='f', code='sleep(1)'),
    ),
    StackLine(
        request_id=0,
        position=1,
        indent=1,
        duration=510,
        code=CodeLine(filename=FN, line_number=2, function_name='f', code='sleep(1.01)'),
    ),
]
[ 11748, 555, 715, 395, 198, 198, 6738, 42903, 62, 41143, 278, 42460, 3526, 13, 7295, 13, 5577, 5329, 13, 22602, 1330, 10644, 26257, 198, 6738, 42903, 62, 41143, 278, 42460, 3526, 13, 48806, 1330, 23881, 13949, 11, 6127, 13949, 198, 198, ...
2.147632
359
from time import sleep


contador(1, 10, 1)
print()
contador(10, 0, 2)
print()
print('<>' * 15)
i = int(input('Número inicial: '))
f = int(input('Número final: '))
p = abs(int(input('Passo: ')))  # Take the whole number without its sign (positive)
contador(i, f, p)
print()
print('<>' * 15)
[ 6738, 640, 1330, 3993, 628, 198, 198, 3642, 7079, 7, 16, 11, 838, 11, 352, 8, 198, 4798, 3419, 198, 3642, 7079, 7, 940, 11, 657, 11, 362, 8, 198, 4798, 3419, 198, 4798, 10786, 27, 29, 6, 9, 1314, 8, 198, 72, 796, 493, 7, 154...
2.169231
130
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# buildifier: disable=module-docstring
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("//proto/raze:crates.bzl", "rules_rust_proto_fetch_remote_crates")

# buildifier: disable=unnamed-macro
def rust_proto_repositories(register_default_toolchain = True):
    """Declare dependencies needed for proto compilation.

    Args:
        register_default_toolchain (bool, optional): If True, the default
            [rust_proto_toolchain](#rust_proto_toolchain)
            (`@rules_rust//proto:default-proto-toolchain`) is registered. This
            toolchain requires a set of dependencies that were generated using
            [cargo raze](https://github.com/google/cargo-raze). These will also
            be loaded.
    """
    maybe(
        http_archive,
        name = "rules_proto",
        sha256 = "bc12122a5ae4b517fa423ea03a8d82ea6352d5127ea48cb54bc324e8ab78493c",
        strip_prefix = "rules_proto-af6481970a34554c6942d993e194a9aed7987780",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/af6481970a34554c6942d993e194a9aed7987780.tar.gz",
            "https://github.com/bazelbuild/rules_proto/archive/af6481970a34554c6942d993e194a9aed7987780.tar.gz",
        ],
        patch_args = ["-p1"],
        patches = [
            Label("//proto/patches:rules_proto-bzl_visibility.patch"),
        ],
    )

    maybe(
        http_archive,
        name = "com_google_protobuf",
        sha256 = "758249b537abba2f21ebc2d02555bf080917f0f2f88f4cbe2903e0e28c4187ed",
        strip_prefix = "protobuf-3.10.0",
        urls = [
            "https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v3.10.0.tar.gz",
            "https://github.com/protocolbuffers/protobuf/archive/v3.10.0.tar.gz",
        ],
        patch_args = ["-p1"],
        patches = [
            Label("//proto/patches:com_google_protobuf-v3.10.0-bzl_visibility.patch"),
        ],
    )

    rules_rust_proto_fetch_remote_crates()

    # Register toolchains
    if register_default_toolchain:
        native.register_toolchains(str(Label("//proto:default-proto-toolchain")))
[ 2, 15069, 2864, 383, 347, 41319, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, ...
2.403296
1,153
import unittest
import json

from pyanalysis.mysql import Conn
from pyanalysis.moment import moment as m
from pyghostbt.strategy import Strategy
from pyghostbt.tool.order import *
from pyghostbt.tool.runtime import Runtime
from pyghostbt.const import *
from dateutil import tz

strategy_1st_config = {
    "mode": "backtest",
    "symbol": "btc_usd",
    "exchange": "okex",
    "contract_type": "quarter",
    "trade_type": "future",
    "unit_amount": 100,
    "lever": 10,
    "interval": "1min",
    "db_name": "test",
    "db_name_kline": "ghost-etl",
    "timezone": "Asia/Shanghai",
    "param": {
        "position": 0.5,
        "max_abs_loss": 0.05,
    },
    "order": {}
}

# def test_tool_kline(self):
#     pass
[ 11748, 555, 715, 395, 198, 11748, 33918, 198, 198, 6738, 279, 4121, 8767, 13, 28744, 13976, 1330, 20776, 198, 6738, 279, 4121, 8767, 13, 32542, 298, 1330, 2589, 355, 285, 198, 6738, 12972, 38933, 18347, 13, 2536, 4338, 1330, 20561, 198,...
2.354633
313
name = '_'.join(__name__.split('.')[-2:])
VERSION = '$Id: mcz.py,v 1.1 2009/04/07 16:35:31 kocolosk Exp $'[5:-2]

import ROOT

class_ = ROOT.TProfile
binning = {
    'nbinsx': 60,
    'xbins': (0.0, 15.0)
}
props = {
    'SetXTitle': ('MC #pi p_{T}',),
    'SetYTitle': ('<MC #pi p_{T} / particle jet p_{T}>',)
}
branches = ('mMcJets*', 'mMcTracks*')

vec = ROOT.TVector3()
[ 3672, 220, 220, 220, 796, 705, 62, 4458, 22179, 7, 834, 3672, 834, 13, 35312, 10786, 2637, 38381, 12, 17, 25, 12962, 198, 43717, 796, 705, 3, 7390, 25, 285, 26691, 13, 9078, 11, 85, 352, 13, 16, 3717, 14, 3023, 14, 2998, 1467, 2...
1.93401
197
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 01:51:33 2020

@author: liujiachen
"""
from ray.tune.schedulers import FIFOScheduler, TrialScheduler
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.web_server import TuneServer
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3825, 7653, 2310, 5534, 25, 4349, 25, 2091, 12131, 198, 198, 31, 9800, 25, 7649, 84, 7285...
2.75
108
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle.fluid as fluid
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.distributed.fleet as fleet
import unittest
import paddle
import os

paddle.enable_static()

# For Net
base_lr = 0.2
emb_lr = base_lr * 3
dict_dim = 1500
emb_dim = 128
hid_dim = 128
margin = 0.1
sample_rate = 1
batch_size = 4


if __name__ == '__main__':
    os.environ["GLOG_v"] = "4"
    os.environ["GLOG_logtostderr"] = "1"
    unittest.main()
[ 2, 220, 220, 15069, 357, 66, 8, 2864, 350, 37382, 47, 37382, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, ...
3.157143
350
# Define values in the following fields. Arguments can also be passed via the command line

USER = ""  # Your email address
KEY = ""   # Your API Key
ZONE = ""  # Your zone ID (optional)
ORG = ""   # Your organization ID (optional)
MAXP = 10  # Number of pages in the traffic tab the script will inspect
[ 2, 2896, 500, 3815, 287, 262, 1708, 7032, 13, 20559, 2886, 460, 635, 307, 3804, 2884, 262, 3141, 1627, 198, 198, 29904, 796, 13538, 1303, 3406, 3053, 2209, 198, 20373, 220, 796, 13538, 1303, 3406, 7824, 7383, 198, 57, 11651, 796, 1353...
3.772152
79
# Code to reproduce the regression experiments in the paper

import numpy as np
from fair_dummies.cf_regression import run_experiment

# parameters are tuned on the training set
# choose test=True to evaluate training performance

# test seed
random_state_train_test = 123

# repeat the experiments num_experiments times
num_experiments = 20

test_methods = []
dataset_names = []
batch_size = []
lr = []
steps = []
mu_val = []
second_scale = []
epochs = []
model_type = []
reg_type = []

################################################################################
## Fairness-unaware baseline methods
################################################################################

test_methods += ['FairDummies']
dataset_names += ['meps']
batch_size += [10000]
lr += [0.01]
steps += [50]
mu_val += list(np.linspace(0, 0.99, 100))
second_scale += [20]
epochs += [20]
model_type += ["linear_model"]
reg_type += ["mreg"]

for exp_id in range(1):
    for mu_val_id in range(100):
        cur_test_method = test_methods[exp_id]
        cur_dataset_name = dataset_names[exp_id]
        cur_batch_size = batch_size[exp_id]
        cur_lr_loss = lr[exp_id]
        cur_lr_dis = lr[exp_id]
        cur_loss_steps = steps[exp_id]
        cur_dis_steps = steps[exp_id]
        cur_mu_val = mu_val[mu_val_id]
        cur_epochs = epochs[exp_id]
        cur_random_state = random_state_train_test
        cur_model_type = model_type[exp_id]
        cur_regression_type = reg_type[exp_id]
        cur_second_scale = second_scale[exp_id]

        # run an experiment and save average results to a CSV file
        run_experiment(cur_test_method,
                       cur_dataset_name,
                       cur_batch_size,
                       cur_lr_loss,
                       cur_lr_dis,
                       cur_loss_steps,
                       cur_dis_steps,
                       cur_mu_val,
                       cur_epochs,
                       cur_model_type,
                       cur_regression_type,
                       random_state_train_test,
                       cur_second_scale,
                       num_experiments)
[ 198, 2, 6127, 284, 22919, 262, 20683, 10256, 287, 262, 3348, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 3148, 62, 67, 39578, 13, 12993, 62, 2301, 2234, 1330, 1057, 62, 23100, 3681, 198, 198, 2, 10007, 389, 16524, 319, 3047, 900, ...
2.037804
1,111
from flask import current_app

from coronastats import db, cache

app = current_app


@app.route("/api/infected_log/", methods=["GET"])
@cache.cached(timeout=3600)
@app.route("/api/infected_increase_log/", methods=["GET"])
@cache.cached(timeout=3600)
@app.route("/api/get_last_log_by_location/", methods=["GET"])
@cache.cached(timeout=3600)
[ 6738, 42903, 1330, 1459, 62, 1324, 198, 198, 6738, 26920, 459, 1381, 1330, 20613, 11, 12940, 198, 198, 1324, 796, 1459, 62, 1324, 628, 198, 31, 1324, 13, 38629, 7203, 14, 15042, 14, 27816, 276, 62, 6404, 14, 1600, 5050, 28, 14692, 1...
2.574627
134
# -*- encoding: utf-8 -*-

'''Database model for the "unittest" application

This module provides the definition of the database tables and fields used by
"unittest", the application which keeps track of the raw data and the analysis
results of the polarimetric unit tests done in Bicocca.

The core of the module is the "PolarimeterTest" class, which is typically
associated to one datafile acquired using the electronic board or to one set
of Excel files acquired using the Keithley apparatus. All the other classes
provide additional (and often optional) information about the test.

The main reason why the structure of tables seems so complicated stems from
the need of great versatility. Since no housekeeping information was recorded
during the tests, the code cannot assume that information such as amplifier
biases, housekeeping temperatures, and so on is available and ready to be put
in the database.
'''

from io import BytesIO
import logging
import os
from tempfile import NamedTemporaryFile

from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.core.files import File
from django.core.files.base import ContentFile

import h5py
import matplotlib as mpl
from jsonfield import JSONField

from .file_conversions import convert_data_file_to_h5
from .validators import validate_report_file_ext

mpl.use('Agg')
import matplotlib.pylab as plt

# Get an instance of a logger
LOGGER = logging.getLogger(__name__)


class TestType(models.Model):
    'Kind of test (e.g., Y-factor)'
    description = models.CharField(max_length=80)


class Operator(models.Model):
    'Person who has run a test'
    name = models.CharField(max_length=256)


BAND_CHOICES = (
    ('Q', 'Q'),
    ('W', 'W'),
)

PHSW_STATES = (
    ('N/A', 'N/A'),
    ('0101', '0101'),
    ('0110', '0110'),
    ('switching', 'switching'),
    ('1010', '1010'),
    ('1001', '1001'),
)


def create_pwr_plot(hdf5_file_name, dpi=80):
    'Plot PWR data from an HDF5 file into an image'

    plt.figure(figsize=(512 / dpi, 384 / dpi), dpi=dpi)
    with h5py.File(hdf5_file_name, 'r') as h5_file:
        if not 'time_series' in h5_file:
            # Do not attempt to make plots from a file containing Keithley data
            return None

        dataset = h5_file['time_series']
        time = dataset['time_s']
        for idx, detector in enumerate(['Q1', 'U1', 'U2', 'Q2']):
            column = 'pwr_{0}_ADU'.format(detector)
            pwr_name = 'PWR{0}'.format(idx)
            plt.plot(time, dataset[column],
                     label='{0} ({1})'.format(pwr_name, detector))

    plt.xlabel('Time [s]')
    plt.ylabel('Output [ADU]')
    plt.legend()

    # Save the image
    buffer = BytesIO()
    plt.savefig(buffer, format='png', bbox_inches='tight', dpi=dpi)
    return ContentFile(buffer.getvalue())


def update_hdf5_test_file_attrs(file_name, poltest):
    'Update HDF5 file attributes with information from a PolarimeterTest obj'

    if settings.HTTP_HOST and settings.HTTP_HOST != '':
        abs_url = ''.join([settings.HTTP_HOST, poltest.get_absolute_url()])
    else:
        abs_url = poltest.get_absolute_url()

    with h5py.File(file_name, 'r+') as h5_file:
        for key, value in [('url', abs_url),
                           ('polarimeter', poltest.polarimeter_name),
                           ('cryogenic', poltest.cryogenic),
                           ('acquisition_date',
                            poltest.acquisition_date.strftime('%Y-%m-%d')),
                           ('band', poltest.band),
                           ('test_type', str(poltest.test_type))]:
            h5_file.attrs[key] = value


class PolarimeterTest(models.Model):
    'A dedicated test done on one polarimeter'

    polarimeter_number = models.IntegerField(
        verbose_name='Number of the polarimeter')
    cryogenic = models.BooleanField(verbose_name='Cryogenic test')
    acquisition_date = models.DateField(
        verbose_name='Date of acquisition (YYYY-MM-DD)')
    data_file = models.FileField(max_length=1024, upload_to='unit_test_data/')
    short_description = models.CharField(
        max_length=140, blank=True,
        verbose_name='Short description (optional)')
    notes = models.TextField(verbose_name='Notes', blank=True)
    phsw_state = models.CharField(
        max_length=12, default='N/A', choices=PHSW_STATES)
    band = models.CharField(max_length=1, choices=BAND_CHOICES)

    pwr_plot = models.ImageField(
        max_length=1024, upload_to='plots/', blank=True)

    test_type = models.ForeignKey(TestType, on_delete=models.CASCADE)
    operators = models.ManyToManyField(Operator, related_name='tests')
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='tests_owned')
    creation_date = models.DateField(auto_now_add=True)

    @property
    def to_dict(self):
        'Create a dictionary containing a summary of the test (useful for the REST API)'
        return {
            'id': self.pk,
            'polarimeter_number': self.polarimeter_number,
            'test_type': self.test_type.pk,
            'description': self.test_description,
            'absolute_url': self.get_absolute_url(),
            'download_url': self.get_download_url(),
            'json_url': self.get_json_url(),
        }


class AdcOffset(models.Model):
    'Offset configuration used for the four ADCs'

    test = models.ForeignKey(to=PolarimeterTest, on_delete=models.CASCADE)
    q1_adu = models.IntegerField(verbose_name='PWR0 (Q1) offset [ADU]')
    u1_adu = models.IntegerField(verbose_name='PWR1 (U1) offset [ADU]')
    u2_adu = models.IntegerField(verbose_name='PWR2 (U2) offset [ADU]')
    q2_adu = models.IntegerField(verbose_name='PWR3 (Q2) offset [ADU]')


class DetectorOutput(models.Model):
    'Average output of the four detectors'

    test = models.ForeignKey(to=PolarimeterTest, on_delete=models.CASCADE)
    q1_adu = models.IntegerField(verbose_name='PW0 (Q1)')
    u1_adu = models.IntegerField(verbose_name='PW1 (U1)')
    u2_adu = models.IntegerField(verbose_name='PW2 (U2)')
    q2_adu = models.IntegerField(verbose_name='PW3 (Q2)')


class Biases(models.Model):
    'Biases used to polarize the HEMTs'

    test = models.ForeignKey(to=PolarimeterTest, on_delete=models.CASCADE)
    drain_voltage_ha1_V = models.FloatField(
        verbose_name='H0 drain voltage [V]')
    drain_current_ha1_mA = models.FloatField(
        verbose_name='H0 drain current [mA]')
    gate_voltage_ha1_mV = models.FloatField(
        verbose_name='H0 gate voltage [mV]')

    drain_voltage_hb1_V = models.FloatField(
        verbose_name='H1 drain voltage [V]')
    drain_current_hb1_mA = models.FloatField(
        verbose_name='H1 drain current [mA]')
    gate_voltage_hb1_mV = models.FloatField(
        verbose_name='H1 gate voltage [mV]')

    drain_voltage_ha2_V = models.FloatField(
        verbose_name='H2 drain voltage [V]')
    drain_current_ha2_mA = models.FloatField(
        verbose_name='H2 drain current [mA]')
    gate_voltage_ha2_mV = models.FloatField(
        verbose_name='H2 gate voltage [mV]')

    drain_voltage_hb2_V = models.FloatField(
        verbose_name='H3 drain voltage [V]')
    drain_current_hb2_mA = models.FloatField(
        verbose_name='H3 drain current [mA]')
    gate_voltage_hb2_mV = models.FloatField(
        verbose_name='H3 gate voltage [mV]')

    drain_voltage_ha3_V = models.FloatField(
        verbose_name='H4 drain voltage [V]')
    drain_current_ha3_mA = models.FloatField(
        verbose_name='H4 drain current [mA]')
    gate_voltage_ha3_mV = models.FloatField(
        verbose_name='H4 gate voltage [mV]')

    drain_voltage_hb3_V = models.FloatField(
        verbose_name='H5 drain voltage [V]')
    drain_current_hb3_mA = models.FloatField(
        verbose_name='H5 drain current [mA]')
    gate_voltage_hb3_mV = models.FloatField(
        verbose_name='H5 gate voltage [mV]')


class Temperatures(models.Model):
    'Temperatures of the cryochamber'

    test = models.ForeignKey(to=PolarimeterTest, on_delete=models.CASCADE)
    t_load_a_1 = models.FloatField(verbose_name='Tload_A1 [K]')
    t_load_a_2 = models.FloatField(verbose_name='Tload_A2 [K]')
    t_load_b_1 = models.FloatField(verbose_name='Tload_B1 [K]')
    t_load_b_2 = models.FloatField(verbose_name='Tload_B2 [K]')
    t_cross_guide_1 = models.FloatField(verbose_name='T_Cross1 [K]')
    t_cross_guide_2 = models.FloatField(verbose_name='T_Cross2 [K]')
    t_polarimeter_1 = models.FloatField(verbose_name='T_Pol1 [K]')
    t_polarimeter_2 = models.FloatField(verbose_name='T_Pol2 [K]')


class NoiseTemperatureAnalysis(models.Model):
    'Result of a noise temperature analysis'

    test = models.ForeignKey(to=PolarimeterTest, on_delete=models.CASCADE)
    analysis_results = JSONField(blank=True)
    report_file = models.FileField(
        verbose_name='Report', upload_to='reports/',
        validators=[validate_report_file_ext], blank=True)
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='tnoise_owned')


class SpectralAnalysis(models.Model):
    'Results of the analysis of a long-acquisition test'

    test = models.ForeignKey(to=PolarimeterTest, on_delete=models.CASCADE)
    analysis_results = JSONField(blank=True)
    report_file = models.FileField(
        verbose_name='Report', upload_to='reports/',
        validators=[validate_report_file_ext], blank=True)
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='spectral_owned')


class BandpassAnalysis(models.Model):
    'Results of the analysis of a bandpass test'

    test = models.ForeignKey(to=PolarimeterTest, on_delete=models.CASCADE)
    analysis_results = JSONField(blank=True)
    report_file = models.FileField(
        verbose_name='Report', upload_to='reports/',
        validators=[validate_report_file_ext], blank=True)
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='bandpass_owned')
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 7061, 6, 38105, 2746, 329, 262, 366, 403, 715, 395, 1, 3586, 198, 198, 1212, 8265, 3769, 262, 6770, 286, 262, 6831, 8893, 290, 7032, 973, 416, 198, 1, 403, 715,...
2.453686
4,124
from mindsdb.api.mongo.classes import Responder
from mindsdb.utilities.with_kwargs_wrapper import WithKWArgsWrapper


responder = Responder()
[ 6738, 9017, 9945, 13, 15042, 13, 76, 25162, 13, 37724, 1330, 10328, 8623, 198, 6738, 9017, 9945, 13, 315, 2410, 13, 4480, 62, 46265, 22046, 62, 48553, 1330, 2080, 42, 54, 42035, 36918, 2848, 628, 198, 198, 5546, 263, 796, 10328, 27078...
3.227273
44
#
#    Copyright (c) 2019-2020 Google LLC. All Rights Reserved.
#    Copyright (c) 2016-2018 Nest Labs Inc. All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
#

#
#    Description:
#      This file effects a Weave Data Language (WDL) test for the
#      validator that validates and enforces object addition or type
#      change across two schema corpus revisions.
#

"""Checks if any objects were removed or changed type in the schema."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest

from gwv import schema
from gwv import validator
from nwv.validators import added_object_validator


class AddedObjectValidatorTest(validator.ComparisonValidatorTestCase):
    """Checks if any objects were removed or changed type in the schema."""

    NEW_OBJECT_ON_PROD_ERR_MSG = (r'.*Objects cannot be added to PROD .* without '
                                  r'incrementing the .* version.*')
    NEW_OBJECT_ADDED_ERR_MSG = (r'.*New objects added to .* with version > 1 must'
                                r' specify min_version.*')


if __name__ == '__main__':
    unittest.main()
[ 2, 198, 2, 220, 220, 220, 15069, 357, 66, 8, 13130, 12, 42334, 3012, 11419, 13, 1439, 6923, 33876, 13, 198, 2, 220, 220, 220, 15069, 357, 66, 8, 1584, 12, 7908, 21420, 23500, 3457, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220,...
3.001776
563
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.

import json
from typing import List

from cdm.enums import CdmObjectType
from cdm.persistence import PersistenceLayer
from cdm.objectmodel import CdmCorpusContext, CdmDocumentDefinition, CdmEntityDefinition, CdmManifestDefinition
from cdm.utilities import CopyOptions, ResolveOptions, copy_data_utils, logger
from cdm.enums import CdmLogCode

from .attribute_group_persistence import AttributeGroupPersistence
from .constant_entity_persistence import ConstantEntityPersistence
from .data_type_persistence import DataTypePersistence
from .entity_persistence import EntityPersistence
from .import_persistence import ImportPersistence
from .purpose_persistence import PurposePersistence
from .trait_persistence import TraitPersistence
from .trait_group_persistence import TraitGroupPersistence
from .types import DocumentContent
from cdm.persistence.syms.models import ColumnRelationshipInformation, DataColumn, DataSource, DatabaseEntity, \
    DatabaseProperties, FormatInfo, Namespace, PartitionInfo, PartitionInfoNamespace, PartitionInfoProperties, \
    RelationshipEntity, RelationshipProperties, SASEntityType, ScalarTypeInfo, SchemaEntity, StorageDescriptor, \
    TableEntity, TableNamespace, TablePartitioning, TableProperties, TypeInfo

_TAG = 'DocumentPersistence'
[ 171, 119, 123, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 198, 11748, 33918, 198, 6738, 19720, 1330, 73...
4.008523
352
'''
Some of the services below refer to:
http://face-recognition.readthedocs.io/en/latest/_modules/face_recognition/api.html
'''
import numpy as np

from configs import configs
[ 7061, 6, 198, 4366, 286, 2174, 2594, 389, 9759, 25, 198, 4023, 1378, 2550, 12, 26243, 653, 13, 961, 83, 704, 420, 82, 13, 952, 14, 268, 14, 42861, 47835, 18170, 14, 2550, 62, 26243, 653, 14, 15042, 13, 6494, 198, 198, 7061, 6, 1...
3.016667
60
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS
Community Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import re

from django.utils.translation import ugettext_lazy as _

from backend.components import prometheus as prom
from backend.packages.blue_krill.data_types.enum import EnumField, StructuredEnum

# When no time range is specified, fetch one hour of data by default
METRICS_DEFAULT_TIMEDELTA = 3600

# Default namespace to query (all of them)
METRICS_DEFAULT_NAMESPACE = '.*'

# Container metrics may be queried without naming a specific Pod (not recommended)
METRICS_DEFAULT_POD_NAME = '.*'

# By default, query all containers under the Pod
METRICS_DEFAULT_CONTAINER_LIST = ['.*']


class MetricDimension(str, StructuredEnum):
    """ Metric dimensions """

    CpuUsage = EnumField('cpu_usage', label=_('CPU usage'))
    MemoryUsage = EnumField('memory_usage', label=_('Memory usage'))
    DiskUsage = EnumField('disk_usage', label=_('Disk usage'))
    DiskIOUsage = EnumField('diskio_usage', label=_('Disk IO usage'))


# Fetch functions for each node metric dimension
NODE_DIMENSIONS_FUNC = {
    MetricDimension.CpuUsage: prom.get_node_cpu_usage,
    MetricDimension.MemoryUsage: prom.get_node_memory_usage,
    MetricDimension.DiskUsage: prom.get_node_disk_usage,
    MetricDimension.DiskIOUsage: prom.get_node_diskio_usage,
}

# Fetch functions for each cluster metric dimension
CLUSTER_DIMENSIONS_FUNC = {
    MetricDimension.CpuUsage: prom.get_cluster_cpu_usage,
    MetricDimension.MemoryUsage: prom.get_cluster_memory_usage,
    MetricDimension.DiskUsage: prom.get_cluster_disk_usage,
}

# Ordinary node metrics
NODE_UNAME_METRIC = [
    'dockerVersion', 'osVersion',  # from cadvisor
    'domainname', 'machine', 'nodename', 'release', 'sysname', 'version',  # from node-exporter
]

# Node usage-type metrics
NODE_USAGE_METRIC = ['cpu_count', 'memory', 'disk']

# Matcher for annotations that must be filtered out
FILTERED_ANNOTATION_PATTERN = re.compile(r'__meta_kubernetes_\w+_annotation')

# Matcher for Job names
JOB_PATTERN = re.compile(r'^(?P<namespace>[\w-]+)/(?P<name>[\w-]+)/(?P<port_idx>\d+)$')

# Service fields that are not returned to the frontend
INNER_USE_SERVICE_METADATA_FIELDS = [
    'annotations',
    'selfLink',
    'uid',
    'resourceVersion',
    'initializers',
    'generation',
    'deletionTimestamp',
    'deletionGracePeriodSeconds',
    'clusterName',
]

# Labels not shown to the frontend (matching these prefixes)
INNER_USE_LABEL_PREFIX = [
    'io_tencent_bcs_',
    'io.tencent.paas.',
    'io.tencent.bcs.',
    'io.tencent.bkdata.',
    'io.tencent.paas.',
]

# Default Endpoint path
DEFAULT_ENDPOINT_PATH = '/metrics'

# Default Endpoint interval (unit: s)
DEFAULT_ENDPOINT_INTERVAL = 30

# Label key in which a Service Monitor stores the Service name
SM_SERVICE_NAME_LABEL = 'io.tencent.bcs.service_name'

# Namespaces a Service Monitor has no permission for, and the matching permission structure
SM_NO_PERM_NAMESPACE = ['thanos']
SM_NO_PERM_MAP = {
    'view': True,
    'use': False,
    'edit': False,
    'delete': False,
    'view_msg': '',
    'edit_msg': _('Operating on system namespaces is not allowed'),
    'use_msg': _('Operating on system namespaces is not allowed'),
    'delete_msg': _('Operating on system namespaces is not allowed'),
}

# Service Monitor name format
SM_NAME_PATTERN = re.compile(r'^[a-z][-a-z0-9]*$')

# Allowed Service Monitor intervals
ALLOW_SM_INTERVAL = [30, 60, 120]

# Maximum and minimum sample-count limits
SM_SAMPLE_LIMIT_MAX = 100000
SM_SAMPLE_LIMIT_MIN = 1
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 24893, 1087, 318, 10607, 284, 1104, 262, 1280, 2723, 2055, 416, 1642, 5525, 241, 251, 165, 110, 116, 162, 247, 118, 12859, 239, 47, 7252, 50, 33176, 111, 2...
1.844881
1,934
#
# Copyright (C) 2016 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#

from . import pyqtgraph
[ 2, 198, 2, 15069, 357, 34, 8, 1584, 220, 471, 10116, 44565, 7712, 4816, 220, 1279, 84, 615, 5171, 13, 2398, 29, 198, 2, 198, 2, 770, 3788, 318, 9387, 739, 262, 2846, 286, 262, 17168, 13789, 13, 198, 2, 198, 2, 6434, 25, 49612, ...
2.826667
75
# Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from .kernel_disk_workload import KernelDiskWorkload
from ...resources.resource import AbstractResource
from ...utils.override import overrides
from .abstract_board import AbstractBoard
from ...isas import ISA

from m5.objects import (
    Pc,
    AddrRange,
    X86FsLinux,
    Addr,
    X86SMBiosBiosInformation,
    X86IntelMPProcessor,
    X86IntelMPIOAPIC,
    X86IntelMPBus,
    X86IntelMPBusHierarchy,
    X86IntelMPIOIntAssignment,
    X86E820Entry,
    Bridge,
    IOXBar,
    IdeDisk,
    CowDiskImage,
    RawDiskImage,
    BaseXBar,
    Port,
)

from m5.util.convert import toMemorySize

from ..processors.abstract_processor import AbstractProcessor
from ..memory.abstract_memory_system import AbstractMemorySystem
from ..cachehierarchies.abstract_cache_hierarchy import AbstractCacheHierarchy
from ...utils.requires import requires

from typing import List, Sequence


class X86Board(AbstractBoard, KernelDiskWorkload):
    """
    A board capable of full system simulation for X86.

    **Limitations**
    * Currently, this board's memory is hardcoded to 3GB
    * Much of the I/O subsystem is hard coded
    """

    @overrides(AbstractBoard)
    def _setup_io_devices(self):
        """ Sets up the x86 IO devices.

        Note: This is mostly copy-paste from prior X86 FS setups. Some of it
        may not be documented and there may be bugs.
        """

        # Constants similar to x86_traits.hh
        IO_address_space_base = 0x8000000000000000
        pci_config_address_space_base = 0xC000000000000000
        interrupts_address_space_base = 0xA000000000000000
        APIC_range_size = 1 << 12

        # Setup memory system specific settings.
        if self.get_cache_hierarchy().is_ruby():
            self.pc.attachIO(self.get_io_bus(), [self.pc.south_bridge.ide.dma])
        else:
            self.bridge = Bridge(delay="50ns")
            self.bridge.mem_side_port = self.get_io_bus().cpu_side_ports
            self.bridge.cpu_side_port = (
                self.get_cache_hierarchy().get_mem_side_port()
            )

            # # Constants similar to x86_traits.hh
            IO_address_space_base = 0x8000000000000000
            pci_config_address_space_base = 0xC000000000000000
            interrupts_address_space_base = 0xA000000000000000
            APIC_range_size = 1 << 12

            self.bridge.ranges = [
                AddrRange(0xC0000000, 0xFFFF0000),
                AddrRange(
                    IO_address_space_base, interrupts_address_space_base - 1
                ),
                AddrRange(pci_config_address_space_base, Addr.max),
            ]

            self.apicbridge = Bridge(delay="50ns")
            self.apicbridge.cpu_side_port = self.get_io_bus().mem_side_ports
            self.apicbridge.mem_side_port = (
                self.get_cache_hierarchy().get_cpu_side_port()
            )
            self.apicbridge.ranges = [
                AddrRange(
                    interrupts_address_space_base,
                    interrupts_address_space_base
                    + self.get_processor().get_num_cores() * APIC_range_size
                    - 1,
                )
            ]
            self.pc.attachIO(self.get_io_bus())

        # Add in a Bios information structure.
        self.workload.smbios_table.structures = [X86SMBiosBiosInformation()]

        # Set up the Intel MP table
        base_entries = []
        ext_entries = []
        for i in range(self.get_processor().get_num_cores()):
            bp = X86IntelMPProcessor(
                local_apic_id=i,
                local_apic_version=0x14,
                enable=True,
                bootstrap=(i == 0),
            )
            base_entries.append(bp)

        io_apic = X86IntelMPIOAPIC(
            id=self.get_processor().get_num_cores(),
            version=0x11,
            enable=True,
            address=0xFEC00000,
        )
        self.pc.south_bridge.io_apic.apic_id = io_apic.id
        base_entries.append(io_apic)
        pci_bus = X86IntelMPBus(bus_id=0, bus_type="PCI ")
        base_entries.append(pci_bus)
        isa_bus = X86IntelMPBus(bus_id=1, bus_type="ISA ")
        base_entries.append(isa_bus)
        connect_busses = X86IntelMPBusHierarchy(
            bus_id=1, subtractive_decode=True, parent_bus=0
        )
        ext_entries.append(connect_busses)
        pci_dev4_inta = X86IntelMPIOIntAssignment(
            interrupt_type="INT",
            polarity="ConformPolarity",
            trigger="ConformTrigger",
            source_bus_id=0,
            source_bus_irq=0 + (4 << 2),
            dest_io_apic_id=io_apic.id,
            dest_io_apic_intin=16,
        )
        base_entries.append(pci_dev4_inta)

        assignISAInt(0, 2)
        assignISAInt(1, 1)
        for i in range(3, 15):
            assignISAInt(i, i)

        self.workload.intel_mp_table.base_entries = base_entries
        self.workload.intel_mp_table.ext_entries = ext_entries

        entries = [
            # Mark the first megabyte of memory as reserved
            X86E820Entry(addr=0, size="639kB", range_type=1),
            X86E820Entry(addr=0x9FC00, size="385kB", range_type=2),
            # Mark the rest of physical memory as available
            X86E820Entry(
                addr=0x100000,
                size=f"{self.mem_ranges[0].size() - 0x100000:d}B",
                range_type=1,
            ),
        ]

        # Reserve the last 16kB of the 32-bit address space for m5ops
        entries.append(
            X86E820Entry(addr=0xFFFF0000, size="64kB", range_type=2)
        )

        self.workload.e820_table.entries = entries

    @overrides(AbstractBoard)
    @overrides(AbstractBoard)
    @overrides(AbstractBoard)
    @overrides(AbstractBoard)
    @overrides(AbstractBoard)
    @overrides(AbstractBoard)
    @overrides(AbstractBoard)
    @overrides(KernelDiskWorkload)
    @overrides(KernelDiskWorkload)
    @overrides(KernelDiskWorkload)
[ 2, 15069, 357, 66, 8, 33448, 383, 3310, 658, 286, 262, 2059, 286, 3442, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, 389, 10431, 28...
2.271135
3,312
import copy
import math
import typing

import jax
import jax._src.util as util
from jax import lax, numpy as jnp

from src.backend import get_param, shard, dims_to_shape, INT_OR_TUPLE, dot, matmul, transpose
from src.context import Context

REVERSIBLE_CTX = typing.Tuple[typing.Dict[str, jnp.ndarray], jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]
[ 11748, 4866, 198, 11748, 10688, 198, 11748, 19720, 198, 198, 11748, 474, 897, 198, 11748, 474, 897, 13557, 10677, 13, 22602, 355, 7736, 198, 6738, 474, 897, 1330, 36919, 11, 299, 32152, 355, 474, 37659, 198, 198, 6738, 12351, 13, 1891, ...
2.626761
142
#/u/GoldenSights
import praw  # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
import random

'''USER CONFIGURATION'''

USERNAME = ""  # This is the bot's Username. In order to send mail, he must have some amount of Karma.
PASSWORD = ""  # This is the bot's Password.
USERAGENT = ""  # This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"  # This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
NAMES = ["Abraham Lincoln", "George Washington", "Bill Gates", "Rosa Parks", "GoldenSights", "Unidan", "Napoleon Bonaparte"]  # Famous People
MAXPOSTS = 100  # This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
MAXLENGTH = 150  # To avoid bot abuse, do not generate any quotes longer than this many characters.
WAIT = 20  # This is how many seconds you will wait between cycles. The bot is completely inactive during this time.

'''All done!'''

WAITS = str(WAIT)

try:
    # This is a file in my python library which contains my Bot's username
    # and password. I can push code to Git without showing credentials.
    import bot
    USERNAME = bot.getuG()
    PASSWORD = bot.getpG()
    USERAGENT = bot.getaG()
except ImportError:
    pass

cutoff = len(USERNAME) + 4

sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()

cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS oldpost_index ON oldposts(id)')
print('Loaded Completed table')

sql.commit()

r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)

while True:
    try:
        scanSub()
    except Exception as e:
        print('An error has occurred:', str(e))
    print('Running again in ' + WAITS + ' seconds \n')
    sql.commit()
    time.sleep(WAIT)
[ 2, 14, 84, 14, 32378, 50, 2337, 198, 11748, 279, 1831, 1303, 2829, 7071, 284, 262, 18374, 7824, 11, 635, 17105, 2494, 15637, 286, 7007, 198, 11748, 640, 198, 11748, 44161, 578, 18, 198, 11748, 4738, 198, 198, 7061, 6, 29904, 25626, ...
3.044586
628
import string

from pyparsing import *

from ..constants.punctuation.deff import DASH, COMMA, EOL
from ..constants.types.deff import FREQUENCY
from decl import *

frequency << FREQUENCY
expcode << Word(string.digits + string.uppercase)
expansion << expcode + Suppress(DASH) + frequency
[ 11748, 4731, 198, 198, 6738, 279, 4464, 945, 278, 1330, 1635, 198, 6738, 11485, 9979, 1187, 13, 79, 16260, 2288, 13, 2934, 487, 1330, 360, 11211, 11, 9440, 5673, 11, 412, 3535, 198, 6738, 11485, 9979, 1187, 13, 19199, 13, 2934, 487, ...
3.108696
92
import sqlite3

from scrapy.exceptions import DropItem
[ 11748, 44161, 578, 18, 198, 198, 6738, 15881, 88, 13, 1069, 11755, 1330, 14258, 7449, 628, 198 ]
3.352941
17
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# common
import os
import os.path as op

# pip
import netCDF4 as nc

# tk
from ..io.matlab import ReadMatfile


def description(p):
    'returns description of the file'

    print('getting info... {0}'.format(p))
    txt = '\n\n\n-->{0}'.format(p)

    if op.isdir(p):
        txt += '\n*** FOLDER ***\n\nfiles:\n'
        txt += ', '.join(os.listdir(p))
        txt += '\n'

    elif p.endswith('.nc'):
        txt += '\n*** NETCDF File ***\n\n'
        txt += str(nc.Dataset(p))

    elif p.endswith('.mat'):
        txt += '\n*** MATLAB File ***\n\nvariables:\n'
        try:
            txt += ', '.join(ReadMatfile(p).keys())
        except:
            txt += '\n couldn\'t read file.'
        txt += '\n'

    return txt
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 2219, 1849, 198, 11748, 28686, 198, 11748, 28686, 13, 6978, 355, 1034, 198, 198, 2, 7347, 198, 11748, 2010, 3...
1.932668
401
# print "This is testing value in for loop\n\n" # fruits = '<div id="b87bfc25-3cce-44f0-9948-a4f4f55a1b9f" style="height: 525; width: 100%;" class="plotly-graph-div"></div><script type="text/javascript">window.PLOTLYENV=window.PLOTLYENV || {};window.PLOTLYENV.BASE_URL="https://plot.ly";Plotly.newPlot("b87bfc25-3cce-44f0-9948-a4f4f55a1b9f", [{"y": [2, 166, 193, 4, 136, 0, 112], "x": ["\u9b4f\u6b66", "\u5433\u5c71", "\u8cf4\u7d72", "\u6797\u742a", "\u6797\u8cb3", "\u90dd\u7409", "\u738b\u4f9d"], "type": "bar", "name": "assigned"}], {"barmode": "stack"}, {"linkText": "", "showLink": false})</script>' # for index in range(len(fruits)): # print 'Current fruit :', fruits[index] fruits = "<div style='height: 525; width=100px; background: black;' ></div>" print fruits
[ 198, 2, 3601, 366, 1212, 318, 4856, 1988, 287, 329, 9052, 59, 77, 59, 77, 1, 198, 198, 2, 15921, 796, 705, 27, 7146, 4686, 2625, 65, 5774, 65, 16072, 1495, 12, 18, 66, 344, 12, 2598, 69, 15, 12, 2079, 2780, 12, 64, 19, 69, 1...
2.223496
349
"""
    tests.unit.xmlutil_test
    ~~~~~~~~~~~~~~~~~~~~
"""
import xml.etree.ElementTree as ET

import salt.utils.xmlutil as xml
from tests.support.unit import TestCase


class XMLUtilTestCase(TestCase):
    """
    Tests that salt.utils.xmlutil properly parses XML data and returns as a properly
    formatted dictionary. The default method of parsing will ignore attributes and
    return only the child items. The full method will include parsing attributes.
    """
[ 37811, 198, 220, 220, 220, 5254, 13, 20850, 13, 19875, 22602, 62, 9288, 198, 220, 220, 220, 220, 27156, 8728, 198, 37811, 198, 11748, 35555, 13, 316, 631, 13, 20180, 27660, 355, 12152, 198, 198, 11748, 8268, 13, 26791, 13, 19875, 2260...
3.541353
133
"""Meetup API."""

from flask import Flask, render_template, request, send_from_directory
import json
import os

app = Flask(__name__, static_folder='img')


@app.route('/img/<path:path>')
def send_img(path):
    """Send image from dir."""
    app.logger.debug(path)
    return send_from_directory('', path)


@app.route('/')
def secret():
    """Secret."""
    return render_template('secret.html')


@app.route('/check', methods=['POST', 'GET'])
def chec():
    """Check secret data is correct."""
    if request.method == 'POST':
        result = request.form
        vault_data = '/credentials/app.json'
        data = {'msg': 'Vault token is not valid'}
        if os.path.isfile(vault_data):
            with open(vault_data) as data_file:
                response = json.load(data_file)
                app.logger.debug(response)
            if 'data' in response:
                data = response['data']
                if 'password' in data:
                    if data['password'] == result['secret']:
                        return render_template('bond.html', data=data)
    return render_template('notbond.html', data=data)


if __name__ == '__main__':
    app.run(
        host='0.0.0.0',
        port=5000,
        debug=True,
        threaded=True)
[ 37811, 29318, 929, 7824, 526, 15931, 198, 198, 6738, 42903, 1330, 46947, 11, 8543, 62, 28243, 11, 2581, 11, 3758, 62, 6738, 62, 34945, 198, 11748, 33918, 198, 11748, 28686, 198, 198, 1324, 796, 46947, 7, 834, 3672, 834, 11, 9037, 62, ...
2.324478
527
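The /check handler above reads a Vault-exported credentials file and compares data.password against the submitted form secret. A hypothetical /credentials/app.json matching the shape the code expects (the password value is an invented placeholder):

{
    "data": {
        "password": "correct-horse-battery-staple"
    }
}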
'''
Created on Aug 3, 2009

@author: rb
'''

import os
import os.path
import tempfile

if __name__ == '__main__':
    blastPath = "/export/lab/programs/blast"
    qryPath = os.path.join(blastPath, "q/mtgi9not_az")
    outPath = os.path.join(blastPath, "out/mtgi9not_az2.txt")
    batchSize = 20

    main(blastPath, qryPath, outPath, batchSize)
[ 7061, 6, 198, 41972, 319, 2447, 513, 11, 3717, 198, 198, 31, 9800, 25, 374, 65, 198, 7061, 6, 198, 198, 11748, 28686, 26, 198, 11748, 28686, 13, 6978, 26, 198, 11748, 20218, 7753, 26, 198, 197, 198, 198, 361, 11593, 3672, 834, 662...
2.365517
145
import operator
from functools import reduce  # reduce is no longer a builtin on Python 3

from haystack.query import SearchQuerySet, SQ

query = 'lil way'
sqs = SearchQuerySet().filter(reduce(operator.__and__, [SQ(name=word.strip()) for word in query.split(' ')]))
[ 11748, 10088, 198, 6738, 27678, 25558, 13, 22766, 1330, 11140, 20746, 7248, 11, 49747, 198, 198, 22766, 796, 705, 75, 346, 835, 6, 198, 31166, 82, 796, 11140, 20746, 7248, 22446, 24455, 7, 445, 7234, 7, 46616, 13, 834, 392, 834, 11, ...
3.064516
62
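The reduce/operator.__and__ trick above just ANDs a list of SQ objects together. An equivalent, arguably plainer spelling, assuming haystack's default AND semantics for chained filter() calls:

sqs = SearchQuerySet()
for word in query.split():
    sqs = sqs.filter(name=word.strip())  # each filter() narrows the result set (AND)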
#!/usr/bin/env python3
""" Conditional imports depending on whether the AMD version is installed or not """

from lib.utils import get_backend

if get_backend() == "amd":
    from . import losses_plaid as losses
else:
    from . import losses_tf as losses
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 9724, 1859, 17944, 6906, 319, 1771, 262, 10324, 2196, 318, 6589, 393, 407, 37227, 198, 198, 6738, 9195, 13, 26791, 1330, 651, 62, 1891, 437, 198, 198, 361, 651, 62, 1891, ...
3.324675
77
import socket
import json
from pynput import keyboard  # must be installed to enable keyboard control
import time

msg = {
    "x_dir": 0,
    "y_dir": 0,
    "shot": 0
}

host = ("192.168.1.1", 5000)
sendFreq = 15  # send 15 packets per second


def onPress(key):
    """ called when any key on the keyboard is pressed """
    global msg
    if key == keyboard.Key.up:  # arrow-key control
        msg["y_dir"] = 1
    elif key == keyboard.Key.down:
        msg["y_dir"] = -1
    elif key == keyboard.Key.left:
        msg["x_dir"] = -1
    elif key == keyboard.Key.right:
        msg["x_dir"] = 1
    elif key == keyboard.Key.space:
        msg["shot"] = 1


def onRelease(key):
    """ called when any key on the keyboard is released """
    global msg
    if (key == keyboard.Key.up) or (key == keyboard.Key.down):  # arrow-key control
        msg["y_dir"] = 0
    elif (key == keyboard.Key.left) or (key == keyboard.Key.right):  # arrow-key control
        msg["x_dir"] = 0
    elif key == keyboard.Key.space:
        msg["shot"] = 0


if __name__ == '__main__':
    listener = keyboard.Listener(
        on_press=onPress,
        on_release=onRelease)
    listener.start()  # start the keypress handler in non-blocking mode

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        while True:
            sock.sendto(json.dumps(msg, ensure_ascii=False).encode("utf8"), host)  # send the message as a JSON payload
            time.sleep(1 / sendFreq)
    except KeyboardInterrupt:
        sock.close()
        listener.stop()
[ 11748, 17802, 198, 11748, 33918, 198, 6738, 279, 2047, 1996, 1330, 10586, 220, 220, 220, 220, 1303, 12466, 121, 35072, 140, 114, 22177, 15166, 220, 35072, 21727, 20375, 16142, 22177, 25443, 110, 18849, 20375, 45367, 12466, 112, 30143, 40623...
1.650761
985
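Nothing in the record shows the receiving side of this protocol. A minimal counterpart sketch, an assumption rather than part of the original project, listening on the same port and decoding the same JSON payload:

import json
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", 5000))  # same port the sender targets
while True:
    data, addr = sock.recvfrom(1024)
    msg = json.loads(data.decode("utf8"))
    print(msg["x_dir"], msg["y_dir"], msg["shot"])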
# Generated by Django 3.0.2 on 2020-06-06 23:24

from django.db import migrations, models
import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 17, 319, 12131, 12, 3312, 12, 3312, 2242, 25, 1731, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, ...
2.818182
44
from django.db import models

from permissions.models import ENVIRONMENT_PERMISSION_TYPE
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 6738, 21627, 13, 27530, 1330, 12964, 53, 4663, 1340, 10979, 62, 18973, 44, 40373, 62, 25216, 628 ]
3.461538
26
from django.db import models

# Create your models here.
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 628, 198, 2, 13610, 534, 4981, 994, 13, 628 ]
3.6875
16
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simps
from read_env import read_env1
from abi_read import abi_read
from coordsys import CoordSys

if __name__ == "__main__":
    F = compose_wf(kk, bands, eigen_val, eigen_vec)

    plt.imshow(F[0])
    plt.draw()
    plt.savefig("out.png")
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 629, 541, 88, 13, 18908, 4873, 1330, 985, 862, 198, 6738, 1100, 62, 24330, 1330, 1100, 62, 24330, 16, 198, 6738, 450, 72, 62, 96...
2.439394
132
""" CTD parses the CTD data file and yields a generated dictionary of record values. For a description of the CTD file format see the following link: http://ctdbase.org/downloads/;jsessionid=0BD8D8C07B7661002359C02D7C0275F8 Source Project: biothings.interactions Author: Greg Taylor: greg.k.taylor@gmail.com """ import json import logging import operator import re from hub.dataload.BiointeractParser import BiointeractParser from biothings.utils.dataload import dict_sweep
[ 37811, 198, 4177, 35, 13544, 274, 262, 16356, 35, 1366, 2393, 290, 19299, 198, 64, 7560, 22155, 286, 1700, 3815, 13, 198, 198, 1890, 257, 6764, 286, 262, 16356, 35, 2393, 5794, 766, 262, 198, 27780, 278, 2792, 25, 198, 198, 4023, 13...
3.0125
160
#!/usr/bin/env python3

import sys
import os

# Change path so we find Xlib
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from Xlib import X, display, Xutil
from Xlib.protocol import request

if __name__ == '__main__':
    main(sys.argv)

# vim: noet ts=4
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 25064, 198, 11748, 28686, 198, 198, 2, 9794, 3108, 523, 356, 1064, 1395, 8019, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 6978, 13, 22179, 7, 418, 13, 6978, 13, 1...
2.53271
107
# idcount : enumerate identifiers in files.

import os
import os.path

rwordSet = set(["asm", "auto", "bool", "break", "case", "catch", "char", "class", "const",
                "constexpr", "const_cast", "continue", "decltype", "default", "delete",
                "do", "double", "dynamic_cast", "else", "enum", "explicit", "extern",
                "false", "float", "for", "friend", "goto", "if", "inline", "int", "long",
                "mutable", "namespace", "new", "noexcept", "nullptr", "operator",
                "private", "protected", "public", "register", "reinterpret_cast",
                "return", "short", "signed", "sizeof", "static", "static_assert",
                "static_cast", "struct", "switch", "template", "this", "throw", "true",
                "try", "typename", "using", "void", "volatile"])
identifierSet = set()
symbolList = ["!", "@", "#", "$", "%", "^", "&", "*", "(", ")", "-", "+", "=", "[", "]",
              "{", "}", "|", "\\", ":", ";", "\"", "'", ",", ".", "<", ">", "/", "?", "~"]

print("Path to solve : ", end="$ ")
path = input()

IterateFileSystem(path, ProcessFile)

identifierList = list(identifierSet)
identifierList.sort()

i = 0
for identifier in identifierList:
    print("%-30s" % (identifier), end="")
    i = i + 1
    if i == 3:
        print("", end='\n')
        i = 0
print("", end='\n')
[ 2, 4686, 9127, 1058, 27056, 378, 42814, 287, 3696, 13, 201, 198, 201, 198, 11748, 28686, 201, 198, 11748, 28686, 13, 6978, 201, 198, 201, 198, 81, 4775, 7248, 796, 900, 7, 14692, 8597, 1600, 366, 23736, 1600, 366, 30388, 1600, 366, ...
2.235679
611
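IterateFileSystem and ProcessFile are called above but their definitions were dropped from this record. A sketch of what they plausibly did, assuming a plain os.walk traversal and whitespace tokenisation after stripping the listed symbols; the encoding handling is a guess:

def ProcessFile(filePath):
    # Read the file, blank out every symbol, then collect identifier-shaped tokens.
    try:
        with open(filePath, encoding="utf-8", errors="ignore") as f:
            text = f.read()
    except OSError:
        return
    for symbol in symbolList:
        text = text.replace(symbol, " ")
    for token in text.split():
        if token.isidentifier() and token not in rwordSet:
            identifierSet.add(token)

def IterateFileSystem(path, fileProcessor):
    # Apply fileProcessor to every file under path.
    for dirPath, dirNames, fileNames in os.walk(path):
        for fileName in fileNames:
            fileProcessor(os.path.join(dirPath, fileName))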
'''
Created on 25.05.2011

@author: SIGIESEC
'''

from timeit import Timer
import platform

KEY_COLUMN = "key_column"
VALUE_COLUMN = "value_column"

if __name__ == "__main__":
    print("Python version %s (%s)" % (platform.python_version(),
                                      getattr(platform, "python_implementation", lambda: "Unknown")()))
    print(platform.platform())

    measure("DictReaderTools.transform_to_set_valued_dict", iterations=100)
    measure("DictReaderTools.transform_to_set_valued_dict_sorted", iterations=100)
[ 7061, 6, 198, 41972, 319, 1679, 13, 2713, 13, 9804, 198, 198, 31, 9800, 25, 33993, 11015, 2943, 198, 7061, 6, 198, 198, 6738, 640, 270, 1330, 5045, 263, 198, 11748, 3859, 198, 198, 20373, 62, 25154, 5883, 45, 2625, 2539, 62, 28665, ...
2.922619
168
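measure() is referenced above but not defined in the record. A sketch under the assumption that it wraps timeit.Timer around the dotted function name, importing the module part in the Timer setup and calling the function with no arguments; the report format is invented:

def measure(name, iterations=100):
    module_name = name.split(".")[0]
    timer = Timer(stmt="%s()" % name, setup="import %s" % module_name)
    elapsed = timer.timeit(number=iterations)
    print("%s: %d iterations took %.3f s" % (name, iterations, elapsed))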
#!/usr/bin/env python #filename: dinkum-install-from-git.py #path: project/bin/ #repo: http://github.com/dinkumsoftware/dinkum.git """ This installs all the dinkumsoftware programs & code in ~/.dinkum/git-copy by copying files from the git clone of dinkumsoftware. It puts symbolic links in ~/doc/dinkum/* to all the documentation files under ~/.dinkum/git-copy It makes mild alteration to ~/.bashrc Puts ~/dinkum/git-copy/bin on the PATH Puts ~/.dinkum/git-copy/python on PYTHONPATH It does NOT require sudo. It assumes you have done a full git checkout of the source. (See EXAMPLES) It's OK to run this as many times as you like. If it was previously installed, it will be removed and reinstalled. If a new version of something shows up in git, you can rerun this to get it on your machine Any existing installation from git is UNINSTALLED and then reinstalled. This is due to limitations of some underlying tools. EXAMPLES cd ~/<somewhere> git clone http://github.com/dinkumsoftware/dinkum.git <somewhere>/bin/dinkum-install-from-git [optional] rm -rf ~/<somewhere>/dinkum # Don't need git copy after install # But feel free to keep it To undo these actions: rm -rf ~/.dinkum [optional] edit ~/.bashrc (see end of file) to remove dinkum stuff. # This is not required, won't break anything if you leave it in <todo> fix this USAGE dinkum-install-from-git optional arguments: -h, --help show this help message and exit -v, --verbose Announce what it is doing -d, --dry-run Announce what WOULD do, but don't do it You will have to log out and log in to pick up the .bashrc changes. If you want to just try it in a terminal window: bash EXIT STATUS 0 All is good 1 Something went wrong, maybe error printout 2 Some kind of exception tossed AUTHOR dinkumsoftware.com/tc LICENSE Copyright(c) 2019 Dinkum Software Licensed under Apache Version 2.0, January 2004 http://www.apache.org/licenses/ Full license text at end of file. VERSION {program_version} """ program_version = 0.0 __doc__=__doc__.format(program_version=program_version) # history: # 2019-04-30 tc Initial # 2019-04-30 tc Switched to shutil.copytree() # 2019-05-05 tc refactoring # 2020-02-03 tc Convert to python3, print ==> print() import sys, os, traceback, argparse import textwrap # dedent # Other imports from dinkum.x.y.z must wait a bit # until PYTHONPATH is set up # Support code def find_dinkum_git_root_dir(file_or_dir=sys.argv[0]) : ''' Starting with "file_or_dir", walks UP the file tree until it finds a directory containing a .git. That directory MUST be named "dinkum" for python imports to work properly. Stops looking if hits / directory An omitted "file_or_dir" starts with currently running executable. Returns the dinkum_git_root_dir On error, throws an exception: BadFileorDirArg file_or_dir is non-valid path NoDotGitDirectory Could not find parent dir with .git in it GitRootDirHasWrongName Found a git root dir, but not named "dinkum" ''' # We expect we are running from a git clone copy of dinkumsoftware. # file_or_dir: <x>/dinkum/a/b/c/whatever # dinkum_git_root_dir: <x>/dinkum # Validate the argument of where we start looking wrk_dir = file_or_dir wrk_dir = os.path.expanduser(wrk_dir) # ~ expansion wrk_dir = os.path.abspath(wrk_dir) if not os.path.isdir(wrk_dir) : # Passed a File instead of a directory? 
wrk_dir = os.path.dirname(wrk_dir) # Yes, get enclosing directory # Walk up the filetree, stopping when: # hit the / directory -or- # hit a directory with a .git subdir root = '/' git_dirname = '.git' while True : # Validate wrk_dir if not os.path.isdir(wrk_dir) : err_msg = """\ Not a directory: {wrk_dir} arg file_or_dir:{file_or_dir} was NOT a valid path. PROBABLE SOFTWARE ERROR.""" err_msg = textwrap.dedent(err_msg.format(wrk_dir=wrk_dir, file_or_dir=file_or_dir)) raise BadFileorDirArg(err_msg) # Hit the top of the file tree ? if wrk_dir == root or not wrk_dir : # Failed to find the dinkum git root err_msg = '''\ FAILED to locate dinkum git root dir. Starting looking upward in file system from: {file_or_dir} Did NOT find a directory with {git_dirname} in it.''' err_msg = textwrap.dedent(err_msg.format(file_or_dir=file_or_dir, git_dirname=git_dirname)) raise NoDotGitDirectory(err_msg) # Is this a git root? i.e contains a .git sub directory if os.path.isdir ( os.path.join( wrk_dir, git_dirname)) : # Yes, it is a git root break # wrk_dir is NOT a git root dir. # Look upward wrk_dir = os.path.dirname(wrk_dir) # We found a git root directory: wrk_dir # Make sure it has the required name dinkum_git_root_reqd_name = "dinkum" if os.path.basename(wrk_dir) != dinkum_git_root_reqd_name : err_msg = '''\ Found a git root dir (has a .git subdir): {wrk_dir} BUT it is NOT named: {dinkum_git_root_reqd_name} It must have that name for python package imports to work. Sure you are running from a good git clone of dinkumsoftware?''' err_msg = textwrap.dedent(err_msg.format(wrk_dir=wrk_dir, dinkum_git_root_reqd_name=dinkum_git_root_reqd_name)) raise GitRootDirHasWrongName(err_msg) # Life is good, found the required git directory and it is properly named return wrk_dir # returned "err_msg" will be printed for user def main (): ''' dinkum-install-from-git optional arguments: -h, --help show this help message and exit -v, --verbose Announce what it is doing -d, --dry-run Announce what WOULD do, but don't do it Normally returns None. On error, return "err_msg" . We install by copying from a git clone of dinkumsoftware to the directory ~/.dinkum. Find the git root dir and validate it Find and use python library code in git Enumerate all the files and subdirs to publish Copy files and recursively copy subdirs ''' # Specify and parse the command line arguments parser = argparse.ArgumentParser( # print document string "as is" on --help formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(__doc__)) parser.add_argument("-v", "--verbose", help="Announce what it is doing", action="store_true") parser.add_argument("-d", "--dry-run", help="Announce what WOULD do, but don't do it", action="store_true") parser.parse_args() args = parser.parse_args() verbose = args.verbose dry_run = args.dry_run # these are fragile times as we are a dinkum install # program. Can't make assumptions about where to find # stuff, in particular import of dinkum packages or location # of executables # We expect we are running from a git clone copy of dinkumsoftware # Find the root, the one with the .git in it git_root_dir = find_dinkum_git_root_dir() # all the dinkum executables live in various dinkum/.../.../bin dirs # dinkum/bin has symbolic links to all the executables # We put dinkum/bin on front of the path os.environ['PATH'] = os.path.join(git_root_dir, "bin") + os.pathsep + os.environ['PATH'] # diddle PYTHONPATH so that dinkum python imports work. 
# our python packages in git live in the git_root_dir (which is named dinkum) # git_root_dir is known to exist and be properly named pkg_dir = git_root_dir # sys.path must have the PARENT of the dinkum dir # insert that at head of search path sys.path.insert(0, os.path.dirname(pkg_dir)) # We can now use dinkum python package from dinkum.project.install import install_from_git install_from_git(git_root_dir, verbose, dry_run) # Life is good print ("Successfully installed DinkumSoftware's software from a git clone.") # Warn them if we didn't do anything if dry_run : print ("** This was a DRY-RUN. Nothing was written or removed. **") return None if __name__ == '__main__': try: # Invoke the actual program # It's an error if it returns anything main_return = main() if main_return : print (main_return) # ERROR: print whatever it returns # Pass back to the OS the proper exit code. 0 is good sys.exit( 1 if main_return else 0) except KeyboardInterrupt as e: # Ctrl-C raise e except SystemExit as e: # sys.exit() raise e except Exception as e: print ('ERROR: uncaught EXCEPTION. Msg after traceback.') traceback.print_exc() # stack dump (which prints err msg) os._exit(2) # full-license: ''' Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. '''
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 34345, 25, 288, 676, 388, 12, 17350, 12, 6738, 12, 18300, 13, 9078, 198, 2, 6978, 25, 1628, 14, 8800, 14, 198, 2, 260, 7501, 25, 2638, 1378, 12567, 13, 785, 14, 67, 676, 388, ...
2.973264
6,957
import tweepy
import logging
import os
[ 198, 11748, 4184, 538, 88, 198, 11748, 18931, 198, 11748, 28686, 628 ]
3.416667
12
""" Face Mesh Module By : JikanDev Website : https://jikandev.xyz/ """ import cv2 import mediapipe as mp class FaceMeshDetector(): """ Find 468 Landmarks using the mediapipe library. Exports the landmarks in pixel format. """ def __init__(self, mode=False, maxFaces=1, refine_lm=False, minDetectCon=0.5, minTrackCon=0.5): """ :param mode: In static mode, detection is done on each image: slower. :param maxFaces: Maximum number of faces to detect. :param refine_lm: Whether to further refine the landmark coordinates around the eyes and lips, and output additional landmarks around the irises. :param minDetectCon: Minimum Detection Confidence Threshold. :param minTrackCon: Minimum Tracking Confidence Threshold. """ self.mode = mode self.maxFaces = maxFaces self.refine_lm = refine_lm self.minDetectCon = minDetectCon self.minTrackCon = minTrackCon self.mpDraw = mp.solutions.drawing_utils self.mpDrawingStyles = mp.solutions.drawing_styles self.faceMesh = mp.solutions.face_mesh self.meshDetection = self.faceMesh.FaceMesh(mode, maxFaces, refine_lm, minDetectCon, minTrackCon) def findFaces(self, img, draw=True, drawTesselation=True): """ Find faces in an image and return the bbox info :param img: Image to find the faces in. :param draw: Flag to draw the output contours of the mesh on the image. :param drawTesselation: Flag to draw the output tesselation of the mesh on the image. :return: Image with or without drawings. """ imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) self.results = self.meshDetection.process(imgRGB) allFaces = [] h, w, c = img.shape if self.results.multi_face_landmarks: for faceLms in self.results.multi_face_landmarks: myMesh = {} mylmList = [] for id, lm in enumerate(faceLms.landmark): px, py = int(lm.x * w), int(lm.y * h) mylmList.append([px, py]) myMesh["lmList"] = mylmList if draw: self.mpDraw.draw_landmarks(img, faceLms, self.faceMesh.FACEMESH_CONTOURS, None) if drawTesselation: self.mpDraw.draw_landmarks(img, faceLms, self.faceMesh.FACEMESH_TESSELATION, None, self.mpDrawingStyles.get_default_face_mesh_tesselation_style()) allFaces.append(myMesh) return allFaces, img def main(): """ Example code to use the module. """ cap = cv2.VideoCapture(0) # Get your camera detector = FaceMeshDetector() # Call the FaceMeshDetector class while True: success, img = cap.read() # If success, img = read your camera image meshes, img = detector.findFaces(img) # meshes & img call the findFaces() function of FaceMeshDetector if meshes: # Mesh 1 mesh1 = meshes[0] lmList1 = mesh1["lmList"] # List of 21 Landmark points if len(meshes) == 2: # Mesh 2 mesh2 = meshes[1] lmList2 = mesh2["lmList"] # List of 21 Landmark points cv2.imshow("Face Mesh Module", img) cv2.waitKey(1) if __name__ == "__main__": main()
[ 37811, 201, 198, 32388, 47529, 19937, 201, 198, 3886, 1058, 449, 49894, 13603, 201, 198, 33420, 1058, 3740, 1378, 73, 1134, 392, 1990, 13, 5431, 89, 14, 201, 198, 37811, 201, 198, 11748, 269, 85, 17, 201, 198, 11748, 16957, 499, 3757,...
2.10814
1,683
import tempfile import uuid from enum import Enum from pathlib import Path from typing import NamedTuple, Dict from django.contrib.auth.models import User from django.contrib.postgres.fields import JSONField from django.db import models from django.db.models import Sum, Max, QuerySet import ppe.dataclasses as dc from ppe.data_mapping.types import DataFile class Demand(ImportedDataModel): """Real demand data from NYC""" item = ChoiceField(dc.Item) demand = models.IntegerField() # both start and end are inclusive start_date = models.DateField() end_date = models.DateField()
[ 11748, 20218, 7753, 198, 11748, 334, 27112, 198, 6738, 33829, 1330, 2039, 388, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 34441, 51, 29291, 11, 360, 713, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530...
3.167513
197
from django.test import TestCase

from libs.crypto.encrypt import CryptoHelper
[ 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 198, 6738, 9195, 82, 13, 29609, 78, 13, 12685, 6012, 1330, 36579, 47429, 628 ]
3.478261
23
#!/usr/bin/env python3 import gym import ptan import numpy as np from tensorboardX import SummaryWriter import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim GAMMA = 0.99 LEARNING_RATE = 0.01 EPISODES_TO_TRAIN = 4 if __name__ == "__main__": env = gym.make("CartPole-v0") writer = SummaryWriter(logdir='', comment="-cartpole-reinforce") net = PGN(env.observation_space.shape[0], env.action_space.n) print('Network architecture:\n') print(net) agent = ptan.agent.PolicyAgent( net, preprocessor=ptan.agent.float32_preprocessor, apply_softmax=True ) exp_source = ptan.experience.ExperienceSourceFirstLast( env, agent, gamma=GAMMA) optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE) total_rewards = [] step_idx = 0 done_episodes = 0 batch_episodes = 0 batch_states, batch_actions, batch_qvals = [], [], [] cur_rewards = [] for step_idx, exp in enumerate(exp_source): batch_states.append(exp.state) batch_actions.append(int(exp.action)) cur_rewards.append(exp.reward) if exp.last_state is None: batch_qvals.extend(calc_qvals(cur_rewards)) cur_rewards.clear() batch_episodes += 1 # handle new rewards new_rewards = exp_source.pop_total_rewards() if new_rewards: done_episodes += 1 reward = new_rewards[0] total_rewards.append(reward) mean_rewards = float(np.mean(total_rewards[-100:])) print("%d: reward: %6.2f, mean_100: %6.2f, episodes: %d" % ( step_idx, reward, mean_rewards, done_episodes)) writer.add_scalar("reward", reward, step_idx) writer.add_scalar("reward_100", mean_rewards, step_idx) writer.add_scalar("episodes", done_episodes, step_idx) if mean_rewards > 195: print("Solved in %d steps and %d episodes!" % (step_idx, done_episodes)) break if batch_episodes < EPISODES_TO_TRAIN: continue optimizer.zero_grad() states_v = torch.FloatTensor(batch_states) batch_actions_t = torch.LongTensor(batch_actions) batch_qvals_v = torch.FloatTensor(batch_qvals) logits_v = net(states_v) log_prob_v = F.log_softmax(logits_v, dim=1) log_prob_actions_v = ( batch_qvals_v * log_prob_v[range(len(batch_states)), batch_actions_t] ) loss_v = -log_prob_actions_v.mean() loss_v.backward() optimizer.step() batch_episodes = 0 batch_states.clear() batch_actions.clear() batch_qvals.clear() writer.close()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 11550, 198, 11748, 279, 38006, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 11192, 273, 3526, 55, 1330, 21293, 34379, 198, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 3...
2.085393
1,335
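calc_qvals() is used in the REINFORCE loop above but its body is missing from the record. The standard discounted-return helper this script appears to rely on, walking an episode's rewards backwards with the GAMMA defined above, would look like this (a reconstruction, not the verbatim original):

def calc_qvals(rewards):
    # Discounted cumulative returns, computed right-to-left then re-reversed.
    res = []
    sum_r = 0.0
    for r in reversed(rewards):
        sum_r *= GAMMA
        sum_r += r
        res.append(sum_r)
    return list(reversed(res))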
# future from __future__ import annotations # stdlib from typing import Any from typing import Dict from typing import List from typing import Optional # third party from google.protobuf.reflection import GeneratedProtocolMessageType # syft absolute import syft as sy # relative from .....proto.core.node.common.action.smpc_action_message_pb2 import ( SMPCActionMessage as SMPCActionMessage_PB, ) from ....common.message import ImmediateSyftMessageWithoutReply from ....common.serde.serializable import serializable from ....common.uid import UID from ....io.address import Address @serializable()
[ 2, 2003, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 2, 14367, 8019, 198, 6738, 19720, 1330, 4377, 198, 6738, 19720, 1330, 360, 713, 198, 6738, 19720, 1330, 7343, 198, 6738, 19720, 1330, 32233, 198, 198, 2, 2368, 2151, 198, 6...
3.619048
168
from __future__ import print_function, unicode_literals from datetime import datetime from pathlib import Path from PyQt5 import QtWidgets from PyQt5.QtCore import QThread from PyQt5.QtGui import QTextDocument, QTextBlock import tracra_export from mailbox import MailboxAnalyzeObject from oauth_utils import openOauthWebsite, generateOauthString import os, sys from multiprocessing import freeze_support from PyQt5.QtWidgets import * from utils_resource import ResourceManager from whois_worker import WhoisWorker if __name__ == "__main__": freeze_support() app = QApplication(sys.argv) tracra_main = TracraMain() tracra_main.show() exit_code = app.exec_() sys.exit(exit_code)
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 6738, 9485, 48, 83, 20, 1330, 33734, 54, 312, 11407, 198, 6738...
2.876
250
#!/usr/bin/env python

from gnuradio import gr
from gnuradio import audio
from gnuradio import trellis, digital, filter, blocks
from gnuradio import eng_notation
import math
import sys
import random
import fsm_utils

try:
    from gnuradio import analog
except ImportError:
    sys.stderr.write("Error: Program requires gr-analog.\n")
    sys.exit(1)

if __name__ == '__main__':
    main(sys.argv[1:])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 19967, 333, 324, 952, 1330, 1036, 198, 6738, 19967, 333, 324, 952, 1330, 6597, 198, 6738, 19967, 333, 324, 952, 1330, 2054, 297, 271, 11, 4875, 11, 8106, 11, 7021, 198, 67...
2.736486
148
import math


def get_chunks(xs, chunk_count=3):
    """
    Helper function to split a list into roughly equally sized chunks.
    """
    chunk_width = math.ceil(len(xs) / chunk_count)
    ranges = range(0, len(xs), chunk_width)
    return [xs[x:x + chunk_width] for x in ranges]
[ 11748, 10688, 628, 198, 4299, 651, 62, 354, 14125, 7, 34223, 11, 16058, 62, 9127, 28, 18, 2599, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 5053, 525, 2163, 284, 6626, 257, 1351, 656, 7323, 8603, 19943, 22716, 13, 198, 220, 220, ...
2.635514
107
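A worked example of the helper above (Python 3, where math.ceil returns an int usable as a slice step):

>>> get_chunks(list(range(10)), chunk_count=3)
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]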
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#######################################
# Script that builds a historical chart
# from NASA precipitation data
# Author: Jorge Mauricio
# Email: jorge.ernesto.mauricio@gmail.com
# Date: 2018-02-01
# Version: 1.0
#######################################

Created on Mon Jul 17 16:17:25 2017

@author: jorgemauricio
"""

# libraries
import pandas as pd
import os
import math
import numpy as np
import matplotlib.pyplot as plt

# Main program
# declare the main function

if __name__ == '__main__':
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 29113, 4242, 21017, 198, 2, 12327, 8358, 9943, 578, 1126, 283, 1036, 6557, 69, 3970, 1554, 10205, 30997...
2.919192
198
# region [Imports] # * Standard Library Imports ----------------------------------------------------------------------------> import os import asyncio from io import BytesIO from pathlib import Path from datetime import datetime from tempfile import TemporaryDirectory from textwrap import dedent # * Third Party Imports ---------------------------------------------------------------------------------> import discord from PIL import Image, ImageEnhance from pytz import timezone from discord.ext import commands, flags # * Gid Imports -----------------------------------------------------------------------------------------> import gidlogger as glog # * Local Imports ---------------------------------------------------------------------------------------> from antipetros_discordbot.utility.misc import make_config_name from antipetros_discordbot.utility.enums import WatermarkPosition from antipetros_discordbot.utility.checks import allowed_channel_and_allowed_role_2, command_enabled_checker, allowed_requester from antipetros_discordbot.utility.embed_helpers import make_basic_embed from antipetros_discordbot.utility.gidtools_functions import loadjson, pathmaker from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper from antipetros_discordbot.utility.poor_mans_abc import attribute_checker from antipetros_discordbot.utility.enums import CogState from antipetros_discordbot.utility.replacements.command_replacement import auto_meta_info_command # endregion[Imports] # region [TODO] # TODO: create regions for this file # TODO: Document and Docstrings # endregion [TODO] # region [Logging] log = glog.aux_logger(__name__) glog.import_notification(log, __name__) # endregion[Logging] # region [Constants] APPDATA = ParaStorageKeeper.get_appdata() BASE_CONFIG = ParaStorageKeeper.get_config('base_config') COGS_CONFIG = ParaStorageKeeper.get_config('cogs_config') THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__)) # location of this file, does not work if app gets compiled to exe with pyinstaller COG_NAME = "ImageManipulationCog" CONFIG_NAME = make_config_name(COG_NAME) get_command_enabled = command_enabled_checker(CONFIG_NAME) # endregion [Constants] class ImageManipulatorCog(commands.Cog, command_attrs={'hidden': False, "name": COG_NAME}): """ Soon """ # region [ClassAttributes] config_name = CONFIG_NAME allowed_stamp_formats = set(loadjson(APPDATA["image_file_extensions.json"])) stamp_positions = {'top': WatermarkPosition.Top, 'bottom': WatermarkPosition.Bottom, 'left': WatermarkPosition.Left, 'right': WatermarkPosition.Right, 'center': WatermarkPosition.Center} docattrs = {'show_in_readme': True, 'is_ready': (CogState.WORKING | CogState.OPEN_TODOS | CogState.UNTESTED | CogState.FEATURE_MISSING | CogState.NEEDS_REFRACTORING | CogState.DOCUMENTATION_MISSING, "2021-02-06 05:09:20", "f166431cb83ae36c91d70d7d09020e274a7ebea84d5a0c724819a3ecd2230b9eca0b3e14c2d473563d005671b7a2bf9d87f5449544eb9b57bcab615035b0f83d")} required_config_data = dedent(""" avatar_stamp = ASLOGO1 avatar_stamp_fraction = 0.2 stamps_margin = 5 stamp_fraction = 0.3""") # endregion[ClassAttributes] # region [Init] # endregion[Init] # region [Setup] # endregion[Setup] # region [Properties] @property @property @property @property # endregion[Properties] @staticmethod @flags.add_flag("--stamp-image", "-si", type=str, default='ASLOGO1') @flags.add_flag("--first-pos", '-fp', type=str, default="bottom") @flags.add_flag("--second-pos", '-sp', type=str, default="right") @flags.add_flag("--stamp-opacity", '-so', 
type=float, default=1.0) @flags.add_flag('--factor', '-f', type=float, default=None) @auto_meta_info_command(enabled=get_command_enabled("stamp_image"), cls=flags.FlagCommand) @allowed_channel_and_allowed_role_2(in_dm_allowed=False) @commands.max_concurrency(1, per=commands.BucketType.guild, wait=True) async def stamp_image(self, ctx, **flags): """ Stamps an image with a small image from the available stamps. Usefull for watermarking images. Get all available stamps with '@AntiPetros available_stamps' """ async with ctx.channel.typing(): if len(ctx.message.attachments) == 0: # TODO: make as embed await ctx.send('! **there is NO image to antistasify** !') return if flags.get('stamp_image') not in self.stamps: # TODO: make as embed await ctx.send("! **There is NO stamp with that name** !") return first_pos = self.stamp_positions.get(flags.get("first_pos").casefold(), None) second_pos = self.stamp_positions.get(flags.get("second_pos").casefold(), None) if any(_pos is None for _pos in [first_pos, second_pos]) or first_pos | second_pos not in self.stamp_pos_functions: # TODO: make as embed await ctx.send("! **Those are NOT valid position combinations** !") return for _file in ctx.message.attachments: # TODO: maybe make extra attribute for input format, check what is possible and working. else make a generic format list if any(_file.filename.endswith(allowed_ext) for allowed_ext in self.allowed_stamp_formats): _stamp = self._get_stamp_image(flags.get('stamp_image'), flags.get('stamp_opacity')) _stamp = _stamp.copy() with TemporaryDirectory(prefix='temp') as temp_dir: temp_file = Path(pathmaker(temp_dir, 'temp_file.png')) log.debug("Tempfile '%s' created", temp_file) await _file.save(temp_file) in_image = await self.bot.execute_in_thread(Image.open, temp_file) in_image = await self.bot.execute_in_thread(in_image.copy) factor = self.target_stamp_fraction if flags.get('factor') is None else flags.get('factor') pos_function = self.stamp_pos_functions.get(first_pos | second_pos) in_image = await self.bot.execute_in_thread(pos_function, in_image, _stamp, factor) name = 'antistasified_' + os.path.splitext(_file.filename)[0] await ctx.message.delete() # TODO: make as embed await self._send_image(ctx, in_image, name, f"__**{name}**__") @auto_meta_info_command(enabled=get_command_enabled("available_stamps")) @allowed_channel_and_allowed_role_2(in_dm_allowed=False) @commands.cooldown(1, 120, commands.BucketType.channel) async def available_stamps(self, ctx): """ Posts all available stamps. """ await ctx.message.delete() await ctx.send(embed=await make_basic_embed(title="__**Currently available Stamps are:**__", footer="These messages will be deleted in 120 seconds", symbol='photo'), delete_after=120) for name, image_path in self.stamps.items(): thumb_image = Image.open(image_path) thumb_image.thumbnail((128, 128)) with BytesIO() as image_binary: await asyncio.sleep(0) thumb_image.save(image_binary, 'PNG', optimize=True) image_binary.seek(0) _file = discord.File(image_binary, filename=name + '.png') embed = discord.Embed(title="Available Stamp") embed.add_field(name='Stamp Name:', value=name) embed.set_image(url=f"attachment://{name}.png") await ctx.send(embed=embed, file=_file, delete_after=120) @auto_meta_info_command(enabled=get_command_enabled("member_avatar")) @allowed_channel_and_allowed_role_2(in_dm_allowed=False) @commands.cooldown(1, 300, commands.BucketType.member) async def member_avatar(self, ctx): """ Stamps the avatar of a Member with the Antistasi Crest. 
Returns the new stamped avatar as a .PNG image that the Member can save and replace his orginal avatar with. """ avatar_image = await self.get_avatar_from_user(ctx.author) stamp = self.avatar_stamp modified_avatar = await self.bot.execute_in_thread(self._to_bottom_right, avatar_image, stamp, self.avatar_stamp_fraction) name = f"{ctx.author.name}_Member_avatar" await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**") # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"? # @commands.command(aliases=get_aliases("map_changed"), enabled=get_command_enabled("map_changed")) # @allowed_channel_and_allowed_role_2(in_dm_allowed=False) # @commands.max_concurrency(1, per=commands.BucketType.guild, wait=False) # async def map_changed(self, ctx, marker, color): # """ # Proof of concept for future real time server map. # """ # log.info("command was initiated by '%s'", ctx.author.name) # with BytesIO() as image_binary: # self.base_map_image, image_binary = await self.bot.execute_in_thread(self.map_image_handling, self.base_map_image, marker, color, image_binary) # if self.old_map_message is not None: # await self.old_map_message.delete() # delete_time = None # embed = discord.Embed(title='Current Server Map State', color=self.support.green.discord_color, timestamp=datetime.now(tz=timezone("Europe/Berlin")), type="image") # embed.set_author(name='Antistasi Community Server 1', icon_url="https://s3.amazonaws.com/files.enjin.com/1218665/site_logo/NEW%20LOGO%20BANNER.png", url="https://a3antistasi.enjin.com/") # embed.set_image(url="attachment://map.png") # self.old_map_message = await ctx.send(embed=embed, file=discord.File(fp=image_binary, filename="map.png"), delete_after=delete_time) # log.debug("finished 'map_changed' command") # region [SpecialMethods] # endregion[SpecialMethods] def setup(bot): """ Mandatory function to add the Cog to the bot. """ bot.add_cog(attribute_checker(ImageManipulatorCog(bot)))
[ 198, 198, 2, 3814, 685, 3546, 3742, 60, 198, 198, 2, 1635, 8997, 10074, 1846, 3742, 16529, 10541, 29, 198, 11748, 28686, 198, 11748, 30351, 952, 198, 6738, 33245, 1330, 2750, 4879, 9399, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, ...
2.446888
4,274
from tkinter import * from math import sqrt root = Tk() root.title("Calculadora") root['bg'] = "cyan" f_num = DoubleVar() l_num = DoubleVar() operation = StringVar() label_value = StringVar() singal = IntVar() singal.set(0) # declaring widgets label = Label(root, textvariable = label_value, bg = "cyan", font = ("anonymous", 10)) entry = Entry(root, font = ('Arial', 16), bg = "cyan") sqrbtn = Button(root, text= "√",relief = FLAT, bg = "lightgreen", command = sqroot) blopenbtn = Button(root, text= "(",relief = FLAT, bg = "light coral", command = lambda: type_number("(")) blclosebtn = Button(root, text= ")",relief = FLAT, bg = "light coral", command = lambda: type_number(")")) percetbtn = Button(root, text= "%",relief = FLAT, bg = "lightgray", command = percent) delbtn = Button(root, text= "DEL", relief = FLAT, bg = "lightpink", command = delete_entry) clrbtn = Button(root, text= "CLR", relief = FLAT, bg = "tomato", command = clean) btn0 = Button(root, text= "0", relief = FLAT, bg = "lightblue", command = lambda: type_number(0)) btn1 = Button(root, text= "1", relief = FLAT, bg = "lightblue", command = lambda: type_number(1)) btn2 = Button(root, text= "2", relief = FLAT, bg = "lightblue", command = lambda: type_number(2)) btn3 = Button(root, text= "3", relief = FLAT, bg = "lightblue", command = lambda: type_number(3)) btn4 = Button(root, text= "4", relief = FLAT, bg = "lightblue", command = lambda: type_number(4)) btn5 = Button(root, text= "5", relief = FLAT, bg = "lightblue", command = lambda: type_number(5)) btn6 = Button(root, text= "6", relief = FLAT, bg = "lightblue", command = lambda: type_number(6)) btn7 = Button(root, text= "7", relief = FLAT, bg = "lightblue", command = lambda: type_number(7)) btn8 = Button(root, text= "8", relief = FLAT, bg = "lightblue", command = lambda: type_number(8)) btn9 = Button(root, text= "9", relief = FLAT, bg = "lightblue", command = lambda: type_number(9)) spotbtn = Button(root, text= ".", relief = FLAT, bg = "lightblue", command = lambda: type_number(".")) devbtn = Button(root, text= "÷", relief = FLAT, bg = "lightgray", command = lambda: type_number("÷")) multbtn = Button(root, text= "x", relief = FLAT, bg = "lightgray", command = lambda: type_number("x")) addbtn = Button(root, text= "+", relief = FLAT, bg = "lightgray", command = lambda: type_number("+")) subbtn = Button(root, text= "-", relief = FLAT, bg = "lightgray", command = lambda: type_number("-")) iqualbtn = Button(root, text= "=", relief = FLAT, bg = "lightgreen", command = iqual) # give position for widgets label.grid(row = 0, column = 0,columnspan = 4, sticky = W) entry.grid(row = 1, column = 0, columnspan = 4, sticky = W+E, ipady = 30) btn7.grid(row = 3, column = 0, sticky = W+E, ipadx = 20, ipady = 30) btn8.grid(row = 3, column = 1, sticky = W+E, ipadx = 20, ipady = 30) btn9.grid(row =3, column = 2, sticky = W+E, ipadx = 20, ipady = 30) clrbtn.grid(row =3, column = 3, sticky = W+E+N, ipadx = 20, ipady = 8) delbtn.grid(row =3, column = 3, sticky = W+E+S, ipadx = 20, ipady = 8) devbtn.grid(row =4, column = 3, sticky = W+E+N, ipadx = 20, ipady = 8) btn4.grid(row = 4, column = 0, sticky = W+E, ipadx = 20, ipady = 30) btn5.grid(row = 4, column = 1, sticky = W+E, ipadx = 20, ipady = 30) btn6.grid(row =4, column = 2, sticky = W+E, ipadx = 20, ipady = 30) multbtn.grid(row =4, column = 3, sticky = W+E+S, ipadx = 20, ipady = 8) subbtn.grid(row =5, column = 3, sticky = W+E+N, ipadx = 20, ipady = 8) btn1.grid(row = 5, column = 0, sticky = W+E, ipadx = 20, ipady = 30) btn2.grid(row = 5, column = 1, 
sticky = W+E, ipadx = 20, ipady = 30) btn3.grid(row = 5, column = 2, sticky = W+E, ipadx = 20, ipady = 30) addbtn.grid(row =5, column = 3, sticky = W+E+S, ipadx = 20, ipady = 8) btn0.grid(row = 6, column = 1, sticky = W+E, ipadx = 20, ipady = 15) spotbtn.grid(row = 6, column = 0, sticky = W+E, ipadx = 20, ipady = 15) iqualbtn.grid(row =6, columnspan = 2, column = 2, sticky = W+E, ipadx = 20, ipady = 15) sqrbtn.grid(row =2, column = 2, sticky = W+E, ipadx = 20, ipady = 8) blopenbtn.grid(row =2, column = 0, sticky = W+E, ipadx = 20, ipady = 8) blclosebtn.grid(row =2, column = 1, sticky = W+E, ipadx = 20, ipady = 8) percetbtn.grid(row =2, column = 3, sticky = W+E, ipadx = 20, ipady = 8) root.mainloop()
[ 6738, 256, 74, 3849, 1330, 1635, 198, 6738, 10688, 1330, 19862, 17034, 198, 198, 15763, 796, 309, 74, 3419, 198, 15763, 13, 7839, 7203, 9771, 3129, 324, 5799, 4943, 198, 15763, 17816, 35904, 20520, 796, 366, 948, 272, 1, 198, 198, 69,...
2.547676
1,678
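The calculator above wires its buttons to sqroot, percent, delete_entry, clean, type_number and iqual, none of which survive in the record. A plausible sketch of those callbacks, assuming an eval-based evaluator with the ÷/x symbols mapped to Python operators; they would have to be defined before the widgets that reference them:

def type_number(value):
    entry.insert(END, value)

def clean():
    entry.delete(0, END)

def delete_entry():
    current = entry.get()
    if current:
        entry.delete(len(current) - 1, END)

def sqroot():
    value = sqrt(float(entry.get()))  # assumes the entry currently holds a single number
    clean()
    entry.insert(END, value)

def percent():
    type_number("%")

def iqual():
    # Map the display symbols onto Python operators, then evaluate.
    expression = entry.get().replace("x", "*").replace("÷", "/").replace("%", "/100")
    try:
        result = eval(expression)
        label_value.set(entry.get() + " =")
        clean()
        entry.insert(END, result)
    except Exception:
        label_value.set("invalid expression")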
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import attr
from attr.validators import instance_of as is_a
from attr.validators import optional


@attr.s(frozen=True)
class OntologyTerm(object):
    """
    This represents a single term in a specific ontology.
    """

    ontology = attr.ib(validator=is_a(str), converter=str)
    ontology_id = attr.ib(validator=is_a(str), converter=str)
    name = attr.ib(validator=is_a(str), converter=str)
    definition = attr.ib(validator=optional(is_a(str)))
    synonyms = attr.ib(validator=is_a(list))
    insdc_qualifier = attr.ib(
        validator=optional(is_a(str)),
        default=None,
    )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 15269, 685, 10531, 12, 5539, 60, 17228, 9148, 12, 22030, 16024, 259, 18982, 873, 5136, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15...
2.97543
407
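A small construction example for the attrs class above; GO:0008150 is a real Gene Ontology identifier, but its use here is purely illustrative:

term = OntologyTerm(
    ontology="GO",
    ontology_id="GO:0008150",
    name="biological_process",
    definition=None,
    synonyms=[],
)
print(term.ontology_id)  # insdc_qualifier falls back to its None default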
from django.conf import settings from django.contrib import messages from django.contrib.auth.decorators import permission_required from django.template.response import TemplateResponse from django.shortcuts import get_object_or_404, redirect from django.utils.translation import pgettext_lazy from ....core.utils import get_paginator_items from ....widget.models import Scene, Spotlight from ...views import staff_member_required from .filters import SceneFilter from . import forms @staff_member_required @permission_required('site.manage_settings') @staff_member_required @permission_required('site.manage_settings') @staff_member_required @permission_required('site.manage_settings') @staff_member_required @permission_required('site.manage_settings') @staff_member_required @permission_required('site.manage_settings') @staff_member_required @permission_required('site.manage_settings') @staff_member_required @permission_required('site.manage_settings') @staff_member_required @permission_required('site.manage_settings')
[ 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 6218, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, 7170, 62, 35827, 198, 6738, 42625, 14208, 13, 28243, 13, 26209...
3.471761
301
SACCHAROMYCES_CEREVISIAE_TAXONOMY_ID = 4932
DROSOPHILA_MELANOGASTER_TAXONOMY_ID = 7227
HOMO_SAPIENS_TAXONOMY_ID = 9606

GENE_PLACEHOLDER = 'MYGENETOKEN'
DISEASE_PLACEHOLDER = 'MYDISEASETOKEN'
[ 50, 2246, 38019, 2662, 44816, 1546, 62, 34, 9338, 29817, 3539, 36, 62, 5603, 55, 1340, 2662, 56, 62, 2389, 796, 5125, 2624, 198, 7707, 2640, 3185, 39, 47164, 62, 44, 3698, 1565, 7730, 1921, 5781, 62, 5603, 55, 1340, 2662, 56, 62, ...
1.828571
105
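The two placeholder constants above suggest a mask-and-restore workflow for entity mentions. A hypothetical illustration (the sentence and the gene symbol are invented):

sentence = "MYGENETOKEN is overexpressed in several tumour types"
restored = sentence.replace(GENE_PLACEHOLDER, "TP53")  # hypothetical gene symbol
print(restored)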
import torch
import torch.nn as nn

from model.ctr.fm import FM
from model.basic.mlp import MLP
from model.basic.output_layer import OutputLayer

"""
Model: FNN: Factorization-machine supported Neural Network
Version: arXiv [v1] Mon, 11 Jan 2016 10:04:40 UTC
Reference: Zhang, W., Du, T., & Wang, J. (2016). Deep Learning over Multi-field Categorical Data:
           A Case Study on User Response Prediction. arXiv: Learning,.
"""
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 2746, 13, 24087, 13, 38353, 1330, 18695, 198, 6738, 2746, 13, 35487, 13, 4029, 79, 1330, 10373, 47, 198, 6738, 2746, 13, 35487, 13, 22915, 62, 29289, 1330, 25235, 49...
2.966667
150
n1 = int(input("Enter an integer: "))
n2 = int(input("Enter an integer: "))
n3 = int(input("Enter an integer: "))
n4 = int(input("Enter an integer: "))
n5 = int(input("Enter an integer: "))
lista1 = [n1, n2, n3, n4, n5]
print(lista1)

lista2 = [1, 2, 3, 4, 5]
print(lista2)
[ 77, 16, 796, 493, 7, 15414, 7203, 19511, 578, 23781, 299, 21356, 647, 78, 493, 68, 7058, 25, 366, 4008, 198, 77, 17, 796, 493, 7, 15414, 7203, 19511, 578, 23781, 299, 21356, 647, 78, 493, 68, 7058, 25, 366, 4008, 198, 77, 18, 79...
2.00641
156
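The five repeated input() calls above can be collapsed into a single comprehension; an equivalent sketch:

lista1 = [int(input("Enter an integer: ")) for _ in range(5)]
print(lista1)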
# ----------------------------------------------------------------------------- # Copyright (c) 2014--, The Qiita Development Team. # # Distributed under the terms of the BSD 3-clause License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------- from unittest import TestCase, main from os import close, remove from os.path import basename, join from tempfile import mkstemp from qiita_core.util import qiita_test_checker from qiita_db.reference import Reference from qiita_db.util import get_db_files_base_dir @qiita_test_checker() if __name__ == '__main__': main()
[ 2, 16529, 32501, 198, 2, 15069, 357, 66, 8, 1946, 438, 11, 383, 21924, 5350, 7712, 4816, 13, 198, 2, 198, 2, 4307, 6169, 739, 262, 2846, 286, 262, 347, 10305, 513, 12, 565, 682, 13789, 13, 198, 2, 198, 2, 383, 1336, 5964, 318, ...
3.885714
175
from .Athena import Athena
from .Configuration import configuration
from .console import Console
from .Embeds import embeds
from .PugTools import PugSession
from .Database import GuildDatabase, Guild
[ 6738, 764, 2953, 831, 64, 1330, 21341, 201, 198, 6738, 764, 38149, 1330, 8398, 201, 198, 6738, 764, 41947, 1330, 24371, 201, 198, 6738, 764, 31567, 5379, 1330, 11525, 82, 201, 198, 6738, 764, 47, 1018, 33637, 1330, 33260, 36044, 201, ...
3.961538
52
import numpy as np import pylab as plt import os from gastrometry import biweight_median, biweight_mad from gastrometry import median_check_finite import pickle if __name__ == "__main__": #stat = plot_distribution_residuals(path='../data/', stat = 'stat.pkl', mas=3600.*1e3) #plt.savefig('../../../../Dropbox/hsc_astro/figures/histo_stds_full_survey_no_corrections.pdf') plot_eb_mode_full_survey('../data/final_gp_outputs_all.pkl')
[ 11748, 299, 32152, 355, 45941, 198, 11748, 279, 2645, 397, 355, 458, 83, 198, 11748, 28686, 198, 6738, 21956, 398, 11973, 1330, 3182, 6551, 62, 1150, 666, 11, 3182, 6551, 62, 9937, 198, 6738, 21956, 398, 11973, 1330, 14288, 62, 9122, ...
2.497238
181
FORMER_TEAM_NAME_MAP = { 'AFC Bournemouth': 'AFC Bournemouth', 'Accrington FC': 'Accrington FC', 'Arsenal FC': 'Arsenal FC', 'Aston Villa': 'Aston Villa', 'Barnsley FC': 'Barnsley FC', 'Birmingham City': 'Birmingham City', 'Birmingham FC': 'Birmingham City', 'Blackburn Rovers': 'Blackburn Rovers', 'Blackpool FC': 'Blackpool FC', 'Bolton Wanderers': 'Bolton Wanderers', 'Bradford City': 'Bradford City', 'Bradford Park Avenue': 'Bradford Park Avenue', 'Brentford FC': 'Brentford FC', 'Brighton & Hove Albion': 'Brighton & Hove Albion', 'Bristol City': 'Bristol City', 'Burnley FC': 'Burnley FC', 'Bury FC': 'Bury FC', 'Cardiff City': 'Cardiff City', 'Riverside A.F.C.': 'Cardiff City', 'Carlisle United': 'Carlisle United', 'Charlton Athletic': 'Charlton Athletic', 'Chelsea FC': 'Chelsea FC', 'Coventry City': 'Coventry City', 'Singers F.C.': 'Coventry City', 'Crystal Palace': 'Crystal Palace', 'Darwen': 'Darwen', 'Derby County': 'Derby County', 'Everton FC': 'Everton FC', 'St. Domingo FC': 'Everton FC', 'Fulham FC': 'Fulham FC', 'Glossop North End': 'Glossop North End', 'Grimsby Town': 'Grimsby Town', 'Huddersfield Town': 'Huddersfield Town', 'Hull City': 'Hull City', 'Ipswich Town': 'Ipswich Town', 'Leeds United': 'Leeds United', 'Leicester City': 'Leicester City', 'Leicester Fosse': 'Leicester City', 'Leyton Orient': 'Leyton Orient', 'Clapton Orient': 'Leyton Orient', 'Liverpool FC': 'Liverpool FC', 'Luton Town': 'Luton Town', 'Manchester City': 'Manchester City', 'St. Marks': 'Manchester City', 'Ardwick A.F.C.': 'Manchester City', 'Manchester United': 'Manchester United', 'Middlesbrough FC': 'Middlesbrough FC', 'Millwall FC': 'Millwall FC', 'Millwall Rovers': 'Millwall FC', 'Millwall Athletic': 'Millwall FC', 'Newcastle United': 'Newcastle United', 'Newcastle East End F.C.': 'Newcastle United', 'Newton Heath FC': 'Manchester United', 'Northampton Town': 'Northampton Town', 'Norwich City': 'Norwich City', 'Nottingham Forest': 'Nottingham Forest', 'Notts County': 'Notts County', 'Oldham Athletic': 'Oldham Athletic', 'Pine Villa F.C.': 'Oldham Athletic', 'Oxford United': 'Oxford United', 'Headington United': 'Oxford United', 'Portsmouth FC': 'Portsmouth FC', 'Portsmouth Royal Navy': 'Portsmouth FC', 'Preston North End': 'Preston North End', 'Queens Park Rangers': 'Queens Park Rangers', 'Reading FC': 'Reading FC', 'Sheffield United': 'Sheffield United', 'Sheffield Wednesday': 'Sheffield Wednesday', 'Wednesday Football Club': 'Sheffield Wednesday', 'Small Heath Birmingham': 'Birmingham City', 'Southampton FC': 'Southampton FC', "St. Mary's F.C": 'Southampton FC', "Southampton St. Mary's": 'Southampton FC', 'Stoke City': 'Stoke City', 'Stoke Ramblers': 'Stoke City', 'Stoke F.C.': 'Stoke City', 'Sunderland AFC': 'Sunderland AFC', 'Sunderland and District Teachers AFC': 'Sunderland AFC', 'Swansea City': 'Swansea City', 'Swansea Town': 'Swansea City', 'Swindon Town': 'Swindon Town', 'Tottenham Hotspur': 'Tottenham Hotspur', 'Hotspur FC': 'Tottenham Hotspur', 'Watford FC': 'Watford FC', 'Watford Rovers': 'Watford FC', 'West Hertfordshire': 'Watford FC', 'West Bromwich Albion': 'West Bromwich Albion', 'West Bromwich Strollers': 'West Bromwich Albion', 'West Ham United': 'West Ham United', 'Thames Ironworks F.C.': 'West Ham United', 'Wigan Athletic': 'Wigan Athletic', 'Wimbledon FC': 'Wimbledon FC', 'Wolverhampton Wanderers': 'Wolverhampton Wanderers', "St Luke's F.C": 'Wolverhampton Wanderers', 'Woolwich Arsenal': 'Arsenal FC' }
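# A small illustrative helper (an addition, not part of the original snippet)
# showing how a historical-name map like this is typically consulted: current
# names map to themselves, former names map to the club's present identity,
# and unknown names fall back unchanged so lookups never raise.
def normalize_team_name(name):
    return FORMER_TEAM_NAME_MAP.get(name, name)

# e.g. normalize_team_name('Newton Heath FC') -> 'Manchester United'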
[ 21389, 1137, 62, 9328, 2390, 62, 20608, 62, 33767, 796, 1391, 198, 220, 220, 220, 705, 32, 4851, 347, 1798, 46880, 10354, 705, 32, 4851, 347, 1798, 46880, 3256, 198, 220, 220, 220, 705, 17320, 24833, 10029, 10354, 705, 17320, 24833, 1...
2.607167
1,479
import py
import pytest

from _pytest.tmpdir import tmpdir, TempdirHandler


@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
                    reason="symlink not available on this platform")
[ 11748, 12972, 198, 11748, 12972, 9288, 198, 198, 6738, 4808, 9078, 9288, 13, 22065, 15908, 1330, 45218, 15908, 11, 24189, 15908, 25060, 198, 198, 31, 9078, 9288, 13, 4102, 13, 48267, 361, 7, 1662, 468, 35226, 7, 9078, 13, 6978, 13, 12...
2.506024
83
import os
import sys
import win32gui
import win32con
import time

def winEnumHandler(hwnd, title):
    # reconstructed stub: the original handler definition is missing from this
    # snippet; bringing the matching window to the foreground is one plausible body
    if win32gui.IsWindowVisible(hwnd) and title in win32gui.GetWindowText(hwnd):
        win32gui.SetForegroundWindow(hwnd)

win32gui.EnumWindows(winEnumHandler, sys.argv[1])
print(sys.argv)  # Manuscript_Dec_30.pdf - Adobe Reader
sys.exit()
[ 11748, 28686, 198, 11748, 25064, 198, 198, 11748, 1592, 2624, 48317, 198, 11748, 1592, 2624, 1102, 198, 11748, 640, 628, 198, 5404, 2624, 48317, 13, 4834, 388, 11209, 7, 1592, 4834, 388, 25060, 11, 25064, 13, 853, 85, 58, 16, 60, 1267...
2.647887
71
from eth_utils import (
    encode_hex,
)

from eth.typing import (
    BaseOrSpoofTransaction,
)

from eth.vm.computation import BaseComputation
from eth.vm.forks.homestead.state import (
    HomesteadState,
    HomesteadTransactionExecutor,
)

from .computation import SpuriousDragonComputation
from ._utils import collect_touched_accounts
[ 6738, 4555, 62, 26791, 1330, 357, 198, 220, 220, 220, 37773, 62, 33095, 11, 198, 8, 198, 198, 6738, 4555, 13, 774, 13886, 1330, 357, 198, 220, 220, 220, 7308, 5574, 4561, 37711, 48720, 11, 198, 8, 198, 198, 6738, 4555, 13, 14761, ...
2.974138
116
# Normal Incremental Function
def incrementNormal(x):
    # reconstructed: the definition was missing; assumed to mirror the lambda below
    return x * 3

# Lambda Expression
incrementLamba = lambda x: x * 3

print("The Increment Normal : ", incrementNormal(50))
print("The Increment Lamba : ", incrementLamba(15))

productLamba = lambda x, y: x * y
print("The Product Lamba : ", productLamba(15, 3))

# # Printing The Type
# print(type(incrementNormal))
# print(type(incrementLamba))
[ 2, 14435, 10791, 37098, 15553, 198, 198, 2, 21114, 6814, 41986, 198, 24988, 434, 43, 31842, 796, 37456, 2124, 1058, 2124, 1635, 513, 198, 198, 4798, 7203, 464, 10791, 434, 14435, 1058, 33172, 18703, 26447, 7, 1120, 4008, 198, 4798, 7203...
3.13913
115
from logging import getLogger

from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver

from dynamic_databases.models import DynamicDatabaseConfig

logger = getLogger('dynamic_databases.receivers')


@receiver([pre_save, pre_delete], sender=DynamicDatabaseConfig)
[ 198, 6738, 18931, 1330, 651, 11187, 1362, 198, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 12683, 874, 1330, 662, 62, 21928, 11, 662, 62, 33678, 198, 6738, 42625, 14208, 13, 6381, 17147, 1330, 9733, 198, 198, 6738, 8925, 62, 196...
3.426966
89
import exceptions

import requests


class Client(object):
    """Client for the API.

    :param string session: Keystoneauth session.
    :param string token: Token used for HuaWei VBS access.
    :param string endpoint: HuaWei VBS endpoint url.
    :param string tenant_id: The project ID used for context.
    """

    def __init__(self, token=None, tenant_id=None, endpoint=None, **kwargs):
        """Initialize a new client for the API."""
        self.token = token
        self.tenant_id = tenant_id
        self.endpoint = endpoint
        self.headers = {
            'Content-Type': 'application/json;charset=utf8',
            'X-Auth-Token': token,
        }

    def _request(self, method, url, **kwargs):
        """Base request."""
        res = requests.request(method, url, headers=self.headers,
                               verify=False, **kwargs)
        if res.status_code != 200:
            print(res.text)
            res.raise_for_status()
        result = res.json()
        if 'error' in result:
            raise exceptions.ServiceError(
                result['error']['code'], result['error']['message'])
        return result

    def _get(self, url, **kwargs):
        """GET helper (reconstructed: referenced below but missing from the snippet)."""
        return self._request('GET', url, **kwargs)

    def _post(self, url, **kwargs):
        """POST helper (reconstructed: referenced below but missing from the snippet)."""
        return self._request('POST', url, **kwargs)

    def backup_list(self):
        """Backup list api."""
        url = self.endpoint + '/v2/%s/backups' % self.tenant_id
        return self._get(url)

    def backup_create(self, volume_id, name, description=None):
        """Backup create api."""
        data = {
            'backup': {
                'volume_id': volume_id,
                'name': name,
            }
        }
        if description:
            data['backup']['description'] = description
        url = self.endpoint + '/v2/%s/cloudbackups' % self.tenant_id
        return self._post(url, json=data)

    def backup_delete(self, backup_id):
        """Backup delete api."""
        url = self.endpoint + '/v2/%s/cloudbackups/%s' % \
            (self.tenant_id, backup_id)
        return self._post(url)

    def backup_restore(self, backup_id, volume_id):
        """Backup restore api."""
        url = self.endpoint + '/v2/%s/cloudbackups/%s/restore' % \
            (self.tenant_id, backup_id)
        data = {
            'restore': {
                'volume_id': volume_id
            }
        }
        return self._post(url, json=data)

    def backup_query_status(self, job_id):
        """Backup query job status api."""
        url = self.endpoint + '/v1/%s/jobs/%s' % \
            (self.tenant_id, job_id)
        return self._get(url)
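# Hypothetical usage sketch (endpoint, token, tenant and volume ids below are
# placeholders, and the 'job_id' response key is an assumption about the API's
# response shape):
#
# client = Client(token='TOKEN', tenant_id='TENANT',
#                 endpoint='https://vbs.example.com')
# job = client.backup_create(volume_id='VOLUME-UUID', name='nightly-backup')
# print(client.backup_query_status(job['job_id']))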
[ 11748, 13269, 198, 11748, 7007, 628, 198, 4871, 20985, 7, 15252, 2599, 198, 220, 220, 220, 37227, 11792, 329, 262, 7824, 13, 198, 220, 220, 220, 1058, 17143, 4731, 6246, 25, 29055, 18439, 6246, 13, 198, 220, 220, 220, 1058, 17143, 473...
2.061738
1,231
""" Created on 08 Okt. 2021 @author: Bas van Stein This example shows how you can use MiP-EGO in order to perform hyper-parameter optimization for machine learning tasks. """ #import packages from sklearn.datasets import load_iris from sklearn.svm import SVC from sklearn.model_selection import cross_val_score, KFold import numpy as np #import our package, the surrogate model and the search space classes from mipego import ParallelBO from mipego.Surrogate import RandomForest from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace # Load the dataset iris = load_iris() X_iris = iris.data y_iris = iris.target # First we need to define the Search Space # the search space consists of one continues variable # one ordinal (integer) variable # and two categorical (nominal) variables. Cvar = ContinuousSpace([1.0, 20.0],'C') # one integer variable with label C degree = OrdinalSpace([2,6], 'degree') gamma = NominalSpace(['scale', 'auto'], 'gamma') kernel = NominalSpace(['linear', 'poly', 'rbf', 'sigmoid'], 'kernel') #the complete search space is just the sum of the parameter spaces search_space = Cvar + gamma + degree + kernel #now we define the objective function (the model optimization) model = RandomForest(levels=search_space.levels) opt = ParallelBO( search_space=search_space, obj_fun=train_model, model=model, max_FEs=6, DoE_size=5, # the initial DoE size eval_type='dict', acquisition_fun='MGFI', acquisition_par={'t' : 2}, n_job=3, # number of processes n_point=3, # number of the candidate solution proposed in each iteration verbose=True # turn this off, if you prefer no output ) xopt, fopt, stop_dict = opt.run() print('xopt: {}'.format(xopt)) print('fopt: {}'.format(fopt)) print('stop criteria: {}'.format(stop_dict))
[ 37811, 198, 41972, 319, 8487, 6762, 83, 13, 33448, 198, 198, 31, 9800, 25, 6455, 5719, 15215, 198, 198, 1212, 1672, 2523, 703, 345, 460, 779, 13756, 47, 12, 7156, 46, 287, 1502, 284, 1620, 8718, 12, 17143, 2357, 23989, 329, 4572, 46...
3.063439
599
# -*- python -*- # This software was produced by NIST, an agency of the U.S. government, # and by statute is not subject to copyright in the United States. # Recipients of this software assume all responsibilities associated # with its operation, modification and maintenance. However, to # facilitate maintenance we ask that before distributing modified # versions of this software, you first contact the authors at # oof_manager@nist.gov. from ooflib.SWIG.common import config from ooflib.common import strfunction from ooflib.common import utils from ooflib.common.IO import parameter from ooflib.common.IO import xmlmenudump import struct import types # String-based function object used by boundary profiles -- user types # in a string, and one of these objects gets built. Profiles' # __call__ methods provide the relevant arguments. xmlmenudump.XMLObjectDoc( 'ProfileFunction', xmlmenudump.loadFile('DISCUSSIONS/engine/object/profilefunction.xml')) # The ProfileFunctionParameter can accept either a string or a # ProfileFunction object, but it only stores a ProfileFunction object. # Like XYStrFunctionParameter, it needs a special "set" function. utils.OOFdefine('ProfileFunctionX', ProfileFunctionX) utils.OOFdefine('ProfileFunctionXT', ProfileFunctionXT)
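# A minimal sketch of the mechanism described above (an illustration -- the
# actual ProfileFunction classes are not included in this snippet): the user's
# string is compiled once, and __call__ evaluates it with whatever arguments
# the profile supplies.
class _StringFunctionSketch(object):
    def __init__(self, funcstr):
        self.funcstr = funcstr
        self._code = compile(funcstr, '<profile>', 'eval')
    def __call__(self, **args):
        # e.g. a boundary profile might pass args = {'x': 0.5, 't': 0.0}
        return eval(self._code, {}, args)

# _StringFunctionSketch('x*x + t')(x=2.0, t=1.0) -> 5.0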
[ 2, 532, 9, 12, 21015, 532, 9, 12, 198, 198, 2, 770, 3788, 373, 4635, 416, 399, 8808, 11, 281, 4086, 286, 262, 471, 13, 50, 13, 1230, 11, 198, 2, 290, 416, 14195, 318, 407, 2426, 284, 6634, 287, 262, 1578, 1829, 13, 198, 2, 3...
3.769006
342
from celery import Task, shared_task from celery.utils.log import get_logger from spaceone.core.error import ERROR_TASK_LOCATOR, ERROR_TASK_METHOD from spaceone.core.locator import Locator from spaceone.core.logger import set_logger from spaceone.core.transaction import Transaction _LOGGER = get_logger(__name__) @shared_task(bind=True) @shared_task(bind=True, base=BaseTask)
[ 6738, 18725, 1924, 1330, 15941, 11, 4888, 62, 35943, 198, 6738, 18725, 1924, 13, 26791, 13, 6404, 1330, 651, 62, 6404, 1362, 198, 198, 6738, 2272, 505, 13, 7295, 13, 18224, 1330, 33854, 62, 51, 1921, 42, 62, 29701, 25633, 11, 33854, ...
2.992248
129
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import datetime
from django.utils.timezone import utc
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 11, 15720, 602, 198, 11748, 4818, 8079, 198, 6738, 42625, ...
3.037736
53
import io import os.path from setuptools import find_packages, setup version = '0.0.1-alpha' def parse_requirements(filename): ''' load requirements from a pip requirements file''' lineiter = (line.strip() for line in open(filename)) return [line for line in lineiter if line and not line.startswith('#')] with io.open('README.md', 'rt', encoding='utf8') as f: readme = f.read() reqs = parse_requirements(os.path.join(os.path.dirname(__file__), 'requirements.txt')) setup( name='strivial', version=version, author='Sean Watson', url='https://github.com/watsosc/strivial', license='Apache', description='Does some strava stuff', long_description=readme, packages=find_packages(), include_package_data=True, zip_safe=False, install_requires=reqs, extras_require={ 'test': [ 'pytest', 'coverage', ] } )
[ 11748, 33245, 198, 11748, 28686, 13, 6978, 198, 6738, 900, 37623, 10141, 1330, 1064, 62, 43789, 11, 9058, 198, 198, 9641, 796, 705, 15, 13, 15, 13, 16, 12, 26591, 6, 198, 198, 4299, 21136, 62, 8897, 18883, 7, 34345, 2599, 198, 220, ...
2.519337
362
from __future__ import print_function import numpy as np import warnings from keras.layers import merge, Input from keras.layers import Dense, Activation, Flatten, Dropout from keras.layers import Convolution3D, MaxPooling3D, ZeroPadding3D, AveragePooling3D, UpSampling3D from keras.layers import BatchNormalization from keras.models import Model import keras.backend as K from keras.utils.layer_utils import convert_all_kernels_in_model from keras.utils.data_utils import get_file
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 14601, 198, 198, 6738, 41927, 292, 13, 75, 6962, 1330, 20121, 11, 23412, 198, 6738, 41927, 292, 13, 75, 6962, 1330, 360, 1072, 11, 131...
3.225166
151
import logging from pprint import pprint, pformat # noqa from flask import request from pantomime.types import PDF, CSV from banal import ensure_list from followthemoney import model from followthemoney.types import registry from followthemoney.helpers import entity_filename from aleph.core import url_for from aleph.logic import resolver from aleph.logic.entities import check_write_entity, transliterate_values from aleph.logic.util import collection_url, entity_url, archive_url from aleph.model import Role, Collection, Document, Entity, Events from aleph.model import Alert, EntitySet, EntitySetItem, Export from aleph.views.util import jsonify, clean_object log = logging.getLogger(__name__)
[ 11748, 18931, 198, 6738, 279, 4798, 1330, 279, 4798, 11, 279, 18982, 220, 1303, 645, 20402, 198, 6738, 42903, 1330, 2581, 198, 6738, 279, 11456, 524, 13, 19199, 1330, 12960, 11, 44189, 198, 6738, 3958, 282, 1330, 4155, 62, 4868, 198, ...
3.544554
202
from django.db.models import Avg, Count, F, Sum, Value, fields
from django.db.models.expressions import ExpressionWrapper
from django.db.models.fields import IntegerField
from django.db.models.functions import Length, Upper
from django.http import Http404, JsonResponse
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import CreateView, ListView, TemplateView
from django.utils import timezone
from django.utils.translation import ugettext
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm

from Outils.models import Equipe, Evaluation, Question, Profile
from ..forms import QuestionForm, FormulaireForm, ProfileForm, EquipeForm
# from stories.filters import CasesFilter

import numpy as np
from rest_framework.views import APIView
from rest_framework.response import Response


# View for creating the expert's Profile

# View for creating the team (Equipe)

# Equipe name

# View for creating the expert's Profile

# View for creating the form (formulaire)
[ 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 7032, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 42712, 507, 1330, 41986, 36918, 2848, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 25747, 1330, 34142, 15878, 198, 6738, 42625, ...
3.476563
384
import unittest

import torch

from rising.transforms import Pad
[ 11748, 555, 715, 395, 198, 198, 11748, 28034, 198, 198, 6738, 7396, 13, 7645, 23914, 1330, 15744, 628 ]
3.666667
18
from grafo import Grafo

vertices = ['J', 'C', 'E', 'P', 'M', 'T', 'Z']
arestas = {'a1': 'J-C', 'a2': 'C-E', 'a3': 'C-E', 'a4': 'C-P', 'a5': 'C-P',
           'a6': 'C-M', 'a7': 'C-T', 'a8': 'M-T', 'a9': 'T-Z'}
paraiba = Grafo(vertices, arestas)

grafo = Grafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'],
              {'1': 'A-B', '2': 'A-G', '3': 'A-J', '4': 'G-K', '5': 'K-J',
               '6': 'J-G', '7': 'J-I', '8': 'I-G', '9': 'G-H', '10': 'H-F',
               '11': 'F-B', '12': 'B-G', '13': 'B-C', '14': 'C-D', '15': 'D-E',
               '16': 'D-B', '17': 'B-E'})

test1 = Grafo(["A", "B", "C"], {'1': "A-B"})

print(paraiba)

# # testing cycle detection
# print(test1.ciclo())
# print(paraiba.ciclo())
# print(grafo.ciclo())
#
# # testing path between two vertices
# print(test1.caminho_entre_dois("A", "C"))
# print(paraiba.caminho_entre_dois("J", "Z"))
# print(grafo.caminho_entre_dois("A", "K"))
#
# # testing connectivity
# print(paraiba.conexo())
# print(grafo.conexo())
# print(test1.conexo())
[ 6738, 7933, 6513, 1330, 7037, 6513, 198, 198, 1851, 1063, 796, 37250, 41, 3256, 705, 34, 3256, 705, 36, 3256, 705, 47, 3256, 705, 44, 3256, 705, 51, 3256, 705, 57, 20520, 198, 12423, 292, 796, 1391, 6, 64, 16, 10354, 705, 41, 12, ...
1.722034
590
"""scrapli_community.scrapli.genericdriver.sync""" import time from scrapli.driver import NetworkDriver def default_sync_on_open(conn: NetworkDriver) -> None: """ scrapli_genericdriver default on_open callable This is tested with a cisco wlc using auth_bypass so we have to send creds during on open Args: conn: NetworkDriver object Returns: N/A # noqa: DAR202 Raises: N/A """ time.sleep(0.25) conn.channel.write(channel_input=conn.transport.auth_username) conn.channel.send_return() time.sleep(0.25) conn.channel.write(channel_input=conn.transport.auth_password) conn.channel.send_return() def default_sync_on_close(conn: NetworkDriver) -> None: """ scrapli_genericdriver default on_close callable Args: conn: NetworkDriver object Returns: N/A # noqa: DAR202 Raises: N/A """ conn.channel.write(channel_input="logout") conn.channel.send_return()
[ 37811, 1416, 430, 489, 72, 62, 28158, 13, 1416, 430, 489, 72, 13, 41357, 26230, 13, 27261, 37811, 198, 11748, 640, 198, 198, 6738, 19320, 489, 72, 13, 26230, 1330, 7311, 32103, 628, 198, 4299, 4277, 62, 27261, 62, 261, 62, 9654, 7, ...
2.494975
398
__all__ = ['instructions']
[ 834, 439, 834, 796, 37250, 259, 7249, 507, 20520 ]
2.888889
9
#!/usr/bin/python
# coding=utf-8
#
# This script reformats old metric files by adding a version number and a date
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 19617, 28, 40477, 12, 23, 198, 2, 198, 2, 770, 14750, 4975, 1381, 1468, 18663, 3696, 416, 4375, 257, 2196, 1271, 290, 257, 3128, 628, 198 ]
3.342857
35
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import os

bench_result = f'{os.path.dirname(__file__)}/bench_result.txt'

def load_df():
    # reconstructed stub: the original definition is missing from this snippet;
    # assumes the benchmark file is delimited text readable by pandas
    return pd.read_csv(bench_result)

if __name__ == '__main__':
    load_df()
[ 11748, 384, 397, 1211, 355, 3013, 82, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 28686, 198, 198, 26968, 62, 20274, 796, 277, 6, 90, 418, 13, 6978, 13, 15908, 3672...
2.493506
77
#! /usr/bin/env python # -*- coding: utf-8 -*- import setuptools with open('README.md') as f: long_description = f.read() setuptools.setup( name='jumper', packages=["jumper"], description="Discontinuous transcript assembly for coronaviruses", long_description=long_description, long_description_content_type='text/markdown', version='0.1.0', url='http://github.com/elkebir-group/jumper', author='Palash Sashittal and Chuanyi Zhang', author_email='sashitt2@illinois.edu', python_requires='>=3.6', scripts=[ 'scripts/jumper', 'scripts/jumper_simulate', ], install_requires=[ "pysam", "pandas", ], )
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 900, 37623, 10141, 198, 198, 4480, 1280, 10786, 15675, 11682, 13, 9132, 11537, 355, 277, 25, 198, 220, 220, ...
2.353741
294
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 # pylint: disable=no-member, too-many-lines """Online evaluation metric module.""" from __future__ import absolute_import import math from collections import OrderedDict import numpy from .base import numeric_types, string_types from . import ndarray from . import registry class EvalMetric(object): """Base class for all evaluation metrics. .. note:: This is a base class that provides common metric interfaces. One should not use this class directly, but instead create new metric classes that extend it. Parameters ---------- name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. """ def get_config(self): """Save configurations of metric. Can be recreated from configs with metric.create(**config) """ config = self._kwargs.copy() config.update({ 'metric': self.__class__.__name__, 'name': self.name, 'output_names': self.output_names, 'label_names': self.label_names}) return config def update_dict(self, label, pred): """Update the internal evaluation with named label and pred Parameters ---------- labels : OrderedDict of str -> NDArray name to array mapping for labels. preds : list of NDArray name to array mapping of predicted outputs. """ if self.output_names is not None: pred = [pred[name] for name in self.output_names] else: pred = list(pred.values()) if self.label_names is not None: label = [label[name] for name in self.label_names] else: label = list(label.values()) self.update(label, pred) def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ raise NotImplementedError() def reset(self): """Resets the internal evaluation result to initial state.""" self.num_inst = 0 self.sum_metric = 0.0 def get(self): """Gets the current evaluation result. Returns ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self.num_inst == 0: return (self.name, float('nan')) else: return (self.name, self.sum_metric / self.num_inst) def get_name_value(self): """Returns zipped name and value pairs. Returns ------- list of tuples A (name, value) tuple list. 
""" name, value = self.get() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value)) # pylint: disable=invalid-name register = registry.get_register_func(EvalMetric, 'metric') alias = registry.get_alias_func(EvalMetric, 'metric') _create = registry.get_create_func(EvalMetric, 'metric') # pylint: enable=invalid-name def create(metric, *args, **kwargs): """Creates evaluation metric from metric names or instances of EvalMetric or a custom metric function. Parameters ---------- metric : str or callable Specifies the metric to create. This argument must be one of the below: - Name of a metric. - An instance of `EvalMetric`. - A list, each element of which is a metric or a metric name. - An evaluation function that computes custom metric for a given batch of labels and predictions. *args : list Additional arguments to metric constructor. Only used when metric is str. **kwargs : dict Additional arguments to metric constructor. Only used when metric is str Examples -------- >>> def custom_metric(label, pred): ... return np.mean(np.abs(label - pred)) ... >>> metric1 = mx.metric.create('acc') >>> metric2 = mx.metric.create(custom_metric) >>> metric3 = mx.metric.create([metric1, metric2, 'rmse']) """ if callable(metric): return CustomMetric(metric, *args, **kwargs) elif isinstance(metric, list): composite_metric = CompositeEvalMetric() for child_metric in metric: composite_metric.add(create(child_metric, *args, **kwargs)) return composite_metric return _create(metric, *args, **kwargs) @register @alias('composite') class CompositeEvalMetric(EvalMetric): """Manages multiple evaluation metrics. Parameters ---------- metrics : list of EvalMetric List of child metrics. name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])] >>> labels = [mx.nd.array([0, 1, 1])] >>> eval_metrics_1 = mx.metric.Accuracy() >>> eval_metrics_2 = mx.metric.F1() >>> eval_metrics = mx.metric.CompositeEvalMetric() >>> for child_metric in [eval_metrics_1, eval_metrics_2]: >>> eval_metrics.add(child_metric) >>> eval_metrics.update(labels = labels, preds = predicts) >>> print eval_metrics.get() (['accuracy', 'f1'], [0.6666666666666666, 0.8]) """ def add(self, metric): """Adds a child metric. Parameters ---------- metric A metric instance. """ self.metrics.append(create(metric)) def get_metric(self, index): """Returns a child metric. Parameters ---------- index : int Index of child metric in the list of metrics. """ try: return self.metrics[index] except IndexError: return ValueError("Metric index {} is out of range 0 and {}".format( index, len(self.metrics))) def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ for metric in self.metrics: metric.update(labels, preds) def reset(self): """Resets the internal evaluation result to initial state.""" try: for metric in self.metrics: metric.reset() except AttributeError: pass def get(self): """Returns the current evaluation result. Returns ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. 
""" names = [] values = [] for metric in self.metrics: name, value = metric.get() if isinstance(name, string_types): name = [name] if isinstance(value, numeric_types): value = [value] names.extend(name) values.extend(value) return (names, values) ######################## # CLASSIFICATION METRICS ######################## @register @alias('acc') class Accuracy(EvalMetric): """Computes accuracy classification score. The accuracy score is defined as .. math:: \\text{accuracy}(y, \\hat{y}) = \\frac{1}{n} \\sum_{i=0}^{n-1} \\text{1}(\\hat{y_i} == y_i) Parameters ---------- axis : int, default=1 The axis that represents classes name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])] >>> labels = [mx.nd.array([0, 1, 1])] >>> acc = mx.metric.Accuracy() >>> acc.update(preds = predicts, labels = labels) >>> print acc.get() ('accuracy', 0.6666666666666666) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data with class indices as values, one per sample. preds : list of `NDArray` Prediction values for samples. Each prediction value can either be the class index, or a vector of likelihoods for all classes. """ check_label_shapes(labels, preds) for label, pred_label in zip(labels, preds): if pred_label.shape != label.shape: pred_label = ndarray.argmax(pred_label, axis=self.axis) pred_label = pred_label.asnumpy().astype('int32') label = label.asnumpy().astype('int32') check_label_shapes(label, pred_label) self.sum_metric += (pred_label.flat == label.flat).sum() self.num_inst += len(pred_label.flat) @register @alias('top_k_accuracy', 'top_k_acc') class TopKAccuracy(EvalMetric): """Computes top k predictions accuracy. `TopKAccuracy` differs from Accuracy in that it considers the prediction to be ``True`` as long as the ground truth label is in the top K predicated labels. If `top_k` = ``1``, then `TopKAccuracy` is identical to `Accuracy`. Parameters ---------- top_k : int Whether targets are in top k predictions. name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> np.random.seed(999) >>> top_k = 3 >>> labels = [mx.nd.array([2, 6, 9, 2, 3, 4, 7, 8, 9, 6])] >>> predicts = [mx.nd.array(np.random.rand(10, 10))] >>> acc = mx.metric.TopKAccuracy(top_k=top_k) >>> acc.update(labels, predicts) >>> print acc.get() ('top_k_accuracy', 0.3) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. 
""" check_label_shapes(labels, preds) for label, pred_label in zip(labels, preds): assert(len(pred_label.shape) <= 2), 'Predictions should be no more than 2 dims' pred_label = numpy.argsort(pred_label.asnumpy().astype('float32'), axis=1) label = label.asnumpy().astype('int32') check_label_shapes(label, pred_label) num_samples = pred_label.shape[0] num_dims = len(pred_label.shape) if num_dims == 1: self.sum_metric += (pred_label.flat == label.flat).sum() elif num_dims == 2: num_classes = pred_label.shape[1] top_k = min(num_classes, self.top_k) for j in range(top_k): self.sum_metric += (pred_label[:, num_classes - 1 - j].flat == label.flat).sum() self.num_inst += num_samples class _BinaryClassificationMetrics(object): """ Private container class for classification metric statistics. True/false positive and true/false negative counts are sufficient statistics for various classification metrics. This class provides the machinery to track those statistics across mini-batches of (label, prediction) pairs. """ def update_binary_stats(self, label, pred): """ Update various binary classification counts for a single (label, pred) pair. Parameters ---------- label : `NDArray` The labels of the data. pred : `NDArray` Predicted values. """ pred = pred.asnumpy() label = label.asnumpy().astype('int32') pred_label = numpy.argmax(pred, axis=1) check_label_shapes(label, pred) if len(numpy.unique(label)) > 2: raise ValueError("%s currently only supports binary classification." % self.__class__.__name__) for y_pred, y_true in zip(pred_label, label): if y_pred == 1 and y_true == 1: self.true_positives += 1. elif y_pred == 1 and y_true == 0: self.false_positives += 1. elif y_pred == 0 and y_true == 1: self.false_negatives += 1. else: self.true_negatives += 1. @property @property @property @property @register class F1(EvalMetric): """Computes the F1 score of a binary classification problem. The F1 score is equivalent to weighted average of the precision and recall, where the best value is 1.0 and the worst value is 0.0. The formula for F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) The formula for precision and recall is:: precision = true_positives / (true_positives + false_positives) recall = true_positives / (true_positives + false_negatives) .. note:: This F1 score only supports binary classification. Parameters ---------- name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. average : str, default 'macro' Strategy to be used for aggregating across mini-batches. "macro": average the F1 scores for each batch. "micro": compute a single F1 score across all batches. Examples -------- >>> predicts = [mx.nd.array([[0.3, 0.7], [0., 1.], [0.4, 0.6]])] >>> labels = [mx.nd.array([0., 1., 1.])] >>> f1 = mx.metric.F1() >>> f1.update(preds = predicts, labels = labels) >>> print f1.get() ('f1', 0.8) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. 
""" check_label_shapes(labels, preds) for label, pred in zip(labels, preds): self.metrics.update_binary_stats(label, pred) if self.average == "macro": self.sum_metric += self.metrics.fscore self.num_inst += 1 self.metrics.reset_stats() else: self.sum_metric = self.metrics.fscore * self.metrics.total_examples self.num_inst = self.metrics.total_examples def reset(self): """Resets the internal evaluation result to initial state.""" self.sum_metric = 0. self.num_inst = 0. self.metrics.reset_stats() @register class Perplexity(EvalMetric): """Computes perplexity. Perplexity is a measurement of how well a probability distribution or model predicts a sample. A low perplexity indicates the model is good at predicting the sample. The perplexity of a model q is defined as .. math:: b^{\\big(-\\frac{1}{N} \\sum_{i=1}^N \\log_b q(x_i) \\big)} = \\exp \\big(-\\frac{1}{N} \\sum_{i=1}^N \\log q(x_i)\\big) where we let `b = e`. :math:`q(x_i)` is the predicted value of its ground truth label on sample :math:`x_i`. For example, we have three samples :math:`x_1, x_2, x_3` and their labels are :math:`[0, 1, 1]`. Suppose our model predicts :math:`q(x_1) = p(y_1 = 0 | x_1) = 0.3` and :math:`q(x_2) = 1.0`, :math:`q(x_3) = 0.6`. The perplexity of model q is :math:`exp\\big(-(\\log 0.3 + \\log 1.0 + \\log 0.6) / 3\\big) = 1.77109762852`. Parameters ---------- ignore_label : int or None Index of invalid label to ignore when counting. By default, sets to -1. If set to `None`, it will include all entries. axis : int (default -1) The axis from prediction that was used to compute softmax. By default use the last axis. name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])] >>> labels = [mx.nd.array([0, 1, 1])] >>> perp = mx.metric.Perplexity(ignore_label=None) >>> perp.update(labels, predicts) >>> print perp.get() ('Perplexity', 1.7710976285155853) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ assert len(labels) == len(preds) loss = 0. num = 0 for label, pred in zip(labels, preds): assert label.size == pred.size/pred.shape[-1], \ "shape mismatch: %s vs. %s"%(label.shape, pred.shape) label = label.as_in_context(pred.context).reshape((label.size,)) pred = ndarray.pick(pred, label.astype(dtype='int32'), axis=self.axis) if self.ignore_label is not None: ignore = (label == self.ignore_label).astype(pred.dtype) num -= ndarray.sum(ignore).asscalar() pred = pred*(1-ignore) + ignore loss -= ndarray.sum(ndarray.log(ndarray.maximum(1e-10, pred))).asscalar() num += pred.size self.sum_metric += loss self.num_inst += num def get(self): """Returns the current evaluation result. Returns ------- Tuple of (str, float) Representing name of the metric and evaluation result. """ return (self.name, math.exp(self.sum_metric/self.num_inst)) #################### # REGRESSION METRICS #################### @register class MAE(EvalMetric): """Computes Mean Absolute Error (MAE) loss. The mean absolute error is given by .. math:: \\frac{\\sum_i^n |y_i - \\hat{y}_i|}{n} Parameters ---------- name : str Name of this metric instance for display. 
output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))] >>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))] >>> mean_absolute_error = mx.metric.MAE() >>> mean_absolute_error.update(labels = labels, preds = predicts) >>> print mean_absolute_error.get() ('mae', 0.5) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ check_label_shapes(labels, preds) for label, pred in zip(labels, preds): label = label.asnumpy() pred = pred.asnumpy() if len(label.shape) == 1: label = label.reshape(label.shape[0], 1) self.sum_metric += numpy.abs(label - pred).mean() self.num_inst += 1 # numpy.prod(label.shape) @register class MSE(EvalMetric): """Computes Mean Squared Error (MSE) loss. The mean squared error is given by .. math:: \\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n} Parameters ---------- name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))] >>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))] >>> mean_squared_error = mx.metric.MSE() >>> mean_squared_error.update(labels = labels, preds = predicts) >>> print mean_squared_error.get() ('mse', 0.375) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ check_label_shapes(labels, preds) for label, pred in zip(labels, preds): label = label.asnumpy() pred = pred.asnumpy() if len(label.shape) == 1: label = label.reshape(label.shape[0], 1) self.sum_metric += ((label - pred)**2.0).mean() self.num_inst += 1 # numpy.prod(label.shape) @register class RMSE(EvalMetric): """Computes Root Mean Squred Error (RMSE) loss. The root mean squared error is given by .. math:: \\sqrt{\\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n}} Parameters ---------- name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))] >>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))] >>> root_mean_squared_error = mx.metric.RMSE() >>> root_mean_squared_error.update(labels = labels, preds = predicts) >>> print root_mean_squared_error.get() ('rmse', 0.612372457981) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. 
""" check_label_shapes(labels, preds) for label, pred in zip(labels, preds): label = label.asnumpy() pred = pred.asnumpy() if len(label.shape) == 1: label = label.reshape(label.shape[0], 1) self.sum_metric += numpy.sqrt(((label - pred)**2.0).mean()) self.num_inst += 1 @register @alias('ce') class CrossEntropy(EvalMetric): """Computes Cross Entropy loss. The cross entropy over a batch of sample size :math:`N` is given by .. math:: -\\sum_{n=1}^{N}\\sum_{k=1}^{K}t_{nk}\\log (y_{nk}), where :math:`t_{nk}=1` if and only if sample :math:`n` belongs to class :math:`k`. :math:`y_{nk}` denotes the probability of sample :math:`n` belonging to class :math:`k`. Parameters ---------- eps : float Cross Entropy loss is undefined for predicted value is 0 or 1, so predicted values are added with the small constant. name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])] >>> labels = [mx.nd.array([0, 1, 1])] >>> ce = mx.metric.CrossEntropy() >>> ce.update(labels, predicts) >>> print ce.get() ('cross-entropy', 0.57159948348999023) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ check_label_shapes(labels, preds) for label, pred in zip(labels, preds): label = label.asnumpy() pred = pred.asnumpy() label = label.ravel() assert label.shape[0] == pred.shape[0] prob = pred[numpy.arange(label.shape[0]), numpy.int64(label)] self.sum_metric += (-numpy.log(prob + self.eps)).sum() self.num_inst += label.shape[0] @register @alias('nll_loss') class NegativeLogLikelihood(EvalMetric): """Computes the negative log-likelihood loss. The negative log-likelihoodd loss over a batch of sample size :math:`N` is given by .. math:: -\\sum_{n=1}^{N}\\sum_{k=1}^{K}t_{nk}\\log (y_{nk}), where :math:`K` is the number of classes, :math:`y_{nk}` is the prediceted probability for :math:`k`-th class for :math:`n`-th sample. :math:`t_{nk}=1` if and only if sample :math:`n` belongs to class :math:`k`. Parameters ---------- eps : float Negative log-likelihood loss is undefined for predicted value is 0, so predicted values are added with the small constant. name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])] >>> labels = [mx.nd.array([0, 1, 1])] >>> nll_loss = mx.metric.NegativeLogLikelihood() >>> nll_loss.update(labels, predicts) >>> print nll_loss.get() ('nll-loss', 0.57159948348999023) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. 
""" check_label_shapes(labels, preds) for label, pred in zip(labels, preds): label = label.asnumpy() pred = pred.asnumpy() label = label.ravel() num_examples = pred.shape[0] assert label.shape[0] == num_examples, (label.shape[0], num_examples) prob = pred[numpy.arange(num_examples, dtype=numpy.int64), numpy.int64(label)] self.sum_metric += (-numpy.log(prob + self.eps)).sum() self.num_inst += num_examples @register @alias('pearsonr') class PearsonCorrelation(EvalMetric): """Computes Pearson correlation. The pearson correlation is given by .. math:: \\frac{cov(y, \\hat{y})}{\\sigma{y}\\sigma{\\hat{y}}} Parameters ---------- name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])] >>> labels = [mx.nd.array([[1, 0], [0, 1], [0, 1]])] >>> pr = mx.metric.PearsonCorrelation() >>> pr.update(labels, predicts) >>> print pr.get() ('pearson-correlation', 0.42163704544016178) """ def update(self, labels, preds): """Updates the internal evaluation result. Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ check_label_shapes(labels, preds) for label, pred in zip(labels, preds): check_label_shapes(label, pred, 1) label = label.asnumpy() pred = pred.asnumpy() self.sum_metric += numpy.corrcoef(pred.ravel(), label.ravel())[0, 1] self.num_inst += 1 @register class Loss(EvalMetric): """Dummy metric for directly printing loss. Parameters ---------- name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. """ @register class Torch(Loss): """Dummy metric for torch criterions.""" @register class Caffe(Loss): """Dummy metric for caffe criterions.""" @register class CustomMetric(EvalMetric): """Computes a customized evaluation metric. The `feval` function can return a `tuple` of (sum_metric, num_inst) or return an `int` sum_metric. Parameters ---------- feval : callable(label, pred) Customized evaluation function. name : str, optional The name of the metric. (the default is None). allow_extra_outputs : bool, optional If true, the prediction outputs can have extra outputs. This is useful in RNN, where the states are also produced in outputs for forwarding. (the default is False). name : str Name of this metric instance for display. output_names : list of str, or None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None Name of labels that should be used when updating with update_dict. By default include all labels. Examples -------- >>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))] >>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))] >>> feval = lambda x, y : (x + y).mean() >>> eval_metrics = mx.metric.CustomMetric(feval=feval) >>> eval_metrics.update(labels, predicts) >>> print eval_metrics.get() ('custom(<lambda>)', 6.0) """ def update(self, labels, preds): """Updates the internal evaluation result. 
Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. """ if not self._allow_extra_outputs: check_label_shapes(labels, preds) for pred, label in zip(preds, labels): label = label.asnumpy() pred = pred.asnumpy() reval = self._feval(label, pred) if isinstance(reval, tuple): (sum_metric, num_inst) = reval self.sum_metric += sum_metric self.num_inst += num_inst else: self.sum_metric += reval self.num_inst += 1 # pylint: disable=invalid-name def np(numpy_feval, name=None, allow_extra_outputs=False): """Creates a custom evaluation metric that receives its inputs as numpy arrays. Parameters ---------- numpy_feval : callable(label, pred) Custom evaluation function that receives labels and predictions for a minibatch as numpy arrays and returns the corresponding custom metric as a floating point number. name : str, optional Name of the custom metric. allow_extra_outputs : bool, optional Whether prediction output is allowed to have extra outputs. This is useful in cases like RNN where states are also part of output which can then be fed back to the RNN in the next step. By default, extra outputs are not allowed. Returns ------- float Custom metric corresponding to the provided labels and predictions. Example ------- >>> def custom_metric(label, pred): ... return np.mean(np.abs(label-pred)) ... >>> metric = mx.metric.np(custom_metric) """ def feval(label, pred): """Internal eval function.""" return numpy_feval(label, pred) feval.__name__ = numpy_feval.__name__ return CustomMetric(feval, name, allow_extra_outputs) # pylint: enable=invalid-name
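# Illustrative usage (mirroring the docstring examples above; kept as comments
# since `mx` is only available once the full mxnet package is installed):
#
# import mxnet as mx
# import numpy as np
# mae = mx.metric.np(lambda label, pred: np.mean(np.abs(label - pred)))
# mae.update(labels=[mx.nd.array([1., 2., 3.])], preds=[mx.nd.array([1., 2., 4.])])
# print(mae.get())  # ('custom(<lambda>)', 0.333...)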
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
2.405862
14,569
from gmpy2 import invert
import libnum

e = 65537
n = 88503001447845031603457048661635807319447136634748350130947825183012205093541
c = 40876621398366534035989065383910105526025410999058860023908252093679681817257

# from yafu
p = 274539690398523616505159415195049044439
q = 322368694010594584041053487661458382819
assert p*q == n

phi = (p-1)*(q-1)
d = invert(e, phi)
m = pow(c, d, n)
flag = libnum.n2s(int(m))
print(flag)
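# Why this works (standard RSA, noted for clarity): phi = (p-1)*(q-1) is
# Euler's totient of n, d = e^{-1} mod phi is the private exponent, and
# pow(c, d, n) inverts m^e mod n; libnum.n2s then turns the recovered
# integer back into the flag's bytes.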
[ 6738, 308, 3149, 88, 17, 1330, 287, 1851, 198, 11748, 9195, 22510, 198, 198, 68, 796, 45021, 2718, 198, 77, 796, 9193, 1120, 6200, 1415, 2857, 5705, 31938, 1433, 3070, 2231, 2154, 2780, 2791, 1433, 2327, 1795, 4790, 1129, 34825, 1485, ...
2.12
200
from collections import defaultdict

from brownie import chain

from yearn.networks import Network

INCIDENTS = defaultdict(list)
INCIDENTS.update({
    Network.Mainnet: {
        # yUSDC getPricePerFullShare reverts from block 10532764 to block 10532775 because all liquidity was removed for testing
        "0x597aD1e0c13Bfe8025993D9e79C69E1c0233522e": [{"start":10532764,"end":10532775,"result":1}],
        "0x629c759D1E83eFbF63d84eb3868B564d9521C129": [{"start":11221202,"end":11238201,"result":1.037773031500707}],
        "0xcC7E70A958917cCe67B4B87a8C30E6297451aE98": [{"start":11512085,"end":11519723,"result":1.0086984562068226}],
        # GUSD vault state was broken due to an incident
        # https://github.com/yearn/yearn-security/blob/master/disclosures/2021-01-17.md
        "0xec0d8D3ED5477106c6D4ea27D90a60e594693C90": [{"start":11603873,"end":11645877,"result":0}],
        "0x5533ed0a3b83F70c3c4a1f69Ef5546D3D4713E44": [{"start":11865718,"end":11884721,"result":1.0345005219440915}],
        # yvcrvAAVE vault state was broken due to an incident
        # https://github.com/yearn/yearn-security/blob/master/disclosures/2021-05-13.md
        "0x03403154afc09Ce8e44C3B185C82C6aD5f86b9ab": [{"start":12430455,"end":12430661,"result":1.091553}],
        # yvust3CRV v1
        "0xF6C9E9AF314982A4b38366f4AbfAa00595C5A6fC": [
            {"start":11833643,"end":11833971,"result":1.0094921430595167},
            {"start":11893317,"end":12020551,"result":1.0107300938482453},
            {"start":12028696,"end":12194529,"result":1.0125968580471483}
        ],
        # for these, price cannot be fetched from chain because totalSupply == 0
        # on block of last withdrawal we return price at block - 1
        # after that block, returns 0
        # yvhusd3CRV v1
        "0x39546945695DCb1c037C836925B355262f551f55": [
            {"start":12074825,"end":12074825,"result":1.0110339337578227},
            {"start":12074826,"end":chain.height,"result":0},
        ],
        # yvobtccrv v1
        "0x7F83935EcFe4729c4Ea592Ab2bC1A32588409797": [
            {"start":12582511,"end":12582511,"result":37611.70819906929},
            {"start":12582512,"end":chain.height,"result":0},
        ],
        # yvpbtccrv v1
        "0x123964EbE096A920dae00Fb795FFBfA0c9Ff4675": [
            {"start":12868929,"end":12868929,"result":1456401056701488300032},
            {"start":12868930,"end":chain.height,"result":0},
        ],
    },
}.get(chain.id, {}))
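# Hypothetical helper (an addition, not part of the original module) showing
# how this table is meant to be consulted: given a vault address and block,
# return the pinned price if the block falls inside a known incident window,
# else None.
def incident_price(address, block):
    for window in INCIDENTS.get(address, []):
        if window["start"] <= block <= window["end"]:
            return window["result"]
    return None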
[ 6738, 17268, 1330, 4277, 11600, 198, 198, 6738, 7586, 494, 1330, 6333, 198, 198, 6738, 614, 77, 13, 3262, 5225, 1330, 7311, 198, 198, 30158, 2389, 15365, 796, 4277, 11600, 7, 4868, 8, 198, 198, 30158, 2389, 15365, 13, 19119, 15090, 19...
1.985063
1,272
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: google/api/system_parameter.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/api/system_parameter.proto',
  package='google.api',
  syntax='proto3',
  serialized_options=_b('\n\016com.google.apiB\024SystemParameterProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI'),
  serialized_pb=_b('\n!google/api/system_parameter.proto\x12\ngoogle.api\"B\n\x10SystemParameters\x12.\n\x05rules\x18\x01 \x03(\x0b\x32\x1f.google.api.SystemParameterRule\"X\n\x13SystemParameterRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12/\n\nparameters\x18\x02 \x03(\x0b\x32\x1b.google.api.SystemParameter\"Q\n\x0fSystemParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bhttp_header\x18\x02 \x01(\t\x12\x1b\n\x13url_query_parameter\x18\x03 \x01(\tBv\n\x0e\x63om.google.apiB\x14SystemParameterProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3')
)


_SYSTEMPARAMETERS = _descriptor.Descriptor(
  name='SystemParameters',
  full_name='google.api.SystemParameters',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='rules', full_name='google.api.SystemParameters.rules', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=49,
  serialized_end=115,
)


_SYSTEMPARAMETERRULE = _descriptor.Descriptor(
  name='SystemParameterRule',
  full_name='google.api.SystemParameterRule',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='selector', full_name='google.api.SystemParameterRule.selector', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='parameters', full_name='google.api.SystemParameterRule.parameters', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=117,
  serialized_end=205,
)


_SYSTEMPARAMETER = _descriptor.Descriptor(
  name='SystemParameter',
  full_name='google.api.SystemParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.api.SystemParameter.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='http_header', full_name='google.api.SystemParameter.http_header', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='url_query_parameter', full_name='google.api.SystemParameter.url_query_parameter', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=207,
  serialized_end=288,
)

_SYSTEMPARAMETERS.fields_by_name['rules'].message_type = _SYSTEMPARAMETERRULE
_SYSTEMPARAMETERRULE.fields_by_name['parameters'].message_type = _SYSTEMPARAMETER
DESCRIPTOR.message_types_by_name['SystemParameters'] = _SYSTEMPARAMETERS
DESCRIPTOR.message_types_by_name['SystemParameterRule'] = _SYSTEMPARAMETERRULE
DESCRIPTOR.message_types_by_name['SystemParameter'] = _SYSTEMPARAMETER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

SystemParameters = _reflection.GeneratedProtocolMessageType('SystemParameters', (_message.Message,), {
  'DESCRIPTOR' : _SYSTEMPARAMETERS,
  '__module__' : 'google.api.system_parameter_pb2'
  # @@protoc_insertion_point(class_scope:google.api.SystemParameters)
  })
_sym_db.RegisterMessage(SystemParameters)

SystemParameterRule = _reflection.GeneratedProtocolMessageType('SystemParameterRule', (_message.Message,), {
  'DESCRIPTOR' : _SYSTEMPARAMETERRULE,
  '__module__' : 'google.api.system_parameter_pb2'
  # @@protoc_insertion_point(class_scope:google.api.SystemParameterRule)
  })
_sym_db.RegisterMessage(SystemParameterRule)

SystemParameter = _reflection.GeneratedProtocolMessageType('SystemParameter', (_message.Message,), {
  'DESCRIPTOR' : _SYSTEMPARAMETER,
  '__module__' : 'google.api.system_parameter_pb2'
  # @@protoc_insertion_point(class_scope:google.api.SystemParameter)
  })
_sym_db.RegisterMessage(SystemParameter)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 262, 8435, 11876, 17050, 13, 220, 8410, 5626, 48483, 0, 198, 2, 2723, 25, 23645, 14, 15042, 14, 10057, 62, 17143, 2357, 13, 1676, 1462, 198, 198, ...
2.566386
2,493
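A minimal usage sketch for the generated module above. It assumes the module is importable as google.api.system_parameter_pb2 (the `__module__` name baked into the generated classes); the repeated-field add() calls and SerializeToString/FromString are the standard protobuf Python API, and the parameter values themselves are made up for illustration.

from google.api import system_parameter_pb2

params = system_parameter_pb2.SystemParameters()
rule = params.rules.add()            # repeated SystemParameterRule field
rule.selector = "*"                  # hypothetical: apply to all methods
param = rule.parameters.add()        # repeated SystemParameter field
param.name = "api_key"               # hypothetical parameter name
param.http_header = "X-Api-Key"
param.url_query_parameter = "key"

wire_bytes = params.SerializeToString()
restored = system_parameter_pb2.SystemParameters.FromString(wire_bytes)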
import ast
import re

import pymel.core as pm
import maya.cmds as cmds
import maya.OpenMaya as api

import utils


__all__ = [
    'decodeMetaData',
    'decodeMetaDataValue',
    'encodeMetaData',
    'encodeMetaDataValue',
    'findMetaNodes',
    'getMetaClasses',
    'getMetaData',
    'hasMetaClass',
    'isMetaNode',
    'removeMetaData',
    'setAllMetaData',
    'setMetaData',
    'updateMetaData',
]

METACLASS_ATTR_PREFIX = 'pyMetaClass_'
METADATA_ATTR = 'pyMetaData'

VALID_CLASSATTR = re.compile(r'^[_a-z0-9]*$', re.IGNORECASE)


def _getMetaDataPlug(mfnnode):
    """
    Return the MPlug for the meta data attribute on a node

    Args:
        mfnnode: A MFnDependencyNode referencing the target node.
    """
    try:
        return mfnnode.findPlug(METADATA_ATTR)
    except RuntimeError:
        pass


def _getMetaClassPlug(mfnnode, className):
    """
    Return the MPlug for a meta class attribute on a node

    Args:
        mfnnode: A MFnDependencyNode referencing the target node.
        className: A string name of the meta class type.
    """
    attrName = METACLASS_ATTR_PREFIX + className
    try:
        return mfnnode.findPlug(attrName)
    except RuntimeError:
        pass


def _getOrCreateMetaDataPlug(mfnnode, undoable=True):
    """
    Return the MPlug for the meta data attribute on a node,
    adding the attribute if it does not already exist.

    Args:
        mfnnode (MFnDependencyNode): The MFnDependencyNode of a node
        undoable (bool): When True, the operation will be undoable
    """
    try:
        plug = mfnnode.findPlug(METADATA_ATTR)
    except:
        if undoable:
            cmds.addAttr(mfnnode.name(), ln=METADATA_ATTR, dt='string')
        else:
            mfnattr = api.MFnTypedAttribute()
            attr = mfnattr.create(
                METADATA_ATTR, METADATA_ATTR, api.MFnData.kString)
            mfnnode.addAttribute(attr)
        plug = mfnnode.findPlug(METADATA_ATTR)
    return plug


def _addMetaClassAttr(mfnnode, className, undoable=True):
    """
    Add a meta class attribute to a node.
    Does nothing if the attribute already exists.

    Args:
        mfnnode (MFnDependencyNode): The MFnDependencyNode of a node
        className (str): The meta data class name
        undoable (bool): When True, the operation will be undoable
    """
    if not VALID_CLASSATTR.match(className):
        raise ValueError('Invalid meta class name: ' + className)
    classAttr = METACLASS_ATTR_PREFIX + className
    try:
        mfnnode.attribute(classAttr)
    except RuntimeError:
        if undoable:
            cmds.addAttr(mfnnode.name(), ln=classAttr, at='short')
        else:
            mfnattr = api.MFnNumericAttribute()
            attr = mfnattr.create(
                classAttr, classAttr, api.MFnNumericData.kShort)
            mfnnode.addAttribute(attr)


def _removeMetaClassAttr(mfnnode, className, undoable=True):
    """
    Remove a meta class attribute from a node.
    Does nothing if the attribute does not exist.

    Args:
        mfnnode (MFnDependencyNode): The api MFnDependencyNode of a node
        undoable (bool): When True, the operation will be undoable

    Returns:
        True if the attr was removed or didn't exist,
        False if it couldn't be removed.
    """
    classPlug = _getMetaClassPlug(mfnnode, className)
    if not classPlug:
        return True

    if classPlug.isLocked():
        return False
    else:
        if undoable:
            cmds.deleteAttr(classPlug.name())
        else:
            mfnnode.removeAttribute(classPlug.attribute())
        return True


def encodeMetaData(data):
    """
    Return the given meta data encoded into a string

    Args:
        data: A python dictionary-like object representing
            the data to serialize.
    """
    return repr(encodeMetaDataValue(data))


def encodeMetaDataValue(value):
    """
    Encode and return a meta data value. Handles special
    data types like Maya nodes.

    Args:
        value: Any python value to be encoded
    """
    if isinstance(value, dict):
        result = {}
        for k, v in value.iteritems():
            result[k] = encodeMetaDataValue(v)
        return result
    elif isinstance(value, (list, tuple)):
        return value.__class__([encodeMetaDataValue(v) for v in value])
    elif isinstance(value, pm.nt.DependNode):
        return utils.getUUID(value)
    else:
        return value


def decodeMetaData(data, refNode=None):
    """
    Parse the given meta data and return it as a valid python object.

    Args:
        data: A string representing encoded meta data.
        refNode: An optional reference node used to resolve node UUIDs.
    """
    if not data:
        return {}
    # convert from string to python object
    try:
        data = ast.literal_eval(data.replace('\r', ''))
    except Exception as e:
        raise ValueError("Failed to decode meta data: {0}".format(e))
    return decodeMetaDataValue(data, refNode)


def decodeMetaDataValue(value, refNode):
    """
    Parse string formatted meta data and return the
    resulting python object.

    Args:
        value: An encoded meta data value to decode
        refNode: An optional reference node used to resolve node UUIDs.
    """
    if isinstance(value, dict):
        result = {}
        for k, v in value.iteritems():
            result[k] = decodeMetaDataValue(v, refNode)
        return result
    elif isinstance(value, (list, tuple)):
        return value.__class__([decodeMetaDataValue(v, refNode) for v in value])
    elif utils.isUUID(value):
        return utils.findNodeByUUID(value, refNode)
    else:
        return value


def isMetaNode(node):
    """
    Return True if the given node has any meta data

    Args:
        node: A PyMel node or string node name
    """
    return utils.hasAttr(node, METADATA_ATTR)


def hasMetaClass(node, className):
    """
    Return True if the given node has data for the given meta class type

    Args:
        node: A PyMel node or string node name
        className: A string name of the meta class type.
            If given, the node must be of this class type.
    """
    return utils.hasAttr(node, METACLASS_ATTR_PREFIX + className)


def findMetaNodes(className=None, asPyNodes=True):
    """
    Return a list of all meta nodes of the given class type. If no class
    is given, all nodes with meta data are returned.

    Args:
        className: A string name of the meta class type.
        asPyNodes: A bool, when True, returns a list of PyNodes,
            otherwise returns a list of MObjects
    """
    if className is not None:
        plugName = METACLASS_ATTR_PREFIX + className
    else:
        plugName = METADATA_ATTR
    objs = utils.getMObjectsByPlug(plugName)
    if asPyNodes:
        return [pm.PyNode(o) for o in objs]
    else:
        return objs


def setMetaData(node, className, data, undoable=True, replace=False):
    """
    Set the meta data for a meta class type on a node.
    The className must be a valid attribute name.

    Args:
        node (PyNode or str): The node on which to set data
        className (str): The data's meta class type name
        data (dict): The data to serialize and store on the node
        undoable (bool): When True, the operation will be undoable
        replace (bool): When True, will replace all data on the node
            with the new meta data. This uses setAllMetaData and can
            be much faster with large data sets.
    """
    if replace:
        setAllMetaData(node, {className: data}, undoable)
        return

    mfnnode = utils.getMFnDependencyNode(node)
    plug = _getOrCreateMetaDataPlug(mfnnode, undoable)
    _addMetaClassAttr(mfnnode, className, undoable)

    # update meta data
    refNode = None
    if cmds.referenceQuery(str(node), isNodeReferenced=True):
        refNode = cmds.referenceQuery(str(node), rfn=True)
    fullData = decodeMetaData(plug.asString(), refNode)
    fullData[className] = data
    newValue = encodeMetaData(fullData)

    if undoable:
        cmds.setAttr(plug.name(), newValue, type='string')
    else:
        plug.setString(newValue)


def setAllMetaData(node, data, undoable=True):
    """
    Set all meta data on a node. This is faster because the existing data
    on the node is not retrieved first and then modified.

    The data must be indexed by strings that are valid meta class names,
    otherwise errors may occur when retrieving it later.

    New meta class attributes will be added automatically, but existing
    meta class attributes will not be removed. If old meta class attributes
    on this node will no longer be applicable, they should be removed with
    removeAllData first.

    Args:
        node (PyNode or str): The node on which to set data
        data (dict): The data to serialize and store on the node
        undoable (bool): When True, the operation will be undoable
    """
    mfnnode = utils.getMFnDependencyNode(node)
    plug = _getOrCreateMetaDataPlug(mfnnode, undoable)

    # add class attributes
    if data:
        for className in data.keys():
            _addMetaClassAttr(mfnnode, className, undoable)

    # set meta data
    newValue = encodeMetaData(data)
    if undoable:
        cmds.setAttr(plug.name(), newValue, type='string')
    else:
        plug.setString(newValue)


def getMetaData(node, className=None):
    """
    Return meta data from a node. If `className` is given,
    return only meta data for that meta class type.

    Args:
        node: A PyMel node or string node name
        className: A string name of the meta class type.

    Returns:
        A dict or python object representing the stored meta data
    """
    mfnnode = utils.getMFnDependencyNode(node)
    try:
        plug = mfnnode.findPlug(METADATA_ATTR)
        datastr = plug.asString()
    except RuntimeError:
        return
    else:
        refNode = None
        if cmds.referenceQuery(str(node), isNodeReferenced=True):
            refNode = cmds.referenceQuery(str(node), rfn=True)
        data = decodeMetaData(datastr, refNode)
        if className is not None:
            return data.get(className, None)
        else:
            return data


def updateMetaData(node, className, data):
    """
    Update existing meta data on a node for a meta class type.
    Only supports dict-type meta data.

    Args:
        node: A PyMel node or string node name
        className: A string name of the meta class type
        data: A dict object containing meta data to add to the node
    """
    fullData = getMetaData(node, className)
    if not isinstance(fullData, dict):
        raise ValueError(
            "meta data for node '{0}' is not "
            "a dict and cannot be updated".format(node))
    fullData.update(data)
    setMetaData(node, className, fullData)


def removeMetaData(node, className=None, undoable=True):
    """
    Remove meta data from a node. If no `className` is given
    then all meta data is removed.

    Args:
        node: A PyMel node or string node name
        className: A string name of the meta class type.
        undoable: A bool, when True the change will be undoable

    Returns:
        True if node is fully clean of relevant meta data.
    """
    if not isMetaNode(node):
        return True

    mfnnode = utils.getMFnDependencyNode(node)

    # this may become true if we find there are no
    # classes left after removing one
    removeAllData = False

    if className is not None:
        # remove meta data for the given class only

        # make sure data attribute is unlocked
        dataPlug = _getMetaDataPlug(mfnnode)
        if dataPlug and dataPlug.isLocked():
            return False

        # attempt to remove class attribute
        if not _removeMetaClassAttr(mfnnode, className, undoable):
            return False

        # remove class-specific data from all meta data
        # TODO(bsayre): add a partialDecodeMetaData for uses like this
        #   since we will only be modifying the core dict object and not
        #   using any meta data values (like nodes)
        data = decodeMetaData(dataPlug.asString())
        if className in data:
            del data[className]
            newValue = encodeMetaData(data)
            if undoable:
                cmds.setAttr(dataPlug.name(), newValue, type='string')
            else:
                dataPlug.setString(newValue)

        # check if any classes left
        if len(data) == 0:
            removeAllData = True

    else:
        # no className was given
        removeAllData = True

    if removeAllData:
        # remove all meta data from the node

        # make sure data attribute is unlocked
        dataPlug = _getMetaDataPlug(mfnnode)
        if dataPlug and dataPlug.isLocked():
            return False

        # make sure all class attributes are unlocked
        classPlugs = [_getMetaClassPlug(mfnnode, c)
                      for c in getMetaClasses(node)]
        for cp in classPlugs:
            if cp and cp.isLocked():
                return False

        # remove class attributes
        for classPlug in classPlugs:
            if classPlug:
                if undoable:
                    cmds.deleteAttr(classPlug.name())
                else:
                    mfnnode.removeAttribute(classPlug.attribute())

        # remove data attribute
        if dataPlug:
            if undoable:
                cmds.deleteAttr(dataPlug.name())
            else:
                mfnnode.removeAttribute(dataPlug.attribute())

    return True


def getMetaClasses(node):
    """
    Return the names of the meta class types that the given node
    has meta data for.

    Args:
        node: A PyMel node or string node name
    """
    attrs = cmds.listAttr(str(node))
    metaClassAttrs = [a for a in attrs if a.startswith(METACLASS_ATTR_PREFIX)]
    classes = [a[len(METACLASS_ATTR_PREFIX):] for a in metaClassAttrs]
    return classes
[ 198, 11748, 6468, 198, 11748, 302, 198, 198, 11748, 279, 4948, 417, 13, 7295, 355, 9114, 198, 11748, 743, 64, 13, 28758, 82, 355, 23991, 82, 198, 11748, 743, 64, 13, 11505, 6747, 64, 355, 40391, 198, 198, 11748, 3384, 4487, 628, 198...
2.446325
5,673
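A usage sketch for the meta data module above, run inside a Maya session. The import name `meta` and the class name 'myRigData' are placeholders (the module's actual package path is not part of this record); the calls themselves mirror the public API defined above.

import pymel.core as pm
import meta  # the module above; its import name here is a guess

node = pm.polyCube()[0]  # any DG node works; a fresh transform for the demo
# store a dict under a meta class name; node values are encoded as UUIDs
meta.setMetaData(node, 'myRigData', {'version': 1, 'root': node})
print(meta.hasMetaClass(node, 'myRigData'))    # True
print(meta.getMetaData(node, 'myRigData'))     # {'version': 1, 'root': <node>}
meta.updateMetaData(node, 'myRigData', {'version': 2})
print(meta.findMetaNodes('myRigData'))         # [the tagged node]
meta.removeMetaData(node, 'myRigData')         # True once fully cleaned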
import argparse
import os

from model import build_face_encoder
from recognizer import FaceRecognizer

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("--weights", dest="weights", required=True,
                        help="path to pre-trained vgg-face weights file")
    parser.add_argument("--dbdir", dest="dbdir", default="faces",
                        help="path to directory for storing encoded faces")
    parser.add_argument("--distance", dest="distance", required=True,
                        help="similarity threshold for determining matched faces")
    args = parser.parse_args()

    db_directory = args.dbdir
    weights_file_path = args.weights
    distance = float(args.distance)

    if not os.path.exists(db_directory):
        os.mkdir(db_directory)

    haars_file = "./haarscascade.xml"

    face_encoder = build_face_encoder()
    face_encoder.load_weights(weights_file_path)

    face_recognizer = FaceRecognizer(encoder=face_encoder,
                                     db_dir=db_directory,
                                     distance=distance,
                                     haars_file=haars_file)
    face_recognizer.run()
[ 11748, 1822, 29572, 198, 11748, 28686, 198, 198, 6738, 2746, 1330, 1382, 62, 2550, 62, 12685, 12342, 198, 6738, 3018, 7509, 1330, 15399, 6690, 2360, 7509, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220,...
2.991329
346
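A sketch of driving the same pipeline without argparse, using only names defined in the script above; the weights path and threshold value are placeholders.

import os
from model import build_face_encoder
from recognizer import FaceRecognizer

encoder = build_face_encoder()
encoder.load_weights("vgg_face_weights.h5")   # placeholder weights path

if not os.path.exists("faces"):               # same db layout the script uses
    os.mkdir("faces")

recognizer = FaceRecognizer(encoder=encoder, db_dir="faces",
                            distance=0.5,     # placeholder threshold
                            haars_file="./haarscascade.xml")
recognizer.run()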
from datetime import datetime

from custom.ilsgateway.utils import supply_points_with_latest_status_by_datespan, get_current_group
from dimagi.utils.dates import DateSpan
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 2183, 13, 4487, 10494, 1014, 13, 26791, 1330, 5127, 62, 13033, 62, 4480, 62, 42861, 62, 13376, 62, 1525, 62, 19581, 6839, 11, 651, 62, 14421, 62, 8094, 198, 6738, 5391, 18013, 13, ...
3.392157
51
from datetime import datetime

from grpc_adenine.database import (connection as db)

"""
The ServicesList table is mapped to the elastos_console database. It maps each
User to its API key.
"""
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 1036, 14751, 62, 40780, 500, 13, 48806, 1330, 357, 38659, 355, 20613, 8, 198, 198, 37811, 198, 31007, 8053, 3084, 318, 27661, 284, 262, 1288, 459, 418, 62, 41947, 6831, 13, 632, 8739...
3.673077
52
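The docstring above only describes the mapping, not the schema. A sketch of what such a table could look like with SQLAlchemy; the table name and every column beyond the user/API-key pairing are assumptions, not the project's actual model.

from datetime import datetime

from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class ServicesList(Base):
    __tablename__ = 'services_list'   # hypothetical table name
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer)         # the User being mapped
    api_key = Column(String)          # that user's API key
    created_on = Column(DateTime, default=datetime.utcnow)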
from sys import stdin
from enum import Enum

# the two lines below are for the final code
# setarr(arr)
# arr = sortlines(arr, order)

main()
[ 6738, 25064, 1330, 14367, 259, 198, 6738, 33829, 1330, 2039, 388, 198, 198, 2, 1169, 734, 3951, 2174, 389, 329, 262, 2457, 2438, 198, 198, 2, 2617, 3258, 7, 3258, 8, 198, 2, 3258, 796, 3297, 6615, 7, 3258, 11, 1502, 8, 198, 220, ...
2.305556
72
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

"""
Pytorch map style dataset classes for proteins (seq, struct, seq+struct).

Modified from https://github.com/drorlab/gvp-pytorch/blob/main/gvp/data.py
"""
import math

import tqdm
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import torch_geometric
import torch_cluster

from lmgvp.utils import prep_seq


def _normalize(tensor, dim=-1):
    """
    Normalizes a `torch.Tensor` along dimension `dim` without `nan`s.

    Args:
        tensor: Torch tensor to be normalized.
        dim: Integer. Dimension to normalize across.

    Returns:
        Normalized tensor with zeros instead of nan's or infinity values.
    """
    return torch.nan_to_num(
        torch.div(tensor, torch.norm(tensor, dim=dim, keepdim=True))
    )


def _rbf(D, D_min=0.0, D_max=20.0, D_count=16, device="cpu"):
    """
    From https://github.com/jingraham/neurips19-graph-protein-design

    Returns an RBF embedding of `torch.Tensor` `D` along a new axis=-1.
    That is, if `D` has shape [...dims], then the returned tensor will have
    shape [...dims, D_count].

    Args:
        D: generic torch tensor
        D_min: Float. Minimum of the sequence of numbers created.
        D_max: Float. Max of the sequence of numbers created.
        D_count: Positive integer. Count of the numbers in the sequence.
            It is also the length of the new dimension (-1) created in D.
        device: Device where D is stored.

    Return:
        Input `D` matrix with an RBF embedding along axis -1.
    """
    D_mu = torch.linspace(D_min, D_max, D_count, device=device)
    D_mu = D_mu.view([1, -1])
    D_sigma = (D_max - D_min) / D_count
    D_expand = torch.unsqueeze(D, -1)

    RBF = torch.exp(-(((D_expand - D_mu) / D_sigma) ** 2))
    return RBF


class BaseProteinGraphDataset(data.Dataset):
    """Dataset for the Base Protein Graph."""

    def __init__(
        self,
        data_list,
        num_positional_embeddings=16,
        top_k=30,
        num_rbf=16,
        device="cpu",
        preprocess=True,
    ):
        """
        Initializes the dataset

        Args:
            data_list: List containing the initial dataset
            num_positional_embeddings: Integer specifying the number of
                positional embeddings.
            top_k: Integer k to use in kNN when constructing the graph
            num_rbf: Integer specifying number of radial basis functions
            device: Device to allocate the tensors.
            preprocess: Whether to preprocess the data_list.

        Returns:
            None
        """
        super(BaseProteinGraphDataset, self).__init__()
        self.data_list = data_list
        self.top_k = top_k
        self.num_rbf = num_rbf
        self.num_positional_embeddings = num_positional_embeddings
        self.device = device
        if preprocess:
            print("Preprocessing data...")
            self._preprocess()

    def _preprocess(self):
        """Preprocess all the records in `data_list` with `_featurize_as_graph`"""
        for i in tqdm.tqdm(range(len(self.data_list))):
            self.data_list[i] = self._featurize_as_graph(self.data_list[i])

    def _featurize_as_graph(self, protein):
        """Placeholder for the _featurize_as_graph method implemented in
        child classes.

        Args:
            protein: a dict representing a data object

        Returns:
            None
        """
        raise NotImplementedError

    def _dihedrals(self, X, eps=1e-7):
        """Compute sines and cosines of the dihedral angles (phi, psi,
        and omega)

        Args:
            X: torch.Tensor specifying coordinates of key atoms (N, CA, C, O)
                in 3D space with shape [seq_len, 4, 3]
            eps: Float defining the epsilon used to clamp the angle between
                normals: min=-1+eps, max=1-eps

        Returns:
            Sines and cosines of the dihedral angles as a torch.Tensor of
            shape [seq_len, 6]
        """
        # From https://github.com/jingraham/neurips19-graph-protein-design
        X = torch.reshape(X[:, :3], [3 * X.shape[0], 3])
        dX = X[1:] - X[:-1]
        U = _normalize(dX, dim=-1)
        u_2 = U[:-2]
        u_1 = U[1:-1]
        u_0 = U[2:]

        # Backbone normals
        n_2 = _normalize(torch.cross(u_2, u_1), dim=-1)
        n_1 = _normalize(torch.cross(u_1, u_0), dim=-1)

        # Angle between normals
        cosD = torch.sum(n_2 * n_1, -1)
        cosD = torch.clamp(cosD, -1 + eps, 1 - eps)
        D = torch.sign(torch.sum(u_2 * n_1, -1)) * torch.acos(cosD)

        # This scheme will remove phi[0], psi[-1], omega[-1]
        D = F.pad(D, [1, 2])
        D = torch.reshape(D, [-1, 3])
        # Lift angle representations to the circle
        D_features = torch.cat([torch.cos(D), torch.sin(D)], 1)
        return D_features

    def _positional_embeddings(self, edge_index, num_embeddings=None):
        """Creates and returns the positional embeddings.

        Args:
            edge_index: torch.Tensor representing edges in COO format
                with shape [2, num_edges].
            num_embeddings: Integer representing the number of embeddings.

        Returns:
            Positional embeddings as a torch tensor
        """
        # From https://github.com/jingraham/neurips19-graph-protein-design
        num_embeddings = num_embeddings or self.num_positional_embeddings
        d = edge_index[0] - edge_index[1]
        frequency = torch.exp(
            torch.arange(
                0, num_embeddings, 2, dtype=torch.float32, device=self.device
            )
            * -(np.log(10000.0) / num_embeddings)
        )
        angles = d.unsqueeze(-1) * frequency
        E = torch.cat((torch.cos(angles), torch.sin(angles)), -1)
        return E

    def _orientations(self, X):
        """Compute orientations between pairs of atoms from neighboring
        residues.

        Args:
            X: torch.Tensor representing atom coordinates with shape
                [n_atoms, 3]

        Returns:
            torch.Tensor of atom pair orientations
        """
        forward = _normalize(X[1:] - X[:-1])
        backward = _normalize(X[:-1] - X[1:])
        forward = F.pad(forward, [0, 0, 0, 1])
        backward = F.pad(backward, [0, 0, 1, 0])
        return torch.cat([forward.unsqueeze(-2), backward.unsqueeze(-2)], -2)

    def _sidechains(self, X):
        """Compute the unit vector representing the imputed side chain
        directions (C_beta - C_alpha).

        Args:
            X: torch.Tensor specifying coordinates of key atoms (N, CA, C, O)
                in 3D space with shape [seq_len, 4, 3]

        Returns:
            Torch tensor representing side chain directions with shape
            [seq_len, 3]
        """
        n, origin, c = X[:, 0], X[:, 1], X[:, 2]
        c, n = _normalize(c - origin), _normalize(n - origin)
        bisector = _normalize(c + n)
        perp = _normalize(torch.cross(c, n))
        vec = -bisector * math.sqrt(1 / 3) - perp * math.sqrt(2 / 3)
        return vec


class StandardProteinGraphDataset(BaseProteinGraphDataset):
    """
    Take care of encoding non-standard AA (represented as "X") in
    self.letter_to_num
    """

    def __init__(self, data_list, **kwargs):
        """
        Initializes the dataset

        Args:
            data_list: List containing the initial data

        Returns:
            None
        """
        self.letter_to_num = {
            "C": 4, "D": 3, "S": 15, "Q": 5, "K": 11, "I": 9,
            "P": 14, "T": 16, "F": 13, "A": 0, "G": 7, "H": 8,
            "E": 6, "L": 10, "R": 1, "W": 17, "V": 19, "N": 2,
            "Y": 18, "M": 12, "X": 0,
        }
        self.num_to_letter = {v: k for k, v in self.letter_to_num.items()}
        super(StandardProteinGraphDataset, self).__init__(data_list, **kwargs)

    def _featurize_as_graph(self, protein):
        """Featurizes the protein information as a graph for the GNN

        Args:
            protein: Dictionary with the protein seq, coord and name.

        Returns:
            Torch geometric data instance representing the protein
            information
        """
        name = protein["name"]
        with torch.no_grad():
            coords = torch.as_tensor(
                protein["coords"], device=self.device, dtype=torch.float32
            )
            seq = torch.as_tensor(
                [self.letter_to_num[a] for a in protein["seq"]],
                device=self.device,
                dtype=torch.long,
            )

            mask = torch.isfinite(coords.sum(dim=(1, 2)))
            coords[~mask] = np.inf

            X_ca = coords[:, 1]
            edge_index = torch_cluster.knn_graph(X_ca, k=self.top_k)

            pos_embeddings = self._positional_embeddings(edge_index)
            E_vectors = X_ca[edge_index[0]] - X_ca[edge_index[1]]
            rbf = _rbf(
                E_vectors.norm(dim=-1),
                D_count=self.num_rbf,
                device=self.device,
            )

            dihedrals = self._dihedrals(coords)
            orientations = self._orientations(X_ca)
            sidechains = self._sidechains(coords)

            node_s = dihedrals
            node_v = torch.cat(
                [orientations, sidechains.unsqueeze(-2)], dim=-2
            )
            edge_s = torch.cat([rbf, pos_embeddings], dim=-1)
            edge_v = _normalize(E_vectors).unsqueeze(-2)

            node_s, node_v, edge_s, edge_v = map(
                torch.nan_to_num, (node_s, node_v, edge_s, edge_v)
            )
        return torch_geometric.data.Data(
            x=X_ca,
            seq=seq,
            name=name,
            node_s=node_s,
            node_v=node_v,
            edge_s=edge_s,
            edge_v=edge_v,
            edge_index=edge_index,
            mask=mask,
        )


class ProteinGraphDataset(BaseProteinGraphDataset):
    """
    A map-style `torch.utils.data.Dataset` which transforms JSON/dictionary
    -style protein structures into featurized protein graphs as described in
    the GVP manuscript (https://arxiv.org/abs/2009.01411). Modified for
    ProtBERT feature extractor.

    Returned graphs are of type `torch_geometric.data.Data` with attributes
    -x          alpha carbon coordinates, shape [n_nodes, 3]
    -name       name of the protein structure, string
    -node_s     node scalar features, shape [n_nodes, 6]
    -node_v     node vector features, shape [n_nodes, 3, 3]
    -edge_s     edge scalar features, shape [n_edges, 32]
    -edge_v     edge vector features, shape [n_edges, 1, 3]
    -edge_index edge indices, shape [2, n_edges]
    -mask       node mask, `False` for nodes with missing data that are
                excluded from message passing
    -input_ids  amino acid id encoding from ProtBERT tokenizer
    -attention_mask attention_mask from ProtBERT tokenizer

    Modified from https://github.com/drorlab/gvp-pytorch/blob/main/gvp/data.py.
    """

    def __init__(self, data_list, **kwargs):
        """
        Initializes the dataset

        Args:
            data_list: List containing the initial data

        Returns:
            None
        """
        super(ProteinGraphDataset, self).__init__(data_list, **kwargs)

    def _featurize_as_graph(self, protein):
        """Featurizes the protein information as a graph for the GNN

        Args:
            protein: Dictionary with the protein seq, coord and name.

        Returns:
            Torch geometric data instance representing the protein
            information
        """
        name = protein["name"]
        input_ids = protein["input_ids"]
        attention_mask = protein["attention_mask"]
        with torch.no_grad():
            coords = torch.as_tensor(
                protein["coords"], device=self.device, dtype=torch.float32
            )
            mask = torch.isfinite(coords.sum(dim=(1, 2)))
            coords[~mask] = np.inf

            X_ca = coords[:, 1]
            edge_index = torch_cluster.knn_graph(X_ca, k=self.top_k)
            pos_embeddings = self._positional_embeddings(edge_index)
            E_vectors = X_ca[edge_index[0]] - X_ca[edge_index[1]]
            rbf = _rbf(
                E_vectors.norm(dim=-1),
                D_count=self.num_rbf,
                device=self.device,
            )

            dihedrals = self._dihedrals(coords)
            orientations = self._orientations(X_ca)
            sidechains = self._sidechains(coords)

            node_s = dihedrals
            node_v = torch.cat(
                [orientations, sidechains.unsqueeze(-2)], dim=-2
            )
            edge_s = torch.cat([rbf, pos_embeddings], dim=-1)
            edge_v = _normalize(E_vectors).unsqueeze(-2)

            node_s, node_v, edge_s, edge_v = map(
                torch.nan_to_num, (node_s, node_v, edge_s, edge_v)
            )
        data = torch_geometric.data.Data(
            x=X_ca,
            input_ids=input_ids,
            attention_mask=attention_mask,
            name=name,
            node_s=node_s,
            node_v=node_v,
            edge_s=edge_s,
            edge_v=edge_v,
            edge_index=edge_index,
            mask=mask,
        )
        return data


# dataset classes with targets:
class SequenceDatasetWithTarget(data.Dataset):
    """Intended for all sequence-only models."""

    def __init__(self, sequences, labels, tokenizer=None, preprocess=True):
        """Initializes the dataset

        Args:
            sequences: list of strings
            labels: tensor of labels [n_samples, n_labels]
            tokenizer: BertTokenizer
            preprocess: Bool. Whether or not to process the sequences.

        Return:
            None
        """
        self.sequences = sequences
        self.labels = labels
        self.tokenizer = tokenizer
        if preprocess:
            self._preprocess()

    def _preprocess(self):
        """Preprocess sequences to input_ids and attention_mask

        Args:

        Return:
            None
        """
        print("Preprocessing sequence data...")
        self.sequences = [prep_seq(seq) for seq in self.sequences]
        encodings = self.tokenizer(
            self.sequences, return_tensors="pt", padding=True
        )
        self.encodings = {
            key: val
            for key, val in encodings.items()
            if key in ("input_ids", "attention_mask")
        }

    def __getitem__(self, idx):
        """Retrieve protein information by index.

        Args:
            idx: Integer representing the position of the protein.

        Return:
            Dictionary with `input_ids`, `attention_mask` and `labels`
        """
        return {
            "input_ids": self.encodings["input_ids"][idx],
            "attention_mask": self.encodings["attention_mask"][idx],
            "labels": self.labels[idx],
        }

    def __len__(self):
        """Length of the dataset.

        Args:

        Return:
            Integer representing the length of the dataset.
        """
        return len(self.sequences)


class ProteinGraphDatasetWithTarget(StandardProteinGraphDataset):
    """Thin wrapper for ProteinGraphDataset to include targets.
    Intended for all (structure-only) GNN models."""

    def _preprocess(self):
        """Preprocess all the records in `data_list` with
        `_featurize_as_graph`"""
        for i in tqdm.tqdm(range(len(self.data_list))):
            self.data_list[i] = (
                self._featurize_as_graph(self.data_list[i]),
                self.data_list[i]["target"],
            )


class BertProteinGraphDatasetWithTarget(ProteinGraphDataset):
    """Thin wrapper for ProteinGraphDataset to include targets.
    Intended for all BERT+GNN models"""

    def __init__(self, data_list, **kwargs):
        """Initializes the dataset

        Args:
            data_list: a list of data records (dicts with `input_ids`,
                `attention_mask`, `target`)

        Return:
            None
        """
        super(BertProteinGraphDatasetWithTarget, self).__init__(
            data_list, **kwargs
        )

    def __getitem__(self, idx):
        """Retrieve protein information by index, featurizing it lazily
        on first access.

        Args:
            idx: Integer representing the position of the protein.

        Return:
            Tuple of (featurized graph, target)
        """
        if not isinstance(self.data_list[idx], tuple):
            self.data_list[idx] = (
                self._featurize_as_graph(self.data_list[idx]),
                self.data_list[idx]["target"],
            )
        return self.data_list[idx]

    def _preprocess(self):
        """Preprocess all the records in `data_list` with
        `_featurize_as_graph`. Directly modifies self.data_list

        Args:

        Returns:
            None
        """
        for i in tqdm.tqdm(range(len(self.data_list))):
            self.data_list[i] = (
                self._featurize_as_graph(self.data_list[i]),
                self.data_list[i]["target"],
            )
[ 2, 15069, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 13, 1439, 6923, 33876, 13, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 17168, 12, 15, 198, 198, 37811, 198, 20519, 13165, 354, 3975, 3918, 27039, 6097, 329, 15568, 357, ...
2.073949
8,276
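A toy featurization sketch for StandardProteinGraphDataset from the module above. The import path is a placeholder, the coordinates are random stand-ins rather than real backbone geometry, and torch_cluster must be installed for the kNN graph construction.

import numpy as np
from lmgvp.data import StandardProteinGraphDataset  # placeholder import path

toy = {
    "name": "toy_protein",                       # made-up record
    "seq": "GAV",
    "coords": np.random.rand(3, 4, 3).tolist(),  # [seq_len, (N, CA, C, O), xyz]
}
ds = StandardProteinGraphDataset([toy], top_k=2)
graph = ds.data_list[0]          # a torch_geometric.data.Data instance
print(graph.node_s.shape)        # torch.Size([3, 6]): dihedral sin/cos features
print(graph.edge_index.shape)    # torch.Size([2, 6]): kNN edges in COO format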
# -*- coding: utf-8 -*-
import time

import requests
from dateutil.parser import parse

from linebot import LineBotApi
from linebot.models import TextSendMessage
from linebot.exceptions import LineBotApiError

# Crawler reference:
# http://bhan0507.logdown.com/posts/1291544-python-facebook-api

# Get a token from the Facebook Graph API Explorer (create your own app);
# it is valid for at most 2 months. Reference:
# http://blog.csdn.net/mochong/article/details/60872512
fb_token = ''

# Get the Channel access token at developers.line.me/console/. Reference:
# https://medium.com/@lukehong/%E5%88%9D%E6%AC%A1%E5%98%97%E8%A9%A6-line-bot-sdk-eaa4abbe8d6e
line_bot_api = LineBotApi('')

# Get each fan page's id and name from the Facebook Graph API Explorer
# and pack them into a dict.
fanspage_dic = {'169635866411766': '原價屋coolpc'}

user_dic = {'user1': ''}

# for testing
# keyword_dic = {0: '少女'}
# keywords are kept in Chinese because the crawled posts are Chinese:
# 1: 'limited-quantity sale', 2: 'graphics card', 3: 'no warranty sticker'
keyword_dic = {1: '限量搶購', 2: '顯示卡', 3: '不貼保固貼紙'}

# log records when the script runs; change the save path as needed
f = open('script_log.txt', 'a+')

# previous post vs. newly fetched post
old_str = ""
new_str = ""

# log function: timestamp + status

# The script keeps running until manually interrupted (Ctrl+C).
while(True):
    try:
        # only crawl during business hours
        timer = int(time.strftime("%H", time.localtime()))
        if(timer >= 10 and timer <= 22):
            # iterate over the fan pages in turn; several pages can be
            # crawled in one pass
            for fanspage in fanspage_dic:
                # printing res shows the JSON response; the outermost key is 'data'.
                # format() fills in the page id, the number of posts to fetch and
                # the token; since we only check whether there is a new post,
                # fetching 1 post is enough
                res = requests.get('https://graph.facebook.com/v2.12/{}/posts?limit={}&access_token={}'.format(fanspage, 1, fb_token))
                for information in res.json()['data']:
                    if 'message' in information:
                        # check the post date is today's date; without this check
                        # yesterday's post could be picked up
                        if(str(time.strftime("%Y-%m-%d", time.localtime())) == str(parse(information['created_time']).date())):
                            # page name, post content, post time
                            # store the first fetched post; the next pass compares
                            # against it, and an identical string means no new post
                            new_str = information['message']
                            if(old_str != new_str):
                                # string check: str.find returns -1 when the
                                # keyword is not contained in the post
                                # ----- test -----
                                # if(str.find(information['message'], keyword_dic[0]) != -1):
                                #     line_bot_api.multicast([user_dic['user1']], TextSendMessage(text="keyword {} found".format(keyword_dic[0])))
                                #     log(" keyword {} found".format(keyword_dic[0]))
                                # ----- test -----
                                # check every keyword
                                for key in keyword_dic:
                                    if(str.find(information['message'], keyword_dic[key]) != -1):
                                        # line_bot_api.multicast is clumsier here, so loop
                                        # and push to each user individually instead
                                        for user in user_dic:
                                            line_bot_api.push_message(user_dic[user], TextSendMessage(text="keyword {} found".format(keyword_dic[key])))
                                        log(" keyword {} found".format(keyword_dic[key]))
                                old_str = new_str
                                # print("Yes")
                            else:
                                pass
                                # print("No")
            log(" run")
            # recommended: run once every 1-2 minutes
            time.sleep(120)
        else:
            log(" sleep time")
            time.sleep(600)
    except Exception as e:
        log(" " + str(e))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 640, 198, 11748, 7007, 198, 6738, 3128, 22602, 13, 48610, 1330, 21136, 198, 6738, 1627, 13645, 1330, 6910, 20630, 32, 14415, 198, 6738, 1627, 13645, 13, 27530, 1330...
1.30517
2,592
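The loop above calls a log() helper whose definition is not part of this record. One plausible minimal reconstruction, assuming it appends a timestamp plus the status string to the already-open file f; this is a guess at the missing helper, not the original code.

def log(status):
    # hypothetical reconstruction: timestamp + status, flushed so the
    # long-running loop leaves a readable trail in script_log.txt
    f.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + status + "\n")
    f.flush()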