content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
TOKEN = "1096828547:AAHD7G8ZMTQ3FsU_4cQj6HQG-rWVMsrzUrg"
DB_USER = 'user'
DB_PASSWORD = 'HFtkS5n0iB7yfvnr'
DB_NAME = 'bot' | [
10468,
43959,
796,
366,
14454,
3104,
26279,
2857,
25,
3838,
10227,
22,
38,
23,
57,
13752,
48,
18,
42388,
52,
62,
19,
66,
48,
73,
21,
41275,
38,
12,
81,
54,
53,
10128,
81,
89,
16692,
70,
1,
198,
11012,
62,
29904,
796,
705,
7220,
... | 1.605263 | 76 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import time
from conary import dbstore
from conary import deps, errors, files, streams, trove, versions
from conary.dbstore import idtable, sqlerrors
from conary.local import deptable, troveinfo, versiontable, schema
from conary.lib import api
from conary.trovetup import TroveTuple
OldDatabaseSchema = schema.OldDatabaseSchema
class DBTroveFiles:
"""
pathId, versionId, path, instanceId, stream
"""
addItemStmt = "INSERT INTO DBTroveFiles (pathId, versionId, path, " \
"fileId, instanceId, isPresent, " \
"stream) " \
"VALUES (?, ?, ?, ?, ?, ?, ?)"
class DBInstanceTable:
"""
Generic table for assigning id's to (name, version, isnSet, use)
tuples, along with a isPresent flag
"""
| [
2,
198,
2,
15069,
357,
66,
8,
35516,
5136,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.716418 | 536 |
#!/usr/bin/python
'''
Example 4: Introducing Condition Groups
Note: Creating a rule to hunt for documents
rule doc_exe_with_macros
{
strings:
$EXE0 = "TVqQAAMAAAAEAAAA" wide ascii nocase
$EXE1 = "4d5a90000300000004000000ffff0000" wide ascii nocase
$EXE2 = "This program cannot be run in DOS mode" wide ascii nocase
$SOCIAL_ENGINEER0 = "enable macro" wide ascii nocase
$SOCIAL_ENGINEER1 = "please" wide ascii nocase
$SOCIAL_ENGINEER2 = "kindly" wide ascii nocase
$DIRECTORY_ENTRY0 = "WordDocument" wide
$DIRECTORY_ENTRY1 = "SummaryInformation" wide
$DIRECTORY_ENTRY2 = "CompObj" wide
condition:
(uint32(0x00) == 0xe011cfd0 or 2 of ($DIRECTORY_ENTRY*)) and
(any of ($SOCIAL_ENGINEER*) and (any of ($EXE*)))
}
'''
from __future__ import print_function
import yara_tools
import yara
import base64
import os
import sys
import binascii
#::Using calc.exe (MD5: b6b9aca1ac1e3d17877801e1ddcb856e as input)
EXE=bytearray(open(sys.argv[1], 'rb').read())
BASE64_EXE=base64.b64encode(EXE)
suspicious_doc_strings = ['_VBA_PROJECT', '_xmlsignatures', 'Macros']
common_directory_entries = ['WordDocument','SummaryInformation','CompObj']
suspicious_exe_strings = [BASE64_EXE[:16],binascii.hexlify(EXE[:16]),'This program cannot be run in DOS mode']
#::Create our rule
rule=yara_tools.create_rule(name="doc_exe_with_macros")
rule.set_default_boolean(value="and")
#::Condition Group 1 - Things that tell us this is a doc
rule.create_condition_group(name="is_doc",default_boolean="or")
rule.add_condition(condition="uint32(0x00) == 0xe011cfd0",condition_group="is_doc")
#::Loop through directory entries and add to group
for entry in common_directory_entries:
rule.add_strings(strings=entry,
modifiers='wide',
identifier="DIRECTORY_ENTRY",
condition="2 of ($IDENTIFIER*)",
condition_group="is_doc")
#::Condition Group 2 - Checking for suspicious strings
rule.create_condition_group(name="doc_iocs",default_boolean='and')
rule.add_strings(strings=['enable macro','please','kindly'],
modifiers=['wide','ascii','nocase'],
identifier="SOCIAL_ENGINEER",
condition="any of ($IDENTIFIER*)",
condition_group="doc_iocs"
)
#::Condition Group 3 - Nested under Condition Group 2, checking for executable strings
for exe_str in suspicious_exe_strings:
rule.add_strings(strings=exe_str,
modifiers=['wide','ascii','nocase'],
condition="any of ($IDENTIFIER*)",
identifier="EXE",
condition_group="exe_iocs",
default_boolean="or",
parent_group="doc_iocs")
generated_rule = rule.build_rule(condition_groups=True)
try:
compiled_rule = yara.compile(source=generated_rule)
print(generated_rule)
print("SUCCESS: IT WORKED!")
except Exception as e:
print("Failed... oh noes! %s" % e)
print(generated_rule)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
7061,
6,
198,
16281,
604,
25,
11036,
2259,
24295,
27441,
198,
6425,
25,
30481,
257,
3896,
284,
12601,
329,
4963,
198,
198,
25135,
2205,
62,
13499,
62,
4480,
62,
20285,
4951,
198,
90,
6... | 2.56602 | 1,083 |
import json
import operator
import os
import shutil
from dataclasses import asdict
from dataclasses import dataclass
from dataclasses import field
from typing import Any
from typing import Iterator
from typing import Dict
from homecomp import errors
from homecomp.models import HousingDetail
from homecomp.models import PurchaserProfile
@dataclass
| [
11748,
33918,
198,
11748,
10088,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
4818,
330,
28958,
1330,
355,
11600,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
4818,
330,
28958,
1330,
2214,
198,
6738,
19720,
1330... | 4.045977 | 87 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# $File: main.py
# $Date: Fri Jan 03 22:01:46 2014 +0800
# $Author: Xinyu Zhou <zxytim[at]gmail[dot]com>
from dataextractor import DataExtractor as DE
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
if __name__ == '__main__':
main()
# vim: foldmethod=marker
| [
2,
48443,
14629,
14,
8800,
14,
29412,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
720,
8979,
25,
1388,
13,
9078,
198,
2,
720,
10430,
25,
19480,
2365,
7643,
2534,
25,
486,
25,
3510,
1946,
1343,
2919... | 2.517007 | 147 |
from ib_insync import *
from util import order_util
"""
A base model containing common IB functions.
For other models to extend and use.
"""
| [
6738,
24283,
62,
1040,
13361,
1330,
1635,
198,
6738,
7736,
1330,
1502,
62,
22602,
198,
198,
37811,
198,
32,
2779,
2746,
7268,
2219,
34782,
5499,
13,
220,
198,
198,
1890,
584,
4981,
284,
9117,
290,
779,
13,
198,
37811,
628
] | 3.625 | 40 |
from rlxp.envs import SquareWorld
from rlxp.rendering import render_env2d
env = SquareWorld()
env.enable_rendering()
for tt in range(10):
env.step(env.action_space.sample())
render_env2d(env)
| [
6738,
374,
75,
42372,
13,
268,
14259,
1330,
9276,
10603,
220,
198,
6738,
374,
75,
42372,
13,
13287,
278,
1330,
8543,
62,
24330,
17,
67,
198,
198,
24330,
796,
9276,
10603,
3419,
198,
24330,
13,
21633,
62,
13287,
278,
3419,
198,
1640,
... | 2.618421 | 76 |
import pyvisa
import sys
resurse = sys.argv[1]
cmd = sys.argv[2]
rm = pyvisa.ResourceManager()
#'USB0::0xF4ED::0xEE3A::NDG10GAQ3R0226::INSTR'
inst = rm.open_resource(resurse)
try:
inst.query(cmd)
except:
pass
#"*IDN?"
| [
11748,
12972,
4703,
64,
198,
11748,
25064,
198,
411,
12321,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
28758,
796,
25064,
13,
853,
85,
58,
17,
60,
198,
26224,
796,
12972,
4703,
64,
13,
26198,
13511,
3419,
198,
2,
6,
27155,
15,
3712... | 2.054545 | 110 |
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from apps.oauto.contents import OPENID_TOKEN_EXPIRES_TIME
from meiduo_mall02 import settings
import logging
logger = logging.getLogger('django')
# 13-itsdangerous的使用
#解密
#定义解密函数
| [
6738,
663,
38537,
516,
1330,
5045,
276,
40386,
13908,
11712,
1300,
32634,
7509,
355,
23283,
7509,
198,
198,
6738,
6725,
13,
78,
23736,
13,
3642,
658,
1330,
38303,
2389,
62,
10468,
43959,
62,
49864,
4663,
1546,
62,
34694,
198,
6738,
502,... | 2.490196 | 102 |
# Generated by Django 2.2.6 on 2019-10-21 09:08
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
940,
12,
2481,
7769,
25,
2919,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.886792 | 53 |
import os
import requests
import random
import time
banner()
print("""
[1]- Joiner [3]- Leaver
[2]- Spammer [4]- Checker
""")
while True:
all_proxies = requests.get('https://api.proxyscrape.com/?request=getproxies&proxytype=http&timeout=1000&country=all&ssl=all&anonymity=all').text
x = all_proxies.split()
b = random.choice(x)
sor = int(input("1 / 2 / 3 / 4:"))
if sor == 1:
invite = input("Lütfen sunucunu adresini giriniz :")
with open("token.txt", "r") as f:
for line in f:
header = {
'authorization': line.strip("\n"),
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Brave Chrome/79.0.3945.117 Safari/537.36'}
deneme = {'http':'http://'+b}
r = requests.post("https://discord.com/api/v8/invites/" + invite, headers=header, proxies=deneme)
if r.status_code == 200:
print(str(r) + "Başarılı")
elif sor == 2:
idd = input("Lütfen kanal idsini giriniz :")
message = input("Lütfen mesajınızı giriniz :")
while True:
with open("token.txt", "r") as f:
for line in f:
header = {
'authorization': line.strip("\n"),
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Brave Chrome/79.0.3945.117 Safari/537.36'}
deneme = {'http': 'http://' + b}
r = requests.post("https://discordapp.com/api/v6/channels/" + idd + "/messages", json={'content': message},headers=header, proxies=deneme)
print(r)
elif sor == 3:
leave = input("Lütfen sunucunu idisini giriniz :")
with open("token.txt", "r") as f:
for line in f:
header = {
'Authorization': line.strip("\n"),
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Brave Chrome/79.0.3945.117 Safari/537.36'}
deneme = {'http': 'http://' + b}
r = requests.delete("https://canary.discordapp.com/api/v6/users/@me/guilds/" + leave, headers=header,proxies=deneme)
print(r)
elif sor == 4:
invite = input("Lütfen sunucunu adresini giriniz :")
with open("token.txt", "r") as f:
for line in f:
header = {
'authorization': line.strip("\n"),
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Brave Chrome/79.0.3945.117 Safari/537.36'}
deneme = {'http':'http://'+b}
r = requests.post("https://discord.com/api/v8/invites/" + invite, headers=header, proxies=deneme)
if r.status_code == 200:
print(line)
else:
print("Geçersiz cevap.")
| [
11748,
28686,
198,
11748,
7007,
198,
11748,
4738,
198,
11748,
640,
198,
220,
220,
220,
220,
198,
3820,
1008,
3419,
628,
198,
4798,
7203,
15931,
198,
58,
16,
45297,
5302,
7274,
220,
220,
220,
220,
685,
18,
45297,
1004,
8770,
198,
198,
... | 1.931143 | 1,583 |
from caffe2.python import brew, optimizer, core, model_helper, workspace
from os.path import join
from os import makedirs
import datetime
import numpy as np
import math
from caffe2.python.predictor import mobile_exporter, predictor_exporter as pe
from caffe2.proto import caffe2_pb2 as c2p2
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags | [
6738,
21121,
17,
13,
29412,
1330,
9059,
11,
6436,
7509,
11,
4755,
11,
2746,
62,
2978,
525,
11,
44573,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
28686,
1330,
285,
4335,
17062,
198,
11748,
4818,
8079,
198,
11748,
299,
32152,
35... | 3.444444 | 117 |
import barcode
from barcode.writer import ImageWriter
from random import randint
import os
from PIL import Image, ImageFont ,ImageDraw
import pickle
#RANDOM NUMBERS GENERATOR
random_number_digits = lambda x : randint(10**(x-1),(10**x)-1)
#random number file
rand_list = sorted([random_number_digits(13) for x in range(5000)])
data = {x:[0,"%04d"%y] for x,y in zip(rand_list,range(1,5001))}
#print(i for i in data.items())
if not os.path.exists('obj'):
os.makedirs('obj')
save_obj(data,"data")
w=load_obj('data')
for key,val in w.items():
print(key,val)
rfile = open("rfile.txt","w")
main_dir = os.getcwd()
#directory for barcode images
barcode_images = os.path.join(main_dir,'barcode_images')
if not os.path.exists(barcode_images):
os.makedirs(barcode_images)
#directory for pass images
pass_images = os.path.join(main_dir,'pass_images')
if not os.path.exists(pass_images):
os.makedirs(pass_images)
#BARCODE WRITER OPTIONS AND BARCODE CLASS
options = dict(module_width=0.4,font_size=10,module_height=9,text_distance=2)
EAN = barcode.get_barcode_class('ean13')
#Change dir
font = ImageFont.truetype("DejaVuSans-Bold.ttf", 18,encoding="unic")
pass_design = os.getcwd()+'/TYFPASSES.jpg'
for i in range(5000):
os.chdir(main_dir)
barcode_number=str(rand_list[i])
#barcode_number="000000000"+"%4d"%i
print(barcode_number)
print(type(barcode_number))
rfile.write(barcode_number+"\n")
ean = EAN(barcode_number,writer=ImageWriter())
#saving barcode images in barcode_images directory
os.chdir(barcode_images)
barcode_img = 'ean13_barcode_%s'%(i+1)
print("[*] Barcode {} saved in {} ".format(i+1,"barcode_images"))
barcode = ean.save(barcode_img,options)
img_bcode = Image.open(barcode_img+'.png', 'r')
img_w, img_h = img_bcode.size
#print(img_w,img_h)
#background = Image.new('RGBA', (1440, 900), (255, 255, 255, 255))
angle=90
rot = img_bcode.rotate( angle, expand=1 )
youthfest_pass = Image.open(pass_design,'r')
bg_w, bg_h = youthfest_pass.size
#print("youthfest pass size ",youthfest_pass.size)
offset = ((bg_w - (img_h + 53)), (bg_h - img_w)//2)
#offset1 = (int(str(y) for y in offset:)
#offset1 = [int(y) for y in offset]
#print(offset)
youthfest_pass.paste(rot, offset)
serial_num = "%04d"%(i+1)
draw = ImageDraw.Draw(youthfest_pass)
txt = draw.text((60, 0),serial_num,(255,255,255),font=font)
#saving pass in pass images directory
os.chdir(pass_images)
youthfest_pass.save('pass%s.png'%(i+1))
print("[*] Pass {} saved in {} ".format(i+1,"pass_images"))
#closing all the pillow images
img_bcode.close()
youthfest_pass.close()
print("All numbers are written at rfile.txt")
#options = dict(compress=True)
| [
11748,
2318,
8189,
198,
6738,
2318,
8189,
13,
16002,
1330,
7412,
34379,
198,
6738,
4738,
1330,
43720,
600,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
23252,
837,
5159,
25302,
198,
11748,
2298,
293,
198,
198,
2,
49,
... | 2.415468 | 1,112 |
# ============================================================================
#
# Copyright (C) 2007-2016 Conceptive Engineering bvba.
# www.conceptive.be / info@conceptive.be
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Conceptive Engineering nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ============================================================================
from ...core.qt import QtCore, QtGui, QtWidgets, Qt
from camelot.view.art import Pixmap
| [
2,
220,
38093,
2559,
18604,
198,
2,
198,
2,
220,
15069,
357,
34,
8,
4343,
12,
5304,
1482,
25867,
14044,
275,
85,
7012,
13,
198,
2,
220,
7324,
13,
1102,
25867,
13,
1350,
1220,
7508,
31,
1102,
25867,
13,
1350,
198,
2,
198,
2,
220,... | 3.493458 | 535 |
import codecs
import json
from keras.preprocessing import sequence
from keras_bert import Tokenizer, load_trained_model_from_checkpoint
from keras.models import load_model
from flask import request, Flask, jsonify
import tensorflow as tf
app = Flask(__name__)
global_()
@app.route("/sentiment_analysis_api", methods=['POST'])
if __name__ == "__main__":
# add hot fresh
app.run() | [
11748,
40481,
82,
198,
11748,
33918,
198,
6738,
41927,
292,
13,
3866,
36948,
1330,
8379,
198,
6738,
41927,
292,
62,
4835,
1330,
29130,
7509,
11,
3440,
62,
35311,
62,
19849,
62,
6738,
62,
9122,
4122,
198,
6738,
41927,
292,
13,
27530,
1... | 3.201681 | 119 |
#
# PySNMP MIB module BLADETYPE2-ACL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BLADETYPE2-ACL-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:39:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
hpSwitchBladeType2_Mgmt, = mibBuilder.importSymbols("HP-SWITCH-PL-MIB", "hpSwitchBladeType2-Mgmt")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter64, Counter32, Unsigned32, Gauge32, IpAddress, MibIdentifier, iso, TimeTicks, Integer32, Bits, NotificationType, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter64", "Counter32", "Unsigned32", "Gauge32", "IpAddress", "MibIdentifier", "iso", "TimeTicks", "Integer32", "Bits", "NotificationType", "ModuleIdentity")
DisplayString, TextualConvention, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "MacAddress")
# Root of the ACL MIB subtree: 1.3.6.1.4.1.11.2.3.7.11.33.1.2.9,
# registered under hpSwitchBladeType2-Mgmt in the HP enterprise arc.
acl = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9))
# loadTexts is False unless the loader asked for descriptions; the guards
# keep the module cheap to import when texts are not wanted.
if mibBuilder.loadTexts: acl.setLastUpdated('200510120000Z')
if mibBuilder.loadTexts: acl.setOrganization('Hewlett Packard Company')
if mibBuilder.loadTexts: acl.setContactInfo('customerservice@hp.com')
if mibBuilder.loadTexts: acl.setDescription('The MIB module for the Access Control List configuration and statistics.')
# Branch identifiers under acl(9): configuration subtree and its three
# sub-branches (individual ACLs, ACL blocks, ACL groups).
acConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1))
acList = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1))
aclBlock = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2))
aclGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3))
# ----------------------------------------------------------------------
# aclCurCfgTable: the *currently active* ACL configuration. All columns
# are read-only; edits are made through the parallel aclNewCfgTable and
# take effect on apply. Indexed by aclCurCfgIndex.
# ----------------------------------------------------------------------
aclCurCfgTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1), )
if mibBuilder.loadTexts: aclCurCfgTable.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgTable.setDescription('The table of current ACL configuration.')
aclCurCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1), ).setIndexNames((0, "BLADETYPE2-ACL-MIB", "aclCurCfgIndex"))
if mibBuilder.loadTexts: aclCurCfgEntry.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgEntry.setDescription('Current information about a particular ACL configuration entry.')
# Index column: no max-access call, as is conventional for index objects.
aclCurCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: aclCurCfgIndex.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgIndex.setDescription('The index associated with this ACL entry.')
aclCurCfgBlock = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgBlock.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgBlock.setDescription('The index of the ACL block to which this ACL entry is a member of. A value of zero means the ACL is not a member of any block.')
aclCurCfgGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgGroup.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgGroup.setDescription('The index of the ACL group to which this ACL entry is a member of. A value of zero means the ACL is not a member of any group.')
# Filter action and its optional COS argument (used when action == setcos).
aclCurCfgFilterAction = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("none", 0), ("permit", 1), ("deny", 2), ("setcos", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgFilterAction.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgFilterAction.setDescription('The action to be performed on a packet that matches the filter settings of this ACL entry.')
aclCurCfgFilterActionSetCOS = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("none", 0), ("cos0", 1), ("cos1", 2), ("cos2", 3), ("cos3", 4), ("cos4", 5), ("cos5", 6), ("cos6", 7), ("cos7", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgFilterActionSetCOS.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgFilterActionSetCOS.setDescription('The value to be used when the action to be performed is setCOS for this ACL entry.')
# Layer-2 match criteria: ethernet framing, tag format, MAC addresses/masks.
# NOTE(review): column sub-id 8 is skipped (6, 7, then 9) — presumably a
# deprecated object in the source MIB; confirm against the MIB definition.
aclCurCfgEthFmt = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 0), ("ethernet2", 1), ("snap", 2), ("llc", 3), ("ieee802dot3", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgEthFmt.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgEthFmt.setDescription('The packet ethernet format to be filtered.')
aclCurCfgTagFmt = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("untagged", 1), ("tagged", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgTagFmt.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgTagFmt.setDescription('The packet tag format to be filtered.')
aclCurCfgSrcMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 9), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgSrcMACAddress.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgSrcMACAddress.setDescription('The source MAC address to be filtered.')
aclCurCfgSrcMACMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 10), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgSrcMACMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgSrcMACMask.setDescription('The address mask applied to aclCurCfgSrcMACAddress for filtering.')
aclCurCfgDstMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 11), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgDstMACAddress.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgDstMACAddress.setDescription('The destination MAC address to be filtered.')
aclCurCfgDstMACMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 12), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgDstMACMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgDstMACMask.setDescription('The address mask applied to aclCurCfgDstMACAddress for filtering.')
# Ethernet type: symbolic name plus raw 16-bit value (value is free-form
# when name == other(7)).
aclCurCfgEthernetTypeName = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 0), ("arp", 1), ("ipv4", 2), ("ipv6", 3), ("mpls", 4), ("rarp", 5), ("any", 6), ("other", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgEthernetTypeName.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgEthernetTypeName.setDescription('The Ethernet type to be filtered. If the value of this object is other(7), the value of aclNewCfgEthernetTypeValue indicates the ethernet type that will be filtered.')
aclCurCfgEthernetTypeValue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgEthernetTypeValue.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgEthernetTypeValue.setDescription('The Ethernet type value to be filtered. The value of this object is equivalent to the value of aclNewCfgEthernetTypeName except when the value of aclNewCfgEthernetTypeName is other(7), which can be any user-defined value for this object.')
# VLAN and 802.1p match criteria.
aclCurCfgVLanId = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgVLanId.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgVLanId.setDescription('The virtual LAN identifier to be filtered.')
aclCurCfgVLanMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgVLanMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgVLanMask.setDescription('The mask applied to aclCurCfgVLanId for filtering.')
aclCurCfg8021pPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("none", 0), ("priority0", 1), ("priority1", 2), ("priority2", 3), ("priority3", 4), ("priority4", 5), ("priority5", 6), ("priority6", 7), ("priority7", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfg8021pPriority.setStatus('current')
if mibBuilder.loadTexts: aclCurCfg8021pPriority.setDescription('The 802.1p priority to be filtered.')
# Layer-3/4 match criteria: TOS, protocol, IP addresses/masks, TCP/UDP
# ports/masks, TCP flags.
aclCurCfgTypeOfService = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgTypeOfService.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgTypeOfService.setDescription('The type of service to be filtered.')
aclCurCfgProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgProtocol.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgProtocol.setDescription('The protocol to be filtered.')
aclCurCfgSrcIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 20), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgSrcIPAddress.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgSrcIPAddress.setDescription('The source IP address to be filtered.')
aclCurCfgSrcIPMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 21), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgSrcIPMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgSrcIPMask.setDescription('The address mask applied to aclCurCfgSrcIPAddress for filtering.')
aclCurCfgDstIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 22), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgDstIPAddress.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgDstIPAddress.setDescription('The destination IP address to be filtered.')
aclCurCfgDstIPMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 23), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgDstIPMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgDstIPMask.setDescription('The address mask applied to aclCurCfgDstIPAddress for filtering.')
aclCurCfgSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgSrcPort.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgSrcPort.setDescription('The source TCP/UDP port number to be filtered.')
aclCurCfgSrcPortMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgSrcPortMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgSrcPortMask.setDescription('The mask applied to aclCurCfgSrcPort for filtering.')
aclCurCfgDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgDstPort.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgDstPort.setDescription('The destination TCP/UDP port number to be filtered.')
aclCurCfgDstPortMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgDstPortMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgDstPortMask.setDescription('The mask applied to aclCurCfgDstPort for filtering.')
aclCurCfgTCPFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 28), Bits().clone(namedValues=NamedValues(("reserved1", 0), ("reserved2", 1), ("tcpURG", 2), ("tcpACK", 3), ("tcpPSH", 4), ("tcpRST", 5), ("tcpSYN", 6), ("tcpFIN", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgTCPFlags.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgTCPFlags.setDescription('The TCP flags to be filtered. OCTET xxxxxxxx ||||..|| ||||..||_tcpFIN(7) ||||..|__tcpSYN(6) |||| ||||_____tcpACK(3) |||______tcpURG(2) ||_______reserved2(1) |________reserved1(0) where: - reserved1 - 0; - reserved2 - 0; - x - 0 or 1; ')
# NOTE(review): column sub-id jumps from 28 to 39 here; the same jump
# appears in the New table, so it looks intentional in the source MIB.
aclCurCfgTCPFlagsMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 39), Bits().clone(namedValues=NamedValues(("reserved1", 0), ("reserved2", 1), ("tcpURG", 2), ("tcpACK", 3), ("tcpPSH", 4), ("tcpRST", 5), ("tcpSYN", 6), ("tcpFIN", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgTCPFlagsMask.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgTCPFlagsMask.setDescription('The TCP flags mask. OCTET xxxxxxxx ||||..|| ||||..||_tcpFIN(7) ||||..|__tcpSYN(6) |||| ||||_____tcpACK(3) |||______tcpURG(2) ||_______reserved2(1) |________reserved1(0) where: - reserved1 - 0; - reserved2 - 0; - x - 0 or 1; ')
# Egress port bitmap and per-ACL statistics toggle.
aclCurCfgEgressPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 29), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgEgressPorts.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgEgressPorts.setDescription('The port list in the ACL configured for egress filtering. The ports are presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ port 9 || || || ||___ port 8 || |____ port 7 || . . . ||_________ port 2 |__________ port 1 where x: 1 - the represented port is configured for filtering. 0 - the represented port is not configured for filtering.')
aclCurCfgStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclCurCfgStatistics.setStatus('current')
if mibBuilder.loadTexts: aclCurCfgStatistics.setDescription('Whether statistics collection for this ACL is enabled or not.')
# ----------------------------------------------------------------------
# aclNewCfgTable: the *pending* ACL configuration. Mirrors the Cur table
# column-for-column but with read-write access, plus action objects
# (add/remove egress port, delete entry). Indexed by aclNewCfgIndex.
# ----------------------------------------------------------------------
aclNewCfgTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2), )
if mibBuilder.loadTexts: aclNewCfgTable.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgTable.setDescription('The table of new ACL configuration.')
aclNewCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1), ).setIndexNames((0, "BLADETYPE2-ACL-MIB", "aclNewCfgIndex"))
if mibBuilder.loadTexts: aclNewCfgEntry.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgEntry.setDescription('New information about a particular ACL configuration.')
aclNewCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: aclNewCfgIndex.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgIndex.setDescription('The index associated with this ACL entry.')
# Block/group membership stays read-only even in the New table.
aclNewCfgBlock = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclNewCfgBlock.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgBlock.setDescription('The index of the ACL block to which this ACL entry is a member of. A value of zero means the ACL is not a member of any block.')
aclNewCfgGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclNewCfgGroup.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgGroup.setDescription('The index of the ACL group to which this ACL entry is a member of. A value of zero means the ACL is not a member of any group.')
aclNewCfgFilterAction = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("none", 0), ("permit", 1), ("deny", 2), ("setcos", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgFilterAction.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgFilterAction.setDescription('The action to be performed on a packet that matches the filter settings of this ACL entry.')
aclNewCfgFilterActionSetCOS = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("none", 0), ("cos0", 1), ("cos1", 2), ("cos2", 3), ("cos3", 4), ("cos4", 5), ("cos5", 6), ("cos6", 7), ("cos7", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgFilterActionSetCOS.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgFilterActionSetCOS.setDescription('The COS queue to be used when the action for this ACL entry is set to SetCOS.')
aclNewCfgEthFmt = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 0), ("ethernet2", 1), ("snap", 2), ("llc", 3), ("ieee802dot3", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgEthFmt.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgEthFmt.setDescription('The packet ethernet format to be filtered.')
# NOTE(review): label for value 1 is "none" here but "untagged" in the
# Cur table — presumably a quirk carried over from the source MIB; verify
# against the original BLADETYPE2-ACL-MIB before relying on the label.
aclNewCfgTagFmt = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("tagged", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgTagFmt.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgTagFmt.setDescription('The packet tagging format to be filtered.')
aclNewCfgSrcMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 9), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgSrcMACAddress.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgSrcMACAddress.setDescription('The source MAC address to be filtered. Whenever this object is set to a nonzero value, the aclNewCfgSrcMACMask object, if not yet set, will be automatically set to ff:ff:ff:ff:ff.')
aclNewCfgSrcMACMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 10), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgSrcMACMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgSrcMACMask.setDescription('The address mask to be applied to aclNewCfgSrcMACAddress for filtering.')
aclNewCfgDstMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 11), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgDstMACAddress.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgDstMACAddress.setDescription('The destination MAC address to be filtered. Whenever this object is set to a nonzero value, the aclNewCfgDstMACMask object, if not yet set, will be automatically set to ff:ff:ff:ff:ff.')
aclNewCfgDstMACMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 12), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgDstMACMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgDstMACMask.setDescription('The address mask to be applied to aclNewCfgDstMACAddress for filtering.')
aclNewCfgEthernetTypeName = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 0), ("arp", 1), ("ipv4", 2), ("ipv6", 3), ("mpls", 4), ("rarp", 5), ("any", 6), ("other", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgEthernetTypeName.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgEthernetTypeName.setDescription('The Ethernet type to be filtered. If the value of this object is other(7), the value of aclNewCfgEthernetTypeValue indicates the ethernet type that will be filtered. If this object is set to a value other than other(7), the value of the aclNewCfgEthernetTypeValue object is automatically set, as follows: aclNewCfgEthernetTypeName aclNewCfgEthernetTypeValue none(0) 0 arp(1) 2054 (0x0806) ipv4(2) 2048 (0x0800) ipv6(3) 34525 (0x86dd) mpls(4) 34887 (0x8847) rarp(5) 32821 (0x8035) any(6) 65535 (0xffff) ')
aclNewCfgEthernetTypeValue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgEthernetTypeValue.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgEthernetTypeValue.setDescription('The Ethernet type value to be filtered. The value of this object is equivalent to the value of aclNewCfgEthernetTypeName except when the value of aclNewCfgEthernetTypeName is other(7), which can be any user-defined value for this object.')
# VLAN id range is 1..4095 here (0 allowed only in the Cur table, where
# 0 presumably means "not configured").
aclNewCfgVLanId = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgVLanId.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgVLanId.setDescription('The virtual LAN identifier to be filtered. Whenever this object is set to a nonzero value, the aclNewCfgVLanMask object, if not yet set, will be automatically set to 4095 (0xfff).')
aclNewCfgVLanMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgVLanMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgVLanMask.setDescription('The mask to be applied to aclNewCfgVLanId for filtering.')
aclNewCfg8021pPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("none", 0), ("priority0", 1), ("priority1", 2), ("priority2", 3), ("priority3", 4), ("priority4", 5), ("priority5", 6), ("priority6", 7), ("priority7", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfg8021pPriority.setStatus('current')
if mibBuilder.loadTexts: aclNewCfg8021pPriority.setDescription('The 802.1p priority to be filtered.')
aclNewCfgTypeOfService = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgTypeOfService.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgTypeOfService.setDescription('The type of service to be filtered.')
aclNewCfgProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgProtocol.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgProtocol.setDescription('The protocol to be filtered.')
aclNewCfgSrcIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 20), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgSrcIPAddress.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgSrcIPAddress.setDescription('The source IP address to be filtered. Whenever this object is set to a nonzero value, the aclNewCfgSrcIPMask object, if not yet set, will be automatically set to 255.255.255.255.')
aclNewCfgSrcIPMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 21), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgSrcIPMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgSrcIPMask.setDescription('The address mask to be applied to aclNewCfgSrcIPAddress for filtering.')
aclNewCfgDstIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 22), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgDstIPAddress.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgDstIPAddress.setDescription('The destination IP address to be filtered. Whenever this object is set to a nonzero value, the aclNewCfgDstIPMask object, if not yet set, will be automatically set to 255.255.255.255.')
aclNewCfgDstIPMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 23), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgDstIPMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgDstIPMask.setDescription('The address mask to be applied to aclNewCfgDstIPAddress for filtering.')
aclNewCfgSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgSrcPort.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgSrcPort.setDescription('The source TCP/UDP port number to be filtered. Whenever this object is set if the aclNewCfgSrcPortMask object is not set will be automatically set to 65535 (0xffff).')
aclNewCfgSrcPortMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgSrcPortMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgSrcPortMask.setDescription('The mask to be applied to aclNewCfgSrcPort for filtering.')
# NOTE(review): the description below mentions aclNewCfgSrcPortMask;
# presumably it should be aclNewCfgDstPortMask — the text is copied
# verbatim from the source MIB, so it is left untouched here.
aclNewCfgDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgDstPort.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgDstPort.setDescription('The destination TCP/UDP port number to be filtered. Whenever this object is set the aclNewCfgSrcPortMask object, if not yet set, will be automatically set to 65535 (0xffff).')
aclNewCfgDstPortMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgDstPortMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgDstPortMask.setDescription('The mask to be applied to aclNewCfgDstPort for filtering.')
aclNewCfgTCPFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 28), Bits().clone(namedValues=NamedValues(("reserved1", 0), ("reserved2", 1), ("tcpURG", 2), ("tcpACK", 3), ("tcpPSH", 4), ("tcpRST", 5), ("tcpSYN", 6), ("tcpFIN", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgTCPFlags.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgTCPFlags.setDescription('The TCP flags to be filtered. The TCP flags are presented in bitmap format, as follows: OCTET xxxxxxxx ||||..|| ||||..||_tcpFIN(7) ||||..|__tcpSYN(6) |||| ||||_____tcpACK(3) |||______tcpURG(2) ||_______reserved2(1) |________reserved1(0) where: - reserved1 - 0; - reserved2 - 0; - x - 0 or 1; ')
aclNewCfgTCPFlagsMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 39), Bits().clone(namedValues=NamedValues(("reserved1", 0), ("reserved2", 1), ("tcpURG", 2), ("tcpACK", 3), ("tcpPSH", 4), ("tcpRST", 5), ("tcpSYN", 6), ("tcpFIN", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgTCPFlagsMask.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgTCPFlagsMask.setDescription('The TCP flags mask. The TCP flags are presented in bitmap format, as follows: OCTET xxxxxxxx ||||..|| ||||..||_tcpFIN(7) ||||..|__tcpSYN(6) |||| ||||_____tcpACK(3) |||______tcpURG(2) ||_______reserved2(1) |________reserved1(0) where: - reserved1 - 0; - reserved2 - 0; - x - 0 or 1; Default value is 0x3f.')
# Egress ports are read-only here; membership is edited through the
# Add/Remove action objects below instead of writing the bitmap.
aclNewCfgEgressPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 29), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclNewCfgEgressPorts.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgEgressPorts.setDescription('The port list in the ACL configured for egress filtering. The ports are presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ port 9 || || || ||___ port 8 || |____ port 7 || . . . ||_________ port 2 |__________ port 1 where x: 1 - the represented port is configured for filtering. 0 - the represented port is not configured for filtering.')
aclNewCfgStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgStatistics.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgStatistics.setDescription('Whether statistics collection for this ACL is enabled or not.')
# Write-only action objects: reads always return 0 / other(1).
aclNewCfgAddEgressPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 31), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgAddEgressPort.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgAddEgressPort.setDescription('The port to be added to the specified ACL for egress filtering. A value of zero is always returned when this object is read.')
aclNewCfgRemoveEgressPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 32), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgRemoveEgressPort.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgRemoveEgressPort.setDescription('The port to be removed from the specified ACL. A value of zero is always returned when this object is read.')
aclNewCfgDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 1, 2, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("delete", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclNewCfgDelete.setStatus('current')
if mibBuilder.loadTexts: aclNewCfgDelete.setDescription('This is an action object to delete an ACL entry. A value of other(1) is always returned when this object is read.')
aclBlockCurCfgTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 1), )
if mibBuilder.loadTexts: aclBlockCurCfgTable.setStatus('current')
if mibBuilder.loadTexts: aclBlockCurCfgTable.setDescription('The table of current ACL block configuration.')
aclBlockCurCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 1, 1), ).setIndexNames((0, "BLADETYPE2-ACL-MIB", "aclBlockCurCfgIndex"))
if mibBuilder.loadTexts: aclBlockCurCfgEntry.setStatus('current')
if mibBuilder.loadTexts: aclBlockCurCfgEntry.setDescription('Current information about a particular ACL block configuration.')
aclBlockCurCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: aclBlockCurCfgIndex.setStatus('current')
if mibBuilder.loadTexts: aclBlockCurCfgIndex.setDescription('The index associated with this ACL block entry.')
aclBlockCurCfgMemberAcls = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 1, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclBlockCurCfgMemberAcls.setStatus('current')
if mibBuilder.loadTexts: aclBlockCurCfgMemberAcls.setDescription('The ACL members of this ACL block, presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ ACL 9 || || || ||___ ACL 8 || |____ ACL 7 || . . . ||_________ ACL 2 |__________ ACL 1 where x: 1 - the represented ACL is a member of the block. 0 - the represented ACL is not a member of the block.')
aclBlockNewCfgTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 2), )
if mibBuilder.loadTexts: aclBlockNewCfgTable.setStatus('current')
if mibBuilder.loadTexts: aclBlockNewCfgTable.setDescription('The table of new ACL block configuration.')
aclBlockNewCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 2, 1), ).setIndexNames((0, "BLADETYPE2-ACL-MIB", "aclBlockNewCfgIndex"))
if mibBuilder.loadTexts: aclBlockNewCfgEntry.setStatus('current')
if mibBuilder.loadTexts: aclBlockNewCfgEntry.setDescription('New information about a particular ACL block configuration.')
aclBlockNewCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: aclBlockNewCfgIndex.setStatus('current')
if mibBuilder.loadTexts: aclBlockNewCfgIndex.setDescription('The index associated with this ACL block entry.')
aclBlockNewCfgMemberAcls = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 2, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclBlockNewCfgMemberAcls.setStatus('current')
if mibBuilder.loadTexts: aclBlockNewCfgMemberAcls.setDescription('The ACL members of this ACL block, presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ ACL 9 || || || ||___ ACL 8 || |____ ACL 7 || . . . ||_________ ACL 2 |__________ ACL 1 where x: 1 - the represented ACL is a member of the block. 0 - the represented ACL is not a member of the block.')
aclBlockNewCfgAddAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 2, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclBlockNewCfgAddAcl.setStatus('current')
if mibBuilder.loadTexts: aclBlockNewCfgAddAcl.setDescription('The index of the ACL entry to be added into this ACL block. A successful set operation on this object will also set the bit corresponding to the ACL entry in the aclBlockNewCfgMemberAcls bitmap. A value of zero is always returned when this object is read.')
aclBlockNewCfgRemoveAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 2, 1, 4), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclBlockNewCfgRemoveAcl.setStatus('current')
if mibBuilder.loadTexts: aclBlockNewCfgRemoveAcl.setDescription('The index of the ACL entry to be removed from this ACL block. A successful set operation on this object will unset the bit corresponding to the ACL entry in the aclBlockNewCfgMemberAcls bitmap. A value of zero is always returned when this object is read.')
aclBlockNewCfgDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("delete", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclBlockNewCfgDelete.setStatus('current')
if mibBuilder.loadTexts: aclBlockNewCfgDelete.setDescription('This is an action object to delete an ACL block. A value of other(1) is always returned when this object is read.')
aclGroupCurCfgTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 1), )
if mibBuilder.loadTexts: aclGroupCurCfgTable.setStatus('current')
if mibBuilder.loadTexts: aclGroupCurCfgTable.setDescription('The table of current ACL Group configuration.')
aclGroupCurCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 1, 1), ).setIndexNames((0, "BLADETYPE2-ACL-MIB", "aclGroupCurCfgIndex"))
if mibBuilder.loadTexts: aclGroupCurCfgEntry.setStatus('current')
if mibBuilder.loadTexts: aclGroupCurCfgEntry.setDescription('Information about a particular ACL configuration.')
aclGroupCurCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: aclGroupCurCfgIndex.setStatus('current')
if mibBuilder.loadTexts: aclGroupCurCfgIndex.setDescription('The index associated with this ACL Group entry.')
aclGroupCurCfgMemberAcls = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 1, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclGroupCurCfgMemberAcls.setStatus('current')
if mibBuilder.loadTexts: aclGroupCurCfgMemberAcls.setDescription('The ACL members of this ACL group, presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ ACL 9 || || || ||___ ACL 8 || |____ ACL 7 || . . . ||_________ ACL 2 |__________ ACL 1 where x: 1 - the represented ACL is a member of the group. 0 - the represented ACL is not a member of the group.')
aclGroupCurCfgMemberBlocks = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 1, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclGroupCurCfgMemberBlocks.setStatus('current')
if mibBuilder.loadTexts: aclGroupCurCfgMemberBlocks.setDescription('The ACL block members of this ACL group, presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ ACL Block 9 || || || ||___ ACL Block 8 || |____ ACL Block 7 || . . . . ||_________ ACL Block 2 |__________ ACL Block 1 where x: 1 - the represented ACL block is a member of the group. 0 - the represented ACL block is not a member of the group.')
aclGroupNewCfgTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2), )
if mibBuilder.loadTexts: aclGroupNewCfgTable.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgTable.setDescription('The table of new ACL Group configuration.')
aclGroupNewCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1), ).setIndexNames((0, "BLADETYPE2-ACL-MIB", "aclGroupNewCfgIndex"))
if mibBuilder.loadTexts: aclGroupNewCfgEntry.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgEntry.setDescription('New information about a particular ACL configuration.')
aclGroupNewCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: aclGroupNewCfgIndex.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgIndex.setDescription('The index associated with this ACL Group entry.')
aclGroupNewCfgMemberAcls = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclGroupNewCfgMemberAcls.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgMemberAcls.setDescription('The ACL members of this ACL group, presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ ACL 9 || || || ||___ ACL 8 || |____ ACL 7 || . . . ||_________ ACL 2 |__________ ACL 1 where x: 1 - the represented ACL is a member of the group. 0 - the represented ACL is not a member of the group.')
aclGroupNewCfgMemberBlocks = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aclGroupNewCfgMemberBlocks.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgMemberBlocks.setDescription('The ACL block members of this ACL group, presented in bitmap format, as follows: OCTET 1 OCTET 2 ..... xxxxxxxx xxxxxxxx ..... || || | || || |_ ACL Block 9 || || || ||___ ACL Block 8 || |____ ACL Block 7 || . . . . ||_________ ACL Block 2 |__________ ACL Block 1 where x: 1 - the represented ACL block is a member of the group. 0 - the represented ACL block is not a member of the group.')
aclGroupNewCfgAddAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 4), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclGroupNewCfgAddAcl.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgAddAcl.setDescription('The index of the ACL entry to be added into this ACL group. A successful set operation on this object will also set the bit corresponding to the ACL entry in the aclGroupNewCfgMemberAcls bitmap. A value of zero is always returned when this object is read.')
aclGroupNewCfgRemoveAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 5), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclGroupNewCfgRemoveAcl.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgRemoveAcl.setDescription('The index of the ACL entry to be removed from this ACL group. A successful set operation on this object will unset the bit corresponding to the ACL entry in the aclGroupNewCfgMemberAcls bitmap. A value of zero is always returned when this object is read.')
aclGroupNewCfgAddBlock = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 6), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclGroupNewCfgAddBlock.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgAddBlock.setDescription('The index of the ACL block entry to be added into this ACL group. A successful set operation on this object will also set the bit corresponding to the ACL block entry in the aclGroupNewCfgMemberBlocks bitmap. A value of zero is always returned when this object is read.')
aclGroupNewCfgRemoveBlock = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 7), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclGroupNewCfgRemoveBlock.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgRemoveBlock.setDescription('The index of the ACL block entry to be removed from this ACL group. A successful set operation on this object will unset the bit corresponding to the ACL block entry in the aclGroupNewCfgMemberBlocks bitmap. A value of zero is always returned when this object is read.')
aclGroupNewCfgDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 33, 1, 2, 9, 1, 3, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("delete", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aclGroupNewCfgDelete.setStatus('current')
if mibBuilder.loadTexts: aclGroupNewCfgDelete.setDescription('This is an action object to delete an ACL group. A value of other(1) is always returned when this object is read.')
mibBuilder.exportSymbols("BLADETYPE2-ACL-MIB", aclGroupCurCfgMemberBlocks=aclGroupCurCfgMemberBlocks, acl=acl, aclNewCfgSrcIPAddress=aclNewCfgSrcIPAddress, aclGroupNewCfgDelete=aclGroupNewCfgDelete, aclCurCfgDstMACMask=aclCurCfgDstMACMask, aclBlockNewCfgRemoveAcl=aclBlockNewCfgRemoveAcl, aclCurCfgTable=aclCurCfgTable, aclNewCfgDstIPAddress=aclNewCfgDstIPAddress, aclCurCfgEthFmt=aclCurCfgEthFmt, aclNewCfgSrcMACMask=aclNewCfgSrcMACMask, aclNewCfgVLanId=aclNewCfgVLanId, aclNewCfgTagFmt=aclNewCfgTagFmt, aclCurCfgTCPFlags=aclCurCfgTCPFlags, aclNewCfgSrcIPMask=aclNewCfgSrcIPMask, aclCurCfgVLanId=aclCurCfgVLanId, aclNewCfgDelete=aclNewCfgDelete, aclNewCfgDstIPMask=aclNewCfgDstIPMask, aclNewCfgTypeOfService=aclNewCfgTypeOfService, aclNewCfgTCPFlagsMask=aclNewCfgTCPFlagsMask, aclNewCfgEgressPorts=aclNewCfgEgressPorts, aclGroupNewCfgRemoveBlock=aclGroupNewCfgRemoveBlock, aclNewCfgDstPortMask=aclNewCfgDstPortMask, aclBlockCurCfgEntry=aclBlockCurCfgEntry, aclBlock=aclBlock, aclCurCfgTypeOfService=aclCurCfgTypeOfService, aclNewCfgAddEgressPort=aclNewCfgAddEgressPort, aclCurCfgSrcIPMask=aclCurCfgSrcIPMask, aclNewCfgEntry=aclNewCfgEntry, aclCurCfgEthernetTypeValue=aclCurCfgEthernetTypeValue, aclBlockCurCfgMemberAcls=aclBlockCurCfgMemberAcls, aclCurCfgSrcPortMask=aclCurCfgSrcPortMask, aclNewCfgSrcPortMask=aclNewCfgSrcPortMask, aclBlockCurCfgTable=aclBlockCurCfgTable, aclNewCfgStatistics=aclNewCfgStatistics, aclCurCfgProtocol=aclCurCfgProtocol, aclCurCfgTCPFlagsMask=aclCurCfgTCPFlagsMask, aclCurCfgSrcMACMask=aclCurCfgSrcMACMask, aclGroupNewCfgMemberAcls=aclGroupNewCfgMemberAcls, aclCurCfgTagFmt=aclCurCfgTagFmt, aclCurCfgDstPort=aclCurCfgDstPort, aclGroupNewCfgEntry=aclGroupNewCfgEntry, aclCurCfgDstIPMask=aclCurCfgDstIPMask, aclGroup=aclGroup, aclGroupCurCfgTable=aclGroupCurCfgTable, aclNewCfgEthernetTypeName=aclNewCfgEthernetTypeName, aclCurCfgFilterAction=aclCurCfgFilterAction, aclBlockNewCfgEntry=aclBlockNewCfgEntry, aclNewCfgDstMACAddress=aclNewCfgDstMACAddress, acList=acList, 
aclNewCfgEthFmt=aclNewCfgEthFmt, aclNewCfgGroup=aclNewCfgGroup, aclCurCfgDstIPAddress=aclCurCfgDstIPAddress, aclGroupCurCfgIndex=aclGroupCurCfgIndex, aclNewCfgFilterActionSetCOS=aclNewCfgFilterActionSetCOS, aclNewCfgSrcMACAddress=aclNewCfgSrcMACAddress, aclBlockNewCfgIndex=aclBlockNewCfgIndex, aclNewCfgBlock=aclNewCfgBlock, aclCurCfgFilterActionSetCOS=aclCurCfgFilterActionSetCOS, aclCurCfgDstPortMask=aclCurCfgDstPortMask, aclCurCfgDstMACAddress=aclCurCfgDstMACAddress, aclNewCfgVLanMask=aclNewCfgVLanMask, aclBlockNewCfgMemberAcls=aclBlockNewCfgMemberAcls, aclGroupNewCfgMemberBlocks=aclGroupNewCfgMemberBlocks, aclGroupCurCfgEntry=aclGroupCurCfgEntry, aclNewCfgRemoveEgressPort=aclNewCfgRemoveEgressPort, aclBlockCurCfgIndex=aclBlockCurCfgIndex, aclCurCfgEntry=aclCurCfgEntry, aclNewCfgFilterAction=aclNewCfgFilterAction, acConfig=acConfig, aclCurCfgSrcMACAddress=aclCurCfgSrcMACAddress, aclCurCfgSrcIPAddress=aclCurCfgSrcIPAddress, aclCurCfgSrcPort=aclCurCfgSrcPort, aclNewCfgTable=aclNewCfgTable, aclCurCfgIndex=aclCurCfgIndex, aclGroupNewCfgAddBlock=aclGroupNewCfgAddBlock, PYSNMP_MODULE_ID=acl, aclNewCfg8021pPriority=aclNewCfg8021pPriority, aclGroupNewCfgRemoveAcl=aclGroupNewCfgRemoveAcl, aclNewCfgEthernetTypeValue=aclNewCfgEthernetTypeValue, aclNewCfgProtocol=aclNewCfgProtocol, aclCurCfgEgressPorts=aclCurCfgEgressPorts, aclGroupNewCfgIndex=aclGroupNewCfgIndex, aclCurCfg8021pPriority=aclCurCfg8021pPriority, aclNewCfgIndex=aclNewCfgIndex, aclBlockNewCfgTable=aclBlockNewCfgTable, aclCurCfgBlock=aclCurCfgBlock, aclGroupNewCfgTable=aclGroupNewCfgTable, aclNewCfgDstPort=aclNewCfgDstPort, aclNewCfgSrcPort=aclNewCfgSrcPort, aclBlockNewCfgDelete=aclBlockNewCfgDelete, aclBlockNewCfgAddAcl=aclBlockNewCfgAddAcl, aclCurCfgEthernetTypeName=aclCurCfgEthernetTypeName, aclNewCfgDstMACMask=aclNewCfgDstMACMask, aclCurCfgGroup=aclCurCfgGroup, aclGroupNewCfgAddAcl=aclGroupNewCfgAddAcl, aclNewCfgTCPFlags=aclNewCfgTCPFlags, aclCurCfgVLanMask=aclCurCfgVLanMask, 
aclGroupCurCfgMemberAcls=aclGroupCurCfgMemberAcls, aclCurCfgStatistics=aclCurCfgStatistics)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
9878,
2885,
2767,
56,
11401,
17,
12,
2246,
43,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
... | 2.677242 | 17,871 |
import string
import random
from mysql.connector import MySQLConnection, Error
import database_conf as cfg
import team
__select_ALL = "SELECT team_number, team_site, team_name, password, team_short_name, enabled, type, multi_login, team_full_name FROM TEAMS"
if __name__ == "__main__":
main() | [
11748,
4731,
198,
11748,
4738,
198,
6738,
48761,
13,
8443,
273,
1330,
33476,
32048,
11,
13047,
198,
11748,
6831,
62,
10414,
355,
30218,
70,
198,
11748,
1074,
198,
198,
834,
19738,
62,
7036,
796,
366,
46506,
1074,
62,
17618,
11,
1074,
... | 3.311111 | 90 |
from decimal import Decimal
from bank_account_app.choices import BankAccountActivityTypeChoices
from bank_account_app.utils import transfer_money
from credit_app.models import CreditContract
| [
6738,
32465,
1330,
4280,
4402,
198,
198,
6738,
3331,
62,
23317,
62,
1324,
13,
6679,
1063,
1330,
5018,
30116,
16516,
6030,
22164,
1063,
198,
6738,
3331,
62,
23317,
62,
1324,
13,
26791,
1330,
4351,
62,
26316,
198,
6738,
3884,
62,
1324,
... | 4.020408 | 49 |
from setuptools import setup, find_packages
setup(
    # Distribution name as it would appear on PyPI.
    name="NotebookArchaeology",
    version="0.1",
    #packages=find_packages(),
    #scripts=['say_hello.py'],

    # Project uses reStructuredText, so ensure that the docutils get
    # installed or upgraded on the target machine
    install_requires=[
        'sqlalchemy',
        'six',
        'ipython',
        'astroid',
        'jupyter',
        'nbformat',
        'future',
        'pygithub',
        'timeout-decorator',
        'yagmail[all]',
        'psycopg2-binary',
        'matplotlib_venn',
        'langdetect',
        # pathlib backport only needed on legacy interpreters (2.7 / <=3.4).
        'pathlib2;python_version<="3.4"',
        'pathlib2;python_version=="2.7"',
    ],

    # metadata for upload to PyPI
    author="Joao Felipe Pimentel",
    author_email="joaofelipenp@gmail.com",
    description="Notebook Archeology",
    license="MIT",
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
6425,
2070,
19895,
3609,
1435,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
16,
1600,
198,
220,
220,
220,
1303,
43789,
28,
197... | 2.185567 | 388 |
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../src")
from blackscholes.pde.Parabolic import Solver1d, Coef2d, Solver2d
from blackscholes.pde.Euro import Euro1d
from blackscholes.pde.American import Amer1d
from blackscholes.utils.Analytical import Analytical_Sol, GeometricAvg
from utils.Domain import Domain1d, Domain2d
from blackscholes.utils.Type import CallPutType
import unittest
import numpy as np
if __name__ == '__main__':
unittest.main()
| [
11748,
25064,
11,
28686,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
10,
1,
14,
40720,
10677,
4943,
198,
6738,
15102,
354,
4316,
13,
79,
2934,
... | 2.841176 | 170 |
# implementation of our model:Weighted Residual Attention Network for Super Resolution
from model import common
import torch.nn as nn
import torch
# concatenated ResidualAttentionBlock
# intermediate ResidualAttentionBlock
# embedded ResidualAttentionBlock
# dense ResidualAttentionGroup
| [
2,
7822,
286,
674,
2746,
25,
25844,
276,
1874,
312,
723,
47406,
7311,
329,
3115,
22406,
198,
198,
6738,
2746,
1330,
2219,
198,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
628,
628,
628,
198,
2,
1673,
36686,
515,
1... | 3.790123 | 81 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
class Stats(object):
"""A class to collect episode rewards statistics"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4871,
20595,
7,
15252,
2599,
198,
220,
... | 3.357143 | 56 |
#!/usr/bin/env python
import os
import sys
import h5py
import argparse
import pandas as pd
import numpy as np
from tronn.util.scripts import setup_run_logs
BLACKLIST_MOTIFS = [
    "SMARC",
    "NANOG"]


def get_blacklist_indices(df):
    """Return the index labels of `df` that contain a blacklisted motif.

    Args:
        df: pandas DataFrame whose index holds motif/TF names.

    Returns:
        List of index labels, in original order, matching at least one
        substring in BLACKLIST_MOTIFS.

    Note: any() short-circuits per label, so a label containing several
    blacklisted substrings is returned once (the original loop could
    append the same label once per matching substring).
    """
    return [label for label in df.index
            if any(motif in label for motif in BLACKLIST_MOTIFS)]
def parse_args(argv=None):
    """Build the command-line interface and parse arguments.

    Args:
        argv: optional list of argument strings; None (the default)
            falls back to sys.argv[1:], so existing parse_args() calls
            behave exactly as before. Passing a list makes the parser
            testable without touching process argv.

    Returns:
        argparse.Namespace with data_file, out_dir and prefix attributes.
    """
    parser = argparse.ArgumentParser(
        description="annotate grammars with functions")
    parser.add_argument(
        "--data_file",
        help="pval file produced after running intersect_pwms_and_rna.py")
    parser.add_argument(
        "-o", "--out_dir", dest="out_dir", type=str,
        default="./",
        help="out directory")
    parser.add_argument(
        "--prefix",
        help="prefix to attach to output files")
    args = parser.parse_args(argv)
    return args
def main():
    """Condense per-trajectory motif/TF results into summary tables.

    Reads significance results from args.data_file (HDF5), merges the TF
    correlation and motif (PWM) presence/pattern tables across all
    foreground trajectories, drops blacklisted entries, writes four
    tab-separated summary files and invokes the R plotting script.

    Fix: the final status line was a Python 2 print statement
    (`print plot_results`), a syntax error under Python 3; the
    parenthesized call below behaves identically on both.
    """
    # set up args
    args = parse_args()
    os.system("mkdir -p {}".format(args.out_dir))
    setup_run_logs(args, os.path.basename(sys.argv[0]).split(".py")[0])
    prefix = "{}/{}".format(args.out_dir, args.prefix)

    # GGR ordered trajectory indices
    with h5py.File(args.data_file, "r") as hf:
        foregrounds_keys = hf["pvals"].attrs["foregrounds.keys"]
    labels = [val.replace("_LABELS", "") for val in foregrounds_keys]
    days = ["day {0:.1f}".format(float(val))
            for val in [0,1,1.5,2,2.5,3,4.5,5,6]]

    # go through each index to collect
    for i in range(len(foregrounds_keys)):
        key = foregrounds_keys[i]
        #key = "TRAJ_LABELS-{}".format(index)
        with h5py.File(args.data_file, "r") as hf:
            sig = hf["pvals"][key]["sig"][:]  # read but currently unused
            rna_patterns = hf["pvals"][key]["rna_patterns"][:]
            pwm_patterns = hf["pvals"][key]["pwm_patterns"][:]
            correlations = hf["pvals"][key]["correlations"][:]
            hgnc_ids = hf["pvals"][key].attrs["hgnc_ids"]
            pwm_names = hf["pvals"][key].attrs["pwm_names"]

        # TF present: one correlation column per trajectory, HGNC-indexed
        tf_present = pd.DataFrame(
            correlations,
            index=hgnc_ids)
        tf_present.columns = [key]

        # rna pattern
        tf_data = pd.DataFrame(rna_patterns, index=hgnc_ids)

        # pwm present: arcsinh of the max PWM signal across timepoints
        pwm_present = pd.DataFrame(
            np.arcsinh(np.max(pwm_patterns, axis=1)),
            index=pwm_names)
        pwm_present.columns = [key]

        # pwm pattern; round-trip through a column to drop duplicate rows
        pwm_data = pd.DataFrame(pwm_patterns, index=pwm_names)
        pwm_data["pwm_names"] = pwm_data.index.values
        pwm_data = pwm_data.drop_duplicates()
        pwm_data = pwm_data.drop("pwm_names", axis=1)

        if i == 0:
            traj_tfs = tf_present
            traj_pwms = pwm_present
            tf_patterns = tf_data
            motif_patterns = pwm_data
        else:
            traj_tfs = traj_tfs.merge(tf_present, how="outer", left_index=True, right_index=True)
            traj_pwms = traj_pwms.merge(pwm_present, how="outer", left_index=True, right_index=True)
            tf_patterns = pd.concat([tf_patterns, tf_data])
            tf_patterns = tf_patterns.drop_duplicates()
            motif_patterns = pd.concat([motif_patterns, pwm_data])
            #motif_patterns = motif_patterns.drop_duplicates()

    # remove nans/duplicates
    traj_tfs = traj_tfs.fillna(0)
    traj_pwms = traj_pwms.fillna(0).reset_index().drop_duplicates()
    traj_pwms = traj_pwms.set_index("index")

    # reindex
    tf_patterns = tf_patterns.reindex(traj_tfs.index)
    motif_patterns = motif_patterns.groupby(motif_patterns.index).mean() # right now, just average across trajectories (though not great)
    motif_patterns = motif_patterns.reindex(traj_pwms.index)

    # fix column names
    traj_pwms.columns = labels
    motif_patterns.columns = days
    traj_tfs.columns = labels
    tf_patterns.columns = days

    # remove blacklist
    motif_indices = get_blacklist_indices(motif_patterns)
    traj_pwms = traj_pwms.drop(index=motif_indices)
    motif_patterns = motif_patterns.drop(index=motif_indices)
    tf_indices = get_blacklist_indices(tf_patterns)
    traj_tfs = traj_tfs.drop(index=tf_indices)
    tf_patterns = tf_patterns.drop(index=tf_indices)

    # filtering on specific TFs and motifs to exclude
    traj_tfs_file = "{}.tfs_corr_summary.txt".format(prefix)
    traj_pwms_file = "{}.pwms_present_summary.txt".format(prefix)
    tf_patterns_file = "{}.tfs_patterns_summary.txt".format(prefix)
    motif_patterns_file = "{}.pwms_patterns_summary.txt".format(prefix)
    traj_tfs.to_csv(traj_tfs_file, sep="\t")
    traj_pwms.to_csv(traj_pwms_file, sep="\t")
    tf_patterns.to_csv(tf_patterns_file, sep="\t")
    motif_patterns.to_csv(motif_patterns_file, sep="\t")

    # and R script?
    plot_results = "ggr_plot_motif_summary.R {} {} {} {}".format(
        traj_pwms_file, motif_patterns_file, traj_tfs_file, tf_patterns_file)
    print(plot_results)  # was: `print plot_results` (Python 2 syntax)
    os.system(plot_results)

    return
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
289,
20,
9078,
198,
11748,
1822,
29572,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
... | 2.090765 | 2,523 |
import argparse
import sys
import yaml
from utils import str2bool, str2list
from hub import Hub
from mirror import Mirror
if __name__ == '__main__':
mirror = HubMirror()
mirror.run()
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
331,
43695,
198,
198,
6738,
3384,
4487,
1330,
965,
17,
30388,
11,
965,
17,
4868,
198,
6738,
12575,
1330,
14699,
198,
6738,
10162,
1330,
17918,
628,
198,
198,
361,
11593,
3672,
834,
6624... | 3 | 65 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import requests
import time
import logging
import socket
import helpers
import tsqa.test_cases
import tsqa.utils
import tsqa.endpoint
log = logging.getLogger(__name__)
import SocketServer
class KeepaliveTCPHandler(SocketServer.BaseRequestHandler):
    """
    A subclass of RequestHandler which will return a connection uuid

    NOTE(review): no handler body is visible in this chunk; presumably a
    handle() method echoes a per-connection uuid back to each request --
    confirm against the full source.
    """
class KeepAliveInMixin(object):
    """Mixin for keep alive in.

    Provides helpers shared by the keepalive test cases; assumes the
    host class supplies _get_socket(), _headers_to_str() and the
    unittest assertIn() method.

    TODO: Allow protocol to be specified for ssl traffic
    """

    def _aux_error_path_post(self, protocol, headers=None):
        '''
        Ensure that sending a request with a body doesn't break the keepalive session

        protocol -- NOTE(review): unused in the visible body; presumably
            selects http vs https elsewhere, confirm.
        headers -- optional extra request headers, rendered via
            self._headers_to_str().
        '''
        # connect tcp
        s = self._get_socket()

        # Hand-built HTTP/1.1 POST; Content-Length: 10 matches the
        # 10-byte body '1234567890' exactly, so keepalive framing stays
        # intact across requests.
        request = ('POST / HTTP/1.1\r\n'
                   'Host: foobar.com\r\n'
                   'Content-Length: 10\r\n')
        request += self._headers_to_str(headers)
        request += '\r\n'
        request += '1234567890'

        # Send the same request 9 times over one connection; if the
        # server dropped the socket, reconnect once and resend.
        for x in xrange(1, 10):
            try:
                s.send(request)
            except IOError:
                s = self._get_socket()
                s.send(request)
            response = s.recv(4096)
            # Check if client disconnected
            if response:
                self.assertIn('HTTP/1.1 404 Not Found on Accelerator', response)
# TODO: refactor these tests, these are *very* similar, we should paramatarize them
# Some basic tests for auth_sever_session_private
| [
2,
220,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
220,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
220,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
220,
5115,
6634,
9238,... | 2.708283 | 833 |
"""
Read file into texts and calls.
It's ok if you don't understand how to read files
"""
import csv
with open('texts.csv', 'r') as f:
texts = list(csv.reader(f))
with open('calls.csv', 'r') as f:
calls = list(csv.reader(f))
"""
TASK 2: Which telephone number spent the longest time on the phone
during the period? Don't forget that time spent answering a call is
also time spent on the phone.
Print a message:
"<telephone number> spent the longest time, <total time> seconds, on the phone during
September 2016.".
"""
from collections import defaultdict
import pdb  # NOTE(review): debugging leftover, unused below

# Accumulate total seconds per telephone number: both the caller
# (call[0]) and the receiver (call[1]) spend call[3] seconds on the
# phone. defaultdict was already imported above but never used; it
# replaces the dict.get(..., 0) double-lookup pattern.
timeDict = defaultdict(int)
for call in calls:
    duration = int(call[3])
    timeDict[call[0]] += duration
    timeDict[call[1]] += duration

# Number with the largest accumulated time (first-seen wins on ties,
# same as the original lambda-keyed max).
max_time = max(timeDict, key=timeDict.get)
print(f"{max_time} spent the longest time, {timeDict[max_time]} seconds, on the phone during September 2016.")
| [
37811,
198,
5569,
2393,
656,
13399,
290,
3848,
13,
198,
1026,
338,
12876,
611,
345,
836,
470,
1833,
703,
284,
1100,
3696,
198,
37811,
198,
198,
11748,
269,
21370,
198,
198,
4480,
1280,
10786,
5239,
82,
13,
40664,
3256,
705,
81,
11537,... | 2.92283 | 311 |
from tkinter import ttk, Frame, N, E, W, LEFT, X, VERTICAL, Y
import source.gui.widgets as widgets
import json
import os
import source.classes.constants as CONST
| [
6738,
256,
74,
3849,
1330,
256,
30488,
11,
25184,
11,
399,
11,
412,
11,
370,
11,
12509,
9792,
11,
1395,
11,
569,
17395,
20151,
11,
575,
198,
11748,
2723,
13,
48317,
13,
28029,
11407,
355,
40803,
198,
11748,
33918,
198,
11748,
28686,
... | 2.963636 | 55 |
from pathlib import Path
import matplotlib.pyplot as plt
from lib import (
sigma, sigma_t, sigmaRatio, sigmaArgmax,
powerAproximation, degreeContinuoutyIndex,
randomConnectedEdges,
randomConnectedGraph,
randomTree,
randomSigmaOptAprox,
maxSigmaRatio_annealing,
localBasicNeighbor, globalBasicNeighbor,
globalTwoPartNeighbor,
neighborListToNx, nxToNeighborList,
simplePlot, simpleWriteG6, simpleReadG6, simpleSubplot
)
# Annealing search for graphs maximizing the sigma ratio, scanning
# graph orders n = 25, 30, ..., 55 with 100 random restarts each.
# nsim_*: iteration budgets for the global / local neighbor strategies.
nsim_global, nsim_local = 400, 100
nrange = range(25, 60, 5)
# Best sigma ratio found per n (parallel to nrange).
sigma_growth = []
for i in nrange:
    ropt, gopt = 0, None
    for _ in range(100):
        # i*(i-1)//2 is the edge count of the complete graph on i vertices.
        startedges = i * (i - 1) // 2
        g, rg = maxSigmaRatio_annealing(
            i, startedges, nsim_global + i // 2,
            globalTwoPartNeighbor
        )
        # NOTE(review): g and rg from the global pass above are
        # immediately overwritten here, so that result is discarded --
        # presumably the local pass was meant to refine it; confirm
        # against lib.maxSigmaRatio_annealing.
        g, r = maxSigmaRatio_annealing(
            i, startedges, nsim_local,
            localBasicNeighbor
        )
        # Keep the best local-pass result across restarts.
        if r >= ropt:
            ropt = r
            gopt = g
    simplePlot(gopt, path=f'riste/graph_{i}')
    sigma_growth.append(ropt)
    print(i, ropt)
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
9195,
1330,
357,
198,
220,
220,
220,
264,
13495,
11,
264,
13495,
62,
83,
11,
264,
13495,
29665,
952,
11,
264,
13495,
28100,
... | 2.113725 | 510 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 12:58:36 2020

@author: Edmund Lo

Connects to a running CoppeliaSim scene over the legacy remote API, computes
the UR3 forward kinematics for a few joint configurations, moves a reference
frame to the computed end-effector pose, then drives the joints there.
"""
try:
    import sim
except ImportError:
    # fix: catch ImportError specifically -- a bare `except:` would also
    # swallow unrelated failures (KeyboardInterrupt, SystemExit, ...).
    print ('--------------------------------------------------------------')
    print ('"sim.py" could not be imported. This means very probably that')
    print ('either "sim.py" or the remoteApi library could not be found.')
    print ('Make sure both are in the same folder as this file,')
    print ('or appropriately adjust the file "sim.py"')
    print ('--------------------------------------------------------------')
    print ('')

import sys
import numpy as np
import scipy as sp
from scipy import linalg as sl
import math
import matplotlib.pyplot as mpl
import time

# NOTE(review): the helpers referenced below (screw, zeroConfig,
# transformation, movePose, get_endPos, get_endAng) are not defined in this
# chunk -- confirm they are defined elsewhere in the original file. The
# comments that follow describe them.
#Function for retrieving joint center positions relative to base
#Returns lists of positions for all three axis
#Returns Position of End Effector
#Returns Orientation of End Effector
#Returns the skew of a vector
#Returns screw matrix of a screw axis vector
#Returns an array of screw axis of all joints
#Returns M, the matrix of body configuration in spatial frame when robot is in zero configuration
#Returns T(theta) transformation for Forward Kinematics with screw axis array and joint angle (deg) inputs
#Returns Euler Angles from rotation matrix
#Moves object to pose determined by Forward Kinematics

timeStep = 0.005   # simulation step [s] (kept for reference)
TIMEOUT = 5000     # remote-API timeout [ms]

#Define joint parameters
jointNum = 6       # the UR3 has six revolute joints

print ('Program started')
sim.simxFinish(-1) # just in case, close all opened connections
clientID=sim.simxStart('127.0.0.1',19997,True,True,5000,5) # Connect to CoppeliaSim
if clientID!=-1:
    print ('Connected to remote API server')
else:
    print ('Failed connecting to remote API server')
    sys.exit('Could not connect')

#Retrieve base handle
returnCode, baseHandle = sim.simxGetObjectHandle(clientID,'UR3_link1_visible', sim.simx_opmode_blocking)

#Retrieve End Effector Handle
returnCode, endHandle = sim.simxGetObjectHandle(clientID,'UR3_connection', sim.simx_opmode_blocking)

#Retrieve joint handles
# fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `int` is the documented replacement.
jointHandle = np.zeros((jointNum,),dtype=int)
for i in range(jointNum):
    returnCode, Handle = sim.simxGetObjectHandle(clientID,'UR3_joint'+str(i+1), sim.simx_opmode_blocking)
    if returnCode != sim.simx_return_ok:
        raise Exception('Could not get object handle for ' + str(i+1) + 'th joint')
    jointHandle[i] = Handle
print('Joint Handles Retrieved')

returnCode, forceSensorHandle = sim.simxGetObjectHandle(clientID,'UR3_connection', sim.simx_opmode_blocking)
print('Sensor Handle Retrieved')
time.sleep(2)

#Retrieve reference object handle
returnCode, refHandle = sim.simxGetObjectHandle(clientID,'referenceObject', sim.simx_opmode_blocking)
if returnCode != sim.simx_return_ok:
    raise Exception('Could not get object handle for Reference Object')
print('Reference Object Handle Retrieved')

# ==================================================================================================== #
# Start simulation
sim.simxStartSimulation(clientID, sim.simx_opmode_oneshot)

#Inital Postion
zero = [0*math.pi/180,0*math.pi/180,0*math.pi/180,0*math.pi/180,0*math.pi/180,0*math.pi/180]
time.sleep(1)

S = screw()
#print(S)
M = zeroConfig(zero)
print(M)
T = transformation(S,M,zero)
print("T at zero")
print(T)

#Move reference frame to inital position
movePose(T,clientID,refHandle)
time.sleep(2)

#Move joints to position
for i in range(jointNum):
    returnCode = sim.simxSetJointTargetPosition(clientID, jointHandle[i], zero[i],sim.simx_opmode_oneshot)
    time.sleep(0.5)

#
##First Postion
#targetPos1 = [90*math.pi/180,90*math.pi/180,-90*math.pi/180,90*math.pi/180,90*math.pi/180,90*math.pi/180]
#T = transformation(S,M,targetPos1)
#
#
##Move reference frame to first position
#movePose(T,clientID,refHandle)
#time.sleep(2)
#
##Move joints to position
#for i in range(jointNum):
#    returnCode = sim.simxSetJointTargetPosition(clientID, jointHandle[i], targetPos1[i],sim.simx_opmode_oneshot)
#    time.sleep(0.5)

#print("Actual Pos1: ")
#print(get_endPos())
#print("Actual Ang1: ")
#print(get_endAng())
#time.sleep(10)
#

#Second Postion
targetPos2 = [-90*math.pi/180,45*math.pi/180,-90*math.pi/180,90*math.pi/180,90*math.pi/180,90*math.pi/180]
T = transformation(S,M,targetPos2)

#Move reference frame to second position
movePose(T,clientID,refHandle)
time.sleep(2)

#Move joints to position
for i in range(jointNum):
    returnCode = sim.simxSetJointTargetPosition(clientID, jointHandle[i], targetPos2[i],sim.simx_opmode_oneshot)
    time.sleep(0.5)

print("Actual Pos2: ")
print(get_endPos())
print("Actual Ang2: ")
print(get_endAng())
time.sleep(10)

# Joint angles (radians) from an inverse-kinematics solve.
thetaik = [-1.5715412186172628, -0.6522139987009226, 1.571401230156468, -0.13185144974709573, 1.570737634968232, -1.5716190259658518]
T = transformation(S,M,thetaik)

#Move reference frame to the IK position
movePose(T,clientID,refHandle)
time.sleep(2)

#Move joints to position
for i in range(jointNum):
    returnCode = sim.simxSetJointTargetPosition(clientID, jointHandle[i], thetaik[i],sim.simx_opmode_oneshot)
    time.sleep(0.5)

print("Actual Posik: ")
print(get_endPos())
print("Actual Angik: ")
print(get_endAng())
time.sleep(10)

# Stop the simulation; the ping makes sure the last command arrived before
# the connection is closed.
sim.simxStopSimulation(clientID, sim.simx_opmode_oneshot)
sim.simxGetPingTime(clientID)
sim.simxFinish(clientID)
print("==================== ** Simulation Ended ** ====================")
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
1526,
2534,
1105,
25,
3365,
25,
2623,
12131,
201,
198,
201,
198,
31,
9800,
25,
35646,
6706,
201,
198,
37811,
201,
198,
201,
198... | 2.677435 | 2,074 |
import time
from pywps import Process, LiteralInput, LiteralOutput, UOM
from .process_defaults import process_defaults, LiteralInputD
| [
11748,
640,
198,
6738,
12972,
86,
862,
1330,
10854,
11,
25659,
1691,
20560,
11,
25659,
1691,
26410,
11,
471,
2662,
198,
6738,
764,
14681,
62,
12286,
82,
1330,
1429,
62,
12286,
82,
11,
25659,
1691,
20560,
35,
628
] | 3.552632 | 38 |
'''
Wrapper classes to implement independent Bayesian classifier combination (IBCC-VB)
using the Bayesian combination framework, along with variants of that model.
'''
from bayesian_combination.bayesian_combination import BC
| [
7061,
6,
198,
36918,
2848,
6097,
284,
3494,
4795,
4696,
35610,
1398,
7483,
6087,
357,
40,
2749,
34,
12,
44526,
8,
198,
3500,
262,
4696,
35610,
6087,
9355,
11,
1863,
351,
17670,
286,
326,
2746,
13,
198,
7061,
6,
198,
6738,
15489,
356... | 4.109091 | 55 |
import pymysql
from ..utils.to_do_exception import ToDoException
import os
| [
11748,
279,
4948,
893,
13976,
198,
6738,
11485,
26791,
13,
1462,
62,
4598,
62,
1069,
4516,
1330,
1675,
5211,
16922,
198,
11748,
28686,
628,
198
] | 3.08 | 25 |
import json
import shelve
from functools import wraps
from asyncio import iscoroutinefunction
def dump_args(args: tuple, kwargs: dict) -> str:
    """Serialize positional and keyword arguments into one deterministic
    string, suitable for use as a cache key (kwargs keys are sorted so the
    same call always yields the same string)."""
    positional = json.dumps(args)
    keyword = json.dumps(kwargs, sort_keys=True)
    return positional + keyword
def shelvecache(shelvename="cache"):
    """
    Decorator to wrap a function or coroutine with a memoizing callable
    (like functools.lru_cache but persisted on disk).

    Save the function arguments and result in a shelve.

    Parameters
    ----------
    shelvename: str
        Path of the shelve where the data will be saved.
    """
    # NOTE(review): `real_decorator` is not defined in this view -- the nested
    # decorator implementation presumably sits between the docstring and this
    # return in the original file. Confirm before relying on this function.
    return real_decorator
| [
11748,
33918,
198,
11748,
7497,
303,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
30351,
952,
1330,
318,
10215,
28399,
8818,
628,
198,
4299,
10285,
62,
22046,
7,
22046,
25,
46545,
11,
479,
86,
22046,
25,
8633,
8,
4613,
965,
25... | 2.901869 | 214 |
from typing import Tuple, List

# Advent of Code 2021 day 16 transmission: one long hexadecimal string.
# fix: renamed from `input`, which shadowed the `input` builtin.
PUZZLE_INPUT = """420D598021E0084A07C98EC91DCAE0B880287912A925799429825980593D7DCD400820329480BF21003CC0086028910097520230C80813401D8CC00F601881805705003CC00E200E98400F50031801D160048E5AFEFD5E5C02B93F2F4C11CADBBB799CB294C5FDB8E12C40139B7C98AFA8B2600DCBAF4D3A4C27CB54EA6F5390B1004B93E2F40097CA2ECF70C1001F296EF9A647F5BFC48C012C0090E675DF644A675DF645A7E6FE600BE004872B1B4AAB5273ED601D2CD240145F802F2CFD31EFBD4D64DD802738333992F9FFE69CAF088C010E0040A5CC65CD25774830A80372F9D78FA4F56CB6CDDC148034E9B8D2F189FD002AF3918AECD23100953600900021D1863142400043214C668CB31F073005A6E467600BCB1F4B1D2805930092F99C69C6292409CE6C4A4F530F100365E8CC600ACCDB75F8A50025F2361C9D248EF25B662014870035600042A1DC77890200D41086B0FE4E918D82CC015C00DCC0010F8FF112358002150DE194529E9F7B9EE064C015B005C401B8470F60C080371460CC469BA7091802F39BE6252858720AC2098B596D40208A53CBF3594092FF7B41B3004A5DB25C864A37EF82C401C9BCFE94B7EBE2D961892E0C1006A32C4160094CDF53E1E4CDF53E1D8005FD3B8B7642D3B4EB9C4D819194C0159F1ED00526B38ACF6D73915F3005EC0179C359E129EFDEFEEF1950005988E001C9C799ABCE39588BB2DA86EB9ACA22840191C8DFBE1DC005EE55167EFF89510010B322925A7F85A40194680252885238D7374C457A6830C012965AE00D4C40188B306E3580021319239C2298C4ED288A1802B1AF001A298FD53E63F54B7004A68B25A94BEBAAA00276980330CE0942620042E3944289A600DC388351BDC00C9DCDCFC8050E00043E2AC788EE200EC2088919C0010A82F0922710040F289B28E524632AE0"""

# Expand the hex transmission to a binary string. zfill restores the leading
# zero bits that int() strips: each hex digit must map to exactly 4 bits.
binary_input = bin(int(PUZZLE_INPUT, 16))[2:].zfill(len(PUZZLE_INPUT) * 4)

# NOTE(review): parse_packet is not defined in this chunk -- it presumably
# precedes this code in the original file. It returns (consumed_bits, packet).
_, p = parse_packet(binary_input)
print(p.evaluate())
| [
6738,
19720,
1330,
309,
29291,
11,
7343,
198,
198,
15414,
796,
37227,
27211,
35,
3270,
1795,
2481,
36,
405,
5705,
32,
2998,
34,
4089,
2943,
6420,
35,
8141,
36,
15,
33,
41655,
2078,
3720,
1065,
32,
24,
28676,
2079,
11785,
23,
25191,
... | 1.854345 | 817 |
import configparser
import click
import logist.setting as setting
import logist.libs.backlog_issues as backlog_issues
import logist.libs.todoist_tasks as todoist_tasks
from PyInquirer import style_from_dict, Token, prompt, Separator
| [
11748,
4566,
48610,
198,
11748,
3904,
198,
11748,
2604,
396,
13,
33990,
355,
4634,
198,
11748,
2604,
396,
13,
8019,
82,
13,
1891,
6404,
62,
37165,
355,
38780,
62,
37165,
198,
11748,
2604,
396,
13,
8019,
82,
13,
83,
24313,
396,
62,
8... | 3.219178 | 73 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
TerrainRelativeNavigation
A QGIS plugin
This plugin analyzes terrain for the purpose of automatic bearing-based robotic navigation
-------------------
begin : 2021-04-05
copyright : (C) 2021 by NASA JPL
email : russells@jpl.nasa.gov
***************************************************************************/
"""
__author__ = 'NASA JPL'
__date__ = '2021-04-05'
__copyright__ = '(C) 2021 by NASA JPL'
__revision__ = '$Format:%H$'
import os
import sys
import inspect
from qgis.core import QgsProcessingAlgorithm, QgsApplication
from .terrain_relative_navigation_provider import TerrainRelativeNavigationProvider
# Make this plugin's own folder importable (QGIS does not always put the
# plugin directory on sys.path), so sibling modules resolve correctly.
cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]
if cmd_folder not in sys.path:
    # Prepend so the plugin's modules shadow same-named modules elsewhere.
    sys.path.insert(0, cmd_folder)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
14,
17174,
17174,
4557,
8162,
198,
3813,
3201,
6892,
876,
30575,
7065,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.610526 | 380 |
import numpy as np
import matplotlib.pyplot as plt
import geom_fcns as geo
import render_fcns as ren
import os
import sys
##########################################################################################
# investigation #2 -- elliptical shape with crossed strips in the center 
##########################################################################################
#                          .  --  . 
#                      *            *
#                   *    \   |   /    *
#                 *       \  |  /      *
#                *         \ | /        *
#                * ----------|---------- *
#                *         / | \        *
#                 *       /  |  \      *
#                   *    /   |   \    *
#                      *            *
#                          '  --  '
##########################################################################################
# Output folder for the generated stills, movie and ground-truth arrays.
folder_name = 'synthetic_data_S2'
if not os.path.exists(folder_name):
	os.makedirs(folder_name)
##########################################################################################
##########################################################################################
# ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ #
# create geometry 
# ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ #
##########################################################################################
##########################################################################################
# define undeformed geometry: a ring of sarcomere chains on an ellipse, plus a
# horizontal, a vertical and two diagonal strips crossing at the center
##########################################################################################
x_cent = 0; y_cent = 0; z_cent = 0; ellipse_a = 20; ellipse_b = 25; 
th_min = 0; th_max = np.pi*2.0; num_sarc = 50 
sarc_list_1 = geo.sarc_list_ellipse_seg(x_cent, y_cent, z_cent, ellipse_a, ellipse_b, th_min, th_max, num_sarc)
# horizontal strip
x_end_1 = -15; y_end_1 = 0.0; z_end_1 = 0.0
x_end_2 = 15; y_end_2 = 0.0; z_end_2 = 0.0
num_sarc = 10 
sarc_list_2 = geo.sarc_list_line_seg(x_end_1,y_end_1,z_end_1,x_end_2,y_end_2,z_end_2,num_sarc)
# vertical strip
x_end_1 = 0; y_end_1 = -15.0; z_end_1 = 0.0
x_end_2 = 0; y_end_2 = 15.0; z_end_2 = 0.0
num_sarc = 11 
sarc_list_3 = geo.sarc_list_line_seg(x_end_1,y_end_1,z_end_1,x_end_2,y_end_2,z_end_2,num_sarc)
# diagonal strip (lower-left to upper-right)
x_end_1 = -12; y_end_1 = -12.0; z_end_1 = 0.0
x_end_2 = 12; y_end_2 = 12.0; z_end_2 = 0.0
num_sarc = 14 
sarc_list_4 = geo.sarc_list_line_seg(x_end_1,y_end_1,z_end_1,x_end_2,y_end_2,z_end_2,num_sarc)
# diagonal strip (lower-right to upper-left)
x_end_1 = 12; y_end_1 = -12.0; z_end_1 = 0.0
x_end_2 = -12; y_end_2 = 12.0; z_end_2 = 0.0
num_sarc = 14 
sarc_list_5 = geo.sarc_list_line_seg(x_end_1,y_end_1,z_end_1,x_end_2,y_end_2,z_end_2,num_sarc)
sarc_list = sarc_list_1 + sarc_list_2 + sarc_list_3 + sarc_list_4 + sarc_list_5
##########################################################################################
# define and apply the deformation gradient F_homog_iso
##########################################################################################
max_contract = 0.075
# One contraction magnitude per movie frame: rest, a full sine cycle of
# contraction, repeated three times (5 + 20 + 5 + 20 + 5 + 20 + 5 = 80 frames).
val_list = [] 
for kk in range(0,5):  val_list.append(0)
for kk in range(0,20): val_list.append(-1.0*max_contract*np.sin(kk/40*np.pi*2))
for kk in range(0,5):  val_list.append(0)
for kk in range(0,20): val_list.append(-1.0*max_contract*np.sin(kk/40*np.pi*2))
for kk in range(0,5):  val_list.append(0)
for kk in range(0,20): val_list.append(-1.0*max_contract*np.sin(kk/40*np.pi*2))
for kk in range(0,5):  val_list.append(0)
# NOTE(review): x0/y0/z0 and x_zone_1/x_zone_2 parameterize the homogeneous
# isotropic deformation helper -- presumably center and transition-zone radii;
# confirm against geom_fcns.transform_helper_F_homog_iso.
x0 = 20; y0 = 0; z0 = 0 
x_zone_1 = 5; x_zone_2 = 15
F_fcn = geo.transform_helper_F_homog_iso
sarc_list_ALL = geo.sarc_list_ALL_transform_F( sarc_list, val_list, x0, y0, z0, x_zone_1, x_zone_2, F_fcn)
##########################################################################################
# save geometry 
##########################################################################################
geo.plot_3D_geom(folder_name,sarc_list,'k','r-')
geo.pickle_sarc_list_ALL(folder_name,sarc_list_ALL)
sarc_array, sarc_array_normalized, x_pos_array, y_pos_array = geo.get_ground_truth(sarc_list_ALL)
geo.plot_ground_truth_timeseries(sarc_array_normalized, folder_name)
##########################################################################################
# ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ #
# render 
# ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ #
##########################################################################################
# z-disk radii/heights for the ellipse ring ...
is_normal_radius = True; is_normal_height = True 
avg_radius = 1.5; avg_height = .5
parameter_radius = 0.005; parameter_height = 0.002 
radius_list_1, height_list_1 = ren.z_disk_props( sarc_list_1, is_normal_radius, is_normal_height, avg_radius, avg_height, parameter_radius, parameter_height)
# ... and thinner disks for the four center strips.
sarc_list_center = sarc_list_2 + sarc_list_3 + sarc_list_4 + sarc_list_5 
is_normal_radius = True; is_normal_height = True 
avg_radius = 1.15; avg_height = .5
parameter_radius = 0.005; parameter_height = 0.002 
radius_list_2, height_list_2 = ren.z_disk_props( sarc_list_center, is_normal_radius, is_normal_height, avg_radius, avg_height, parameter_radius, parameter_height)
radius_list = radius_list_1 + radius_list_2
height_list = height_list_1 + height_list_2 
# --> begin loop, render each frame 
num_frames = len(val_list) 
img_list = [] 
for frame in range(0,num_frames):
	sarc_list = sarc_list_ALL[frame]
	# only keep sarcomeres that are within the frame 
	z_lower = -1; z_upper = 1 
	sarc_list_in_slice, radius_list_in_slice, height_list_in_slice = ren.sarc_list_in_slice_fcn(sarc_list, radius_list, height_list, z_lower, z_upper)
	# turn into a 3D matrix of points 
	x_lower = -30; x_upper = 30 
	y_lower = -30; y_upper = 30 
	z_lower = -5; z_upper = 5 
	dim_x = int((x_upper-x_lower)/2*6); dim_y = int((y_upper-y_lower)/2*6); dim_z = int(5)
	mean_rad = radius_list_in_slice; mean_hei = height_list_in_slice 
	bound_x = 10; bound_y = 10; bound_z = 10; val = 100 
	matrix = ren.slice_to_matrix(sarc_list_in_slice,dim_x,dim_y,dim_z,x_lower,x_upper,y_lower,y_upper,z_lower,z_upper, mean_rad, mean_hei, bound_x, bound_y, bound_z,val)
	# add random 
	mean = 10; std_random = 1 
	matrix = ren.random_val(matrix,mean,std_random)
	# add blur 
	sig = 1 
	matrix_blur = ren.matrix_gaussian_blur_fcn(matrix,sig)
	# convert matrix to image 
	slice_lower = 1; slice_upper = 4 
	image = ren.matrix_to_image(matrix_blur,slice_lower,slice_upper)
	# image list 
	img_list.append(image)
# NOTE: x_lower/x_upper/y_lower/y_upper/dim_x/dim_y below intentionally reuse
# the values left over from the last loop iteration (they are constant per frame).
ren.save_img_stills(img_list,folder_name)
ren.still_to_avi(folder_name,num_frames,False)
ren.ground_truth_movie(folder_name,num_frames,img_list,sarc_array_normalized, x_pos_array, y_pos_array,x_lower,x_upper,y_lower,y_upper,dim_x,dim_y)
#ren.still_to_avi(folder_name_render,num_frames,True)
# Ground-truth positions are exported in pixel coordinates of the rendered frames.
np.savetxt(folder_name + '/' + folder_name + '_GT_sarc_array_normalized.txt',sarc_array_normalized)
np.savetxt(folder_name + '/' + folder_name + '_GT_x_pos_array.txt',(x_pos_array - x_lower)/(x_upper-x_lower)*dim_x)
np.savetxt(folder_name + '/' + folder_name + '_GT_y_pos_array.txt',(y_pos_array - y_lower)/(y_upper-y_lower)*dim_y)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
4903,
296,
62,
16072,
5907,
355,
40087,
198,
11748,
8543,
62,
16072,
5907,
355,
8851,
198,
11748,
28686,
220,
198,
11748,
25064,
19... | 2.458114 | 2,853 |
"""Module for converting files into correct format"""
import json
import logging
import os
import platform
import re
import subprocess
class Converter:
    """Builds and runs ffmpeg command lines that convert media files into
    the project's target format (H.264 video, AAC+AC-3 audio, mov_text subs)."""

    def convert_file(self, dry_run=False, convert_video=False, convert_audio=True,
                     convert_subtitles=True):
        """Converts a single file.

        Keyword flags select which stream kinds to convert; a stream kind is
        only processed when the source file actually contains such a stream.
        With dry_run=True the ffmpeg command is logged instead of executed.
        """
        do_subtitles = convert_subtitles and self.file_stream_info.has_subtitle_stream
        do_audio = convert_audio and self.file_stream_info.has_audio_stream

        cmd = [self._get_ffmpeg_tool_location("ffmpeg"), "-hide_banner"]
        if self.is_unattended_mode:
            # Quiet output when nobody is watching the console.
            cmd += ["-loglevel", "warning", "-nostats"]
        cmd += ["-i", self.input_file]
        # Drop container-level metadata but keep chapter markers.
        cmd += ["-map_metadata", "-1", "-map_chapters", "0"]
        cmd += self.get_video_conversion_args(convert_video)
        cmd += self.get_audio_conversion_args(do_audio)
        cmd += self.get_subtitle_conversion_args(do_subtitles)
        cmd.append(self.output_file)

        self.logger.info("Convert %s -> %s", self.input_file, self.output_file)
        if dry_run:
            self.logger.info("Conversion arguments:\n%s", cmd)
        else:
            subprocess.run(cmd, check=True)
        self.convert_forced_subtitles(dry_run)

    def convert_forced_subtitles(self, dry_run=False):
        """Converts forced subtitle track, if any, into a sidecar
        `<output>.eng.forced.srt` file next to the output."""
        if not self.file_stream_info.has_forced_subtitle_stream:
            return
        base_name, _ = os.path.splitext(self.output_file)
        stream_index = self.file_stream_info.forced_subtitle_stream.index
        extract_cmd = [
            self._get_ffmpeg_tool_location("ffmpeg"),
            "-hide_banner",
            "-i", self.input_file,
            "-map", f"0:{stream_index}",
            f"{base_name}.eng.forced.srt",
        ]
        if dry_run:
            self.logger.info("Forced subtitle conversion arguments:\n%s", extract_cmd)
        else:
            subprocess.run(extract_cmd, check=True)

    def get_video_conversion_args(self, is_convert_video):
        """Gets ffmpeg command line arguments for video streams in the file."""
        args = ["-map", f"0:{self.file_stream_info.video_stream.index}", "-c:v"]
        if is_convert_video:
            # Re-encode to 1080p H.264 at a near-lossless CRF.
            args += ["libx264", "-vf", "scale=-1:1080", "-crf", "17",
                     "-preset", "medium"]
        else:
            args.append("copy")
        return args

    def get_audio_conversion_args(self, is_convert_audio):
        """Gets ffmpeg command line arguments for audio streams in the file.

        When converting, the source audio stream is mapped twice: a default
        stereo AAC track plus a non-default surround AC-3 track.
        """
        stream = self.file_stream_info.audio_stream
        args = [
            "-map", f"0:{stream.index}",
            "-metadata:s:a:0", "language=eng",
            "-disposition:a:0", "default",
            "-c:a:0",
        ]
        if not is_convert_audio:
            args.append("copy")
            return args
        # Track 0: stereo AAC; copy when the source already complies.
        if stream.codec == "aac" and stream.channel_count <= 2:
            args.append("copy")
        else:
            args += ["aac", "-b:a:0", "160k",
                     "-ac:a:0", f"{min(stream.channel_count, 2)}"]
        # Track 1: surround AC-3 (up to 5.1), not flagged as default.
        args += [
            "-map", f"0:{stream.index}",
            "-metadata:s:a:1", "language=eng",
            "-disposition:a:1", "0",
            "-c:a:1",
        ]
        if stream.codec in ("ac3", "eac3"):
            args.append("copy")
        else:
            args += ["ac3", "-b:a:1", "640k",
                     "-ac:a:1", f"{min(stream.channel_count, 6)}"]
        return args

    def get_subtitle_conversion_args(self, is_convert_subtitles):
        """Gets ffmpeg command line arguments for subtitle streams in the file."""
        if not is_convert_subtitles:
            return []
        stream = self.file_stream_info.subtitle_stream
        # mov_text is the MP4-native text codec; copy when already there.
        subtitle_codec = "copy" if stream.codec == "mov_text" else "mov_text"
        return [
            "-map", f"0:{stream.index}",
            "-metadata:s:s:0", "language=eng",
            "-disposition:s:0", "default",
            "-c:s", subtitle_codec,
        ]
class FileStreamInfo:
    """Represents the file stream information of a media file"""

    # NOTE(review): a bare @staticmethod stacked directly on @classmethod is
    # almost certainly an artifact (a decorator orphaned from a helper that is
    # not visible in this chunk); as written it makes read_stream_info
    # uncallable through the class. Confirm against version control.
    @staticmethod
    @classmethod
    def read_stream_info(cls, input_file, ffprobe_location = "ffprobe"):
        """Reads the stream information from a file and creates a FileStreamInfo object"""
        # Best candidate found so far for each role, filled while scanning.
        streams = {
            "video": None,
            "audio": None,
            "subtitle": None,
            "forced_subtitle": None
        }
        metadata = FileStreamInfo._probe_file(input_file, ffprobe_location)
        for stream_metadata in metadata["streams"]:
            stream = FileStreamInfo.StreamInfo(stream_metadata)
            # First video stream wins.
            if stream.is_video and streams["video"] is None:
                streams["video"] = stream
            # Prefer the default audio track; otherwise the first English one.
            if stream.is_audio:
                if (stream.is_default or (stream.language == "eng" and streams["audio"] is None)):
                    streams["audio"] = stream
            # Only text-based (subrip/mov_text) English subtitles qualify;
            # forced and regular tracks are kept separately.
            if (stream.is_subtitle
                    and (stream.codec in ("subrip" , "mov_text"))
                    and stream.language == "eng"):
                if stream.is_forced:
                    if streams["forced_subtitle"] is None:
                        streams["forced_subtitle"] = stream
                elif streams["subtitle"] is None:
                    streams["subtitle"] = stream
        return cls(streams)

    @property
    def has_video_stream(self):
        """Gets a value indicating whether the file has a video stream"""
        return self.video_stream is not None

    @property
    def has_audio_stream(self):
        """Gets a value indicating whether the file has an audio stream"""
        return self.audio_stream is not None

    @property
    def has_subtitle_stream(self):
        """Gets a value indicating whether the file has a subtitle stream"""
        return self.subtitle_stream is not None

    @property
    def has_forced_subtitle_stream(self):
        """Gets a value indicating whether the file has a forced subtitle stream"""
        return self.forced_subtitle_stream is not None

    # NOTE(review): the video_stream/audio_stream/... attributes read below are
    # not assigned anywhere in this view; presumably __init__ (not visible)
    # unpacks the `streams` dict built by read_stream_info. Confirm.
    def show(self):
        """Displays the file stream information"""
        print("Stream Info:")
        print(f"video stream: index={self.video_stream.index}, codec={self.video_stream.codec}")
        print((f"audio stream: index={self.audio_stream.index}, codec={self.audio_stream.codec}, "
               f"channels={self.audio_stream.channel_count}"))
        print((f"subtitle stream: index={self.subtitle_stream.index}, "
               f"codec={self.subtitle_stream.codec}"))
        print((f"forced subtitle stream: index={self.forced_subtitle_stream.index}, "
               f"codec={self.forced_subtitle_stream.codec}"))

    class StreamInfo:
        """Gets information about an individual stream within a file"""
        # NOTE(review): codec_type (and codec/language/is_default/is_forced
        # used above) are set by an __init__ not visible in this chunk,
        # presumably from the ffprobe stream metadata dict.

        @property
        def is_video(self):
            """Gets a value indicating whether this stream is a video stream"""
            return self.codec_type == "video"

        @property
        def is_audio(self):
            """Gets a value indicating whether this stream is an audio stream"""
            return self.codec_type == "audio"

        @property
        def is_subtitle(self):
            """Gets a value indicating whether this stream is a subtitle stream"""
            return self.codec_type == "subtitle"
class FileMapper:
    """Maps file names to a Plex-friendly format"""
    # NOTE(review): self.episode_db and self.file_name_match_regex are set by
    # an __init__ that is not visible in this chunk.

    def find_keyword_match(self, partial_file_name):
        """Attempts to find a keyword match based on the partial file name.

        Returns the first tracked-series keyword that occurs (case-insensitively)
        in the given name fragment, or None when nothing matches.
        """
        for tracked_series in self.episode_db.get_all_tracked_series():
            for keyword in tracked_series.keywords:
                if keyword.lower() in partial_file_name.lower():
                    return keyword
        return None

    def map_files(self, source, destination, keyword=None):
        """Maps a file given a source and destination, handling individual files and directories.

        Returns a list of (source_path, destination_path) tuples.
        """
        file_map = []
        if os.path.isdir(source):
            # NOTE(review): when `source` is a directory, dirname() yields its
            # parent unless the path ends with a separator -- confirm callers
            # pass a trailing-slash path or that listing the parent is intended.
            src_dir = os.path.dirname(source)
            dest_dir = destination
            if not os.path.isdir(destination):
                dest_dir = os.path.dirname(destination)
            # Sort for a deterministic mapping order.
            file_list = os.listdir(src_dir)
            file_list.sort()
            for input_file in file_list:
                match = re.match(self.file_name_match_regex, input_file, re.IGNORECASE)
                if match is not None:
                    # Lazily resolve the series keyword from the first matching
                    # file name, then reuse it for the rest of the directory.
                    if keyword is None:
                        keyword = self.find_keyword_match(match.group(1))
                    series_metadata = self.episode_db.get_tracked_series_by_keyword(keyword)
                    # group(2)/group(3) are the season and episode numbers.
                    episode_metadata = series_metadata.get_episode(
                        int(match.group(2)), int(match.group(3)))
                    if episode_metadata is not None:
                        converted_file_name = f"{episode_metadata.plex_title}.mp4"
                        file_map.append((os.path.join(src_dir, input_file),
                                         os.path.join(dest_dir, converted_file_name)))
        else:
            if os.path.isdir(destination):
                # Single file into a directory: keep the base name, force .mp4.
                source_file_base, _ = os.path.splitext(os.path.basename(source))
                file_map.append((source, os.path.join(destination, f"{source_file_base}.mp4")))
            else:
                file_map.append((source, destination))
        return file_map
| [
37811,
26796,
329,
23202,
3696,
656,
3376,
5794,
37811,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
302,
198,
11748,
850,
14681,
628,
198,
4871,
35602,
353,
25,
628,
220,
220,
220,
37227,
... | 2.134162 | 5,307 |
# -*- coding: utf-8 -*-
# @Time : 2020-04-11 12:34
# @Author : speeding_moto
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Workbook containing the EUNITE electricity-load competition data.
EUNITE_PATH = "dataset/eunite.xlsx"
# Worksheet within that workbook holding the observations.
PARSE_TABLE_NAME = "mainData"
def load_eunite_data():
    """Load the EUNITE data set and return (features, targets) as numpy arrays,
    with all engineered features already applied."""
    frame = open_file()
    features, targets = generate_features(frame)
    return features.values, targets.values
def generate_features(df):
    """
    Build the feature matrix and target vector from the raw sheet: the Month
    and Day class numbers are expanded to one-hot columns, the temperature is
    normalized, and the original categorical/target columns are dropped.
    """
    months = df["Month"]
    days = df["Day"]

    # Expand 1-based class labels into one-hot indicator columns.
    one_hot_months = cast_to_one_hot(months, n_classes=12)
    days = cast_to_one_hot(days, n_classes=31)

    one_hot_months = pd.DataFrame(one_hot_months)
    days = pd.DataFrame(days)

    # Index-aligned merges append the indicator columns to the frame.
    df = pd.merge(left=df, right=one_hot_months, left_index=True, right_index=True)
    df = pd.merge(left=df, right=days, left_index=True, right_index=True)

    # Target: the daily maximum electrical load.
    y = df['Max Load']

    # NOTE(review): `normalization` is not defined in this view -- confirm it
    # is defined/imported elsewhere in the original module.
    temperature = normalization(df['Temp'].values)
    temperature = pd.DataFrame(temperature)
    df = pd.merge(left=df, right=temperature, left_index=True, right_index=True)

    # Drop identifiers, raw categoricals, the target and the raw temperature.
    drop_columns = ["ID", "Month", "Day", "Year", "Max Load", "Temp"]
    df.drop(drop_columns, axis=1, inplace=True)

    print(df[0:10], "\n", y[0])
    return df, y
def cast_to_one_hot(data, n_classes):
    """
    Convert 1-based class labels to one-hot row vectors.

    Parameters
    ----------
    data : array-like of int
        1-based class labels (e.g. month 1-12, day 1-31).
    n_classes : int
        Number of distinct classes.

    Returns
    -------
    numpy.ndarray
        Matrix of shape (len(data), n_classes) with a single 1.0 per row.
    """
    # fix: the original indexed with a nested list, np.eye(...)[[data - 1]],
    # which relied on long-deprecated non-tuple-sequence indexing and yields a
    # spurious leading axis on modern NumPy. Plain fancy indexing produces the
    # intended (len(data), n_classes) matrix.
    labels = np.asarray(data)
    return np.eye(n_classes)[labels - 1]
def open_file():
    """
    Open the EUNITE Excel workbook and parse its main data sheet into a
    DataFrame.
    """
    workbook = pd.ExcelFile(EUNITE_PATH)
    return workbook.parse(PARSE_TABLE_NAME)
if __name__ == '__main__':
    # Smoke test: plot the raw data, then build and inspect the features.
    df = open_file()
    # NOTE(review): show_month_temperature_load_image is not defined in this
    # view -- confirm it is defined/imported elsewhere in the original module.
    show_month_temperature_load_image(df)

    x, y = load_eunite_data()
    print(x.shape)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
12131,
12,
3023,
12,
1157,
1105,
25,
2682,
198,
2,
2488,
13838,
220,
1058,
26347,
62,
76,
2069,
198,
198,
11748,
299,
32152,
355,
... | 2.450065 | 771 |
def _max_gap_after_removal(values, index):
    """Largest gap between consecutive values after dropping values[index]."""
    remaining = values[:index] + values[index + 1:]
    return max(remaining[j + 1] - remaining[j] for j in range(len(remaining) - 1))


def solve(values):
    """Smallest possible maximum adjacent gap after removing one interior value.

    Mirrors the original script: 1000 is the fallback answer when the list has
    no interior element (fewer than 3 values).
    """
    best = 1000
    for i in range(1, len(values) - 1):
        # fix: the original used list.remove(values[i]), which deletes the
        # first occurrence of that VALUE -- the wrong element when duplicates
        # exist. Slicing removes exactly the element at position i.
        best = min(_max_gap_after_removal(values, i), best)
    return best


if __name__ == "__main__":
    # fix: the first line (the element count) was bound to an unused, never-
    # iterated map object; it only needs to be consumed.
    input()
    values = list(map(int, input().split()))
    print(solve(values))
| [
77,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
75,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
198,
504,
796,
8576,
198,
1640,
1312,
287,
2837,
7,
16,
11,
11925,
7,
75,
13219,
16,
2599,
198,
220,
... | 1.818792 | 149 |
#!/usr/bin/env python
#
# Copyright (c) 1996-2011, SR Research Ltd., All Rights Reserved
#
#
# For use by SR Research licencees only. Redistribution and use in source
# and binary forms, with or without modification, are NOT permitted.
#
#
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
#
# Neither name of SR Research Ltd nor the name of contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
# IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# $Date: 2011/04/13 18:48:21 $
#
#
import pylink
import pyglet
import ctypes
import math
import sys
import array
from pyglet.gl import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
8235,
12,
9804,
11,
16808,
4992,
12052,
1539,
1439,
6923,
33876,
198,
2,
198,
2,
198,
2,
1114,
779,
416,
16808,
4992,
17098,
274,
691,
13,
2297,
396,
... | 3.393873 | 457 |
"""
Kth Largest Element in an Array:
Find the kth largest element in an unsorted array. Note that it is the kth largest element in
the sorted order, not the kth distinct element.
Example 1:
Input: [3,2,1,5,6,4] and k = 2
Output: 5
Example 2:
Input: [3,2,3,1,2,4,5,5,6] and k = 4
Output: 4
Note:
You may assume k is always valid, 1 ≤ k ≤ array's length.
"""
# https://www.geeksforgeeks.org/quickselect-algorithm/
# This is example of Quickselect algorithm
# NOTE(review): `Solution` (with findKthLargest, a quickselect implementation
# per the link above) is not defined in this view -- confirm the class exists
# elsewhere in the original file before running these self-checks.
a = Solution()
assert 5 == a.findKthLargest([3, 2, 1, 5, 6, 4], 2)
assert 4 == a.findKthLargest([3, 2, 3, 1, 2, 4, 5, 5, 6], 4)
| [
37811,
198,
42,
400,
406,
853,
395,
11703,
287,
281,
15690,
25,
198,
198,
16742,
262,
479,
400,
4387,
5002,
287,
281,
5576,
9741,
7177,
13,
5740,
326,
340,
318,
262,
479,
400,
4387,
5002,
287,
198,
1169,
23243,
1502,
11,
407,
262,
... | 2.558442 | 231 |
import unittest
import yoda
from click.testing import CliRunner
class TestSuggestDrink(unittest.TestCase):
"""
Test for the following commands:
| Module: food
| command: suggest_drinks
""" | [
11748,
555,
715,
395,
198,
11748,
331,
11329,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
628,
198,
4871,
6208,
43857,
6187,
676,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
6208,
... | 2.772152 | 79 |
import requests

# Fetch the demo page used by the web-scraping tutorial; `page` is the
# Response object (status code, headers and HTML body).
page = requests.get("http://dataquestio.github.io/web-scraping-pages/simple.html")
# Bare expression: echoes the Response (e.g. <Response [200]>) in a
# REPL/notebook; it has no effect when run as a script.
page
| [
11748,
7007,
198,
198,
7700,
796,
7007,
13,
1136,
7203,
4023,
1378,
7890,
6138,
952,
13,
12567,
13,
952,
14,
12384,
12,
1416,
2416,
278,
12,
31126,
14,
36439,
13,
6494,
4943,
198,
7700,
198
] | 3 | 35 |
import imutils
import cv2
from imutils.paths import list_images
import numpy as np
import matplotlib.pyplot as plt
from skimage import exposure as ex
import imageio
import sys
# Histogram-equalize every image under the hard-coded folder and write the
# result back IN PLACE (the source images are overwritten).
for imagePath in list_images("/Users/rohanbanerjee/Documents/SukShi19/dhe"):
	img = cv2.imread(imagePath)
	print(imagePath)
	# img = cv2.resize(image, (25, 25))
	# img = cv2.imread('6.jpg')
	if(len(img.shape)==2): #gray
		outImg = ex.equalize_hist(img[:,:])*255
	elif(len(img.shape)==3): #RGB
		# Equalize each channel independently.
		outImg = np.zeros((img.shape[0],img.shape[1],3))
		for channel in range(img.shape[2]):
			outImg[:, :, channel] = ex.equalize_hist(img[:, :, channel])*255
	# NOTE(review): if neither branch runs (e.g. cv2.imread returned None or an
	# unexpected shape), outImg is stale/undefined here -- confirm inputs are
	# always 2-D or 3-D images.
	outImg[outImg>255] = 255
	outImg[outImg<0] = 0
	cv2.imwrite(imagePath, outImg)
| [
11748,
545,
26791,
198,
11748,
269,
85,
17,
198,
6738,
545,
26791,
13,
6978,
82,
1330,
1351,
62,
17566,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
9060,
1330,
711... | 2.15427 | 363 |
# encoding: UTF-8
# Copyright 2016-2017 Cedric Mesnil <cedric.mesnil@ubinity.com>, Ubinity SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Elliptic Curve and Point manipulation
.. moduleauthor:: Cédric Mesnil <cedric.mesnil@ubinity.com>
"""
#python 2 compatibility
from builtins import int,pow
import binascii
import random
def decode_scalar_25519(k):
    """Clamp and decode a Curve25519 scalar (RFC 7748 / draft-irtf-cfrg-eddsa).

    Args:
        k (bytes): 32-byte little-endian scalar to decode

    Returns:
        int: the clamped scalar as an integer
    """
    buf = bytearray(k)
    # Clamping: clear the three lowest bits, clear the top bit, set bit 254.
    buf[0] &= 0xF8
    buf[31] = (buf[31] & 0x7F) | 0x40
    return int.from_bytes(bytes(buf), 'little')
def encode_scalar_25519(k):
    """Clamp and encode a Curve25519 scalar (RFC 7748 / draft-irtf-cfrg-eddsa).

    Args:
        k (int): scalar to encode

    Returns:
        bytes: 32-byte little-endian clamped scalar

    Note:
        Bugfix: the original discarded the result of ``k.to_bytes(32,
        'little')`` and then called ``bytearray(k)`` on the *integer*, which
        builds a zero-filled buffer of length ``k`` (or raises IndexError for
        small ``k``). The conversion result is now kept.
    """
    k = bytearray(k.to_bytes(32, 'little'))
    # Clamping: clear the three lowest bits, clear the top bit, set bit 254.
    k[0] &= 0xF8
    k[31] = (k[31] & 0x7F) | 0x40
    k = bytes(k)
    return k
class Curve:
    """Elliptic curve abstraction.

    Do not instantiate this class directly: use :func:`Curve.get_curve` to
    obtain one of the predefined curves, or build a concrete subclass
    (Weierstrass / twisted Edward / Montgomery) with your own parameters.

    Supported well-known elliptic curve families are:
       - Short Weierstrass form:  y² = x³ + a*x + b
       - Twisted Edward:          a*x² + y² = 1 + d*x²*y²

    Attributes:
       name      (str)  : curve name, as accepted by get_curve and listed by get_curve_names
       size      (int)  : bit size of the curve
       a         (int)  : first curve parameter
       b, d      (int)  : second curve parameter
       field     (int)  : prime of the underlying field
       generator (Point): generator point of the curve
       order     (int)  : order of the generator
    """
    @staticmethod
    def get_curve(name):
        """Return a Curve object according to its name.

        Args:
            name (str): curve name to retrieve (see :func:`get_curve_names`)

        Returns:
            Curve: the matching curve instance, or None if the name is unknown
        """
        # Look the name up in the module-level `curves` domain list.
        l = [c for c in curves if c['name']==name]
        if len(l) == 0:
            return None
        cp = l[0]
        # Dispatch on the declared curve family.
        if cp['type'] == WEIERSTRASS:
            return WeierstrassCurve(cp)
        if cp['type'] == TWISTEDEDWARD:
            return TwistedEdwardCurve(cp)
        if cp['type'] == MONTGOMERY:
            return MontgomeryCurve(cp)
        return None
    @staticmethod
    def get_curve_names():
        """Return all known curve names as a list of str."""
        return [c['name'] for c in curves]
    def is_on_curve(self, P):
        """Check whether P satisfies this curve's equation.

        The default curve attached to P is ignored.

        Args:
            P (Point): point to check

        Returns:
            bool: True if P is on the curve, False otherwise
        """
        raise NotImplementedError('Abstract method is_on_curve')
    def add_point(self, P,Q):
        """Return the sum R = P+Q (doubling is handled automatically).

        P and Q are assumed to be on this curve; their attached curve is
        ignored.

        Args:
            P (Point): first point to add
            Q (Point): second point to add

        Returns:
            Point: a new Point R = P+Q

        Raises:
            ECPyException: with "Point not on curve" if R is not on the
                curve, meaning either P or Q was not on it.
        """
        raise NotImplementedError('Abstract method add_point')
    def sub_point(self, P,Q):
        """Return the difference R = P-Q, implemented as P + (-Q).

        Args:
            P (Point): point to subtract from
            Q (Point): point to subtract

        Returns:
            Point: a new Point R = P-Q
        """
        return self.add_point(P,Q.neg())
    def mul_point(self, k, P):
        """Return the scalar multiplication R = k*P.

        P is assumed to be on this curve; its attached curve is ignored.

        Args:
            P (Point): point to multiply
            k (int)  : scalar

        Returns:
            Point: a new Point R = k*P

        Raises:
            ECPyException: with "Point not on curve" if P was not on the curve.
        """
        raise NotImplementedError('Abstract method mul_point')
    def encode_point(self, P):
        """Encode/compress a point according to its curve's convention."""
        raise NotImplementedError('Abstract method encode_point')
        pass
    def decode_point(self, eP):
        """Decode/decompress a point according to its curve's convention."""
        raise NotImplementedError('Abstract method _point decode_point')
        pass
    @staticmethod
    def _sqrt(n,p,sign=0):
        """Generic Tonelli–Shanks modular square root.

        Returns a square root of n mod p whose least-significant bit matches
        `sign`, or None if n is not a quadratic residue.
        """
        #check Euler criterion: n must be a quadratic residue mod p
        if pow(n,(p-1)//2,p) != 1:
            return None
        #compute square root: write p-1 = q * 2^s with q odd
        p_1 = p-1
        s = 0
        q = p_1
        while q & 1 == 0:
            q = q>>1
            s = s+1
        if s == 1:
            # p ≡ 3 (mod 4): direct exponentiation shortcut
            r = pow(n,(p+1)//4,p)
        else:
            # find a quadratic non-residue z
            z = 2
            while pow(z,(p-1)//2,p) == 1:
                z = z+1
            c = pow(z,q,p)
            r = pow(n,(q+1)//2,p)
            t = pow(n,q,p)
            m = s
            # iteratively reduce the order of t until t == 1
            while True:
                if t == 1:
                    break
                else:
                    for i in range(1,m):
                        if pow(t,pow(2,i),p) == 1:
                            break
                    b = pow(c,pow(2,m-i-1),p)
                    r = (r*b) %p
                    t = (t*b*b) %p
                    c = (b*b) %p
                    m = i
        # select the root whose low bit matches the requested sign
        if sign:
            sign = 1
        if r &1 != sign:
            r = p-r
        return r
class WeierstrassCurve(Curve):
    """An elliptic curve defined by the equation: y² = x³ + a*x + b.

    The given domain must be a dictionary providing the following keys/values:
       - name      (str)    : curve unique name
       - size      (int)    : bit size
       - a         (int)    : `a` equation coefficient
       - b         (int)    : `b` equation coefficient
       - field     (int)    : field value
       - generator (int[2]) : x,y coordinates of the generator
       - order     (int)    : order of the generator
       - cofactor  (int)    : cofactor

    *Note*: you should not use the constructor directly; use
    :func:`Curve.get_curve` to ensure a supported curve is used.

    Args:
        domain (dict): a dictionary providing the curve parameters
    """
    def __init__(self,domain):
        """ Build a new short Weierstrass curve with the provided parameters. """
        self._domain = {}
        # NOTE(review): _set (copies the listed domain keys onto the instance)
        # is not visible in this chunk — presumably defined on Curve or
        # stripped by the extraction; confirm against upstream ECPy.
        self._set(domain, ('name','type', 'size',
                           'a','b','field','generator','order','cofactor'))
    def is_on_curve(self, P):
        """ See :func:`Curve.is_on_curve` """
        q = self.field
        x = P.x
        sq3x = (x*x*x)%q
        y = P.y
        sqy = (y*y)%q
        # on curve iff y² == x³ + a*x + b (mod q)
        left = sqy
        right = (sq3x+self.a*x+self.b)%q
        return left == right
    def add_point(self, P,Q):
        """ See :func:`Curve.add_point` """
        q = self.field
        # work in Jacobian coordinates; use the doubling formula when P == Q
        if (P == Q):
            Px,Py,Pz = self._aff2jac(P.x,P.y, q)
            x,y,z = self._dbl_jac(Px,Py,Pz, q,self.a)
        else:
            Px,Py,Pz = self._aff2jac(P.x,P.y, q)
            Qx,Qy,Qz = self._aff2jac(Q.x,Q.y, q)
            x,y,z = self._add_jac(Px,Py,Pz, Qx,Qy,Qz, q)
        x,y = self._jac2aff(x,y,z, q)
        PQ = Point(x,y, self)
        return PQ
    def mul_point(self, k, P):
        """ See :func:`Curve.mul_point` """
        q = self.field
        a = self.a
        x1,y1,z1 = self._aff2jac(P.x,P.y, q)
        # ladder-style double-and-add over the bits of k, MSB first
        k = bin(k)
        k = k[2:]
        sz = len(k)
        x2,y2,z2 = self._dbl_jac(x1,y1,z1, q,a)
        for i in range(1, sz):
            if k[i] == '1' :
                x1,y1,z1 = self._add_jac(x2,y2,z2, x1,y1,z1, q)
                x2,y2,z2 = self._dbl_jac(x2,y2,z2, q,a)
            else:
                x2,y2,z2 = self._add_jac(x1,y1,z1, x2,y2,z2, q)
                x1,y1,z1 = self._dbl_jac(x1,y1,z1, q,a)
        x,y = self._jac2aff(x1,y1,z1, q)
        return Point(x,y,self)
    def y_recover(self,x,sign=0):
        """Recover a y coordinate such that (x,y) is on this curve.

        Args:
            x (int)   : x coordinate
            sign (int): requested parity of y

        Returns:
            int: y, or None if no square root exists
        """
        p = self.field
        y2 = (x*x*x + self.a*x + self.b)%p
        y = self._sqrt(y2,p,sign)
        return y
    def encode_point(self, P, compressed=False):
        """ Encodes a point P according to *P1363-2000*.

        Args:
            P: point to encode
            compressed (bool): emit [02 | x | sign] instead of [04 | x | y]

        Returns
            list : encoded point [04 | x | y] or [02 | x | sign]
            (NOTE(review): returns a list of ints, not bytes)
        """
        size = self.size>>3
        x = bytearray(P.x.to_bytes(size,'big'))
        y = bytearray(P.y.to_bytes(size,'big'))
        if compressed:
            # compressed form keeps only the parity of y
            y = [P.y&1]
            enc = [2]
        else:
            enc = [4]
        enc.extend(x)
        enc.extend(y)
        return enc
    def decode_point(self, eP):
        """ Decodes a point P according to *P1363-2000*.

        Args:
            eP (bytes) : encoded point

        Returns
            Point : decoded point
        """
        size = self.size>>3
        xy = bytearray(eP)
        if xy[0] == 2:
            # compressed: [02 | x | sign] — recover y from x and the sign byte
            x = xy[1:1+size]
            x = int.from_bytes(x,'big')
            y = self.y_recover(x,xy[1+size])
        elif xy[0] == 4:
            # uncompressed: [04 | x | y]
            x = xy[1:1+size]
            x = int.from_bytes(x,'big')
            y = xy[1+size:1+size+size]
            y = int.from_bytes(y,'big')
        else:
            raise ECPyException("Invalid encoded point")
        return Point(x,y,self,False)
    # NOTE(review): the four bare @staticmethod decorators below have no
    # function bodies — the Jacobian helpers referenced above (presumably
    # _aff2jac, _dbl_jac, _add_jac, _jac2aff) appear stripped from this
    # extraction. As written this is not valid Python; restore from upstream.
    @staticmethod
    @staticmethod
    @staticmethod
    @staticmethod
class TwistedEdwardCurve(Curve):
    """An elliptic curve defined by the equation: a*x² + y² = 1 + d*x²*y².

    The given domain must be a dictionary providing the following keys/values:
       - name      (str)    : curve unique name
       - size      (int)    : bit size
       - a         (int)    : `a` equation coefficient
       - d         (int)    : `d` equation coefficient
       - field     (int)    : field value
       - generator (int[2]) : x,y coordinates of the generator
       - order     (int)    : order of the generator

    *Note*: you should not use the constructor directly; use
    :func:`Curve.get_curve` to ensure a supported curve is used.

    Args:
        domain (dict): a dictionary providing the curve domain parameters
    """
    def __init__(self,domain):
        """ Build a new twisted Edward curve with the provided parameters. """
        self._domain = {}
        self._set(domain, ('name','type','size',
                           'a','d','field','generator','order'))
    def is_on_curve(self, P):
        """ See :func:`Curve.is_on_curve` """
        q = self.field
        x = P.x
        sqx = (x*x)%q
        y = P.y
        sqy = (y*y)%q
        # on curve iff a*x² + y² == 1 + d*x²*y² (mod q)
        left = (self.a*sqx+sqy)%q
        right = (1+self.d*sqx*sqy)%q
        return left == right
    def x_recover(self, y, sign=0):
        """Retrieve the x coordinate matching y so that (x,y) is on curve.

        Args:
            y (int)   : y coordinate
            sign (int): requested parity of x

        Returns:
            int: the computed x coordinate
        """
        q = self.field
        a = self.a
        d = self.d
        if sign:
            sign = 1
        # x² = (y²-1) / (d*y²-a); inverse via Fermat's little theorem
        # #x2 = (y^2-1) * (d*y^2-a)^-1
        yy = (y*y)%q
        u = (1-yy)%q
        v = pow(a-d*yy,q-2,q)
        xx = (u*v)%q
        if self.name =='Ed25519':
            # p ≡ 5 (mod 8): candidate root, fixed up by sqrt(-1) if needed
            x = pow(xx,(q+3)//8,q)
            if (x*x - xx) % q != 0:
                I = pow(2,(q-1)//4,q)
                x = (x*I) % q
        elif self.name =='Ed448':
            # p ≡ 3 (mod 4): direct exponentiation
            x = pow(xx,(q+1)//4,q)
        else:
            # NOTE(review): `curve` is undefined here — looks like it should
            # be self.name; as written this raises NameError, not the
            # intended AssertionError message.
            assert False, '%s not supported'%curve.name
        if x &1 != sign:
            x = q-x
        assert (x*x)%q == xx
        # over F(q):
        #   a.xx +yy = 1+d.xx.yy
        # <=> xx(a-d.yy) = 1-yy
        # <=> xx = (1-yy)/(a-d.yy)
        # <=> x = +- sqrt((1-yy)/(a-d.yy))
        # yy = (y*y)%q
        # u = (1-yy)%q
        # v = (a - d*yy)%q
        # v_1 = pow(v, q-2,q)
        # xx = (v_1*u)%q
        # x = self._sqrt(xx,q,sign) # Inherited generic Tonelli–Shanks from Curve
        return x
    def encode_point(self, P):
        """ Encodes a point P according to *draft_irtf-cfrg-eddsa-04*.

        Args:
            P: point to encode

        Returns
            bytes : encoded point (little-endian y with x parity in the top bit)
        """
        # NOTE(review): _coord_size is not visible in this chunk — presumably
        # stripped by the extraction; confirm against upstream ECPy.
        size = self._coord_size()
        y = bytearray(P.y.to_bytes(size,'little'))
        if P.x&1:
            y[len(y)-1] |= 0x80
        return bytes(y)
    def decode_point(self, eP):
        """ Decodes a point P according to *draft_irtf-cfrg-eddsa-04*.

        Args:
            eP (bytes) : encoded point

        Returns
            Point : decoded point
        """
        y = bytearray(eP)
        # top bit of the last byte carries the parity of x
        sign = y[len(y)-1] & 0x80
        y[len(y)-1] &= ~0x80
        y = int.from_bytes(y,'little')
        x = self.x_recover(y,sign)
        return Point(x,y,self,True)
    def add_point(self,P,Q):
        """ See :func:`Curve.add_point` """
        q = self.field
        a = self.a
        # work in extended coordinates; use the doubling formula when P == Q
        if (P == Q):
            Px,Py,Pz,Pt = self._aff2ext(P.x,P.y, q)
            x,y,z,t = self._dbl_ext(Px,Py,Pz,Pt, q,self.a)
        else:
            Px,Py,Pz,Pt = self._aff2ext(P.x,P.y, q)
            Qx,Qy,Qz,Qt = self._aff2ext(Q.x,Q.y, q)
            x,y,z,t = self._add_ext(Px,Py,Pz,Pt, Qx,Qy,Qz,Qt, q,a)
        x,y = self._ext2aff(x,y,z,t, q)
        return Point(x,y, self)
    def mul_point(self, k, P):
        """ See :func:`Curve.mul_point` """
        q = self.field
        a = self.a
        x1,y1,z1,t1 = self._aff2ext(P.x,P.y, q)
        # ladder-style double-and-add over the bits of k, MSB first
        k = bin(k)
        k = k[2:]
        sz = len(k)
        x2,y2,z2,t2 = self._dbl_ext(x1,y1,z1,t1, q,a)
        for i in range(1, sz):
            if k[i] == '1' :
                x1,y1,z1,t1 = self._add_ext(x2,y2,z2,t2, x1,y1,z1,t1, q,a)
                x2,y2,z2,t2 = self._dbl_ext(x2,y2,z2,t2, q,a)
            else:
                x2,y2,z2,t2 = self._add_ext(x1,y1,z1,t1, x2,y2,z2,t2, q,a)
                x1,y1,z1,t1 = self._dbl_ext(x1,y1,z1,t1, q,a)
        x,y = self._ext2aff(x1,y1,z1,t1, q)
        return Point(x,y,self)
    # NOTE(review): the four bare @staticmethod decorators below have no
    # function bodies — the extended-coordinate helpers referenced above
    # (presumably _aff2ext, _dbl_ext, _add_ext, _ext2aff) appear stripped
    # from this extraction. Not valid Python as written; restore upstream.
    @staticmethod
    @staticmethod
    @staticmethod
    @staticmethod
class MontgomeryCurve(Curve):
    """An elliptic curve defined by the equation: b*y² = x³ + a*x² + x.

    The given domain must be a dictionary providing the following keys/values:
       - name      (str)    : curve unique name
       - size      (int)    : bit size
       - a         (int)    : `a` equation coefficient
       - b         (int)    : `b` equation coefficient
       - field     (int)    : field value
       - generator (int[2]) : x,y coordinates of the generator
       - order     (int)    : order of the generator

    *Note*: you should not use the constructor directly; use
    :func:`Curve.get_curve` to ensure a supported curve is used.

    Args:
        domain (dict): a dictionary providing the curve domain parameters
    """
    def __init__(self,domain):
        """ Build a new Montgomery curve with the provided parameters. """
        self._domain = {}
        self._set(domain, ('name','type','size',
                           'a','b','field','generator','order'))
        # precompute (a+2)/4, used by the Montgomery ladder step
        #inv4 = pow(4,p-2,p)
        #self.a24 = ((self.a+2)*inv4)%p
        self.a24 = (self.a+2)//4
    def is_on_curve(self, P):
        """ See :func:`Curve.is_on_curve` """
        p = self.field
        x = P.x
        right = (x*x*x + self.a*x*x + x)%p
        if P.y:
            y = P.y
            left = (self.b*y*y)%p
            return left == right
        else:
            # x-only point: check the equation has a solution for y
            #check equation has a solution according to Euler criterion
            return pow(right,(p-1)//2, p) == 1
    def y_recover(self,x,sign=0):
        """Recover a y coordinate such that (x,y) is on this curve.

        Args:
            x (int)   : x coordinate
            sign (int): requested parity of y

        Returns:
            int: y, or None if no square root exists
        """
        p = self.field
        y2 = (x*x*x + self.a*x*x + x)%p
        y = self._sqrt(y2,p,sign)
        return y
    def encode_point(self, P):
        """ Encodes a point P according to *RFC7748* (x-only, little-endian).

        Args:
            P: point to encode

        Returns
            bytes : encoded point
        """
        size = self.size>>3
        x = bytearray(P.x.to_bytes(size,'little'))
        return bytes(x)
    def decode_point(self, eP):
        """ Decodes a point P according to *RFC7748*.

        Args:
            eP (bytes) : encoded point

        Returns
            Point : decoded point (x-only; y is None)
        """
        x = bytearray(eP)
        # RFC 7748: mask the top bit of the last byte before decoding
        x[len(x)-1] &= ~0x80
        x = int.from_bytes(x,'little')
        return Point(x,None,self)
    def mul_point(self,k,P):
        """ See :func:`Curve.mul_point` (x-only Montgomery ladder). """
        x = self._mul_point_x(k,P.x)
        return Point(x,None, P.curve)
    def _mul_point_x(self, k, u):
        """X-only scalar multiplication via a Montgomery ladder.

        NOTE(review): _ladder_step is not visible in this chunk — presumably
        stripped by the extraction; confirm against upstream ECPy.
        """
        # iterate over the bits of k, MSB first
        k = bin(k)
        k = k[2:]
        sz = len(k)
        x1 = u
        x2 = 1
        z2 = 0
        x3 = u
        z3 = 1
        for i in range(0, sz):
            ki = int(k[i])
            if ki == 1:
                x3,z3, x2,z2 = self._ladder_step(x1, x3,z3, x2,z2)
            else:
                x2,z2, x3,z3 = self._ladder_step(x1, x2,z2, x3,z3)
        # convert from projective (x2 : z2) back to affine
        p = self.field
        zinv = pow(z2,(p - 2),p)
        ku = (x2*zinv)%p
        return ku
class Point:
    """Immutable Elliptic Curve Point.

    A Point supports the following operators:
      - `+`  : point addition, with automatic doubling support
      - `*`  : scalar multiplication, written k*P or P*k (k :class:`int`, P :class:`Point`)
      - `==` : point comparison

    Attributes:
        x     (int)   : affine x coordinate
        y     (int)   : affine y coordinate
        curve (Curve) : curve on which the point is defined

    Args:
        x (int)     : x coordinate
        y (int)     : y coordinate
        check (bool): if True, enforce that (x,y) is on the curve

    Raises:
        ECPyException: if check=True and (x,y) is not on the curve
    """
    # Backing slots for the read-only accessors below.
    __slots__ = '_x','_y','_curve'
    # NOTE(review): the three bare @property decorators below have no function
    # bodies — presumably the x, y and curve accessors were stripped from this
    # extraction. Not valid Python as written; restore from upstream ECPy.
    @property
    @property
    @property
# Curve-family tags used in the 'type' field of each `curves` domain entry
# and dispatched on by Curve.get_curve.
WEIERSTRASS = "weierstrass"
TWISTEDEDWARD = "twistededward"
MONTGOMERY = "montgomery"
curves = [
{
'name': "frp256v1",
'type': WEIERSTRASS,
'size': 256,
'field': 0xF1FD178C0B3AD58F10126DE8CE42435B3961ADBCABC8CA6DE8FCF353D86E9C03,
'generator': (0xB6B3D4C356C139EB31183D4749D423958C27D2DCAF98B70164C97A2DD98F5CFF,
0x6142E0F7C8B204911F9271F0F3ECEF8C2701C307E8E4C9E183115A1554062CFB),
'order': 0xF1FD178C0B3AD58F10126DE8CE42435B53DC67E140D2BF941FFDD459C6D655E1,
'cofactor': 1,
'a': 0xF1FD178C0B3AD58F10126DE8CE42435B3961ADBCABC8CA6DE8FCF353D86E9C00,
'b': 0xEE353FCA5428A9300D4ABA754A44C00FDFEC0C9AE4B1A1803075ED967B7BB73F,
},
{
'name': "secp521r1",
'type': WEIERSTRASS,
'size': 521,
'field': 0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
'generator': (0x00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5BD66,
0x011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD16650),
'order': 0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409,
'cofactor': 1,
'a': 0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC,
'b': 0x0051953EB9618E1C9A1F929A21A0B68540EEA2DA725B99B315F3B8B489918EF109E156193951EC7E937B1652C0BD3BB1BF073573DF883D2C34F1EF451FD46B503F00,
},
{
'name': "secp384r1",
'type': WEIERSTRASS,
'size': 384,
'field': 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff,
'generator': (0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7,
0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f),
'order': 0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973,
'cofactor': 1,
'a': 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffffc,
'b': 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef,
},
{
'name': "secp256k1",
'type': WEIERSTRASS,
'size': 256,
'field': 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,
'generator': (0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798,
0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8),
'order': 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141,
'cofactor': 1,
'a': 0,
'b': 7
},
{
'name': "secp256r1",
'type': WEIERSTRASS,
'size': 256,
'field': 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff,
'generator': (0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296,
0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5),
'order': 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551,
'cofactor': 0x1,
'a': 0xffffffff00000001000000000000000000000000fffffffffffffffffffffffc,
'b': 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
},
{
'name': "secp224k1",
'type': WEIERSTRASS,
'size': 224,
'field': 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D,
'generator': (0xA1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C,
0x7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5),
'order': 0x010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7,
'cofactor': 0x1,
'a': 0x0,
'b': 0x5,
},
{
'name': "secp224r1",
'type': WEIERSTRASS,
'size': 224,
'field': 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001,
'generator': (0xB70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21 ,
0xBD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34),
'order': 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D,
'cofactor': 0x1,
'a': 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE,
'b': 0xB4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4
},
{
'name': "secp192k1",
'type': WEIERSTRASS,
'size': 192,
'field': 0xfffffffffffffffffffffffffffffffffffffffeffffee37,
'generator': (0xdb4ff10ec057e9ae26b07d0280b7f4341da5d1b1eae06c7d,
0x9b2f2f6d9c5628a7844163d015be86344082aa88d95e2f9d),
'order': 0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d,
'cofactor': 0x1,
'a': 0x0,
'b': 0x3
},
{
'name': "secp192r1",
'type': WEIERSTRASS,
'size': 256,
'field': 0xfffffffffffffffffffffffffffffffeffffffffffffffff,
'generator': (0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012,
0x7192b95ffc8da78631011ed6b24cdd573f977a11e794811),
'order': 0xffffffffffffffffffffffff99def836146bc9b1b4d22831,
'cofactor': 0x1,
'a': 0xfffffffffffffffffffffffffffffffefffffffffffffffc,
'b': 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
},
{
'name': "secp160k1",
'type': WEIERSTRASS,
'size': 160,
'field': 0xfffffffffffffffffffffffffffffffeffffac73,
'generator': (0x3b4c382ce37aa192a4019e763036f4f5dd4d7ebb,
0x938cf935318fdced6bc28286531733c3f03c4fee),
'order': 0x100000000000000000001b8fa16dfab9aca16b6b3,
'cofactor': 0x1,
'a': 0x0,
'b': 0x7
},
{
'name': "secp160r1",
'type': WEIERSTRASS,
'size': 160,
'field': 0xffffffffffffffffffffffffffffffff7fffffff,
'generator': (0x4a96b5688ef573284664698968c38bb913cbfc82,
0x23a628553168947d59dcc912042351377ac5fb32),
'order': 0x100000000000000000001f4c8f927aed3ca752257,
'cofactor': 0x1,
'a': 0xffffffffffffffffffffffffffffffff7ffffffc,
'b': 0x1c97befc54bd7a8b65acf89f81d4d4adc565fa45
},
{
'name': "secp160r2",
'type': WEIERSTRASS,
'size': 160,
'field': 0xfffffffffffffffffffffffffffffffeffffac73,
'generator': (0x52dcb034293a117e1f4ff11b30f7199d3144ce6d,
0xfeaffef2e331f296e071fa0df9982cfea7d43f2e),
'order': 0x100000000000000000000351ee786a818f3a1a16b,
'cofactor': 0x1,
'a': 0xfffffffffffffffffffffffffffffffeffffac70,
'b': 0xb4e134d3fb59eb8bab57274904664d5af50388ba
},
{
'name': "Brainpool-p512t1",
'type': WEIERSTRASS,
'size': 512,
'field': 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3,
'generator': (0x640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA,
0x5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332),
'order': 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069,
'cofactor': 1,
'a': 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F0,
'b': 0x7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E,
},
{
'name': "Brainpool-p512r1",
'type': WEIERSTRASS,
'size': 512,
'field': 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3,
'generator': (0x81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822,
0x7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892),
'order': 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069,
'cofactor': 1,
'a': 0x7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA,
'b': 0x3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723,
},
{
'name': "Brainpool-p384t1",
'type': WEIERSTRASS,
'size': 384,
'field': 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53,
'generator': (0x18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC,
0x25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928),
'order': 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565,
'cofactor': 1,
'a': 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC50,
'b': 0x7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE,
},
{
'name': "Brainpool-p384r1",
'type': WEIERSTRASS,
'size': 384,
'field': 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53,
'generator': (0x1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E,
0x8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315),
'order': 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565,
'cofactor': 1,
'a': 0x7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826,
'b': 0x04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62D57CB4390295DBC9943AB78696FA504C11,
},
{
'name': "Brainpool-p320t1",
'type': WEIERSTRASS,
'size': 320,
'field': 0xD35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E27,
'generator': (0x925BE9FB01AFC6FB4D3E7D4990010F813408AB106C4F09CB7EE07868CC136FFF3357F624A21BED52,
0x63BA3A7A27483EBF6671DBEF7ABB30EBEE084E58A0B077AD42A5A0989D1EE71B1B9BC0455FB0D2C3),
'order': 0xD35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E98691555B44C59311,
'cofactor': 1,
'a': 0xD35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E24,
'b': 0xA7F561E038EB1ED560B3D147DB782013064C19F27ED27C6780AAF77FB8A547CEB5B4FEF422340353,
},
{
'name': "Brainpool-p320r1",
'type': WEIERSTRASS,
'size': 320,
'field': 0xD35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E27,
'generator': (0x43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599C710AF8D0D39E20611,
0x14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6AC7D35245D1692E8EE1),
'order': 0xD35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E98691555B44C59311,
'cofactor': 1,
'a': 0x3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9F492F375A97D860EB4,
'b': 0x520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539816F5EB4AC8FB1F1A6,
},
{
'name': "Brainpool-p256r1",
'type': WEIERSTRASS,
'size': 256,
'field': 0xa9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377,
'generator': (0x8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262,
0x547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997),
'order': 0xa9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7,
'cofactor': 0x1,
'a': 0x7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9,
'b': 0x26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6
},
{
'name': "Brainpool-p256t1",
'type': WEIERSTRASS,
'size': 256,
'field': 0xa9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377,
'generator': (0xa3e8eb3cc1cfe7b7732213b23a656149afa142c47aafbc2b79a191562e1305f4,
0x2d996c823439c56d7f7b22e14644417e69bcb6de39d027001dabe8f35b25c9be),
'order': 0xa9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7,
'cofactor': 0x1,
'a': 0xa9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5374,
'b': 0x662c61c430d84ea4fe66a7733d0b76b7bf93ebc4af2f49256ae58101fee92b04
},
{
'name': "Brainpool-p224r1",
'type': WEIERSTRASS,
'size': 224,
'field': 0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF,
'generator': (0x0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D,
0x58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD),
'order': 0xD7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F,
'cofactor': 0x1,
'a': 0x68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43,
'b': 0x2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B
},
{
'name': "Brainpool-p224t1",
'type': WEIERSTRASS,
'size': 192,
'a': 0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FC,
'b': 0x4B337D934104CD7BEF271BF60CED1ED20DA14C08B3BB64F18A60888D,
'field': 0x2DF271E14427A346910CF7A2E6CFA7B3F484E5C2CCE1C8B730E28B3F,
'generator': (0x6AB1E344CE25FF3896424E7FFE14762ECB49F8928AC0C76029B4D580,
0x0374E9F5143E568CD23F3F4D7C0D4B1E41C8CC0D1C6ABD5F1A46DB4C),
'order': 0xD7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F,
'cofactor': 0x1,
},
{
'name': "Brainpool-p192r1",
'type': WEIERSTRASS,
'size': 192,
'field': 0xc302f41d932a36cda7a3463093d18db78fce476de1a86297,
'generator': (0xc0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6,
0x14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f),
'order': 0xc302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1,
'cofactor': 0x1,
'a': 0x6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef,
'b': 0x469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9
},
{
'name': "Brainpool-p192t1",
'type': WEIERSTRASS,
'size': 192,
'field': 0xc302f41d932a36cda7a3463093d18db78fce476de1a86297,
'generator': (0x3ae9e58c82f63c30282e1fe7bbf43fa72c446af6f4618129,
0x97e2c5667c2223a902ab5ca449d0084b7e5b3de7ccc01c9),
'order': 0xc302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1,
'cofactor': 0x1,
'a': 0xc302f41d932a36cda7a3463093d18db78fce476de1a86294,
'b': 0x13d56ffaec78681e68f9deb43b35bec2fb68542e27897b79
},
{
'name': "Brainpool-p160r1",
'type': WEIERSTRASS,
'size': 160,
'field': 0xe95e4a5f737059dc60dfc7ad95b3d8139515620f,
'generator': (0xbed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3,
0x1667cb477a1a8ec338f94741669c976316da6321),
'order': 0xe95e4a5f737059dc60df5991d45029409e60fc09,
'cofactor': 0x1,
'a': 0x340e7be2a280eb74e2be61bada745d97e8f7c300,
'b': 0x1e589a8595423412134faa2dbdec95c8d8675e58
},
{
'name': "Brainpool-p160t1",
'type': WEIERSTRASS,
'size': 160,
'field': 0xe95e4a5f737059dc60dfc7ad95b3d8139515620f,
'generator': (0xb199b13b9b34efc1397e64baeb05acc265ff2378,
0xadd6718b7c7c1961f0991b842443772152c9e0ad),
'order': 0xe95e4a5f737059dc60df5991d45029409e60fc09,
'cofactor': 0x1,
'a': 0xe95e4a5f737059dc60dfc7ad95b3d8139515620c,
'b': 0x7a556b6dae535b7b51ed2c4d7daa7a0b5c55f380
},
{
'name': "NIST-P256",
'type': WEIERSTRASS,
'size': 256,
'field': 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff,
'generator': (0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296,
0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5),
'order': 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551,
'cofactor': 0x1,
'a': 0xffffffff00000001000000000000000000000000fffffffffffffffffffffffc,
'b': 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
},
{
'name': "NIST-P224",
'type': WEIERSTRASS,
'size': 224,
'field': 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001,
'generator': (0xB70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21 ,
0xBD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34),
'order': 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D,
'cofactor': 0x1,
'a': 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE,
'b': 0xB4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4
},
{
'name': "NIST-P192",
'type': WEIERSTRASS,
'size': 192,
'field': 0xfffffffffffffffffffffffffffffffeffffffffffffffff,
'generator': (0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012,
0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811),
'order': 0xffffffffffffffffffffffff99def836146bc9b1b4d22831,
'cofactor': 0x1,
'a': 0xfffffffffffffffffffffffffffffffefffffffffffffffc,
'b': 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
},
{
'name': "Ed448",
'type': TWISTEDEDWARD,
'size': 448,
'field': 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffffffffffffffffffffffffffffffffffffffffffffffffffff,
'generator': (0x4f1970c66bed0ded221d15a622bf36da9e146570470f1767ea6de324a3d3a46412ae1af72ab66511433b80e18b00938e2626a82bc70cc05e,
0x693f46716eb6bc248876203756c9c7624bea73736ca3984087789c1e05a0c2d73ad3ff1ce67c39c4fdbd132c4ed7c8ad9808795bf230fa14),
'order': 0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffff7cca23e9c44edb49aed63690216cc2728dc58f552378c292ab5844f3,
'cofactor': 4,
'd': 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffffffffffffffffffffffffffffffffffffffffffffffff6756,
'a': 1
},
{
'name': "Ed25519",
'type': TWISTEDEDWARD,
'size': 256,
'field': 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed,
'generator': (15112221349535400772501151409588531511454012693041857206046113283949847762202,
46316835694926478169428394003475163141307993866256225615783033603165251855960),
'order': 0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED,
'cofactor': 0x08,
'd': 0x52036cee2b6ffe738cc740797779e89800700a4d4141d8ab75eb4dca135978a3,
'a': -1
},
{
'name': "Curve448",
'type': MONTGOMERY,
'size': 448,
'field': 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffffffffffffffffffffffffffffffffffffffffffffffffffff,
'generator': (5,
0x7d235d1295f5b1f66c98ab6e58326fcecbae5d34f55545d060f75dc28df3f6edb8027e2346430d211312c4b150677af76fd7223d457b5b1a),
'order': 0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffff7cca23e9c44edb49aed63690216cc2728dc58f552378c292ab5844f3,
'cofactor': 4,
'b': 1,
'a': 0x262a6
},
{
'name': "Curve25519",
'type': MONTGOMERY,
'size': 256,
'field': 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed,
'generator': (9,
43114425171068552920764898935933967039370386198203806730763910166200978582548),
'order': 0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED,
'cofactor': 0x08,
'b': 1,
'a': 486662
},
]
if __name__ == "__main__":
    # Built-in known-answer self-test: exercises point addition, doubling,
    # scalar multiplication and point encoding/decoding for one curve of each
    # supported model -- Weierstrass (secp256k1), twisted Edwards (Ed25519)
    # and Montgomery (Curve25519).
    try:
        ###############################
        ### Weierstrass quick check ###
        ###############################
        cv = Curve.get_curve('secp256k1')
        #check generator matches the curve's published base point
        Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
        Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
        G = Point(Gx, Gy, cv)
        assert(G == cv.generator)
        #define point
        W1 = Point(0x6fb13b7e8ab1c7d191d16197c1bf7f8dc7992412e1266155b3fb3ac8b30f3ed8,
                   0x2e1eb77bd89505113819600b395e0475d102c4788a3280a583d9d82625ed8533,
                   cv)
        W2 = Point(0x07cd9ee748a0b26773d9d29361f75594964106d13e1cad67cfe2df503ee3e90e,
                   0xd209f7c16cdb6d3559bea88c7d920f8ff077406c615da8adfecdeef604cb40a6,
                   cv)
        #check add and doubling against precomputed results
        sum_W1_W2 = Point(0xc4a20cbc2dc27c70fbc1335292c109a1ccd106981b5698feafe702bcb0fb2fca,
                          0x7e1ad514051b87b7ce815c7defcd4fcc01e88842b3135e10a342be49bf5cad09,
                          cv)
        dbl_W2 = Point(0xb4f211b11166e6b3a3561e5978f47855787943dbeccd2014706c941a5890c913,
                       0xe0122dc6f3ce097eb73865e66a1ced02a518afdec02596d7d152f121391e2d63,
                       cv)
        s = W1+W2
        assert(s == sum_W1_W2)
        d = W2+W2
        assert(d == dbl_W2)
        #check mul
        k = 0x2976F786AE6333E125C0DFFD6C16D37E8CED5ABEDB491BCCA21C75B307D0B318
        kW1 = Point(0x1de93c28f8c58db95f30be1704394f6f5d4602291c4933a1126cc61f9ed70b88,
                    0x6f66df7bb6b37609cacded3052e1d127b47684949dff366020f824d517d66f34,
                    cv)
        mulW1 = k*W1
        assert(kW1 == mulW1)
        #check encoding: 0x04 prefix = uncompressed form (x bytes then y bytes)
        W2_enc = [ 0x04,
                   #x
                   0x07, 0xcd, 0x9e, 0xe7, 0x48, 0xa0, 0xb2, 0x67, 0x73, 0xd9, 0xd2, 0x93, 0x61, 0xf7, 0x55, 0x94,
                   0x96, 0x41, 0x06, 0xd1, 0x3e, 0x1c, 0xad, 0x67, 0xcf, 0xe2, 0xdf, 0x50, 0x3e, 0xe3, 0xe9, 0x0e,
                   #y
                   0xd2, 0x09, 0xf7, 0xc1, 0x6c, 0xdb, 0x6d, 0x35, 0x59, 0xbe, 0xa8, 0x8c, 0x7d, 0x92, 0x0f, 0x8f,
                   0xf0, 0x77, 0x40, 0x6c, 0x61, 0x5d, 0xa8, 0xad, 0xfe, 0xcd, 0xee, 0xf6, 0x04, 0xcb, 0x40, 0xa6]

        dW2_enc = [ 0x04,
                    #x
                    0xb4, 0xf2, 0x11, 0xb1, 0x11, 0x66, 0xe6, 0xb3, 0xa3, 0x56, 0x1e, 0x59, 0x78, 0xf4, 0x78, 0x55,
                    0x78, 0x79, 0x43, 0xdb, 0xec, 0xcd, 0x20, 0x14, 0x70, 0x6c, 0x94, 0x1a, 0x58, 0x90, 0xc9, 0x13,
                    #y
                    0xe0, 0x12, 0x2d, 0xc6, 0xf3, 0xce, 0x09, 0x7e, 0xb7, 0x38, 0x65, 0xe6, 0x6a, 0x1c, 0xed, 0x02,
                    0xa5, 0x18, 0xaf, 0xde, 0xc0, 0x25, 0x96, 0xd7, 0xd1, 0x52, 0xf1, 0x21, 0x39, 0x1e, 0x2d, 0x63]

        #compressed form used here: 0x02 prefix, x bytes, then a trailing y-sign byte
        W2_enc_comp = [ 0x02,
                        #x
                        0x07, 0xcd, 0x9e, 0xe7, 0x48, 0xa0, 0xb2, 0x67, 0x73, 0xd9, 0xd2, 0x93, 0x61, 0xf7, 0x55, 0x94,
                        0x96, 0x41, 0x06, 0xd1, 0x3e, 0x1c, 0xad, 0x67, 0xcf, 0xe2, 0xdf, 0x50, 0x3e, 0xe3, 0xe9, 0x0e,
                        #y sign
                        0]

        dW2_enc_comp = [ 0x02,
                         #x
                         0xb4, 0xf2, 0x11, 0xb1, 0x11, 0x66, 0xe6, 0xb3, 0xa3, 0x56, 0x1e, 0x59, 0x78, 0xf4, 0x78, 0x55,
                         0x78, 0x79, 0x43, 0xdb, 0xec, 0xcd, 0x20, 0x14, 0x70, 0x6c, 0x94, 0x1a, 0x58, 0x90, 0xc9, 0x13,
                         #y
                         1]

        #encode/decode round-trips, uncompressed then compressed
        P = cv.encode_point(W2)
        assert(P == W2_enc)
        P = cv.decode_point(P)
        assert(P == W2)

        P = cv.encode_point(dbl_W2)
        assert(P == dW2_enc)
        P = cv.decode_point(P)
        assert(P == dbl_W2)

        P = cv.encode_point(W2,True)
        assert(P == W2_enc_comp)
        P = cv.decode_point(P)
        assert(P == W2)

        P = cv.encode_point(dbl_W2,True)
        assert(P == dW2_enc_comp)
        P = cv.decode_point(P)
        assert(P == dbl_W2)

        ##################################
        ### Twisted Edward quick check ###
        ##################################
        cv = Curve.get_curve('Ed25519')
        W1 = Point(0x36ab384c9f5a046c3d043b7d1833e7ac080d8e4515d7a45f83c5a14e2843ce0e,
                   0x2260cdf3092329c21da25ee8c9a21f5697390f51643851560e5f46ae6af8a3c9,
                   cv)
        W2 = Point(0x67ae9c4a22928f491ff4ae743edac83a6343981981624886ac62485fd3f8e25c,
                   0x1267b1d177ee69aba126a18e60269ef79f16ec176724030402c3684878f5b4d4,
                   cv)
        #check generator
        Bx = 15112221349535400772501151409588531511454012693041857206046113283949847762202
        By = 46316835694926478169428394003475163141307993866256225615783033603165251855960
        B = Point(Bx, By, cv)
        assert(B == cv.generator)
        #check add
        sum_W1_W2 = Point(0x49fda73eade3587bfcef7cf7d12da5de5c2819f93e1be1a591409cc0322ef233,
                          0x5f4825b298feae6fe02c6e148992466631282eca89430b5d10d21f83d676c8ed,
                          cv)
        dbl_W1 = Point(0x203da8db56cff1468325d4b87a3520f91a739ec193ce1547493aa657c4c9f870,
                       0x47d0e827cb1595e1470eb88580d5716c4cf22832ea2f0ff0df38ab61ca32112f,
                       cv)
        s = W1+W2
        assert(s == sum_W1_W2)
        d = W1+W1
        assert(d == dbl_W1)
        #check mul
        A = Point(0x74ad28205b4f384bc0813e6585864e528085f91fb6a5096f244ae01e57de43ae,
                  0x0c66f42af155cdc08c96c42ecf2c989cbc7e1b4da70ab7925a8943e8c317403d,
                  cv)
        k = 0x035ce307f6524510110b4ea1c8af0e81fb705118ebcf886912f8d2d87b5776b3
        kA = Point(0x0d968dd46de0ff98f4a6916e60f84c8068444dbc2d93f5d3b9cf06dade04a994,
                   0x3ba16a015e1dd42b3d088c7a68c344ec47aaba463f67f4e9099c634f64781e00,
                   cv)
        mul = k*A
        assert(mul == kA)

        ##################################
        ###   Montgomery quick check   ###
        ##################################
        cv = Curve.get_curve('Curve25519')
        # X25519 scalar-multiplication vectors (these match the RFC 7748
        # section 5.2 test vectors); scalars are clamped by decode_scalar_25519.
        #0x449a44ba44226a50185afcc10a4c1462dd5e46824b15163b9d7c52f06be346a0
        k = binascii.unhexlify("a546e36bf0527c9d3b16154b82465edd62144c0ac1fc5a18506a2244ba449ac4")
        k = decode_scalar_25519(k)
        assert(k == 31029842492115040904895560451863089656472772604678260265531221036453811406496)
        eP = binascii.unhexlify("e6db6867583030db3594c1a424b15f7c726624ec26b3353b10a903a6d0ab1c4c")
        P = cv.decode_point(eP)
        assert(P.x == 34426434033919594451155107781188821651316167215306631574996226621102155684838)
        eQ = binascii.unhexlify("c3da55379de9c6908e94ea4df28d084f32eccf03491c71f754b4075577a28552")
        Q = cv.decode_point(eQ)
        kP = k*P
        assert(kP.x == Q.x)
        ekP = cv.encode_point(kP)
        assert(ekP == eQ)

        #0x4dba18799e16a42cd401eae021641bc1f56a7d959126d25a3c67b4d1d4e96648
        k = binascii.unhexlify("4b66e9d4d1b4673c5ad22691957d6af5c11b6421e0ea01d42ca4169e7918ba0d")
        k = decode_scalar_25519(k)
        assert(k == 35156891815674817266734212754503633747128614016119564763269015315466259359304)
        eP = binascii.unhexlify("e5210f12786811d3f4b7959d0538ae2c31dbe7106fc03c3efc4cd549c715a493")
        P = cv.decode_point(eP)
        assert(P.x == 8883857351183929894090759386610649319417338800022198945255395922347792736741)
        eQ = binascii.unhexlify("95cbde9476e8907d7aade45cb4b873f88b595a68799fa152e6f8f7647aac7957")
        Q = cv.decode_point(eQ)
        kP = k*P
        assert(kP.x == Q.x)
        ekP = cv.encode_point(kP)
        assert(ekP == eQ)

        ##OK!
        print("All internal assert OK!")
    finally:
        # NOTE(review): this finally block is a no-op -- assertion failures
        # still propagate; consider removing the try/finally entirely.
        pass
| [
2,
21004,
25,
41002,
12,
23,
198,
198,
2,
15069,
1584,
12,
5539,
25789,
1173,
14937,
45991,
1279,
771,
1173,
13,
6880,
45991,
31,
549,
6269,
13,
785,
22330,
12021,
6269,
35516,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
1062... | 1.675289 | 29,962 |
#!/usr/bin/env python
def ordinal(value):
    """Convert zero or a positive integer (or its string representation)
    to its English ordinal string.

    Args:
        value: An int, or a string that ``int()`` can parse. Any other
            value (including an unparseable string) is returned unchanged.

    Returns:
        The ordinal string (e.g. ``'1st'``), or ``value`` itself when it
        cannot be converted to an integer.

    >>> [ordinal(i) for i in range(1, 13)]
    ['1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th', '11th', '12th']
    >>> [ordinal(i) for i in (100, '111', '112', 1011)]
    ['100th', '111th', '112th', '1011th']
    >>> ordinal('abc')
    'abc'
    """
    try:
        value = int(value)
    except (TypeError, ValueError):
        # Not an integer and not an integer-like string: pass through
        # unchanged (previously a non-string such as None crashed here).
        return value
    # 11, 12 and 13 (and 111, 212, ...) always take "th", so only apply the
    # last digit's special suffix when the tens digit is not 1.
    if (value % 100) // 10 != 1:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(value % 10, "th")
    else:
        suffix = "th"
    return "%d%s" % (value, suffix)
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
4299,
2760,
1292,
7,
8367,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1482,
24040,
6632,
393,
257,
1635,
7353,
425,
9,
18253,
357,
273,
511,
4731,
220,
198,
220,
220,
... | 1.782986 | 576 |
"""This module implements a low-level wrapper over Python-CAN's CAN API specific to Babydriver."""
import time
import can
from message_defs import BABYDRIVER_DEVICE_ID, BABYDRIVER_CAN_MESSAGE_ID
# The default CAN channel to use for this module. Changed dynamically by cli_setup.
# get_bus() reads it at call time, so reassignment takes effect immediately.
default_channel = "can0"  # pylint: disable=invalid-name
def get_bus(channel=None):
    """Create and return a new Python-CAN Bus for sending/receiving messages.

    Args:
        channel: SocketCAN channel name; when None, the module-level
            ``default_channel`` is used.
    """
    selected_channel = default_channel if channel is None else channel
    return can.interface.Bus(bustype="socketcan", channel=selected_channel, bitrate=500000)
class Message:
    """Immutable wrapper around Python-CAN's can.Message implementing our
    arbitration-ID layout.

    See https://python-can.readthedocs.io/en/master/message.html.

    Attributes:
        msg: The wrapped can.Message.
        message_id: Message ID encoded in the arbitration ID.
        device_id: Device ID encoded in the arbitration ID.
        data: Payload bytes of the wrapped message.
    """

    def __init__(self, message_id=0, device_id=0, **kwargs):
        """Build a Message; extra kwargs go straight to can.Message.

        Args:
            message_id: Message ID to encode, used only if arbitration_id
                is not passed explicitly.
            device_id: Device ID to encode, used only if arbitration_id
                is not passed explicitly.
        """
        if "arbitration_id" not in kwargs:
            # Arbitration-ID layout: 6 bits of message ID, 1 ACK/DATA bit,
            # 4 bits of device ID -- so the message ID sits 5 bits up.
            kwargs["arbitration_id"] = (message_id << 5) | device_id
        self.msg = can.Message(**kwargs)

    @classmethod
    def from_msg(cls, msg):
        """Wrap an existing can.Message in a Message."""
        wrapper = cls()
        wrapper.msg = msg
        return wrapper

    @property
    def message_id(self):
        """Message ID of this CAN message (bits 5-10 of the arbitration ID)."""
        return (self.msg.arbitration_id >> 5) & 0b111111

    @property
    def device_id(self):
        """Device ID of this CAN message (bits 0-3 of the arbitration ID)."""
        return self.msg.arbitration_id & 0b1111

    @property
    def data(self):
        """Payload associated with this CAN message."""
        return self.msg.data
def send_message(
    babydriver_id=None,
    data=None,
    channel=None,
    msg_id=BABYDRIVER_CAN_MESSAGE_ID,
    device_id=BABYDRIVER_DEVICE_ID,
):
    """Sends a CAN message.

    Args:
        babydriver_id: The babydriver ID (first byte of message data) of the
            message to send. If None, the first byte of message data isn't
            overwritten.
        data: The data to send in the CAN message. Must be a list of bytes
            (0-255). If babydriver_id is None, this can be up to 8 bytes;
            otherwise, it can only be up to 7 bytes since the first byte is
            the babydriver ID. The caller's list is never modified.
        channel: The SocketCAN channel on which to send the message.
        msg_id: The CAN message ID to use.
        device_id: The device ID to use.

    Raises:
        ValueError: If more than 8 bytes (including the babydriver ID) are given.
        can.CanError: If there was an error in transmitting the message.
    """
    # Copy the caller's list up front: the padding below previously appended
    # to the caller's own list (in-place +=) when babydriver_id was None.
    payload = [] if data is None else list(data)
    if babydriver_id is not None:
        payload = [babydriver_id] + payload
    if len(payload) > 8:
        raise ValueError("Only 8 bytes of data (including babydriver ID) may be sent")
    if len(payload) < 8 and msg_id == BABYDRIVER_CAN_MESSAGE_ID:
        # pad to 8 bytes so that the firmware project will accept it
        payload += [0] * (8 - len(payload))
    bus = get_bus(channel)
    msg = Message(
        message_id=msg_id,
        device_id=device_id,
        data=bytearray(payload),
        is_extended_id=False
    )
    bus.send(msg.msg)
def next_message(
    babydriver_id=None,
    channel=None,
    timeout=1,
    msg_id=BABYDRIVER_CAN_MESSAGE_ID,
):
    """Blocks until we receive a babydriver CAN message or we time out.

    Args:
        babydriver_id: A babydriver ID or list of IDs. If non-None and the received message's
            babydriver ID (i.e. first byte of message data) isn't equal to this or an element of
            this, raise an exception.
        channel: The SocketCAN channel to send on (can0 or vcan0).
        timeout: Timeout to wait for a message before raising an exception, in seconds.
        msg_id: The CAN message ID or list of IDs to wait for, defaulting to the babydriver CAN
            message. All other CAN messages will be ignored. If None, don't check the message ID
            and return the first CAN message we see.

    Returns:
        A Message object representing the received CAN message.

    Raises:
        TimeoutError: if we time out waiting for an appropriate CAN message.
        ValueError: if we receive a CAN message but its babydriver ID does not match.
    """
    # make these iterable to support waiting on one or multiple message/babydriver IDs
    if isinstance(babydriver_id, int):
        babydriver_id = (babydriver_id,)
    if isinstance(msg_id, int):
        msg_id = (msg_id,)
    bus = get_bus(channel)
    time_left = timeout
    current_time = time.time()
    msg = None
    while time_left > 0:
        # bus.recv returns None on timeout, otherwise a can.Message.
        msg = bus.recv(timeout=time_left)
        if msg is None:
            # bus.recv timed out
            break
        msg = Message.from_msg(msg)
        if msg_id is None or msg.message_id in msg_id:
            break
        # ignore messages that we aren't waiting for.
        msg = None
        # Charge the time spent on the ignored message against the overall
        # timeout so the loop is bounded by `timeout` wall-clock seconds.
        new_time = time.time()
        time_left -= new_time - current_time
        current_time = new_time
    if msg is None:
        raise TimeoutError()
    # A matching message ID with a wrong/missing babydriver ID is an error,
    # not something to keep waiting through.
    if babydriver_id is not None and (not msg.data or msg.data[0] not in babydriver_id):
        raise ValueError("next_message expected babydriver ID {} but got {}".format(
            babydriver_id,
            msg.data[0] if msg.data else "empty message",
        ))
    return msg
def can_pack(data_list):
    """Pack (value, byte-length) tuples into one little-endian byte list.

    Each value is broken into ``len_in_bytes`` individual byte values,
    least-significant byte first, and appended to the output.

    Args:
        data_list: Iterable of tuples ``(val, len_in_bytes)`` where ``val``
            is an int in ``[0, 2**(8 * len_in_bytes) - 1]`` and
            ``len_in_bytes`` is a positive int.

    Returns:
        A list of ints in ``[0, 255]``: the little-endian rendition of every
        value, concatenated in input order.

    Raises:
        ValueError: If ``len_in_bytes`` is not positive, ``val`` is negative,
            or ``val`` does not fit in ``len_in_bytes`` bytes.
    """
    bytearr = []
    # Traverse list
    for val, len_in_bytes in data_list:
        # Error check input vals
        if len_in_bytes < 1 or val < 0:
            raise ValueError("len in bytes must be > 0; val must be non-negative")
        if val >= 1 << (len_in_bytes * 8):
            # Parenthesize the shift before subtracting 1: "-" binds tighter
            # than "<<", so the original `1 << n*8 - 1` reported a max value
            # of half the true maximum (e.g. 128 instead of 255 for 1 byte).
            raise ValueError("Value {} exceeds allotted {} bytes. Max Val: {}".format(
                val, len_in_bytes, (1 << (len_in_bytes * 8)) - 1))
        # Split val into bytes, emitting LSB first (little-endian)
        for _ in range(len_in_bytes):
            bytearr.append(val & 0xFF)
            val = val >> 8
    return bytearr
| [
37811,
1212,
8265,
23986,
257,
1877,
12,
5715,
29908,
625,
11361,
12,
44565,
338,
15628,
7824,
2176,
284,
14801,
26230,
526,
15931,
198,
198,
11748,
640,
198,
11748,
460,
198,
198,
6738,
3275,
62,
4299,
82,
1330,
347,
6242,
56,
7707,
... | 2.48795 | 2,863 |
#Author: Toms Bergmanis toms.bergmanis@gmail.com
#Usage example python3 get_stats.py 20-char-context-v1 test Latvian
import sys
from collections import defaultdict
# Command line: <model-name> <data-set: dev|test> <language> [<language> ...]
model_name = sys.argv[1]
data_set = sys.argv[2]  # either dev or test

for lang in sys.argv[3:]:
    model = lang + "-" + model_name
    train_inflections = []
    train_inflections2lemmas = defaultdict(list)
    # Collect every inflection seen in training, and the lemmas it maps to,
    # so test tokens can be classified as seen/unseen/ambiguous below.
    with open("models/{}/data/train-targets".format(model), "r") as t:
        with open("models/{}/data/train-sources".format(model), "r") as s:
            for line in s:
                line_content = line.split("<lc>")[1].split("<rc>")
                inflection = "".join(line_content[0].strip().split()).lower()
                lemma = "".join(t.readline().strip().split()[1:-1]).lower()
                train_inflections.append(inflection)
                train_inflections2lemmas[inflection].append(lemma)
    train_inflections = set(train_inflections)

    correct_tokens = 0.0
    total_number_of_tokens = 0.0
    # NOTE(review): the per-category totals start at 1.0 rather than 0.0,
    # presumably to avoid division by zero below; this slightly biases the
    # reported percentages downward -- confirm this is intended.
    total_ambiguous_tokens = 1.0
    correct_ambigous_tokens = 0.0
    correct_unnseen_tokens = 0.0
    total_unseen_tokens = 1.0
    correct_seen_unambiguous_tokens = 0.0
    total_seen_unambiguous_tokens = 1.0
    with open("models/{}/data/{}-sources".format(model, data_set), "r") as i:
        with open("models/{}/data/{}-targets".format(model, data_set), "r") as o:
            with open("models/{}/best_model/{}-hypothesis".format(model, data_set), "r") as p:
                for line in i:
                    # Sources come in two formats: "<lc> ... <rc>" context
                    # markup, or plain "<w> ... </w>" word markup.
                    try:
                        inflection = "".join(line.split("<lc>")[1].split("<rc>")[0].strip().split()).lower()
                    except IndexError:  # no "<lc>" marker: fall back to "<w>" format
                        inflection = "".join(line.split("<w>")[1].split("</w>")[0].strip().split()).lower()
                    lemma = "".join(o.readline().strip().split()[1:-1]).lower()
                    prediction = "".join(p.readline().strip().split()[1:-1]).lower()
                    if lemma == prediction:
                        correct_tokens += 1
                    total_number_of_tokens += 1
                    # Ambiguous: inflection maps to >1 distinct lemma in training.
                    if len(set(train_inflections2lemmas[inflection])) > 1:
                        if prediction == lemma:
                            correct_ambigous_tokens += 1
                        total_ambiguous_tokens += 1
                    # Unseen: inflection never appeared in training data.
                    elif not inflection in train_inflections:
                        if prediction == lemma:
                            correct_unnseen_tokens += 1.0
                        total_unseen_tokens += 1
                    # Seen and unambiguous.
                    else:
                        if prediction == lemma:
                            correct_seen_unambiguous_tokens += 1.0
                        total_seen_unambiguous_tokens += 1

    # Report accuracy for ambiguous, unseen, seen-unambiguous, and all tokens.
    results = []
    results.append(("{:.2f}%".format(100 * float(correct_ambigous_tokens) / total_ambiguous_tokens)))
    results.append(("{:.2f}%".format(100 * float(correct_unnseen_tokens) / total_unseen_tokens)))
    results.append(("{:.2f}%".format(100 * float(correct_seen_unambiguous_tokens) / total_seen_unambiguous_tokens)))
    results.append(("{:.2f}%".format(100 * float(correct_tokens) / total_number_of_tokens)))
    print(model, data_set, " ".join(results))
| [
2,
13838,
25,
309,
3150,
24626,
805,
271,
284,
907,
13,
3900,
805,
271,
31,
14816,
13,
785,
198,
2,
28350,
1672,
21015,
18,
651,
62,
34242,
13,
9078,
1160,
12,
10641,
12,
22866,
12,
85,
16,
1332,
5476,
85,
666,
220,
198,
11748,
... | 1.902059 | 1,797 |
import numpy as np
from ..Tools.Downloading._RebuildDataIndex import _RebuildDataIndex
from . import _Fields
def RebuildDataIndex(sc, Prod, L):
    '''
    Rebuild the data index for a data product.

    Inputs
    ======
    sc : str
        'a'|'b'|'c'|'d'|'e'
    Prod: str
        Product string (see below)
    L : str or int
        Level of data to download (0,1,2)

    Available data products
    =======================
    Prod	L	Description
    ========================================================================
    FIT		2	EFI/FGM Onboard Spin Fit Level 2 CDF
    FIT		1	EFI/FGM Onboard Spin Fit Level 1 CDF
    FIT		0	EFI/FGM Onboard Spin Fit Level 0 Packets
                        (Level 0 data might not work)
    '''
    # Resolve the product/level/spacecraft-specific index file name and data
    # directory, then delegate the actual rebuild to the shared helper.
    _RebuildDataIndex(_Fields.datapath.format(Prod, L, sc),
                      _Fields.idxfname.format(Prod, L, sc),
                      _Fields.vfmt)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
11485,
33637,
13,
10002,
278,
13557,
28951,
3547,
6601,
15732,
1330,
4808,
28951,
3547,
6601,
15732,
198,
6738,
764,
1330,
4808,
15878,
82,
628,
198,
4299,
797,
11249,
6601,
15732,
7,
1416,
11,
... | 2.620253 | 316 |
# coding=utf-8
from pyramid.config import Configurator
from pyramid.security import remember
from pyramid_ldap3 import get_ldap_connector
from tracim_backend.config import CFG
from tracim_backend.extensions import hapic
from tracim_backend.lib.core.user import UserApi
from tracim_backend.lib.utils.request import TracimRequest
from tracim_backend.models.auth import AuthType
from tracim_backend.views.controllers import Controller
from tracim_backend.views.core_api.schemas import BasicAuthSchema
from tracim_backend.views.core_api.schemas import LoginOutputHeaders
from tracim_backend.views.core_api.schemas import NoContentSchema
from tracim_backend.views.core_api.schemas import UserSchema
from tracim_backend.views.swagger_generic_section import SWAGGER_TAG__AUTHENTICATION_ENDPOINTS
try: # Python 3.5+
from http import HTTPStatus
except ImportError:
from http import client as HTTPStatus
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
27944,
13,
11250,
1330,
17056,
333,
1352,
198,
6738,
27944,
13,
12961,
1330,
3505,
198,
6738,
27944,
62,
335,
499,
18,
1330,
651,
62,
335,
499,
62,
8443,
273,
198,
198,
6738,
491,
330,
320,
... | 3.071186 | 295 |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import uuid
import mock
import netaddr
from oslo.config import cfg
from oslo.db import exception as db_exc
from sqlalchemy import exc as sql_exc
import webob.exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as ntn_exc
import neutron.common.test_lib as test_lib
from neutron import context
from neutron.extensions import dvr
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import l3_ext_gw_mode
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as secgrp
from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware import nsxlib
from neutron.tests.unit import _test_extension_portbindings as test_bindings
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode
import neutron.tests.unit.test_extension_security_group as ext_sg
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
from neutron.tests.unit import testlib_api
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware.apiclient import fake
LOG = log.getLogger(__name__)
class TestL3SecGrpExtensionManager(TestL3ExtensionManager):
    """A fake extension manager for L3 and Security Group extensions.

    Includes also NSX specific L3 attributes.
    """
def backup_l3_attribute_map():
    """Return a backup of the original l3 attribute map.

    Each per-resource attribute dict is shallow-copied so later edits to the
    live map do not leak into the backup.
    """
    backup = {}
    for resource, attrs in l3.RESOURCE_ATTRIBUTE_MAP.iteritems():
        backup[resource] = attrs.copy()
    return backup
def restore_l3_attribute_map(map_to_restore):
    """Ensure changes made by fake ext mgrs are reverted.

    Rebinds the module-global map wholesale; pass a map previously captured
    with backup_l3_attribute_map().
    """
    l3.RESOURCE_ATTRIBUTE_MAP = map_to_restore
| [
2,
15069,
357,
66,
8,
2321,
4946,
25896,
5693,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 3.380499 | 841 |
#!/usr/bin/env python3
############################################################
## Jose F. Sanchez, Marta Lopez & Lauro Sumoy ##
## Copyright (C) 2019-2021 Lauro Sumoy Lab, IGTP, Spain ##
############################################################
from HCGB.functions import aesthetics_functions
"""
This module downloads data for genome annotation, miRNA, tRNA and piRNA analysis:
"""
## import useful modules
import os
import sys
import re
import time
from io import open
import shutil
import concurrent.futures
import pandas as pd
from termcolor import colored
## import my modules
from HCGB import sampleParser
from HCGB import functions
from XICRA.config import set_config
from XICRA.modules import help_XICRA
from XICRA.scripts import generate_DE
from XICRA.scripts import MINTMap_caller
##############################################
##############################################
##############################################
##############################################
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
29113,
14468,
7804,
4242,
198,
2235,
5264,
376,
13,
21909,
11,
3981,
64,
22593,
1222,
40014,
305,
5060,
726,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22492,
19... | 3.864662 | 266 |
__author__ = 'Jonas Jaeger'
from os.path import join
import os
from datetime import datetime
import json
import shutil
import zipfile
import lost
import fsspec
import numpy as np
import cv2
import ast
from lost.logic.crypt import decrypt_fs_connection
#import ptvsd
# Directory layout rooted at DATA_ROOT_PATH (empty string = paths are
# relative to the working directory). Commented-out entries are retained
# as documentation of the historical layout.
DATA_ROOT_PATH = ""
MEDIA_ROOT_PATH = DATA_ROOT_PATH + "media/"
# MEDIA_UPLOAD_PATH = MEDIA_ROOT_PATH + "uploads/"
# MEDIA_CHUNK_PATH = MEDIA_ROOT_PATH + ".chunks/"
# SCRIPT_ROOT_PATH = DATA_ROOT_PATH + "script/"
PIPE_ROOT_PATH = DATA_ROOT_PATH + "pipes/"
INSTANCE_ROOT_PATH = DATA_ROOT_PATH + "instance/"
DEBUG_ROOT_PATH = DATA_ROOT_PATH + "debug/"
PACKED_PIPE_ROOT_PATH = DATA_ROOT_PATH + "packed_pipes/"
SIA_HISTORY_PATH = DATA_ROOT_PATH + "sia_history/"
SIA_HISTORY_BACKUP_PATH = DATA_ROOT_PATH + "sia_history/backup/"
PIPE_LOG_PATH = DATA_ROOT_PATH + "logs/pipes/"
APP_LOG_PATH = DATA_ROOT_PATH + "logs/"
# MIA_CROP_PATH = DATA_ROOT_PATH + "mia_crops/"
# JUPYTER_NOTEBOOK_OUTPUT_PATH = DATA_ROOT_PATH + "notebooks/jupyter_output.txt"
# MY_DATA_PATH = "my_data/"
def unzipdir(src, dst):
    '''Unzip an archive that contains a directory structure.

    Args:
        src: Path to zip file.
        dst: Path to store the extracted directory tree.
    '''
    # Context manager guarantees the archive handle is closed even on error
    # (the original never closed it); extractall restores the full tree.
    with zipfile.ZipFile(src) as archive:
        archive.extractall(dst)
def zipdir(src, dst):
    '''Zip a directory.

    Args:
        src: The directory to zip.
        dst: Path to store the created zip file.
    '''
    # Resolve dst before chdir so a relative dst stays caller-relative.
    dst_path = os.path.abspath(dst)
    oldwd = os.getcwd()
    os.chdir(src)
    try:
        # Entries are written relative to src ('./...'), hence the chdir.
        with zipfile.ZipFile(dst_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, dirs, files in os.walk('.'):
                for f in files:
                    zipf.write(os.path.join(root, f))
    finally:
        # Always restore the working directory and close the archive, even if
        # walking/writing fails (the original left the process chdir'ed into
        # src and the zip handle open on error).
        os.chdir(oldwd)
def validate_action(db_man, path):
    ''' Return True when a move, edit or delete of *path* is allowed.

    The action is forbidden as soon as any datasource's raw file path is
    contained in *path*.
    '''
    datasources = db_man.get_all_datasources()
    return all(ds.raw_file_path not in path for ds in datasources)
| [
834,
9800,
834,
796,
705,
18219,
292,
34521,
1362,
6,
198,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,
198,
11748,
4423,
346,
198,
11748,
19974,
7753,
198,
11748... | 2.348131 | 856 |
from __future__ import annotations
import asyncio
import logging
from typing import List, Optional, Union
import confluent_kafka
from confluent_avro import SchemaRegistry
from kafka_streamer.client import AsyncKafkaConsumer, AsyncKafkaProducer
from kafka_streamer.models import SchematicRecord, Serializable
from kafka_streamer.topic import RegexTopic, SingleTopic
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
11748,
18931,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
4479,
198,
198,
11748,
1013,
28216,
62,
74,
1878,
4914,
198,
6738,
1013,
28216,
62,
615,
305,
1330,
10011... | 3.514286 | 105 |
# Generated by Django 2.0.13 on 2020-01-29 20:09
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
1485,
319,
12131,
12,
486,
12,
1959,
1160,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.840909 | 44 |
"""Build, test, convert and upload a conda package.
For the upload step to work you have to log into your anaconda.org account
before you run the script. The steps for this are explained here:
https://conda.io/docs/user-guide/tutorials/build-pkgs.html
"""
from conda_build.api import build, convert
from os.path import split, join
from subprocess import run
if __name__ == '__main__':
    # Platforms we distribute for; each built package gets converted to every
    # platform except the one it was built on.
    platforms = ['osx-64', 'linux-32', 'linux-64', 'win-32', 'win-64']

    built_packages = build('.', need_source_download=False)

    converted_packages = []
    for built_path in built_packages:
        # built_path looks like <out_root>/<build_platform>/<package_name>
        platform_dir, package_name = split(built_path)
        out_root, build_platform = split(platform_dir)
        target_platforms = [pf for pf in platforms if pf != build_platform]
        convert(built_path, output_dir=out_root, platforms=target_platforms)
        print('\n{} was converted to the following platforms: {}\n'.format(
            package_name, target_platforms))
        for pf in target_platforms:
            converted_packages.append(join(out_root, pf, package_name))

    # Upload the native build plus every converted variant.
    all_packages = built_packages + converted_packages
    for package in all_packages:
        _, package_name = split(package)
        run(['anaconda', 'upload', package])
        print('\n{} was uploaded to anaconda.org'.format(package_name))
| [
37811,
15580,
11,
1332,
11,
10385,
290,
9516,
257,
1779,
64,
5301,
13,
198,
198,
1890,
262,
9516,
2239,
284,
670,
345,
423,
284,
2604,
656,
534,
281,
330,
13533,
13,
2398,
1848,
198,
19052,
345,
1057,
262,
4226,
13,
383,
4831,
329,
... | 2.65368 | 462 |
from setuptools import (
setup,
find_packages,
)
from os import path
here = path.abspath(path.dirname(__file__))

# Long description = README followed by the full changelog.
with open(path.join(here, "README.md")) as rdme:
    with open(path.join(here, "CHANGELOG.md")) as chlog:
        readme = rdme.read()
        changes = chlog.read()

long_description = readme + "\nCHANGELOG\n--------------------------------------\n" + changes

setup(
    name="py_types",
    version="0.1.1a",
    description="Gradual typing for python 3.",
    long_description=long_description,
    url="https://github.com/zekna/py-types",
    author="Zach Nelson",
    author_email="kzacharynelson@gmail.com",
    license="MIT",
    classifiers=[
        # Fixed typo ("Develpoment"): classifiers must exactly match the
        # official trove list or PyPI rejects/ignores them.
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Tools",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
    ],
    keywords="type checking development schema",
    packages=find_packages(exclude=["tests*"]),
    install_requires=[],
    extras_require={},
    package_data={},
    data_files=[],
    entry_points={},
    test_suite='nose2.collector.collector'
)
| [
6738,
900,
37623,
10141,
1330,
357,
198,
220,
220,
220,
9058,
11,
198,
220,
220,
220,
1064,
62,
43789,
11,
198,
8,
198,
6738,
28686,
1330,
3108,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,
7753... | 2.570565 | 496 |
# Copyright (c) 2021, Ethan Henderson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import os
from nusex import TEMPLATE_DIR, Template
from nusex.errors import DeploymentError, DoesNotExist
from nusex.helpers import cprint
# Module-level logger named after this module, per logging best practice.
log = logging.getLogger(__name__)
| [
2,
15069,
357,
66,
8,
33448,
11,
28926,
22016,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
262,
1708,
3403,
... | 3.524096 | 498 |
from datetime import datetime
import re
from typing import List
from openpyxl.workbook import Workbook
from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.worksheet.page import PageMargins
from openpyxl.styles import Font, PatternFill, Alignment, Border, Side
from wrex.extraction.pub_extract import PubExtract
from wrex.meeting.meeting_section import MeetingSection
from wrex.meeting.section_kind import SectionKind
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
302,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
1280,
9078,
87,
75,
13,
1818,
2070,
1330,
5521,
2070,
198,
6738,
1280,
9078,
87,
75,
13,
5225,
25473,
13,
5225,
25473,
1330,
10933,
254... | 3.491935 | 124 |
from django.db import models
class Hash(models.Model):
    """
    Stores a piece of text together with its hash digest.

    :var field text: the original input text (TextField)
    :var field hash: digest of ``text`` as a 64-char string -- presumably a
        hex SHA-256 computed by the caller; confirm at the call sites.
    """
    text = models.TextField()
    hash = models.CharField(max_length=64)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
4871,
21059,
7,
27530,
13,
17633,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
21059,
9104,
628,
220,
220,
220,
1058,
7785,
2214,
2420,
25,
8255,
2214,
198,
220,
220,
220,
... | 2.73494 | 83 |
# Copyright (c) 2018-2020, Vanessa Sochat All rights reserved.
# See the LICENSE in the main repository at:
# https://www.github.com/openbases/openbases-python
from openbases.logger import bot
from openbases.utils import read_frontmatter
import os
import re
import sys
class Author:
'''an Author holds a name, orcid id, and affiliation'''
| [
2,
15069,
357,
66,
8,
2864,
12,
42334,
11,
42100,
1406,
17006,
1439,
2489,
10395,
13,
198,
2,
4091,
262,
38559,
24290,
287,
262,
1388,
16099,
379,
25,
198,
2,
220,
220,
220,
3740,
1378,
2503,
13,
12567,
13,
785,
14,
9654,
65,
1386... | 3.231481 | 108 |
# Quick demo of the min-tracking stack defined above: get_min_value() should
# report the smallest element currently on the stack.
stack = Stack()
stack.push(4)
stack.push(6)
stack.push(2)
print(stack.get_min_value())  # presumably prints 2 (smallest so far)
stack.push(1)
print(stack.get_min_value())  # presumably prints 1
| [
198,
25558,
796,
23881,
3419,
198,
25558,
13,
14689,
7,
19,
8,
198,
25558,
13,
14689,
7,
21,
8,
198,
25558,
13,
14689,
7,
17,
8,
198,
4798,
7,
25558,
13,
1136,
62,
1084,
62,
8367,
28955,
198,
25558,
13,
14689,
7,
16,
8,
198,
4... | 2.339286 | 56 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `lui_gui` package."""
import os
import random
import pathlib
from unittest import TestCase
import mock
from luigi.mock import MockTarget
from luigi import LocalTarget
from luigi.contrib.opener import OpenerTarget, NoOpenerError
class LuigiTestExternalTasks(TestCase):
    """Test Luigi OpenerTarget dispatch with mocked backends.

    Code Source Referenced: https://github.com/spotify/luigi/blob/master/test/contrib/opener_test.py
    """
    def test_invalid_target(self):
        '''Verify an unrecognized URL scheme raises NoOpenerError
        '''
        self.assertRaises(NoOpenerError, OpenerTarget, 'foo://bar.txt')

    # Decorators apply bottom-up, so the bottom patch (__del__) supplies the
    # first mock argument after self (lt_del_patch).
    @mock.patch('luigi.file.LocalTarget.__init__')
    @mock.patch('luigi.file.LocalTarget.__del__')
    def test_local_tmp_target(self, lt_del_patch, lt_init_patch):
        '''Verify local target url with query string

        NOTE(review): ``self.local_file`` is never initialized anywhere in
        this class (there is no setUp), so this test would raise
        AttributeError; the upstream luigi test sets it in setUp. TODO confirm.
        '''
        lt_init_patch.return_value = None
        lt_del_patch.return_value = None

        local_file = "file://{}?is_tmp".format(self.local_file)
        OpenerTarget(local_file)
        # The query-string flag must be forwarded as a keyword argument.
        lt_init_patch.assert_called_with(self.local_file, is_tmp=True)

    @mock.patch('luigi.contrib.s3.S3Target.__init__')
    def test_s3_parse(self, s3_init_patch):
        '''Verify basic s3 target url
        '''
        s3_init_patch.return_value = None

        local_file = "s3://zefr/foo/bar.txt"
        OpenerTarget(local_file)
        s3_init_patch.assert_called_with("s3://zefr/foo/bar.txt")

    @mock.patch('luigi.contrib.s3.S3Target.__init__')
    def test_s3_parse_param(self, s3_init_patch):
        '''Verify s3 target url with params

        Query-string parameters are split off the URL and passed through as
        keyword arguments to the backend target.
        '''
        s3_init_patch.return_value = None

        local_file = "s3://zefr/foo/bar.txt?foo=hello&bar=true"
        OpenerTarget(local_file)
        s3_init_patch.assert_called_with("s3://zefr/foo/bar.txt",
                                         foo='hello',
                                         bar='true')
class LuigiLocalTargetTest(TestCase):
    """ Test Luigi function processing of Local Targets
    Code Source Referenced: https://github.com/spotify/luigi/blob/master/test/local_target_test.py

    NOTE(review): no test methods are present -- the class appears to be a
    placeholder (or was trimmed). TODO confirm.
    """
    # Path used by the (missing) local-target tests.
    PATH_PREFIX = '/tmp/test.txt'
class no_leaked_secrets(TestCase):
    """
    Test cases to verify no secret variables were released
    """

    def test_verify_no_dotenv(self):
        """
        Verify no dotenv was leaked in repo
        """
        # A committed .env in the working directory would mean leaked secrets.
        dotenv_path = pathlib.Path(os.getcwd()) / ".env"
        assert not dotenv_path.exists()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
51,
3558,
329,
4600,
2290,
72,
62,
48317,
63,
5301,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
4738,
... | 2.255223 | 1,101 |
import rlp
from ethereum.utils import sha3, encode_hex
from ethereum import trie
def get_merkle_proof(db, root, value):
    """Get the merkle proof of a given value in trie

    value must exist in trie or exception will be thrown
    returns a list of nodes starting from root to leaf node
    """
    assert db and root and value
    # The trie is keyed by the sha3 hash of the value, bin-encoded.
    return trie._get_branch(db, root, trie.encode_bin(sha3(value)))
def verify_merkle_proof(branch, root, key, value):
    """Check whether *branch* proves that *key* maps to *value* under *root*.

    Returns True or False.
    """
    assert branch and root and key
    encoded_key = trie.encode_bin(key)
    return trie._verify_branch(branch, root, encoded_key, value)
def store_merkle_branch_nodes(db, branch):
    """Store the nodes of the merkle branch into db

    Rebuilds each intermediate node from the leaf (branch[-1]) back up to
    the root, persisting every reconstructed node via trie.hash_and_save.
    """
    # The leaf node is stored as-is.
    nodes = [branch[-1]]
    trie.hash_and_save(db, nodes[0])
    # Walk the branch from the second-to-last entry back toward the root;
    # nodes[0] always holds the most recently reconstructed child.
    for data in branch[-2::-1]:
        # First byte is a type marker, the rest is the node payload.
        marker, node = data[0], data[1:]
        if marker == 1:
            # key/value node: payload is a bin-encoded path.
            node = trie.decode_bin_path(node)
            nodes.insert(0, trie.encode_kv_node(node, sha3(nodes[0])))
        elif marker == 2:
            # branch node with the current child's hash in the first slot.
            nodes.insert(0, trie.encode_branch_node(sha3(nodes[0]), node))
        elif marker == 3:
            # branch node with the current child's hash in the second slot.
            nodes.insert(0, trie.encode_branch_node(node, sha3(nodes[0])))
        else:
            raise Exception("Corrupted branch")
        trie.hash_and_save(db, nodes[0])
def mk_tx_bundle(state, tx, state_root):
    """Generate transaction bundle for transaction which includes:
    1. tx data
    2. list of merkle proof of each account in read/write list
    3. list of {sha3(code): code} pair
    """
    from ethereum.state import Account
    from ethereum.transactions import Transaction

    bundle = {"tx_rlpdata": rlp.encode(tx, Transaction)}

    codes = set()
    proofs = []
    for account in tx.read_write_union_list:
        proof = get_merkle_proof(state.trie.db, state_root, account)
        # The final proof node is the RLP-encoded account record.
        code = rlp.decode(proof[-1], Account, env=state.env, address=account).code
        if code:
            codes.add(code)
        proofs.append({account: proof})
    bundle["account_proof_list"] = proofs

    # De-duplicated contract code, keyed by its hash.
    bundle["code_list"] = [{sha3(c): c} for c in codes]
    return bundle
| [
11748,
374,
34431,
198,
198,
6738,
304,
17733,
13,
26791,
1330,
427,
64,
18,
11,
37773,
62,
33095,
198,
6738,
304,
17733,
1330,
1333,
68,
628,
198,
4299,
651,
62,
647,
74,
293,
62,
13288,
7,
9945,
11,
6808,
11,
1988,
2599,
198,
22... | 2.325651 | 998 |
/usr/local/Cellar/opencv/2.4.12_2/lib/python2.7/site-packages/cv.py | [
14,
14629,
14,
12001,
14,
34,
14203,
14,
9654,
33967,
14,
17,
13,
19,
13,
1065,
62,
17,
14,
8019,
14,
29412,
17,
13,
22,
14,
15654,
12,
43789,
14,
33967,
13,
9078
] | 2.030303 | 33 |
"""Helper kytos-challenge functions."""
# System imports
from struct import *
# Local imports
from packet import *
def unpack_header(bytes):
    """Unpack packet header content.

    Layout is big-endian: version (u8), type (u8), length (u16), xid (u32).
    Returns the four fields as a tuple.
    """
    version, msg_type, length, xid = unpack('>BBHI', bytes)
    return version, msg_type, length, xid
def unpack_packet(packet_name):
    """Unpack packet content.

    Args:
        packet_name: path to the binary packet file.

    Returns:
        Packet wrapping the Header parsed from the file's first 8 bytes.
    """
    # BUG FIX: the original opened the file and never closed it; use a
    # context manager so the handle is released deterministically.
    with open(packet_name, 'rb') as f:
        version, type, length, xid = unpack_header(f.read(8))
    header = Header(version, type, length, xid)
    packet = Packet(header)
    return packet
| [
37811,
47429,
479,
20760,
418,
12,
36747,
3540,
5499,
526,
15931,
198,
198,
2,
4482,
17944,
198,
6738,
2878,
1330,
1635,
198,
198,
2,
10714,
17944,
198,
6738,
19638,
1330,
1635,
198,
198,
4299,
555,
8002,
62,
25677,
7,
33661,
2599,
19... | 2.772487 | 189 |
"""empty message
Revision ID: 4727c742f8e5
Revises: 2923a924be67
Create Date: 2019-12-02 21:24:09.809578
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '4727c742f8e5'
down_revision = '2923a924be67'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
6298,
1983,
66,
22,
3682,
69,
23,
68,
20,
198,
18009,
2696,
25,
2808,
1954,
64,
24,
1731,
1350,
3134,
198,
16447,
7536,
25,
13130,
12,
1065,
12,
2999,
2310,
25,
1731,
25,
2931,
... | 2.526316 | 133 |
from sympy.combinatorics.graycode import (
GrayCode,
bin_to_gray,
random_bitstring,
get_subset_from_bitstring,
graycode_subsets,
gray_to_bin,
)
from sympy.testing.pytest import raises
| [
6738,
10558,
88,
13,
785,
8800,
1352,
873,
13,
44605,
8189,
1330,
357,
198,
220,
220,
220,
12723,
10669,
11,
198,
220,
220,
220,
9874,
62,
1462,
62,
44605,
11,
198,
220,
220,
220,
4738,
62,
2545,
8841,
11,
198,
220,
220,
220,
651,... | 2.386364 | 88 |
#!/usr/bin/env python
"""
Build and display orderbooks from a given pcap file
"""
import sys
import os.path
import gzip
import dpkt
import binascii
from mdp.secdef import SecDef
from mdp.orderbook import PacketProcessor
from mdp.orderbook import ConsolePrinter
from sbedecoder import MDPSchema
from sbedecoder import MDPMessageFactory
from sbedecoder import SBEParser
if __name__ == '__main__':
status = main()
sys.exit(status)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
15580,
290,
3359,
1502,
12106,
422,
257,
1813,
279,
11128,
2393,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
28686,
13,
6978,
198,
11748,
308,
13344,
198,
198,
11748... | 2.973333 | 150 |
# Overcommented for explanatory reasons
# Load the library that handles connecting to the internet
import requests
# Ask the user for the search term
query = input('What do you want to search?')

# Performing the search
# NOTE(review): the query is concatenated raw into the URL (not
# percent-encoded); consider requests' params={'q': query}. TODO confirm.
searchres = requests.get('https://www.google.com/search?q='+ query)

# Make sure the request works (check it)
# raise_for_status() raises requests.HTTPError on 4xx/5xx responses.
searchres.raise_for_status()
| [
2,
3827,
785,
12061,
329,
44742,
3840,
198,
2,
8778,
262,
5888,
326,
17105,
14320,
284,
262,
5230,
198,
11748,
7007,
198,
198,
2,
16981,
262,
2836,
329,
262,
2989,
3381,
220,
198,
22766,
796,
5128,
10786,
2061,
466,
345,
765,
284,
2... | 3.656566 | 99 |
"""
s_scatter3d
"""
| [
37811,
198,
82,
62,
1416,
1436,
18,
67,
198,
37811,
628
] | 1.909091 | 11 |
import sys
import os
# Running anywhere except the pyboard counts as the simulator.
simulator = sys.platform != "pyboard"

# to overwrite these settings create a config.py file
if simulator:
    storage_root = "./fs"
    try:
        os.mkdir(storage_root)
    # BUG FIX: was a bare ``except:`` which also swallowed SystemExit /
    # KeyboardInterrupt; os.mkdir only raises OSError subclasses
    # (e.g. FileExistsError when the directory is already there).
    except OSError:
        pass
else:
    storage_root = ""

# pin that triggers QR code
# if command mode failed
QRSCANNER_TRIGGER = "D2"
| [
11748,
25064,
198,
11748,
28686,
198,
198,
14323,
8927,
796,
25064,
13,
24254,
14512,
366,
9078,
3526,
1,
198,
198,
2,
284,
49312,
777,
6460,
2251,
257,
4566,
13,
9078,
2393,
198,
198,
361,
35375,
25,
198,
220,
220,
220,
6143,
62,
1... | 2.550388 | 129 |
import numpy
def principal_axis(alpha_carbons):
    """
    Calculate the principal inertia axis of the structure along with its
    geometrical center.
    ---
    Parameters:
        alpha_carbons: alpha carbons of the structure (3D coordinates)
    ---
    Return:
        center: geometrical center of the structure
        axis_direction: unit vector along the principal axis
    """
    coords = numpy.asarray(alpha_carbons, dtype=float)
    # Geometrical center, then shift coordinates so it sits at the origin.
    center = coords.mean(axis=0)
    centered = coords - center
    # Inertia matrix and its eigen-decomposition.
    inertia = centered.transpose().dot(centered)
    eigenvalues, eigenvectors = numpy.linalg.eig(inertia)
    # Order eigenvectors by eigenvalue; the last one (largest eigenvalue)
    # is the principal axis.
    _, _, principal = eigenvectors[:, numpy.argsort(eigenvalues)].transpose()
    axis_direction = principal / numpy.linalg.norm(principal)
    return center, axis_direction
| [
11748,
299,
32152,
628,
198,
4299,
10033,
62,
22704,
7,
26591,
62,
7718,
23461,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
27131,
378,
10033,
48482,
16488,
329,
262,
4645,
1863,
351,
663,
4903,
908,
8143,
3641,
628,
220,
220... | 2.900302 | 331 |
"""empty message
Revision ID: e11c61ce67e7
Revises: 5db306adc6cc
Create Date: 2018-04-17 08:27:07.852716
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'e11c61ce67e7'
down_revision = '5db306adc6cc'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
304,
1157,
66,
5333,
344,
3134,
68,
22,
198,
18009,
2696,
25,
642,
9945,
20548,
324,
66,
21,
535,
198,
16447,
7536,
25,
2864,
12,
3023,
12,
1558,
8487,
25,
1983,
25,
2998,
13,
5... | 2.544776 | 134 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from starchart.ml import contexts, jobs
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
39515,
433,
13,
4029,
1330,
26307,
11,
3946,
198
] | 2.558824 | 34 |
# -*- coding: utf-8 -*-
#*************************************************************
# Copyright (c) 2003-2012, Emerging Threats
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of the nor the names of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#*************************************************************
from IDSUtils import *
from IDSLogging import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
17174,
8412,
4557,
35625,
198,
2,
220,
15069,
357,
66,
8,
5816,
12,
6999,
11,
48297,
25238,
82,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
220,
220,
198,
... | 3.590164 | 488 |
"""Main interface to other modules
"""
import argparse
# multivariate
from mise.dl.dt_xgboost import dl_xgboost
from mise.dl.mlp_mul_ms import dl_mlp_mul_ms
from mise.dl.mlp_mul_ms_mccr import dl_mlp_mul_ms_mccr
from mise.dl.mlp_mul_transformer import dl_mlp_mul_transformer
from mise.dl.mlp_mul_transformer_mccr import dl_mlp_mul_transformer_mccr
# machine learning models
# univariate
from mise.dl.mlp_uni_ms import dl_mlp_uni_ms
from mise.dl.mlp_uni_ms_mccr import dl_mlp_uni_ms_mccr
from mise.dl.rnn_mul_lstnet_skip import dl_rnn_mul_lstnet_skip
from mise.dl.rnn_mul_lstnet_skip_mccr import dl_rnn_mul_lstnet_skip_mccr
from mise.dl.rnn_uni_attn import dl_rnn_uni_attn
from mise.dl.rnn_uni_attn_mccr import dl_rnn_uni_attn_mccr
from mise.stats.analysis import stats_analysis
# statistical models
from mise.stats.ARIMA import stats_arima
from mise.stats.impute import stats_imputation_stats
from mise.stats.OU import stats_ou
from mise.stats.preprocess import stats_parse, stats_preprocess
def compute_stats(_args):
    """Run the statistical models named in ``_args["stats"]``.

    Each name ``sim`` is dispatched to the module-level function
    ``stats_<sim>`` (imported at the top of this module).
    """
    sims = _args["stats"]
    if not sims:
        # specify all simulation name
        # sims =
        pass
    print("STAT SIMS: ", sims)
    for sim in sims:
        globals()["stats_" + sim]()
def compute_dl(_args):
    """Run the deep-learning models named in ``_args["dl"]``.

    Each name ``sim`` is dispatched to the module-level function
    ``dl_<sim>`` (imported at the top of this module).
    """
    sims = _args["dl"]
    if not sims:
        # specify all simulation name
        # sims =
        pass
    print("DL SIMS: ", sims)
    for sim in sims:
        globals()["dl_" + sim]()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # nargs="*" => the attribute is None when the flag is absent, and an
    # (possibly empty) list when the flag is given.
    parser.add_argument("-s", "--stats", nargs="*", help="statistics simulations")
    parser.add_argument("-d", "--dl", nargs="*", help="deep learning simulations")
    args = vars(parser.parse_args())

    # statistical models (only when -s/--stats was passed)
    if args["stats"] is not None:
        compute_stats(args)

    # machine learning (only when -d/--dl was passed)
    if args["dl"] is not None:
        compute_dl(args)
| [
37811,
13383,
7071,
284,
584,
13103,
198,
37811,
198,
11748,
1822,
29572,
198,
198,
2,
1963,
42524,
198,
6738,
285,
786,
13,
25404,
13,
28664,
62,
87,
70,
39521,
1330,
288,
75,
62,
87,
70,
39521,
198,
6738,
285,
786,
13,
25404,
13,
... | 2.296703 | 910 |
import requests
import unittest
from cmc_api.common.api import Api
if __name__ == '__main__':
unittest.main() | [
11748,
7007,
198,
11748,
555,
715,
395,
198,
6738,
12067,
66,
62,
15042,
13,
11321,
13,
15042,
1330,
5949,
72,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
34... | 2.697674 | 43 |
import sys
import random
import os.path as osp
import time
import tqdm
import torch
import torch.nn.functional as F
from torch import tensor
from torch.optim import Adam
from sklearn.model_selection import StratifiedKFold
from torch_geometric.data import DataLoader, DenseDataLoader as DenseLoader
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
file_name=osp.join('/kaggle/working',str(random.randrange(sys.maxsize))+'.txt')
# with open(file_name,'w') as f: #clear the output file
# pass
loss, acc, duration = tensor(val_losses), tensor(accs), tensor(durations)
loss, acc = loss.view(-1, epochs), acc.view(-1, epochs)
loss, argmin = loss.min(dim=1)
acc = acc[torch.arange(acc_folds, dtype=torch.long), argmin]
loss_mean = loss.mean().item()
acc_mean = acc.mean().item()
acc_std = acc.std().item()
duration_mean = duration.mean().item()
print('Val Loss: {:.4f}, Test Accuracy: {:.3f} ± {:.3f}, Duration: {:.3f}'.
format(loss_mean, acc_mean, acc_std, duration_mean))
with open(file_name,'a') as f:
f.write('num_layers: {}, hidden: {}, Val Loss: {:.4f}, Test Accuracy: {:.3f} ± {:.3f}, Duration: {:.3f} \n'.
format(model.num_layers, model.hidden, loss_mean, acc_mean, acc_std, duration_mean))
return loss_mean, acc_mean, acc_std
def k_fold(dataset, folds):
    """Split *dataset* into stratified train/test/val index sets, one per fold.

    Returns:
        (train_indices, test_indices, val_indices): three lists with one
        LongTensor of indices per fold.  The validation set of fold ``i`` is
        the test set of fold ``i - 1`` (wrapping around), so every sample is
        used exactly once as test and once as validation.
    """
    # Stratify on the labels so class balance is preserved in each fold;
    # fixed seed keeps the split reproducible.
    skf = StratifiedKFold(folds, shuffle=True, random_state=12345)

    test_indices, train_indices = [], []
    # skf only needs a placeholder X; the labels drive the stratification.
    for _, idx in skf.split(torch.zeros(len(dataset)), dataset.data.y):
        test_indices.append(torch.from_numpy(idx))

    # Fold i validates on fold i-1's test indices (index -1 wraps around).
    val_indices = [test_indices[i - 1] for i in range(folds)]

    for i in range(folds):
        # Start with everything selected, then drop test and val samples.
        train_mask = torch.ones(len(dataset), dtype=torch.uint8)
        train_mask[test_indices[i]] = 0
        train_mask[val_indices[i]] = 0
        train_indices.append(train_mask.nonzero().view(-1))

    return train_indices, test_indices, val_indices
def num_graphs(data):
    """Return how many graphs *data* represents."""
    # Batched objects carry an explicit count; otherwise fall back to the
    # first dimension of ``x`` (assumes one row per graph -- TODO confirm).
    if data.batch is None:
        return data.x.size(0)
    return data.num_graphs
def train(model, optimizer, loader):
    """Run one training epoch; return the mean per-graph nll loss."""
    model.train()
    total_loss = 0
    for data in tqdm.tqdm(loader):
        # Clear gradients from the previous step before the new backward pass.
        optimizer.zero_grad()
        data = data.to(device)
        out = model(data)
        loss = F.nll_loss(out, data.y.view(-1))
        loss.backward()
        # Weight the batch-mean loss by batch size so the final division
        # yields a true per-example average over the whole dataset.
        total_loss += loss.item() * num_graphs(data)
        optimizer.step()
    return total_loss / len(loader.dataset)
def eval_acc(model, loader):
    """Fraction of correctly classified targets over *loader*'s dataset."""
    model.eval()
    n_correct = 0
    for batch in tqdm.tqdm(loader):
        batch = batch.to(device)
        with torch.no_grad():
            # argmax over the class dimension
            predictions = batch and model(batch).max(1)[1]
        n_correct += predictions.eq(batch.y.view(-1)).sum().item()
    return n_correct / len(loader.dataset)
def eval_loss(model, loader):
    """Mean per-example nll loss of *model* over *loader*'s dataset."""
    model.eval()
    total = 0
    for batch in tqdm.tqdm(loader):
        batch = batch.to(device)
        with torch.no_grad():
            logits = model(batch)
        # Sum (not mean) per batch so the final division is a true average.
        total += F.nll_loss(logits, batch.y.view(-1), reduction='sum').item()
    return total / len(loader.dataset)
def eval_loss_acc(model,loader):
    """Single evaluation pass returning (mean nll loss, accuracy).

    Combines eval_loss and eval_acc so the model is only run once per batch.
    """
    model.eval()
    loss = 0
    correct=0
    for data in tqdm.tqdm(loader):
        data=data.to(device)
        with torch.no_grad():
            out=model(data)
            # argmax over the class dimension
            pred=out.max(1)[1]
        # Sum the loss per batch; divided by the dataset size below.
        loss+=F.nll_loss(out,data.y.view(-1),reduction='sum').item()
        correct+=pred.eq(data.y.view(-1)).sum().item()
    return loss/len(loader.dataset), correct/len(loader.dataset)
11748,
25064,
198,
11748,
4738,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
640,
198,
198,
11748,
256,
80,
36020,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
1330,
11192,
273,
198... | 2.293149 | 1,518 |
from python.classes import Solution
solution = Solution('inputs/inputs_02.json', first_solution, second_solution)
| [
6738,
21015,
13,
37724,
1330,
28186,
628,
628,
198,
82,
2122,
796,
28186,
10786,
15414,
82,
14,
15414,
82,
62,
2999,
13,
17752,
3256,
717,
62,
82,
2122,
11,
1218,
62,
82,
2122,
8,
628
] | 3.4 | 35 |
from ecmwf.opendata import Client
| [
6738,
9940,
76,
86,
69,
13,
404,
437,
1045,
1330,
20985,
628
] | 2.916667 | 12 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This module contains tests for some of the tests/util code.
from tests.util.filesystem_utils import prepend_with_fs
from tests.util.parse_util import get_bytes_summary_stats_counter
def test_get_bytes_summary_stats_counter():
    """Test get_bytes_summary_stats_counter(counter_name, runtime_profile) using a dummy
    runtime profile.
    """
    profile = ("- ExampleCounter: (Avg: 8.00 KB (8192) ; "
               "Min: 6.00 KB (6144) ; "
               "Max: 10.00 KB (10240) ; "
               "Number of samples: 4)")
    stats = get_bytes_summary_stats_counter("ExampleCounter", profile)
    assert len(stats) == 1
    counter = stats[0]
    # sum = avg * samples = 8192 * 4
    assert counter.sum == 32768
    assert counter.min_value == 6144
    assert counter.max_value == 10240
    assert counter.total_num_values == 4
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 2.982238 | 563 |
# TC008 - My blog post delete
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import time
opt = Options()
opt.headless = False
driver = webdriver.Chrome(ChromeDriverManager().install(), options=opt)
driver.set_window_size(1000, 600, 600)
# Load page
driver.get("http://localhost:1667/")
time.sleep(3)
# Enter the data to be uploaded
email = 'testuser1@example.com'
username = 'testuser1'
pwd = 'Abcd123$'
# Fields xpath
email_x = '//*[@id="app"]/div/div/div/div/form/fieldset[1]/input'
pwd_x = '//*[@id="app"]/div/div/div/div/form/fieldset[2]/input'
username_x = '//*[@id="app"]/nav/div/ul/li[4]/a'
sign_button_x = '//*[@id="app"]/nav/div/ul/li[2]/a'
sign_in_btn_x = '//*[@id="app"]/div/div/div/div/form/button'
mytitle_btn_x = '//*[@id="app"]/div/div[2]/div/div/div[1]/ul/li[1]/a'
posttilte_x = '//*[@id="app"]/div/div[2]/div/div/div[2]/div/div/div[1]/a/h1'
delete_btn_x = '//*[@id="app"]/div/div[1]/div/div/span/button/span'
article_preview = '//*[@class="article-preview"]'
# Driver find
try:
# Sign in
sign_in(email, pwd)
time.sleep(2)
# Post find
find(username_x).click() # username click
time.sleep(2)
find(mytitle_btn_x).click() # my title click
time.sleep(2)
article_number = driver.find_elements_by_xpath(article_preview)
print(len(article_number))
original_num = int(len(article_number))
# Post delete
delete()
print(delete)
time.sleep(2)
# Control
article_number = driver.find_elements_by_xpath(article_preview)
print(len(article_number))
new_num = int(len(article_number))
print(new_num)
assert new_num < original_num
finally:
driver.close()
| [
2,
17283,
25257,
532,
2011,
4130,
1281,
12233,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
3992,
26230,
62,
37153,
13,
46659,
1330,
13282,
32103,
13511,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
25811,
... | 2.352075 | 747 |
from requests.utils import quote
# Sample strings to percent-encode.
urlString1 = "NIFTY 25"
# \B, \C and \D are not escape sequences, so the backslashes stay literal.
urlString2 = 'NIFTY A\B\C\D'
urlString3 = '22/01/2014'

# Using urls library
# safe='' means no characters are exempt, so even '/' gets encoded.
# NOTE(review): these are Python 2 print statements; not Python 3 compatible.
print quote(urlString1, safe='')
print quote(urlString2, safe='')
print quote(urlString3, safe='')
| [
6738,
7007,
13,
26791,
1330,
9577,
198,
198,
6371,
10100,
16,
796,
366,
45,
5064,
9936,
1679,
1,
198,
6371,
10100,
17,
796,
705,
45,
5064,
9936,
317,
59,
33,
59,
34,
59,
35,
6,
198,
6371,
10100,
18,
796,
705,
1828,
14,
486,
14,
... | 2.571429 | 91 |
# Copyright (c) 2017, Brandon Jones.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sublime
import sublime_plugin
g_counter = 0
g_playing = False
settings = sublime.load_settings("semilive.sublime-settings")
| [
2,
15069,
357,
66,
8,
2177,
11,
14328,
5437,
13,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
366,
25423,
12340... | 3.798762 | 323 |
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.views.generic.base import TemplateView, View
from kolibri.content.models import ChannelMetadata, ContentNode
| [
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
11,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
8692,
1330,
37350,
7680... | 3.539683 | 63 |
import tensorflow as tf
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
save_weights_only=False,
save_best_only=False)
tensorboard = tf.keras.callbacks.TensorBoard(log_dir,histogram_freq=1)
history = model.fit(
train_x, train_y,
batch_size=batch_size,
epochs=maxperiod,
#steps_per_epoch=10,
#validation_steps=10,
validation_data=(val_x, val_y),
validation_freq=1,
callbacks=[cp_callback,tensorboard]) | [
198,
11748,
11192,
273,
11125,
355,
48700,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
31396,
62,
47423,
796,
48700,
13,
6122,
292,
13,
13345,
10146,
13,
17633,
9787,
4122,
7,
7753,
6978,
28,... | 1.569106 | 492 |
import psdaq.configdb.configdb as cdb
import sys
import IPython
import numpy as np
import argparse
from psdaq.configdb.opaltt_config_store import opaltt_cdict
# Copy values and shape from config dict into cdict
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Update weights and/or calib polynomial constants')
parser.add_argument('--weights', help='space-delimited file of weights', default='')
parser.add_argument('--calib', help='space-delimited file of coefficients', default='')
parser.add_argument('--dev', help='use development db', action='store_true')
parser.add_argument('--inst', help='instrument', type=str, default='tmo')
parser.add_argument('--alias', help='alias name', type=str, default='BEAM')
parser.add_argument('--name', help='detector name', type=str, default='tmoopal2')
parser.add_argument('--segm', help='detector segment', type=int, default=0)
parser.add_argument('--id', help='device id/serial num', type=str, default='serial1234')
parser.add_argument('--user', help='user for HTTP authentication', type=str, default='tstopr')
parser.add_argument('--password', help='password for HTTP authentication', type=str, default='pcds')
args = parser.parse_args()
weights = np.loadtxt(args.weights)
calib = np.loadtxt(args.calib)
dbname = 'configDB' #this is the name of the database running on the server. Only client care about this name.
detname = f'{args.name}_{args.segm}'
db = 'devconfigdb' if args.dev else 'configdb'
url = f'https://pswww.slac.stanford.edu/ws-auth/{db}/ws/'
create = False
mycdb = cdb.configdb(url, args.inst, create,
root=dbname, user=args.user, password=args.password)
cfg = mycdb.get_configuration(args.alias,detname)
if cfg is None: raise ValueError('Config for instrument/detname %s/%s not found. dbase url: %s, db_name: %s, config_style: %s'%(args.inst,detname,url,dbname,args.alias))
top = opaltt_cdict()
# Need our own function to copy into top
copyValues(cfg,top)
if len(weights.shape)==1:
if weights.shape[0]>0:
print(f'Storing weights of length {weights.shape[0]}')
top.set('fex.fir_weights', weights, 'DOUBLE', override=True)
else:
print('Weights not updated')
else:
raise ValueError('dimension of weights {} is > 1'.format(len(weights.shape)))
if len(calib.shape)==1:
if calib.shape[0]>0:
print(f'Storing calib of length {calib.shape[0]}')
top.set('fex.calib_poly', calib, 'DOUBLE', override=True)
else:
print('Calib not updated')
else:
raise ValueError('dimension of calib {} is > 1'.format(len(calib.shape)))
top.setInfo('opal', args.name, args.segm, args.id, 'No comment')
mycdb.modify_device(args.alias, top)
| [
11748,
26692,
48539,
13,
11250,
9945,
13,
11250,
9945,
355,
269,
9945,
198,
11748,
25064,
198,
11748,
6101,
7535,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
6738,
26692,
48539,
13,
11250,
9945,
13,
33067,
926,
62,
... | 2.598921 | 1,112 |
from asyncio import sleep, get_event_loop
from contextvars import copy_context
from functools import wraps, partial
from itertools import count
from random import randint
from typing import Tuple, Callable, TypeVar, Awaitable
T = TypeVar("T")
__all__ = ["cached", "retry", "run_in_executor"]
| [
6738,
30351,
952,
1330,
3993,
11,
651,
62,
15596,
62,
26268,
198,
6738,
4732,
85,
945,
1330,
4866,
62,
22866,
198,
6738,
1257,
310,
10141,
1330,
27521,
11,
13027,
198,
6738,
340,
861,
10141,
1330,
954,
198,
6738,
4738,
1330,
43720,
60... | 3.23913 | 92 |
# -*- coding: utf-8 -*-
from path import path
PATH = path(__file__).dirname()
BIN = PATH / 'node_modules' / '.bin'
SRC = PATH / 'src'
COFFEE_SOURCES = list(SRC.walkfiles('*.coffee'))
JS_SOURCES = list(SRC.walkfiles('*.js'))
SOURCES = COFFEE_SOURCES + JS_SOURCES
COFFEE_TARGETS = [
(PATH / src.relpath(SRC)).replace('.coffee', '.js')
for src in COFFEE_SOURCES
]
JS_TARGETS = [
PATH / src.relpath(SRC)
for src in JS_SOURCES
]
def task_build():
    """Build the sources

    doit task generator: yields one sub-task compiling CoffeeScript sources
    (with source maps) and one copying plain JS sources into place.
    NOTE(review): ``as_strings`` is not defined in this chunk -- presumably
    a module-level helper converting path objects to str. TODO confirm.
    """
    yield {
        'name': 'coffee',
        # One `coffee --compile` invocation per source file.
        'actions': [
            [BIN / 'coffee', '--compile', '--map', '--output', '%s' % PATH, src]
            for src in COFFEE_SOURCES
        ],
        'targets': as_strings(COFFEE_TARGETS),
        'file_dep': as_strings(COFFEE_SOURCES),
        'watch': [str(SRC)],
    }
    yield {
        'name': 'js',
        # Plain JS files are just copied to their mirrored location.
        'actions': [
            ['cp', src, dest]
            for src, dest in zip(JS_SOURCES, JS_TARGETS)
        ],
        'targets': as_strings(JS_TARGETS),
        'file_dep': as_strings(JS_SOURCES),
        'watch': [str(SRC)],
    }
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
3108,
1330,
3108,
198,
198,
34219,
796,
3108,
7,
834,
7753,
834,
737,
15908,
3672,
3419,
198,
33,
1268,
796,
46490,
1220,
705,
17440,
62,
18170,
6,
1220,
45302,
... | 1.901042 | 576 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following lines in the
[options.entry_points] section in setup.cfg:
console_scripts =
fibonacci = nlpia_bot.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
import argparse
import logging
import sys
from chatbot.bots import Bot
from chatbot.contrib import (
ChoiceFeature,
DiceFeature,
DictionaryFeature,
PyPIFeature,
SlapbackFeature,
WikipediaFeature
)
# from tfw import __version__
__author__ = "hobs"
__copyright__ = "hobs"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def parse_args(args):
    """Parse command line parameters

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Just a Fibonnaci demonstration")
    parser.add_argument(
        '--version',
        action='version',
        # BUG FIX: `from tfw import __version__` is commented out at module
        # level, so referencing __version__ directly raised NameError as soon
        # as this function ran.  Fall back to a placeholder when it's absent.
        version='nlpia_bot {ver}'.format(
            ver=globals().get('__version__', 'unknown')))
    parser.add_argument(
        dest="nickname",
        help="IRC nick (nickname or username) for the bot",
        type=str,
        metavar="STR")
    parser.add_argument(
        '-v',
        '--verbose',
        dest="loglevel",
        help="set loglevel to INFO",
        action='store_const',
        const=logging.INFO)
    parser.add_argument(
        '-vv',
        '--very-verbose',
        dest="loglevel",
        help="set loglevel to DEBUG",
        action='store_const',
        const=logging.DEBUG)
    return parser.parse_args(args)
def setup_logging(loglevel):
    """Setup basic logging

    Args:
      loglevel (int): minimum loglevel for emitting messages
    """
    # Configure the root logger to write timestamped records to stdout.
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def ircbot(args=None,
           nickname='nlpia',
           irc_server='chat.freenode.net',
           port=6665,
           server_password='my_bots_password',
           channels=('#freenode', '#python'),
           features=None):
    """Entry point for console_script for shell command `ircbot --nickname nlpia` ...

    Args:
      args: parsed argparse namespace; any matching attribute overrides the
        corresponding keyword default below.
      nickname (str): IRC nick for the bot.
      irc_server (str): hostname of the IRC server to connect to.
      port (int): IRC server port.
      server_password (str): password sent to the server on connect.
      channels: tuple/list of channel names to join (or its repr as a string).
      features: iterable of feature plugins; defaults to the standard set.

    Returns:
      Whatever ``Bot.run()`` returns (this starts the bot event loop).
    """
    import ast  # local import: only needed here to parse the channels literal

    nickname = getattr(args, 'nickname', nickname)
    irc_server = getattr(args, 'irc_server', irc_server)
    port = int(float(getattr(args, 'port', port)))
    server_password = getattr(args, 'server_password', server_password)
    # SECURITY BUGFIX: the original used eval() here, which executes arbitrary
    # code if `channels` ever comes from user/CLI input. literal_eval parses
    # only Python literals (tuples/lists/strings), which is all we need.
    channels = ast.literal_eval(str(getattr(args, 'channels', channels)))
    features = features or (
        PyPIFeature(), WikipediaFeature(), DictionaryFeature(),
        DiceFeature(), ChoiceFeature(), SlapbackFeature())
    bot = Bot(
        nickname=nickname,
        hostname=irc_server,
        port=port,
        server_password=server_password,
        channels=channels,
        features=features,
    )
    return bot.run()
def main(args):
    """Main entry point allowing external calls

    Args:
      args ([str]): command line parameter list
    """
    namespace = parse_args(args)
    setup_logging(namespace.loglevel)
    _logger.debug("Starting crazy calculations...")
    print("The ircbot returned: {}".format(ircbot(namespace)))
    _logger.info("Script ends here")
def run():
    """Entry point for console_scripts."""
    # Drop argv[0] (the program name) and hand the rest to main().
    main(sys.argv[1:])
if __name__ == "__main__":
    # Allow running this module directly: `python skeleton.py <nickname>`
    run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1212,
318,
257,
18328,
2393,
326,
460,
4691,
355,
257,
3599,
966,
329,
257,
11361,
198,
41947,
4226,
13,
... | 2.511052 | 1,493 |
#
# Copyright (c) 2018-2019 One Identity
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import requests
from base64 import b64encode
from safeguard.sessions.plugin.box_configuration import BoxConfiguration
from safeguard.sessions.plugin.logging import get_logger
# OAuth2 token endpoint for One Identity Starling; the `{}` placeholder is
# presumably filled with an environment/region suffix — verify against callers.
STARLING_TOKEN_URL = "https://sts{}.cloud.oneidentity.com/auth/realms/StarlingClients/protocol/openid-connect/token"
# Cache key under which the Starling access token is stored between calls.
CACHE_KEY = "join_access_token"
logger = get_logger(__name__)
| [
2,
198,
2,
220,
220,
15069,
357,
66,
8,
2864,
12,
23344,
1881,
27207,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
... | 3.589681 | 407 |
from stataLogObject.Supports import ForestPlotInvalidAttributes, FOREST_DICT, methods_in_line
from stataLogObject.Configs import Table
from miscSupports import flip_list, write_markdown
from csvObject import write_csv
| [
6738,
336,
1045,
11187,
10267,
13,
15979,
2096,
1330,
9115,
43328,
44651,
29021,
11,
7473,
6465,
62,
35,
18379,
11,
5050,
62,
259,
62,
1370,
198,
6738,
336,
1045,
11187,
10267,
13,
16934,
82,
1330,
8655,
198,
198,
6738,
12747,
15979,
... | 3.606557 | 61 |
import numpy as onp
from sum_tree import SumTreef as SumTree
from .experience_buffer import ExperienceBuffer
class PriorityBuffer(ExperienceBuffer):
    """
    Extension of ExperienceBuffer, enables proportional prioritization of transitions.

    Replay priorities are kept in a sum tree (`self.sum_tree`) so sampling
    proportional to priority and priority updates are both O(log n).
    NOTE(review): relies on attributes set up by the base class or loader
    (`max_priority`, `min_priority`, `alpha`, `sum_tree`) — their
    initialization is not visible in this file; confirm against
    ExperienceBuffer.
    """

    def add_transitions(self,
                        observation_tm1: onp.ndarray,
                        action_tm1: onp.ndarray,
                        reward_t: onp.ndarray,
                        observation_t: onp.ndarray,
                        terminal_t: onp.ndarray):
        """
        Add a batch of transitions to buffer and initialize replay priority.

        New transitions are given the current maximum priority so each is
        sampled at least once before `update_priorities` refines it.
        """
        batch_size = len(observation_tm1)
        indices = self.get_update_indices(batch_size)
        # new observation have highest priority
        priorities = [self.max_priority for _ in range(batch_size)]
        # update the priorities in the sum tree
        self.sum_tree.update_values(indices, priorities)
        # add observations to buffer
        super(PriorityBuffer, self).add_transitions(
            observation_tm1, action_tm1, reward_t, observation_t, terminal_t
        )

    def sample_batch(self, batch_size):
        """
        Sample a batch of transitions from replay buffer.
        Transitions are selected according to proportional prioritization.

        Returns:
            (indices, priorities, transitions): sum-tree indices, the
            sampled priorities normalized by the tree total, and the
            corresponding transitions from the buffer.
        """
        # sampling for proportional prioritization
        # divide the range[0, 1] into batches and sample key from each batch
        keys = onp.linspace(1. / batch_size, 1, batch_size)
        keys -= onp.random.uniform(size=(batch_size,), high=1./batch_size)
        # use the key to retrieve indices (key=1 corresponds to tree root value)
        indices = self.sum_tree.get_indices(keys)
        # normalize priorities by the tree total (plain division, despite the
        # original comment calling it "softmax")
        prios = onp.array(self.sum_tree.get_values(indices)) / self.sum_tree.get_total_val()
        return indices, prios, self[indices]

    def update_priorities(self, indices, priorities):
        """
        Update priorities in sum tree of replay buffer.

        Args:
            indices: sum-tree leaf indices, as returned by `sample_batch`.
            priorities: new unscaled priorities (e.g. TD errors).
        """
        # add small offset to ensure that transitions with zero error can also be replayed
        # interpolate between greedy prioritization and uniform random sampling
        priorities = (priorities + 1e-10) ** self.alpha
        self.max_priority = max(self.max_priority, onp.max(priorities))
        self.min_priority = min(self.min_priority, onp.min(priorities))
        self.sum_tree.update_values(indices, priorities)

    def serializable(self):
        """
        Get pickable representation of Replay Buffer.

        Returns a 2-tuple: the base-class state, and a list holding
        [max_priority, min_priority, alpha, tree capacity, tree values] —
        the exact layout `load` expects.
        """
        tree_size = self.sum_tree.get_capacity()
        tree_index = range(tree_size)
        lst_serialize = [self.max_priority,
                         self.min_priority,
                         self.alpha,
                         tree_size,
                         self.sum_tree.get_values(tree_index)]
        return super().serializable(), lst_serialize

    def load(self, lst_serializable):
        """
        Load pickable representation of Replay Buffer. Inverse function of serializable
        """
        super().load(lst_serializable[0])
        self.max_priority = lst_serializable[1][0]
        self.min_priority = lst_serializable[1][1]
        self.alpha = lst_serializable[1][2]
        capacity = lst_serializable[1][3]
        tree_index = range(capacity)
        # rebuild the sum tree at the saved capacity, then restore its leaves
        self.sum_tree = SumTree(capacity)
        self.sum_tree.update_values(tree_index, lst_serializable[1][4])
| [
11748,
299,
32152,
355,
319,
79,
198,
6738,
2160,
62,
21048,
1330,
5060,
27660,
69,
355,
5060,
27660,
198,
6738,
764,
23100,
1240,
62,
22252,
1330,
16386,
28632,
198,
198,
4871,
34416,
28632,
7,
44901,
28632,
2599,
198,
220,
220,
220,
... | 2.337954 | 1,515 |
import plugin
import os

# Name of the handler this module provides; presumably consumed by the
# `plugin` loader's registry — verify against the loader implementation.
plugin_class="targz"
| [
11748,
13877,
198,
11748,
28686,
198,
198,
33803,
62,
4871,
2625,
83,
853,
89,
1,
198
] | 2.875 | 16 |
class AssetListEntry(object):
    """Convenience class to lighten up asset list complexity during
    export operations.

    Each AssetListEntry stands for a single asset (asset_path) and all
    the corresponding asset instances (objects). Each instance entry
    is stored as a 2-tuple of the objects name as a string and an
    optional program-specific reference to the object - provided for
    convenience.

    The reference may be used to simplify followup select-operations etc.

    The "AssetPath" is supposed to be the file path, including the
    file-extension, relative to the current projects Art-Source
    folder. No absolute paths, but that is depending on the actual
    pipeline-implementation, since all functions that deal with file
    paths will be delegated to a pipeline module, and that may be
    replaced by the user.
    """

    def __init__(self, asset_path=None):
        """Create an entry for a single asset.

        BUGFIX: the original class never initialized `obj_list`, so the
        first call to `append()` raised AttributeError.

        Args:
            asset_path (str, optional): File path of the asset (including
                extension), relative to the project's Art-Source folder.
        """
        self.asset_path = asset_path
        # List of (obj_name, obj_ref) tuples, one per instance of this asset.
        self.obj_list = []

    def append(self, obj_name, obj_ref=None):
        """Add an instance-entry for this asset.

        Args:
            obj_name (str): Name of the instance node.
            obj_ref: A program-specific reference to the object, which
                can be used to easily access the object again later.
        """
        self.obj_list.append((obj_name, obj_ref))

    def get_export_object(self):
        """Get the instance-entry that should be used to export the
        geometry of this asset.

        This is simply the first entry in the list, or None when the
        entry holds no instances yet.
        """
        try:
            return self.obj_list[0]
        except IndexError:
            return None
| [
198,
198,
4871,
31433,
8053,
30150,
7,
15252,
2599,
198,
220,
220,
220,
37227,
3103,
574,
1240,
1398,
284,
1657,
268,
510,
11171,
1351,
13357,
1141,
198,
220,
220,
220,
10784,
4560,
13,
628,
220,
220,
220,
5501,
31433,
8053,
30150,
62... | 2.942197 | 519 |
# Assign the first element of the list to answer_1 on line 2
lst=[11, 100, 99, 1000, 999]
answer_1=lst[0]
print(answer_1)
#=====================================
#This time print the second element of the list directly on line 3. You should get 100.
lst=[11, 100, 101, 999, 1001]
print(lst[1])
#======================================
#Print the last element of the list through variable answer_1.
lst=[11, 100, 101, 999, 1001]
#Type your answer here.
answer_1=lst[-1]
print(answer_1)
#=====================================
#On line 3, add the string "pajamas" to the list with .append() method.
gift_list=['socks', '4K drone', 'wine', 'jam']
# Type your code here.
gift_list.append('pajamas')
print(gift_list)
#=====================================
#On line 3, this time add the sub-list: ["socks", "tshirt", "pajamas"] to the end of the gift_list.
gift_list=['socks', '4K drone', 'wine', 'jam']
# Type your code here.
gift_list.append(["socks", "tshirt", "pajamas"])
print(gift_list)
#======================================
#On line 3, this time insert "slippers" to index 3 of gift_list.
gift_list=['socks', '4K drone', 'wine', 'jam']
# Type your code here.
# BUGFIX: the exercise asks for index 3; the original inserted at index 0.
gift_list.insert(3, 'slippers')
print(gift_list)
#=======================================
#With .index() method you can learn the index number of an item inside your list. Assign the index no of 8679 to the variable answer_1.
lst=[55, 777, 54, 6, 76, 101, 1, 2, 8679, 123, 99]
# Type your code here.
answer_1=lst.index(8679)
print(answer_1)
#=========================================
#Using .append() method, add a new list to the end of the list which contains strings: "Navigator" and "Suburban".
lst=["CRV", "Outback", "XC90", "GL", "Cherokee", "Escalade"]
# Type your code here.
lst.append(['Navigator', 'Suburban'])
print(lst)
#=========================================
#Using .remove() method, clear the last element of the list.
lst=[55, 777, 54, 6, 76, 101, 1, 2, 8679, 123, 99]
# Type your code here.
lst.remove(99)
print(lst)
#========================================
#Using .reverse() method, reverse the list.
lst=[55, 777, 54, 6, 76, 101, 1, 2, 8679, 123, 99]
# Type your code here.
lst.reverse()
print(lst)
#=========================================
#Using .count() method, count how many times 6 occur in the list.
lst=[55, 6, 777, 54, 6, 76, 101, 1, 6, 2, 6]
# Type your code inside print() function.
answer_1=lst.count(6)
print(answer_1)
#==========================================
#What is the sum of all the numbers in the list?
lst=[55, 6, 777, 54, 6, 76, 101, 1, 6, 2, 6]
# Type your code on line 4:
answer_1=sum(lst)
print(answer_1)
#==========================================
#What is the minimum value in the list?
lst=[55, 6, 777, 54, 6, 76, 101, 1, 6, 2, 6]
# Type your code on line 4:
# BUGFIX: the original used max() here, contradicting the prompt above.
answer_1=min(lst)
print(answer_1)
#=========================================
#What is the maximum value in the list?
lst=[55, 6, 777, 54, 6, 76, 101, 1, 6, 2, 6]
# Type your code on line 4:
# BUGFIX: the original used min() here, contradicting the prompt above.
answer_1=max(lst)
print(answer_1)
2,
2195,
570,
262,
717,
5002,
286,
262,
1351,
284,
3280,
62,
16,
319,
1627,
362,
198,
198,
75,
301,
41888,
1157,
11,
1802,
11,
7388,
11,
8576,
11,
36006,
60,
198,
41484,
62,
16,
28,
75,
301,
58,
15,
60,
198,
4798,
7,
41484,
62... | 2.87441 | 1,059 |
import torch
from copy import deepcopy
# from .visualization import plot_distributions_2d
from itertools import chain
| [
11748,
28034,
198,
6738,
4866,
1330,
2769,
30073,
198,
2,
422,
764,
41464,
1634,
1330,
7110,
62,
17080,
2455,
507,
62,
17,
67,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 3 | 44 |
# standard
import unittest
import tempfile
import typing
# external
from encyclopedia import Unindexed
class EAV(dict, Unindexed):
    '''
    Container for storing small-ish EAV "triples" (Entity-Attribute-Value).

    - Focus of class is providing convenient dictionary-like access rather than data analysis functionality
    - Internally, EAV is stored as a dictionary (key:E) of dictionaries (key:A,value:V)
    - supports encyclopedic operations e.g. subtraction (difference) and addition (union)

    Example set-ting:

        eav[entity, attribute] = value
        eav[[entity1, entity2,], attribute] = value
        eav[[entity1, entity2,], attribute] = [value1, value2,] # len(entities) must equal len(values)
        eav[:, attribute] = value # assign all entities same value for attribute

    Unsupported at this time:

        eav[entity, :] = value # ERROR
        eav[entity, [attribute1, attribute2,]] = [value1, value2,] # ERROR

    Example get-ting:

        eav[entity, attribute] # value for a specific attribute
        eav[:, attribute] # new EAV with all entities and but only one attribute
        eav[:, [attribute1, attribute2]] # new EAV with all entities and but only specified attributes
        eav[entity,:] # new EAV with only one entity
        eav[[entity1, entity2],:] # new EAV with only specified entities

    ToDo:
        - Implement with Relation to allow inversion(?)
    '''
    def __init__(self, data=None,
                 fmt: str = None,  # forced input formatter (necessary for some formats)
                 fields=('entity', 'attribute', 'value'),  # when reading dictionary
                 vcast=None,  # value cast, e.g. integer
                 acast=str,  # attribute cast, for instance, a string
                 ecast=str,  # entity cast, for instance, a string
                 defaults=None,  # default values when an attribute is not found for an entity
                 vcasts=None):  # per-attribute casts
        '''
        Build an EAV container, optionally loading initial data.

        - fmt (one of the following ...)
            - dict: dictionary of dictionaries (auto-detected)
            - triple: list of EAV dictionaries/tuples (defaulted)
            - column: list of records with field names as first row and entities on first column (must force this option)
        - vcast: value cast
        - acast: attribute cast
        - ecast: entity cast
        - defaults: dictionary of defaults for specific attributes
        - vcasts: dictionary of casting for specific attributes
        '''
        # Store the field-name tuple AND unpack it into locals used below
        # when reading 'triple'-format dictionaries.
        self.fields = ENTITY, ATTRIBUTE, VALUE = fields
        # Auto-detect the input format when the caller did not force one.
        if fmt is None:
            if data is None:
                fmt = 'triple'  # although doesn't matter
            elif isinstance(data, dict):
                fmt = 'dict'
            elif isinstance(data, typing.Iterable):
                fmt = 'triple'
            else:
                assert False  # do not understand this data
        if not defaults:
            self.defaults = {}  # keys are attributes
        else:
            self.defaults = defaults
        if not vcasts:
            self.vcasts = {}  # keys are attributes
        else:
            self.vcasts = vcasts
        self.vcast = vcast
        self.ecast = ecast
        self.acast = acast
        Unindexed.__init__(self)
        dict.__init__(self)
        if data is not None:
            # `get` is only consumed by the 'column' branch (header row first).
            get = iter(data)
            if fmt == 'dict' :
                d = data
                for e in d:
                    for a in d[e]:
                        # NOTE(review): assignment goes through EAV's tuple-key
                        # __setitem__, which is defined elsewhere — not plain dict.
                        self[e, a] = d[e][a]
            elif fmt == 'column':
                # First record is the header; remaining records are rows keyed
                # by their first column (the entity).
                fields = next(get)
                d = {r[0]: dict(zip(fields[1:], r[1:])) for r in get}
                for e in d:
                    for a in d[e]:
                        self[e, a] = d[e][a]
            elif fmt == 'triple':
                for d in data:
                    # Each item is either a mapping keyed by the field names,
                    # or a bare (entity, attribute, value) tuple.
                    if isinstance(d, dict):
                        e, a, v = d[ENTITY], d[ATTRIBUTE], d[VALUE]
                    else:
                        e, a, v = d
                    self[e, a] = v
            else:
                print(fmt + ' not supported')
                assert False
@staticmethod
@staticmethod
def copy_style(self):
'''
create empty EAV preserving casting and defaults.
'''
return EAV(data=None,
defaults=self.defaults,
vcasts=self.vcasts,
vcast=self.vcast,
ecast=self.ecast,
acast=self.acast,
)
def copy(self):
'''
deep copy of EAV. Preserves casting and defaults.
'''
return EAV(data=self,
defaults=self.defaults,
vcasts=self.vcasts,
vcast=self.vcast,
ecast=self.ecast,
acast=self.acast,
)
def attributes(self, entities=None):
'''
Computationally determine which attributes are used for certain entities
'''
result = []
for entity in self._check_entities(entities):
for attribute in self[entity].keys():
if attribute not in result:
result.append(attribute)
return result
def rename(self, renames, entities=None):
'''
rename attributes (not the entities)
'''
new = self.copy()
for entity in new._check_entities(entities):
for k, v in renames.items():
if k in new[entity]:
new[entity, v] = new[entity, k]
del new[entity, k]
return new
if __name__ == '__main__':
    # Run this module's unittest suite when executed directly.
    unittest.main()
| [
2,
3210,
198,
11748,
555,
715,
395,
198,
11748,
20218,
7753,
198,
11748,
19720,
198,
2,
7097,
198,
6738,
45352,
1330,
791,
9630,
276,
198,
198,
4871,
412,
10116,
7,
11600,
11,
791,
9630,
276,
2599,
198,
220,
220,
220,
705,
7061,
198... | 2.131538 | 2,600 |