content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from mitmproxy.test import tflow, taddons
from mitmproxy.addons.comment import Comment
| [
6738,
10255,
76,
36436,
13,
9288,
1330,
256,
11125,
11,
256,
39996,
198,
6738,
10255,
76,
36436,
13,
39996,
13,
23893,
1330,
18957,
628
] | 3.666667 | 24 |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""Home of estimator related functions.
"""
from __future__ import print_function
from tensorflow.python.estimator.estimator_lib import model_to_estimator
del print_function
| [
2,
770,
2393,
318,
337,
16219,
8881,
24700,
1137,
11617,
0,
2141,
407,
4370,
13,
198,
2,
2980,
515,
416,
25,
11192,
273,
11125,
14,
31391,
14,
15042,
14,
8612,
1352,
14,
17953,
62,
29412,
62,
15042,
13,
9078,
4226,
13,
198,
37811,
... | 3.135417 | 96 |
import os
import config
from sprites import *
from sprite_templates import *
import traceback
from xml.dom.minidom import parseString
import sys
LEVEL = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>scrollmode</key>
<integer>%(scrollmode)s</integer>
<key>level</key>
<dict>
<key>name</key>
<string>level 1 test level</string>
<!--
<key>background</key>
<string>%(background)s</string>
-->
<!-- SPRITES -->
<key>compounds</key>
<array>
%(sprites)s
</array>
<!-- JOINTS -->
<key>joints</key>
<array>
%(joints)s
</array>
<!-- CONTACTS -->
<key>contacts</key>
<array>
%(contacts)s
</array>
<!-- SHEETS -->
<key>sheets</key>
<array>
%(sheets)s
</array>
<!-- CANNONS -->
<key>cannons</key>
<array>
%(cannons)s
</array>
</dict>
</dict>
</plist>
"""
SHEETS = """
<dict>
<key>atlas</key>
<string>%(texture)s</string>
<key>id</key>
<integer>5</integer>
</dict>
<dict>
<key>atlas</key>
<string>%(texture2)s</string>
<key>id</key>
<integer>6</integer>
</dict>
"""
#define SCROLLMODE_STANDARD 1
#define SCROLLMODE_BATTLE 2
#define SCROLLMODE_LEFT 3
#define SCROLLMODE_RIGHT 4
if __name__ == "__main__":
#test plain levelbuilder
if 0:
lb = LevelBuilder("level_36.plist")
lb.useTexture("level_1_texture")
lb.addObject(Rotor.RotorSprite(x=180,y=110,speed=5,torque=10000))
#lb.addObject(Rotor.RotorSprite(x=100,y=60,speed=5,torque=10000))
#lb.addObject(Rotor.RotorSprite(x=300,y=200,speed=20,torque=10000))
lb.addObject(Hero.HeroSprite(x=20,y=10))
lb.addObject(Launcher.LauncherSprite(name='__launcher__1',x=260, y=50, trigger_x=400, trigger_y=100))
lb.addObject(Launcher.LauncherSprite(name='__launcher__2',x=100, y=50, trigger_x=300, trigger_y=100))
lb.addObject(Launcher.LauncherSprite(name='__launcher__3',x=300, y=250, trigger_x=50, trigger_y=300))
#lb.addObject(Enemy.EnemySprite(x=160,y=300,width=15,height=15))
lb.addObject(Friend.FriendSprite(x=50,y=160,width=50,height=50))
lb.addObject(SpikeyBuddy.SpikeyBuddySprite(x=50,y=80,width=50,height=50))
lb.addObject(Enemy.EnemySprite(x=300,y=100,width=50,height=50))
lb.addObject(Star.StarSprite(x=100,y=100,width=20,height=20))
#lb.addObject(Wizard.WizardSprite(x=300,y=50))
lb.addObject(Wizard.WizardSprite(x=25,y=50))
lb.render()
#test XML levelBuilder
if 1:
"""
<!-- Joints -->
<joint type='Joints.DistanceJoint'
id='J6'
body1='edge_1'
body2='edge_2'
damping='0.2'
freq='20'
texture_type='image'
texture = 'rect.png'
texture_width='20'
b1_Xoffset='0'
b1_Yoffset='0'
b2_Xoffset='0'
b2_Yoffset='0'/>
<joint type='Joints.RevoluteJoint'
id='J6'
body1='edge_1'
body2='edge_2'
motor_speed='50.0'
torque='1000.0'
enable_motor='false'
lower_angle='12'
upper_angle='45'
enable_limit='false'
collide_connected='false'
/>
<joint type='Joints.PrismaticJoint'
id='J6'
body1='edge_1'
motor_speed='50.0'
torque='1000.0'
enable_motor='false'
lower_translation='-100'
upper_translation='100'
enable_limit='true'
vertical='false'
/>
<sprite type = 'Hero.HeroSprite' x='55' y='40'/>
<sprite type = 'Bullet.BulletSprite' x='0' y='0' width='10' height='10' angle='0' restitution='0.5' static='false' friction='0.5' density='3' spawnEvent='onShoot'/>
<!-- alerts -->
<sprite type = 'Alert.Alert' x='215' y='250' width='100' height='500' name='ImageAlert' msg='pickupsexplained.png'/>
<!-- enemies -->
<sprite type = 'Enemy.EnemySprite' x='387' y='56' width='102' height='101' angle='0' restitution='0.2' static='false' friction='0.5' density='5' classname='BlobSprite' firstframe='monsterblob.png'/>
<sprite type = 'Enemy.EnemySprite' x='287' y='56' width='102' height='101' angle='0' restitution='0.2' static='false' friction='0.5' density='5' classname='PumpkinSprite' firstframe='pumpkin.png'/>
<sprite type = 'Enemy.EnemySprite' x='587' y='56' width='102' height='101' angle='0' restitution='0.2' static='false' friction='0.5' density='5' classname='LoveAlienSprite' firstframe='lovable_alien.png'/>
<sprite type = 'Enemy.EnemySprite' x='687' y='56' width='102' height='101' angle='0' restitution='0.2' static='false' friction='0.5' density='5' shape='rect' classname='SquareBlobSprite' firstframe='square_monsterblob.png'/>
<sprite type = 'Enemy.EnemySprite' x='987' y='56' width='102' height='101' angle='0' restitution='0.2' static='false' friction='0.5' density='5' />
<sprite type = 'EnemyEquipedRotor.EnemyEquipedRotorSprite' x='1300' y='200' speed='200' beam_length='250' torque='1000' />
<!-- pickups -->
<sprite type = 'Crate.CrateSprite' x='718' y='16' width='32' height='32' static='false' angle='0'/>
<sprite type = 'BulletTimePickup.BulletTimePickupSprite' x='1314' y='18' width='32' height='32' static='false' angle='0' />
<sprite type = 'Pickup.PickupSprite' x='672' y='17' width='32' height='32' static='false' angle='0'/>
<!-- TODO: what about bar_long.png-->
<sprite type = 'Beam.BeamSprite' x='1538' y='68' width='128' height='80' angle='-90' restitution='0.2' static='false' friction='0.5' density='5' />
<sprite type = 'Beam.BeamSprite' x='1538' y='68' width='10' height='10' angle='-90' restitution='0.2' static='true' friction='0.5' density='5' classname='SimpleScrollStrategySprite' />
<sprite type = 'Beam.BeamSprite' x='2508' y='97' width='240' height='36' angle='0' restitution='0.2' static='false' friction='0.5' density='20' classname='Destructable' firstframe ='brittle_brick.png' setName= 'dBeam'/>
<sprite type = 'Friend.FriendSprite' x='836' y='422' width='128' height='128' angle='0' restitution='0.2' static='false' friction='0.5' density='1' firstframe ='boulder.png' setName='Friend' />
<sprite type = 'Nut.NutSprite' x='600' y='200' />
<sprite type = 'Bomb.BombSprite' x='500' y='10' width='100' height='100' /><!-- TODO:include texture in main sheet -->
<!-- conversations -->
<sprite type = 'Speach.Speach' x='650' y='250' width='200' height='20' msg='Fok jou!#jonge!*Nee, fok jou!*aargh!*#'/>
<!-- cannon -->
<sprite type = 'Friend.FriendSprite' x='836' y='422' width='128' height='128' angle='0' restitution='0.2' static='false' friction='0.5' density='1' spawnEvent = 'onCreateBoulder' firstframe ='boulder.png' setName='Friend' />
<sprite type = 'Cannon.Cannon' event='onCreateBoulder' freq='120' max='25'/>
<!-- contacts / events -->
<sprite type = 'Contacts.Contact' body1='b2' body2='b1' event_name='onCustomEvent' />
<sprite type = 'EventTrigger.EventTrigger' x='2665' y='250' width='100' height='500' msg='onLevelCleared' eventName='onEventTriggerHit' />
<!-- info sign / watchtower/ teleporter -->
<sprite type = 'Visual.VisualSprite' x='300' y='25' width='250' height='50' firstframe='sign.png' msg='Psst! Remember,#aim for the fireballs. >>'/>
<EB type = 'Watchtower' x='200'/>
<sprite type = 'Teleporter.TeleporterSprite' level_id='leveldata/test'/>
<!-- monsters -->
<EB type='Slammer' x='200' />
<EB type='MuppetMotion' x='200' />
<EB type='DefenseTower' x='200' />
<EB type='SlingerBall' x='200' />
<EB type='MotorizedBall' x='200' />
<EB type='Stepper' x='200' />
<EB type='GooBalls' x='200' />
<EB type='WobblingBlob' x='200' />
<EB type='TestMonster' x='200' />
<EB type='Elephant' x='200' />
<EB type='Piston' x='200' />
<EB type='Jumper' x='200' />
<EB type='Catapult' x='200' />
<EB type='Snail' x='200' />
<EB type='MeatGrinder' x='200' />
<EB type='BlueMonster' x='200' />
<EB type='JibberLeg' x='200' />
<EB type='SteppingFucker' x='200' />
<EB type='Caterpillar' x='200' />
<EB type='ShoveIt' x='200' />
<EB type='Snake' x='200' />
<EB type='PumpkinBomber' x='200' />
<EB type='KingofBlobs' x='200' />
<EB type='WalkingPigTail' x='200' />
<EB type='JerkyBicycle' x='200' />
<EB type='JanssenWalker' x='200' />
<EB type='Walker' x='200' />
<EB type='BlueBlob' x='200' />
<EB type='Stryder' x='200' />
<EB type='LittleCrawler' x='200' />
<EB type='Bugger' x='200' />
<EB type='CaterpillarVersion2' x='200' />
<EB type='LittleFloater' x='200' />
<EB type='MagicWalker' x='200' />
<EB type='Flunker' x='200' />
<EB type='SpaceShip' x='200' />
<EB type='RoboCod' x='200' />
"""
#lb.addObject(Beam.BeamSprite(x=600+240, y=10,width=500,height=30,angle='0',restitution=0.2,static='true',friction=0.5,density=20,classname='SimpleScrollStrategySprite').setName('Beam'))
lb = XMLLevelBuilder("test.plist",background="test.png")
xml = """<level texture="monstersheet1">
<sprite type = 'Hero.HeroSprite' x='55' y='40'/>
<sprite type = 'Bullet.BulletSprite' x='0' y='0' width='10' height='10' angle='0' restitution='0.5' static='false' friction='0.5' density='3' spawnEvent='onShoot'/>
<sprite type = 'Enemy.EnemySprite' x='687' y='56' width='102' height='101' angle='0' restitution='0.2' static='false' friction='0.5' density='5' shape='rect' classname='SquareBlobSprite' firstframe='square_monsterblob.png' setName='b1'/>
<sprite type = 'BulletTimePickup.BulletTimePickupSprite' x='1000' y='16' width='32' height='32' static='false' angle='0' setName='b2'/>
<sprite type='Joints.DistanceJoint'
id='J6'
body1='b1'
body2='b2'
damping='0.2'
freq='20'
texture_type='line'
texture = 'rect.png'
texture_width='20'
b1_Xoffset='0'
b1_Yoffset='0'
b2_Xoffset='0'
b2_Yoffset='0'/>
<sprite type = 'Alert.Alert' x='215' y='250' width='100' height='500' msg='blah
blah
blah
blah'/>
<EB type='Conversation' x='200' pass_args='1' msg='blah
blah
blah
blah'/>
<sprite type = 'Teleporter.TeleporterSprite' level_id='leveldata/test'/>
</level>"""
lb.renderWithXML(xml)
| [
11748,
28686,
198,
11748,
4566,
198,
6738,
42866,
1330,
1635,
198,
6738,
33810,
62,
11498,
17041,
1330,
1635,
198,
11748,
12854,
1891,
198,
6738,
35555,
13,
3438,
13,
1084,
312,
296,
1330,
21136,
10100,
198,
11748,
25064,
628,
198,
2538,
... | 2.048159 | 5,378 |
#!/usr/bin/python3
import io
import csv
import json
import warnings
import pickle
import operator
import time
import logging
import math
import functools
import numpy
from sklearn.preprocessing import MinMaxScaler
from threading import Thread
from random import shuffle
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import cluster, mixture
from sklearn.neighbors import kneighbors_graph
from s3_helper import put_file, get_file
#Librerias locindoor
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from core.data_processor import DataLoader
from core.model import Model
from core.trajectories import Trajectories
from core.aps import Aps
from tensorflow.keras.models import load_model
# create logger with 'spam_application'
logger = logging.getLogger('learn')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('learn.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - [%(name)s/%(funcName)s] - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
33245,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
14601,
198,
11748,
2298,
293,
198,
11748,
10088,
198,
11748,
640,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
... | 3.203024 | 463 |
import logging
from os import getenv
from flask import Flask
from flask import session # pylint: disable=unused-import
from config import DATABASE_URL, ENV
from database import database
from views import login, new_tip, register, tips, tests, like
| [
11748,
18931,
198,
6738,
28686,
1330,
651,
24330,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
1330,
6246,
220,
1303,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
11748,
198,
6738,
4566,
1330,
360,
1404,
6242,
11159,
62,
21886,
11,... | 3.787879 | 66 |
# Copyright 2013 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authentication Tests."""
import os
import sys
import threading
import unittest
from urllib import quote_plus
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from pymongo import MongoClient, MongoReplicaSetClient
from pymongo.auth import HAVE_KERBEROS
from pymongo.errors import OperationFailure
from pymongo.read_preferences import ReadPreference
from test import version, host, port
from test.utils import is_mongos, server_started_with_auth
# YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS.
GSSAPI_HOST = os.environ.get('GSSAPI_HOST')
GSSAPI_PORT = int(os.environ.get('GSSAPI_PORT', '27017'))
PRINCIPAL = os.environ.get('PRINCIPAL')
SASL_HOST = os.environ.get('SASL_HOST')
SASL_PORT = int(os.environ.get('SASL_PORT', '27017'))
SASL_USER = os.environ.get('SASL_USER')
SASL_PASS = os.environ.get('SASL_PASS')
SASL_DB = os.environ.get('SASL_DB', '$external')
class AutoAuthenticateThread(threading.Thread):
"""Used in testing threaded authentication.
"""
if __name__ == "__main__":
unittest.main()
| [
2,
15069,
2211,
838,
5235,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 2.987037 | 540 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""far process RWMF."""
import os
from PIL import Image
if __name__ == '__main__':
root_path = './Real-World-Masked-Face-Dataset-master'
jpg_paths = ['RWMFD_part_1', 'RWMFD_part_2_pro']
target_txt = 'RWMF_label_train.txt'
if os.path.exists(target_txt):
os.remove(target_txt)
with open(target_txt, "w") as txt:
for jpg_path in jpg_paths:
cur_jpg_path = os.path.join(root_path, jpg_path)
for img_dir in os.listdir(cur_jpg_path):
cur_img_dir = os.path.join(cur_jpg_path, img_dir)
for img_name in os.listdir(cur_img_dir):
if not img_name.endswith('.jpg'):
continue
img_path = os.path.join(cur_img_dir, img_name)
try:
image = Image.open(img_path).convert('RGB')
except FileNotFoundError:
print('wrong img:', img_path)
continue
txt.write(img_path + ' ')
txt.write(str(-1) + ' ')
txt.write(str(-1) + ' ')
txt.write('0')
txt.write('\n')
| [
2,
15069,
33160,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 2.181075 | 856 |
#!/usr/bin/env python3
from model_weights import *
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='See what variables are stored in model.npy file')
parser.add_argument('load_from', metavar='LOAD_FROM', type=str,
help='the model.npy file')
args = parser.parse_args()
if args.load_from == "-":
args.load_from = "/dev/stdin"
m = Model(args.load_from)
for var, val in m.variables.items():
print(var, val.shape)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
2746,
62,
43775,
1330,
1635,
198,
11748,
1822,
29572,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,... | 2.429245 | 212 |
from czifiletools import pylibCZIrw_tools as cztrw
from pylibCZIrw import czi as pyczi
#from pylibCZIrw import Rectangle
from matplotlib import pyplot as plt
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\Z=4_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\T=3_Z=4_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\T=3_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_T=3_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_Z=4_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=1_3x3_T=1_Z=1_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_T=1_Z=1_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_T=1_Z=4_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_T=3_Z=1_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=1_3x3_T=3_Z=4_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_T=3_Z=4_CH=2.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\W96_B2+B4_S=2_T=1=Z=1_C=1_Tile=5x9.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\Multiscene_CZI_3Scenes.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\96well_S=192_2pos_CH=3.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\CellDivision_T=10_Z=20_CH=1_DCV.czi'
#filename = r'D:\Testdata_Zeiss\CZI_Testfiles\CellDivision_T=3_Z=5_CH=2_X=240_Y=170.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\S=1_HE_Slide_RGB.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\OverViewScan.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\DTScan_ID4.czi'
#filename = r'd:\Testdata_Zeiss\CZI_Testfiles\w96_A1+A2.czi'
filename = r'd:\Testdata_Zeiss\CZI_Testfiles\testwell96.czi'
#filename = r"D:\Testdata_Zeiss\Mitochondria_EM_with_DNN\original_data\mitochondria_train_01_seg_ov_small.czi"
#filename = r"/datadisk1/tuxedo/testpictures/Testdata_Zeiss/celldivision/CellDivision_T=10_Z=15_CH=2_DCV_small.czi"
#filename = r"/datadisk1/tuxedo/testpictures/Testdata_Zeiss/wellplate/testwell96_woatt_S1-5.czi"
#filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/S=1_3x3_T=1_Z=1_CH=2.czi'
#filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/S=2_3x3_T=3_Z=4_CH=2.czi'
#filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/S=2_3x3_T=1_Z=4_CH=2.czi'
#filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/S=2_3x3_T=3_Z=1_CH=2.czi'
#filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/S=2_3x3_T=3_Z=4_CH=2.czi'
#filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/S=1_HE_Slide_RGB.czi'
#filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/Multiscene_CZI_3Scenes.czi'
#filename = r'c:\Users\m1srh\Downloads\Overview.czi'
#filename = r'd:\Testdata_Zeiss\LatticeLightSheet\LS_Mitosis_T=150-300.czi'
# get all the metadata as a dictionary
md, md_add = cztrw.get_metadata_czi(filename, dim2none=True, convert_scunit=True)
# open the CZI document to read the
czidoc = pyczi.open_czi(filename)
# show all dimensions
total_bbox = czidoc.total_bounding_box
for k,v in total_bbox.items():
print(k, v)
# get information about the scenes etc.
sc_bbox = czidoc.scenes_bounding_rectangle
total_rect = czidoc.total_bounding_rectangle
pixeltype_ch = czidoc.get_channel_pixel_type(0)
pixeltypes = czidoc.pixel_types
print('Real Pixeltypes in CZI file : ', pixeltypes)
# read a simple 2d image plane
roi = (300, 300, 300, 600)
image2d_C0 = czidoc.read(plane={'C': 0}, scene=0, roi=roi, pixel_type="Gray8")
image2d_C1 = czidoc.read(plane={'C': 1}, scene=0, roi=roi, pixel_type="Gray8")
print(image2d_C0.shape, image2d_C1.shape)
print('Pixeltype after conversion during reading : ', image2d_C0.dtype, image2d_C1.dtype)
# Create two subplots and unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(image2d_C0, interpolation='nearest', cmap='Reds_r')
ax2.imshow(image2d_C1, interpolation='nearest', cmap='Greens_r')
plt.show()
# store metadata inside Pandas dataframe
mdf = cztrw.md2dataframe(md)
print(mdf)
| [
6738,
24785,
361,
576,
31391,
1330,
279,
2645,
571,
34,
57,
23820,
86,
62,
31391,
355,
24785,
2213,
86,
198,
6738,
279,
2645,
571,
34,
57,
23820,
86,
1330,
269,
17027,
355,
12972,
66,
17027,
198,
2,
6738,
279,
2645,
571,
34,
57,
2... | 2.129436 | 1,916 |
"""Helper functions for generating panoramics."""
import logging
import os
from contextlib import redirect_stderr
from glob import glob
from shutil import copyfile
import cv2
import numpy as np
import psutil
from tqdm import tqdm
logger = logging.getLogger(__name__)
def prepare_directories_for_stitching_in_rows(input_dir):
"""Prepare images for stitching by row.
Iterates over all images in the input directory, when it sees a black 'stop' image,
the end of the row is signalled, so all the previous images are added
to a numbered directory, corresponding to that row.
Args:
input_dir (str): input directory of images
Returns: Nothing. Indexes a side effect.
"""
image_files = sorted(glob(f"{input_dir}/*JPG"))
directory_number = 0
images_in_one_row = []
debug_logs = []
for img_name in tqdm(image_files):
image_pixel_mean = int(np.mean(cv2.imread(img_name)))
# if we see a black 'end of row' image, copy all previous images to a new numbered directory.
if image_pixel_mean < 10:
debug_logs.append((img_name, round(image_pixel_mean, 2)))
new_dir = f"{input_dir}/row_{str(directory_number).zfill(2)}"
create_dir_if_needed(new_dir)
for img_in_row in images_in_one_row:
copyfile(img_in_row, f'{new_dir}/{img_in_row.split("/")[-1]}')
images_in_one_row = []
directory_number += 1
else:
images_in_one_row.append(img_name)
logger.debug(debug_logs)
def stitch_images(image_list):
"""Stitch a list of images together.
And try to supress OpenCV/MPL std err output...
Args:
image_list (list)
Returns:
np.ndarray: stitched image
"""
# stitch modes: cv2.Stitcher_PANORAMA, cv2.Stitcher_SCANS
stitcher = cv2.Stitcher.create(cv2.Stitcher_SCANS)
with redirect_stderr("") as _:
_, stitched = stitcher.stitch(image_list)
return stitched
def create_dir_if_needed(dir_name, delete=False):
"""Create directory if needed.
Args:
dir_name (str)
delete (bool): whether to delete all the files in the directory or not
"""
if not os.path.isdir(dir_name):
logger.debug(f"Creating dir: {dir_name}")
os.mkdir(dir_name)
else:
if delete:
[os.remove(os.path.join(dir_name, f)) for f in os.listdir(dir_name)]
def get_memory_usage():
"""Returns memory usage of current process in MB. Used for logging.
Returns:
float: Memory usage of current process in MB.
"""
pid = os.getpid()
return round(psutil.Process(pid).memory_info().rss / 1e6, 2)
def crop_border(img, border_x, border_y):
"""Crop the border of an image. Due to peripheral blurring.
Args:
img (np.ndarray): input image
border_x (int): number of pixels to crop on each side of x border
border_y (int): number of pixels to crop on each side of y border
Returns:
np.ndarray - cropped image
"""
return img[border_y:-border_y, border_x:-border_x, :]
def sort_jpg_files_in_dir_alpha(in_dir):
"""Alphabetically sort all the jpg files in a directory and return as a list."""
return sorted(glob(in_dir + "/*JPG"))
| [
37811,
47429,
5499,
329,
15453,
3425,
273,
321,
873,
526,
15931,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
4732,
8019,
1330,
18941,
62,
301,
1082,
81,
198,
6738,
15095,
1330,
15095,
198,
6738,
4423,
346,
1330,
4866,
7753,
198,
19... | 2.466415 | 1,325 |
import dataclasses
from typing import Any, ClassVar, List, Optional, Sequence, Tuple, Type, Union
from dbdaora.keys import FallbackKey
from dbdaora.query import BaseQuery, Query, QueryMany
from .repositories import HashData, HashEntity, HashRepository
@dataclasses.dataclass(init=False)
@dataclasses.dataclass(init=False)
| [
11748,
4818,
330,
28958,
198,
6738,
19720,
1330,
4377,
11,
5016,
19852,
11,
7343,
11,
32233,
11,
45835,
11,
309,
29291,
11,
5994,
11,
4479,
198,
198,
6738,
20613,
6814,
5799,
13,
13083,
1330,
7218,
1891,
9218,
198,
6738,
20613,
6814,
... | 3.257426 | 101 |
# -*- coding: utf-8 -*-
from collections import defaultdict
import numpy as np
from endochrone import Base, Transformer
from endochrone.stats.measures import arg_nearest
__author__ = "nickwood"
__copyright__ = "nickwood"
__license__ = "mit"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
886,
5374,
33171,
1330,
7308,
11,
3602,
16354,
198,
6738,
886,
5374,
33171,
13,
34242,
... | 3.0625 | 80 |
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Circle
import numpy as np
import time
from rtd.simulator_files.planner.planner import get_env
from rtd.simulator_files.planner.planner import astar_planning
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198,
6738,
2603,
29487,
8019,
13,
8071,
2052,
1330,
16291,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
628,
198... | 3.231707 | 82 |
import json
import os
import pickle
from typing import Dict
import bibtexparser
import typer
from functional import pseq
from rich.console import Console
from rebiber.bib2json import normalize_title
console = Console()
app = typer.Typer()
@app.command()
@app.command()
if __name__ == "__main__":
app()
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
6738,
19720,
1330,
360,
713,
198,
198,
11748,
275,
571,
16886,
48610,
198,
11748,
1259,
525,
198,
6738,
10345,
1330,
15838,
80,
198,
6738,
5527,
13,
41947,
1330,
24371,
198,
1... | 3.047619 | 105 |
import pexpect
import sys
from figcli.test.cli.actions.delete import DeleteAction
from figcli.test.cli.actions.put import PutAction
from figcli.test.cli.config import *
from figcli.test.cli.figgy import FiggyTest
from figcli.test.cli.test_utils import TestUtils
from figcli.utils.utils import *
import time
| [
11748,
613,
87,
806,
198,
11748,
25064,
198,
198,
6738,
2336,
44506,
13,
9288,
13,
44506,
13,
4658,
13,
33678,
1330,
23520,
12502,
198,
6738,
2336,
44506,
13,
9288,
13,
44506,
13,
4658,
13,
1996,
1330,
5930,
12502,
198,
6738,
2336,
44... | 3.252632 | 95 |
"""<changes description>
Revision ID: 3483e421713d
Revises: 784a82cec07a
Create Date: 2017-12-27 12:29:26.302823
"""
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '3483e421713d'
down_revision = '784a82cec07a'
branch_labels = None
depends_on = None
resource_tables = ['blueprints', 'plugins', 'secrets', 'snapshots', 'events',
'executions', 'logs', 'nodes', 'node_instances',
'deployments', 'deployment_modifications',
'deployment_updates', 'deployment_update_steps']
visibility_states = ['private', 'tenant', 'global']
DEFAULT_SYSTEM_ROLE_ID = 6
| [
37811,
27,
36653,
6764,
29,
198,
198,
18009,
1166,
4522,
25,
513,
38783,
68,
3682,
1558,
1485,
67,
198,
18009,
2696,
25,
767,
5705,
64,
6469,
344,
66,
2998,
64,
198,
16447,
7536,
25,
2177,
12,
1065,
12,
1983,
1105,
25,
1959,
25,
2... | 2.372822 | 287 |
# coding=utf-8
from django.core import validators
from django.db import models
class MoneyField(models.BigIntegerField):
"""
Stores money to nearest penny as integer. e.g. £10.22 would be 1022
"""
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
42625,
14208,
13,
7295,
1330,
4938,
2024,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
4871,
12911,
15878,
7,
27530,
13,
12804,
46541,
15878,
2599,
198,
220,
220,
220,
37227,
198,
22... | 3.014286 | 70 |
import logging
import json
from invokust.aws_lambda import LambdaLoadTest, results_aggregator
logging.basicConfig(level=logging.INFO)
###
# SETTINGS
###
# How long should the test run for in minutes?
# Note that Lambda invokations that are started cannot be stopped.
# Test times will actually be run in intervals of 3 minutes.
test_time = 6
# How many concurrent users to test for?
# threads x 25 = num of concurrent users
threads = 20
# What test file are we using?
test_file = 'locust_test_mail_list.py'
if __name__ == "__main__":
lambda_runtime = f"{test_time}m" if test_time < 3 else "3m"
lambda_payload = {
'locustfile': test_file,
'host': 'https://forms-staging.cdssandbox.xyz',
'num_users': 25,
'spawn_rate': 5,
'run_time': lambda_runtime
}
load_test = LambdaLoadTest(
lambda_function_name='LoadTesting',
threads=threads,
ramp_time=0,
time_limit=test_time*60,
lambda_payload=lambda_payload
)
load_test.run()
print_stats_exit(load_test)
output_file = open("threads_output.json", "w")
thread_output = {"threads": load_test.get_locust_results() }
json.dump(thread_output, output_file)
output_file.close() | [
11748,
18931,
198,
11748,
33918,
198,
6738,
800,
482,
436,
13,
8356,
62,
50033,
1330,
21114,
6814,
8912,
14402,
11,
2482,
62,
9460,
2301,
1352,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
10778,
8,
198,
198,
... | 2.620985 | 467 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Your FindElements object will be instantiated and called as such:
# obj = FindElements(root)
# param_1 = obj.find(target)
| [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
1188,
28,
15,
11,
1364,
28,
14202,
11,
826,
28,
14202,
2599,
198,
2,
220,
220,
220,
220,... | 2.556452 | 124 |
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.JetMonitor_cfi import hltJetMETmonitoring
### Jet trigger monitoring ###
#
# The original file spelled out one near-identical `hltJetMETmonitoring.clone`
# per trigger path (~60 copies).  The clones differ only in the pT threshold
# (which fixes the histogram binning), the jet flavour (AK4/AK8,
# central/forward, PF/Calo) and the trigger-path name, so they are generated
# here from a single table.  Every module keeps its original module-level
# name (e.g. `PFJet450_Prommonitoring`), so configs importing individual
# monitors are unaffected.

# (pT threshold in GeV, nbins, xmax) of the jet-pT histogram per trigger.
# NOTE: the 500 GeV entries keep the original integer xmax (1250, no decimal
# point) so the emitted configuration matches the hand-written one exactly.
_JET_PT_BINNING = [
    (40, 50, 100.),
    (60, 75, 150.),
    (80, 100, 200.),
    (140, 70, 350.),
    (200, 50, 500.),
    (260, 65, 650.),
    (320, 80, 800.),
    (400, 100, 1000.),
    (450, 112, 1120.),
    (500, 125, 1250),
]

def _jetMonitor(folder, path, nbins, xmax, jetSrc=None, ispf=None, iscalo=None):
    """Clone hltJetMETmonitoring for one trigger path.

    ``jetSrc``/``ispf``/``iscalo`` are only overridden when given, matching
    the original file which left them at their defaults for the AK4 PF
    monitors.
    """
    overrides = dict(
        FolderName=folder,
        histoPSet=dict(jetPtThrPSet=dict(nbins=nbins, xmin=0., xmax=xmax)),
        numGenericTriggerEventPSet=dict(hltPaths=[path + "_v*"]),
    )
    if jetSrc is not None:
        overrides["jetSrc"] = jetSrc
    if ispf is not None:
        overrides["ispfjettrg"] = ispf
    if iscalo is not None:
        overrides["iscalojettrg"] = iscalo
    return hltJetMETmonitoring.clone(**overrides)

for _pt, _nbins, _xmax in _JET_PT_BINNING:
    # AK4 PF, central and forward (default jet source and trigger flags).
    globals()["PFJet%d_Prommonitoring" % _pt] = _jetMonitor(
        'HLT/JME/Jets/AK4/PF/HLT_PFJet%d/' % _pt,
        'HLT_PFJet%d' % _pt, _nbins, _xmax)
    globals()["PFJetFwd%d_Prommonitoring" % _pt] = _jetMonitor(
        'HLT/JME/Jets/AK4Fwd/PF/HLT_PFJetFwd%d/' % _pt,
        'HLT_PFJetFwd%d' % _pt, _nbins, _xmax)
    # AK8 PF (Puppi jets), central and forward.
    globals()["AK8PFJet%d_Prommonitoring" % _pt] = _jetMonitor(
        'HLT/JME/Jets/AK8/PF/HLT_AK8PFJet%d/' % _pt,
        'HLT_AK8PFJet%d' % _pt, _nbins, _xmax,
        jetSrc="ak8PFJetsPuppi", ispf=True, iscalo=False)
    globals()["AK8PFJetFwd%d_Prommonitoring" % _pt] = _jetMonitor(
        'HLT/JME/Jets/AK8Fwd/PF/HLT_AK8PFJetFwd%d/' % _pt,
        'HLT_AK8PFJetFwd%d' % _pt, _nbins, _xmax,
        jetSrc="ak8PFJetsPuppi", ispf=True, iscalo=False)

# HLT_CaloJet500_NoJetID is the only calo-jet monitor.
CaloJet500_NoJetID_Prommonitoring = _jetMonitor(
    'HLT/JME/Jets/AK4/Calo/HLT_CaloJet500_NoJetID/',
    'HLT_CaloJet500_NoJetID', 125, 1250,
    jetSrc="ak4CaloJets", ispf=False, iscalo=True)

# Preserve the original sequence ordering exactly: AK4 monitors ascending in
# pT, AK8 monitors with the 450 GeV path listed first (as in the hand-written
# file), and the calo monitor last.
_ptAscending = [pt for pt, _, _ in _JET_PT_BINNING]
_ak8Order = [450] + [pt for pt in _ptAscending if pt != 450]
_moduleNames = (
    ["PFJet%d_Prommonitoring" % pt for pt in _ptAscending]
    + ["PFJetFwd%d_Prommonitoring" % pt for pt in _ptAscending]
    + ["AK8PFJet%d_Prommonitoring" % pt for pt in _ak8Order]
    + ["AK8PFJetFwd%d_Prommonitoring" % pt for pt in _ak8Order]
    + ["CaloJet500_NoJetID_Prommonitoring"]
)
_chain = globals()[_moduleNames[0]]
for _name in _moduleNames[1:]:
    _chain = _chain * globals()[_name]
HLTJetmonitoring = cms.Sequence(_chain)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
6738,
360,
48,
44,
28657,
13,
48344,
13,
42273,
35479,
62,
66,
12463,
1330,
289,
2528,
42273,
47123,
41143,
278,
198,
198,
21017,
367,
27734,
62,
42668,
42273,
... | 1.837356 | 9,487 |
import exceRNApipeline.pipeline.__main__ as pipeline
from unittest import TestCase
import sys
import os
import shutil
# Directory containing this test module.
_FILE_DIR = os.path.dirname(__file__)
# Scratch directory for test artifacts, created alongside this file.
_TEMP_DIR = os.path.join(_FILE_DIR, "_test_tmp")
11748,
43748,
27204,
79,
541,
4470,
13,
79,
541,
4470,
13,
834,
12417,
834,
355,
11523,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
4423,
346,
628,
198,
62,
25664,
62,
34720,
796,
286... | 2.783784 | 74 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.tests.clock import clock_object
import time
| [
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
286,
262,
4904,
43,
373,
407,
9387,
351,
428,
198,
2,
2393,
11,
921,
460,
7330,
530,
379,
... | 3.329545 | 88 |
import re
import shlex
import subprocess
import sys
from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast
import click
from click.utils import make_default_short_help
from neuro_sdk import ConfigError
from .root import Root
from .utils import NeuroClickMixin, Option
| [
11748,
302,
198,
11748,
427,
2588,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
11,
5345,
11,
309,
29291,
11,
4479,
11,
3350,
198,
198,
11748,
3904,
198,
6738,
3904,
13,
... | 3.511905 | 84 |
import requests, socket
import barcode
from barcode.writer import ImageWriter
# Build a Code 128 barcode generator class once; instances render images
# via the python-barcode ImageWriter.
c128 = barcode.get_barcode_class('CODE128')

# Resolve this machine's own IP so the request targets the locally
# running book API on port 5000.
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
url = 'http://{}:5000'.format(host_ip)

# A timeout prevents the script from hanging forever when the API is
# unreachable (the original request had none).
res = requests.get('{}/get_all_books'.format(url), timeout=10)
if res.status_code == 200:
    books = res.json()['books']
    for book in books:
        # One barcode image per book, saved as ../../Books/<name>.png
        code = c128(book['barcode_id'], writer=ImageWriter())
        code.save('../../Books/{}'.format(book['name']))
| [
11748,
7007,
11,
17802,
198,
11748,
2318,
8189,
198,
6738,
2318,
8189,
13,
16002,
1330,
7412,
34379,
198,
198,
66,
12762,
796,
2318,
8189,
13,
1136,
62,
65,
5605,
1098,
62,
4871,
10786,
34,
16820,
12762,
11537,
198,
198,
4774,
62,
367... | 2.735955 | 178 |
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.filters import BaseFilterBackend
from rest_framework.decorators import action
from rest_framework.response import Response
from .models import Album
from .serializers import AlbumSerializer
from WeebFM.permissions import IsStaffOrReadOnly
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
6738,
1334,
62,
30604,
13,
10379,
1010,
1330,
7308,
22417,
7282,
437,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
2223,
198... | 4.202532 | 79 |
from .models import Image,Category,Location
from django.http import HttpResponse,Http404
from django.shortcuts import render
import datetime as dt
# Create your views here.
| [
6738,
764,
27530,
1330,
7412,
11,
27313,
11,
14749,
198,
6738,
42625,
14208,
13,
4023,
220,
1330,
367,
29281,
31077,
11,
43481,
26429,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
11748,
4818,
8079,
355,
288,
83,
198,
198... | 3.433962 | 53 |
import unittest
import pysal
# Collect every test method defined on Test_Components into one suite.
suite = unittest.TestLoader().loadTestsFromTestCase(Test_Components)

if __name__ == '__main__':
    # When executed directly, run the suite with the default text reporter.
    unittest.TextTestRunner().run(suite)
| [
198,
11748,
555,
715,
395,
198,
11748,
279,
893,
282,
628,
198,
198,
2385,
578,
796,
555,
715,
395,
13,
14402,
17401,
22446,
2220,
51,
3558,
4863,
14402,
20448,
7,
14402,
62,
7293,
3906,
8,
198,
198,
361,
11593,
3672,
834,
6624,
705... | 2.616438 | 73 |
#
# Explore
# - The Adventure Interpreter
#
# Copyright (C) 2006 Joe Peterson
#
import sys
import Explore
# Command-line option parsing for the Explore interpreter.
# NOTE(review): several options below (-q, -c, -r, -s, --one-shot,
# --no-title, --title-only) assign variables (quiet, command, resume,
# last_suspend, one_shot, show_title, show_title_only) that are never
# passed to Explore.play() and are otherwise unused in this excerpt --
# presumably consumed by a fuller launcher; confirm before removing.
filename = None
no_delay = False
trs_compat = False

# Walk argv by index so an option's value (the element after the flag)
# can be read.  Values are not skipped, so they are re-examined as if
# they were flags on the next iteration (harmless: they never match).
for arg_num in range(len(sys.argv)):
	if sys.argv[arg_num] == "-f":
		# A value is accepted only if it exists and does not look like a flag.
		if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
			filename = sys.argv[arg_num + 1]
		else:
			# Python 2 print-to-stream syntax: this file targets Python 2.
			print >> sys.stderr, "Error: Missing adventure filename"
			sys.exit(1)
	elif sys.argv[arg_num] == "-q":
		quiet = True
	elif sys.argv[arg_num] == "-c":
		if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
			command = sys.argv[arg_num + 1]
	elif sys.argv[arg_num] == "-r":
		if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
			resume = sys.argv[arg_num + 1]
	elif sys.argv[arg_num] == "-s":
		if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
			last_suspend = sys.argv[arg_num + 1]
	elif sys.argv[arg_num] == "--one-shot":
		one_shot = True
	elif sys.argv[arg_num] == "--no-title":
		show_title = False
	elif sys.argv[arg_num] == "--title-only":
		show_title_only = True
	elif sys.argv[arg_num] == "--no-delay":
		no_delay = True
	elif sys.argv[arg_num] == "--trs-compat":
		trs_compat = True
# Launch the interpreter with the parsed options.
Explore.play(filename, no_delay, trs_compat)
| [
2,
198,
2,
41401,
198,
2,
220,
220,
220,
532,
383,
9553,
4225,
3866,
353,
198,
2,
198,
2,
15069,
357,
34,
8,
4793,
220,
5689,
18914,
198,
2,
198,
198,
11748,
25064,
198,
198,
11748,
41401,
198,
198,
34345,
796,
6045,
198,
3919,
... | 2.010363 | 772 |
#!/usr/bin/python
import math
import json
import random
import rospy
from geometry_msgs.msg import PoseStamped
import tf2_ros
import tf2_geometry_msgs
from utils import *
if __name__ == '__main__':
    # Instantiate the node and spin it; a ROS shutdown (e.g. Ctrl-C)
    # raises ROSInterruptException, which simply ends the program quietly.
    node = LocalGoalCreator()
    try:
        node.process()
    except rospy.ROSInterruptException:
        pass
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
10688,
198,
11748,
33918,
198,
11748,
4738,
198,
198,
11748,
686,
2777,
88,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
37557,
1273,
13322,
198,
11748,
48700,
17,
62,
4951,
... | 2.621212 | 132 |
# https://codeforces.com/problemset/problem/281/A
def capitalize_word(word):
    """Return *word* with its first character upper-cased, rest unchanged.

    Safe on the empty string (returned as-is); the original crashed on
    empty input and built a throwaway list just to read one character.
    """
    return word[:1].upper() + word[1:]


if __name__ == "__main__":
    # Guarding the I/O lets this module be imported (e.g. by tests)
    # without blocking on stdin.
    print(capitalize_word(input("")))
| [
2,
3740,
1378,
19815,
891,
273,
728,
13,
785,
14,
1676,
22143,
316,
14,
45573,
14,
30368,
14,
32,
198,
27544,
62,
20560,
796,
5128,
7203,
4943,
198,
5589,
533,
62,
8053,
796,
1351,
7,
27544,
62,
20560,
8,
198,
64,
796,
8996,
62,
... | 2.634921 | 63 |
# coding: utf-8
# Distributed under the terms of the MIT License.
""" This submodule implements some common workflow steps for use in
more complicated workflows.
"""
import copy
import logging
__all__ = ("castep_prerelax", "castep_scf")
LOG = logging.getLogger("run3")
def castep_scf(
    computer,
    calc_doc,
    seed,
    elec_energy_tol=None,
    write_checkpoint="ALL",
    required_keys=None,
    forbidden_keys=None,
):
    """ Perform a single-point (SCF) CASTEP run on the given structure.

    Parameters:
        computer (:obj:`matador.compute.ComputeTask`): the object that will be calling CASTEP.
        calc_doc (dict): the structure to run on.
        seed (str): root filename of structure.

    Keyword arguments:
        elec_energy_tol (float or str): forwarded to CASTEP's ``elec_energy_tol``.
        write_checkpoint (bool or str): forwarded to CASTEP's ``write_checkpoint``
            parameter; ``True``/``False`` map to ``ALL``/``NONE``.
        required_keys (:obj:`list` of :obj:`str`): list of keys required in calc doc.
        forbidden_keys (:obj:`list` of :obj:`str`): list of keys to scrub from calc doc.

    Returns:
        bool: whether or not the SCF was successful.

    """
    # NOTE(review): `required_keys`/`forbidden_keys` are accepted but not
    # used below -- validation runs with the hard-coded lists, matching
    # the historical behaviour; confirm before wiring them through.
    LOG.info("Performing singleshot CASTEP SCF...")

    scf_doc = copy.deepcopy(calc_doc)
    scf_doc["task"] = "singlepoint"
    scf_doc["write_checkpoint"] = _parse_write_checkpoint(write_checkpoint)
    if elec_energy_tol is not None:
        scf_doc["elec_energy_tol"] = elec_energy_tol

    # Spectral settings are meaningless for a plain SCF run: scrub them.
    computer.validate_calc_doc(
        scf_doc,
        [],
        [
            "spectral_task",
            "spectral_kpoints_list",
            "spectral_kpoints_path",
            "spectral_kpoints_mp_spacing",
            "spectral_kpoints_path_spacing",
        ],
    )

    return computer.run_castep_singleshot(scf_doc, seed, keep=True, intermediate=True)
def castep_prerelax(
    computer,
    calc_doc,
    seed,
    write_checkpoint="all",
    required_keys=None,
    forbidden_keys=None,
):
    """ Run a self-consistent (i.e. restarted) geometry optimisation.

    Optionally write a check file containing the final structure and density.

    Parameters:
        computer (:obj:`ComputeTask`): the object that will be calling CASTEP.
        calc_doc (dict): the structure to run on.
        seed (str): root filename of structure.

    Keyword arguments:
        write_checkpoint (bool or str): forwarded to CASTEP's ``write_checkpoint``
            parameter; ``True``/``False`` map to ``ALL``/``NONE``.
        required_keys (:obj:`list` of :obj:`str`): list of keys required in calc doc.
        forbidden_keys (:obj:`list` of :obj:`str`): list of keys to scrub from calc doc.

    Returns:
        bool: whether or not the relaxation was successful.

    """
    LOG.info("Performing CASTEP pre-relax...")

    relax_doc = copy.deepcopy(calc_doc)
    relax_doc["task"] = "geometryoptimisation"
    relax_doc["write_checkpoint"] = _parse_write_checkpoint(write_checkpoint)
    # Cap the optimiser at 100 steps unless the caller already chose a limit.
    relax_doc.setdefault("geom_max_iter", 100)

    computer.validate_calc_doc(relax_doc, required_keys, forbidden_keys)
    computer.calc_doc = relax_doc

    return computer.run_castep_relaxation(intermediate=True)
def _parse_write_checkpoint(write_checkpoint):
""" Returns the appropriate value of ``write_checkpoint``. """
if isinstance(write_checkpoint, bool):
if not write_checkpoint:
write_checkpoint = "NONE"
else:
write_checkpoint = "ALL"
if write_checkpoint.upper() not in ("NONE", "MINIMAL", "ALL", "BOTH", "FULL"):
LOG.warning(
f"Invalid value of `write_checkpoint` provided: {write_checkpoint}, using 'ALL'"
)
write_checkpoint = "ALL"
return write_checkpoint
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
198,
37811,
770,
850,
21412,
23986,
617,
2219,
30798,
4831,
329,
779,
287,
198,
3549,
8253,
670,
44041,
13,
198,
198,
37811,
198,
... | 2.535193 | 1,577 |
from Regex_To_Dfa import *
#inp = "(01*1)*1"
def main():
    """Prompt for a regular expression and display the equivalent NFA."""
    print("\nProject 5 : Construction of an Dfa equivalent to a Regular Expression\nBy Patrick Lenis & Paul Cvasa")
    print("Regular expression examples : xy, x+y, x*, (xy*y)*y\n")
    inp = input("Your regular expression : ")
    nfa_builder = NFAfromRegex(inp)
    nfa = nfa_builder.getNFA()
    print("\nNFA: ")
    nfa_builder.displayNFA()
    # DFA conversion (disabled in the original, kept for reference):
    # dfa_builder = DFAfromNFA(nfa)
    # dfa = dfa_builder.getDFA()
    # print("\nDFA: ")
    # dfa_builder.displayDFA()


if __name__ == "__main__":
    # Guard the interactive prompt so importing this module has no side effects.
    main()
| [
6738,
797,
25636,
62,
2514,
62,
35,
13331,
1330,
1635,
201,
198,
201,
198,
2,
259,
79,
796,
30629,
486,
9,
16,
27493,
16,
1,
201,
198,
4798,
7203,
59,
77,
16775,
642,
1058,
20395,
286,
281,
360,
13331,
7548,
284,
257,
23603,
41986... | 2.22488 | 209 |
"""
testgen.py: simple test for a sequence generator
"""
import unittest
#from gen123 import gen123
from class123 import gen123
if __name__ == "__main__":
    # Discover and run the TestCase classes imported above.
    unittest.main()
37811,
198,
9288,
5235,
13,
9078,
25,
2829,
1332,
329,
257,
8379,
17301,
198,
37811,
198,
198,
11748,
555,
715,
395,
198,
2,
6738,
2429,
10163,
1330,
2429,
10163,
198,
6738,
1398,
10163,
1330,
2429,
10163,
198,
220,
220,
220,
220,
220... | 2.787879 | 66 |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector
from scrapy_selenium import SeleniumRequest
from selenium.webdriver.common.keys import Keys
import time
import datetime | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
15881,
88,
198,
6738,
15881,
88,
13,
19738,
273,
1330,
9683,
273,
198,
6738,
15881,
88,
62,
741,
47477,
1330,
15300,
47477,
18453,
198,
6738,
384,
11925,
1505,
13... | 3.288136 | 59 |
from __future__ import division
from six.moves import range
from dials.array_family import flex
import math
from rstbx.symmetry.constraints.parameter_reduction \
import symmetrize_reduce_enlarge
from scitbx.matrix import sqr, col
from xfel.merging.algorithms.error_model.error_modeler_base import error_modeler_base
from xfel.merging.algorithms.error_model.sdfac_refine_lbfgs import finite_difference
from libtbx import group_args
"""
Classes to support propagating erros after postrefinement in cxi.merge
"""
# Bucket to hold refinable error terms
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
2237,
13,
76,
5241,
1330,
2837,
198,
6738,
5980,
82,
13,
18747,
62,
17989,
1330,
7059,
198,
11748,
10688,
198,
6738,
374,
301,
65,
87,
13,
1837,
3020,
11973,
13,
1102,
2536,
6003,
13,
... | 2.994624 | 186 |
import aredis
import asyncio
import uvloop
import time
import sys
from functools import wraps
from argparse import ArgumentParser
if sys.version_info[0] == 3:
long = int
@timer
@timer
@timer
@timer
@timer
@timer
@timer
@timer
@timer
if __name__ == '__main__':
    # Benchmark pass 1: the default asyncio event loop.
    # NOTE(review): `run()` is not defined in this excerpt -- presumably a
    # coroutine defined alongside the @timer-decorated helpers above; confirm.
    print('WITH ASYNCIO ONLY:')
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run())
    # Benchmark pass 2: same workload on uvloop; after the policy swap,
    # get_event_loop() hands back a uvloop-backed loop.
    print('WITH UVLOOP:')
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run())
| [
11748,
389,
6381,
198,
11748,
30351,
952,
198,
11748,
334,
85,
26268,
198,
11748,
640,
198,
11748,
25064,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
361,
25064,
13,
9641,
62,
10951,
5... | 2.580189 | 212 |
from decimal import Decimal
import json
import os
from django.test.testcases import TestCase
from custom.ilsgateway.api import Product, ILSUser, SMSUser, Location, ProductStock, StockTransaction
| [
6738,
32465,
1330,
4280,
4402,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
42625,
14208,
13,
9288,
13,
9288,
33964,
1330,
6208,
20448,
198,
6738,
2183,
13,
4487,
10494,
1014,
13,
15042,
1330,
8721,
11,
314,
6561,
12982,
11,
29287,
... | 3.862745 | 51 |
import pickle
import os.path
import catlas
# Absolute directory containing this file, for locating bundled resources.
dir_path = os.path.dirname(os.path.realpath(__file__))
| [
11748,
2298,
293,
198,
11748,
28686,
13,
6978,
198,
11748,
3797,
21921,
198,
198,
15908,
62,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
4008,
628
] | 2.702703 | 37 |
from portfolio import Operation
from tinvest import Operation as TinkoffOperation
| [
6738,
15320,
1330,
14680,
198,
6738,
19783,
4223,
1330,
14680,
355,
309,
676,
2364,
32180,
628
] | 5.1875 | 16 |
_base_ = [
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# MJODNet detector: RegNetX-400MF backbone (single output level), a
# depthwise-separable dilated encoder neck, and a dense head over 34 classes.
model = dict(
    type='MJODNet',
    pretrained='open-mmlab://regnetx_400mf',
    backbone=dict(
        type='RegNet',
        arch='regnetx_400mf',
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='DepthwiseSeparableDilatedEncoder',
        in_channels=384,
        out_channels=512,
        block_mid_channels=128,
        num_residual_blocks=4,
        block_dilations=[1, 2, 3, 4]),
    bbox_head=dict(
        type='MJODNetHead',
        num_classes=34,
        in_channels=512,
        reg_decoded_bbox=True,
        # Single stride-32 level with 5 anchor scales at aspect ratio 1.0.
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[1, 2, 4, 8, 16],
            strides=[32]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1., 1., 1., 1.],
            add_ctr_clamp=True,
            ctr_clamp=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco_mahjong/'
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# NOTE: val and test both point at the val2017 annotations below.
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# Step schedule over 24 epochs ("2x"): LR drops at epochs 16 and 22,
# with a 500-iteration linear warmup.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
| [
62,
8692,
62,
796,
685,
198,
220,
220,
220,
705,
40720,
62,
8692,
62,
14,
1416,
704,
5028,
14,
15952,
5950,
62,
16,
87,
13,
9078,
3256,
705,
40720,
62,
8692,
62,
14,
12286,
62,
43282,
13,
9078,
6,
198,
60,
198,
19849,
796,
8633,... | 1.968107 | 1,944 |
from types import GeneratorType
import httpx
import pytest
from fintoc.client import Client
| [
6738,
3858,
1330,
35986,
6030,
198,
198,
11748,
2638,
87,
198,
11748,
12972,
9288,
198,
198,
6738,
277,
600,
420,
13,
16366,
1330,
20985,
628,
198
] | 3.692308 | 26 |
# django imports
from django.contrib import admin
# app imports
from .models import Article, Author
admin.site.register(Article)
admin.site.register(Author)
| [
2,
42625,
14208,
17944,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
598,
17944,
198,
6738,
764,
27530,
1330,
10172,
11,
6434,
628,
198,
28482,
13,
15654,
13,
30238,
7,
14906,
8,
198,
28482,
13,
15654,
13,
30238,
... | 3.478261 | 46 |
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
# from configure import args
from load_data import find_node
# Hierarchical Memory Extractor 分层记忆抽取器
class HME(nn.Module):
'''
分层记忆抽取器:输入一个bag embedding,和分层关系,输出对应的标签
'''
| [
11748,
28034,
201,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
11748,
28034,
13,
40085,
355,
6436,
201,
198,
11... | 1.815534 | 206 |
from bs4 import BeautifulSoup
from urllib.request import urlopen
url = "https://coinmarketcap.com"
html = urlopen(url)
soup = BeautifulSoup(html, 'lxml')
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
628,
198,
6371,
796,
366,
5450,
1378,
3630,
10728,
11128,
13,
785,
1,
198,
6494,
796,
19016,
9654,
7,
6371,
8,
198,
82,
10486,
796,
... | 2.907407 | 54 |
import tkinter as tk
from tkinter import ttk
from copy import copy
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
198,
6738,
4866,
1330,
4866,
628
] | 3.136364 | 22 |
from sklearn_pandas import DataFrameMapper
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer
from sklearn2pmml.decoration import Alias, CategoricalDomain, ContinuousDomain
from sklearn2pmml.preprocessing import ExpressionTransformer
import pandas
df = pandas.read_csv("audit.csv")
cat_columns = ["Education", "Employment", "Marital", "Occupation"]
cont_columns = ["Age", "Hours", "Income"]
X = df[cat_columns + cont_columns]
y = df["Adjusted"]
mapper = DataFrameMapper(
[([cat_column], [CategoricalDomain(), LabelBinarizer()]) for cat_column in cat_columns] +
[(cont_columns, ContinuousDomain())] +
[(["Income", "Hours"], Alias(ExpressionTransformer("X[0] / (X[1] * 52.0)"), "Hourly_Income", prefit = True))]
)
feature_eng_pipeline = Pipeline([
("mapper", mapper)
])
Xt = feature_eng_pipeline.fit_transform(X)
Xt = Xt.astype(float)
from sklearn2pmml import make_tpot_pmml_config
from tpot.config import classifier_config_dict
# Classes supported by TPOT
tpot_config = classifier_config_dict
# Union between classes supported by TPOT and SkLearn2PMML
tpot_pmml_config = make_tpot_pmml_config(tpot_config)
# Exclude ensemble model types
tpot_pmml_config = { key: value for key, value in tpot_pmml_config.items() if not (key.startswith("sklearn.ensemble.") or key.startswith("xgboost.")) }
# Exclude some more undesirable elementary model types
del tpot_pmml_config["sklearn.neighbors.KNeighborsClassifier"]
from tpot import TPOTClassifier
classifier = TPOTClassifier(generations = 7, population_size = 11, scoring = "roc_auc", config_dict = tpot_pmml_config, random_state = 13, verbosity = 2)
classifier.fit(Xt, y)
tpot_pipeline = classifier.fitted_pipeline_
from sklearn2pmml import make_pmml_pipeline, sklearn2pmml
# Combine fitted sub-pipelines to a fitted pipeline
pipeline = Pipeline(feature_eng_pipeline.steps + tpot_pipeline.steps)
pmml_pipeline = make_pmml_pipeline(pipeline, active_fields = X.columns.values, target_fields = [y.name])
#pmml_pipeline.verify(X.sample(50, random_state = 13, replace = False), precision = 1e-11, zeroThreshold = 1e-11)
sklearn2pmml(pmml_pipeline, "TPOTAudit.pmml", with_repr = True) | [
6738,
1341,
35720,
62,
79,
392,
292,
1330,
6060,
19778,
44,
11463,
198,
6738,
1341,
35720,
13,
79,
541,
4470,
1330,
37709,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
36052,
33,
22050,
7509,
198,
6738,
1341,
35720,
17,
79,
3020,
75... | 2.761724 | 789 |
import sys
import subprocess
import pandas as pd
import os
from pathlib import Path
import time
import datetime
import ast
from configparser import ConfigParser
'''Reading the Config file from command line argument'''
parser = ConfigParser()
pd.set_option('display.max_columns', None)
config_file = sys.argv[1]
parser.read(config_file)
'''Printing the variables '''
data_path = parser.get('paths', 'data_path')
action_path = data_path + "/" + "action"
print("action_path is " + str(action_path))
apps_home_path = parser.get('paths', 'apps_home')
print("apps_home_path is " + str(apps_home_path))
'''Creating DF for apps to be tracked from config file'''
all_apps = parser.get('yarn_apps', 'appname')
list_apps = ast.literal_eval(all_apps)
# print("List of Apps are : " + str(list_apps))
df_apps = pd.DataFrame(list_apps, columns=['app_name', 'app_schedule', 'app_user', 'app_submit_file', 'app_type'])
print("df_apps are " + str(df_apps))
filename = sys.argv[2]
a_path = Path(action_path)
modTimeEpoc = os.path.getmtime(action_path + "/" + filename)
currTimeEpoc = ts = datetime.datetime.now().timestamp()
td = round(round(currTimeEpoc - modTimeEpoc) / 60)
if td <= 5:
print("Input config file is fresh ( " + str(td) + " minutes old ). Proceed to Action !!")
app_home = Path(apps_home_path)
a_path = Path(action_path)
print("changing to the action_path directory" )
os.chdir(a_path)
print("current directory is " + str(Path.cwd()))
with open(filename) as f:
Line = f.readline().strip('\n').split(' ')[1].replace('[', '').replace(']', '').split(', ')
grepstrng = '.+?(?=bin)'
for x in Line:
''' preparing the bash commands for yarn '''
print("Working on app name : " + x)
''' Finding the spark-submit file for current appname'''
app_submit_file = df_apps.loc[df_apps['app_name'] == x]['app_submit_file'].values[0]
print("app_submit_file for " + str(x) + "is :" + str(app_submit_file))
# yarn_cmd = ['find', app_home, '-type', 'f', '-name', '*.sh']
# grep_cmd_exe = ['xargs', 'grep', '-ril', str(x)]
echo_cmd = ['echo', app_submit_file]
grep_cmd_home = ['grep', '-oP', grepstrng]
o1 = subprocess.run(echo_cmd, stdout=subprocess.PIPE)
o2 = subprocess.run(grep_cmd_home, input=o1.stdout, stdout=subprocess.PIPE)
# o3 = subprocess.run(grep_cmd_home, input=o2.stdout, stdout=subprocess.PIPE)
# exe_file = str(o2.stdout).split("'")[1].strip('\\n')
# print("Spark-submit file for appname " + str(x) + " is " + str(exe_file))
exe_file_home = str(o2.stdout).split("'")[1].strip('\\n')
print("Home directory for appname " + str(x) + " is " + str(exe_file_home))
print("*******************************************************")
print("Running the Spark Submit script for App name : " + str(x))
print("*******************************************************")
os.chdir(exe_file_home)
subprocess.call(app_submit_file)
else:
print("Looks like input config is too old( " + str(td) + " minutes old ) Aborting action !!")
| [
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
6468,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,... | 2.426958 | 1,328 |
from cube2common.constants import weapon_types
from spyd.registry_manager import register
from spyd.utils.constrain import constrain_range
@register('room_player_event_handler')
| [
6738,
23441,
17,
11321,
13,
9979,
1187,
1330,
4282,
62,
19199,
198,
6738,
599,
5173,
13,
2301,
4592,
62,
37153,
1330,
7881,
198,
6738,
599,
5173,
13,
26791,
13,
1102,
2536,
391,
1330,
1500,
3201,
62,
9521,
628,
198,
31,
30238,
10786,
... | 3.529412 | 51 |
from pymodbus.interfaces import IModbusFramer
import struct
# Unit ID, Function Code
BYTE_ORDER = '>'
FRAME_HEADER = 'BB'
# Transaction Id, Protocol ID, Length, Unit ID, Function Code
SOCKET_FRAME_HEADER = BYTE_ORDER + 'HHH' + FRAME_HEADER
# Function Code
TLS_FRAME_HEADER = BYTE_ORDER + 'B'
class ModbusFramer(IModbusFramer):
"""
Base Framer class
"""
def _validate_unit_id(self, units, single):
"""
Validates if the received data is valid for the client
:param units: list of unit id for which the transaction is valid
:param single: Set to true to treat this as a single context
:return: """
if single:
return True
else:
if 0 in units or 0xFF in units:
# Handle Modbus TCP unit identifier (0x00 0r 0xFF)
# in asynchronous requests
return True
return self._header['uid'] in units
def sendPacket(self, message):
"""
Sends packets on the bus with 3.5char delay between frames
:param message: Message to be sent over the bus
:return:
"""
return self.client.send(message)
def recvPacket(self, size):
"""
Receives packet from the bus with specified len
:param size: Number of bytes to read
:return:
"""
return self.client.recv(size)
| [
6738,
12972,
4666,
10885,
13,
3849,
32186,
1330,
314,
5841,
10885,
21055,
263,
198,
11748,
2878,
198,
198,
2,
11801,
4522,
11,
15553,
6127,
198,
17513,
9328,
62,
12532,
1137,
796,
705,
29,
6,
198,
10913,
10067,
62,
37682,
1137,
796,
7... | 2.347245 | 599 |
# -*- coding:utf-8 -*-
# !/usr/bin/env python3
"""
download tie from Baidu Tieba
"""
import http.client
import os
import re
import urllib.error
import urllib.parse
import urllib.request
from minghu6.http.request import headers
from color import color
from minghu6.text.encoding import get_decode_html
# 处理页面标签类
# 百度贴吧爬虫类
if __name__ == '__main__':
cli()
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
201,
198,
37811,
201,
198,
15002,
9839,
422,
347,
1698,
84,
36286,
7012,
201,
198,
37811,
201,
198,
201... | 1.98995 | 199 |
from __future__ import print_function
# Standard Library Imports
from typing import List, Iterator, NamedTuple, NoReturn
from functools import partial
from copy import deepcopy
import binascii
import argparse
import pickle
import json
import abc
import re
# Package imports
from kodi_addon_dev.repo import LocalRepo
from kodi_addon_dev.support import logger
from kodi_addon_dev.utils import ensure_native_str, urlparse, real_input
import xbmcgui
try:
from shutil import get_terminal_size
except ImportError:
# noinspection PyUnresolvedReferences
from backports.shutil_get_terminal_size import get_terminal_size
# The Processed Listitem Named tuple, Make listitems easier to work with
Listitem = NamedTuple("Listitem", (("count", int), ("isfolder", bool), ("size_of_name", int), ("item", dict)))
__all__ = ["BaseDisplay", "CMDisplay"]
class BaseDisplay(object):
"""Base Class to for Displaying Kodi Listitems."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
# noinspection PyTypeChecker
def _formatter(self, text):
"""Convert kodi formating into real text"""
text = ensure_native_str(text)
# Search & replace localization strings
text = re.sub(r"\$LOCALIZE\[(\d+)\]", self._localize, text)
text = re.sub(r"\$ADDON\[(\S+?)\s(\d+)\]", self._localize_addon, text)
text = re.sub(r"\[COLOR\s\w+\](.+?)\[/COLOR\]", partial(self.formatter, "COLOR"), text)
# Common formatting
for common in ("I", "B", "UPPERCASE", "LOWERCASE", "CAPITALIZE", "LIGHT"):
text = re.sub(r"\[{0}\](.+?)\[/{0}\]".format(common), partial(self.formatter, common), text)
return text.replace("[CR]", "\n")
@staticmethod
def formatter(name, match):
"""
Convert a kodi formating.
:param str name: The name of the formatting e.g. UPPERCASE, B, COLOR
:param match: A re.Match object with the matching text located at group(1), group(0) for the full text.
:returns: The formatted string
:rtype: str
"""
# Strip out formating and reutrn text untouched
if name in ("B", "I", "LIGHT", "COLOR"):
return match.group(1)
elif name == "UPPERCASE":
return match.group(1).upper()
elif name == "LOWERCASE":
return match.group(1).lower()
elif name == "CAPITALIZE":
return match.group(1).capitalize()
else:
return match.group(0)
def _localize(self, match):
"""$LOCALIZE[12345] - for specifying a localized string."""
string_id = int(match.group(1))
text = match.group(0)
return self.__localize(text, string_id, "resource.language.en_gb")
def _localize_addon(self, match):
"""$ADDON[script.music.foobar 12345] - for specifying a string provided by an addon."""
text = match.group(0)
addon_id = match.group(1)
string_id = int(match.group(2))
return self.__localize(text, string_id, addon_id)
def __localize(self, text, string_id, addon_id): # type: (str, int, str) -> str
"""Return the localized string if available else leave string untouched"""
strings = self.cached.request_addon(addon_id).strings
return strings[string_id] if string_id in strings else text
@staticmethod
class CMDisplay(BaseDisplay):
"""Display manager that will display kodi listitem in a basic non tty terminal window."""
def input(self, msg): # type: (str) -> str
"""Ask for user input."""
try:
return real_input(msg)
except KeyboardInterrupt:
return ""
@staticmethod
def notify(*msg, **kwargs):
"""
Notify the user with givin message.
If skip is set to True then the user will be asked if they want to continue, returning True if so.
Else False will be returned.
"""
skip = kwargs.get("skip", True)
print(*msg)
if skip:
try:
real_input("Press enter to continue, or Ctrl+C to Quit:")
except KeyboardInterrupt:
return False
else:
print()
return True
else:
return False
def show(self, items, current_path): # type: (List[Listitem], str) -> Listitem
"""Show a list of all the avilable listitems and allow user to make there selection."""
# Process all listitems into a Tuple of [count, isfolder, len label, listitem]
lines = self._detailed_view(items) if self.settings.detailed else self._compact_view(items)
lines = lines if self.settings.no_crop else map(self._line_limiter, lines)
terminal_width = self._terminal_width
# Construct the full list of line to display
output = ["=" * terminal_width, current_path, "=" * terminal_width]
output.extend(lines)
output.append("=" * terminal_width)
print("\n".join(output))
# Return the full list of listitems
return self._user_choice(items)
@staticmethod
def _compact_view(items): # type: (List[Listitem]) -> Iterator[str]
"""Display listitems in a compact view, one line per listitem."""
# Calculate the max length of required lines
title_len = max(item.size_of_name for item in items)
num_len = len(str(len(items)))
title_len += num_len + 4
# Create a line output for each listitem entry
for count, isfolder, _, item in items:
# Folder/Video icon, + for Folder, - for Video
label = ("{}. + {}" if isfolder else "{}. - {}").format(str(count).rjust(num_len), item.pop("label"))
yield "{} Listitem({})".format(label.ljust(title_len), item)
def _detailed_view(self, items): # type: (List[Listitem]) -> Iterator[str]
"""Display listitems in a detailed view, each component of a listitem will be on it's own line."""
terminal_width = self._terminal_width
# Create a line output for each component of a listitem
for count, _, size_of_name, item in items:
# Show the title in it's own area
yield "{}. {}".format(count, item.pop("label"))
yield "#" * terminal_width
# Show all the rest of the listitem
for key, value in item.items():
if isinstance(value, dict):
yield ("{}:".format(key.title())).ljust(size_of_name)
for name, sub in value.items():
yield "- {}{}".format(name.ljust(size_of_name), sub)
else:
yield "{}{}".format(key.title().ljust(size_of_name), value)
yield ""
@staticmethod
def _user_choice(items): # type: (List[Listitem]) -> Listitem
"""Ask user to select an item, returning selection as an integer."""
while True:
try:
# Ask user for selection, Returning None if user entered nothing
choice = real_input("Choose an item: ")
except KeyboardInterrupt:
break
if choice:
try:
# Convert choice to an integer
choice = int(choice)
except ValueError:
print("You entered a non-numerical value, Plean enter a numerical value or leave black to exit.")
else:
try:
return items[choice]
except IndexError:
print("Choise is out of range, Please choose from above list.")
else:
break
@property
def _terminal_width(self):
"""Ensures a line minimum of 80."""
return max(get_terminal_size((300, 25)).columns, 80)
def _line_limiter(self, line): # type: (str) -> str
"""Limit the length of a output line to fit within the terminal window."""
terminal_width = self._terminal_width
return "%s..." % (line[:terminal_width-3]) if len(line) > terminal_width else line
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
2,
8997,
10074,
1846,
3742,
198,
6738,
19720,
1330,
7343,
11,
40806,
1352,
11,
34441,
51,
29291,
11,
1400,
13615,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
4866,
1330,
... | 2.368223 | 3,449 |
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
import json
import unittest
from tests.helpers.unittest_base import BaseTestCase
# -- BQ model imports
from rdr_service.model import (
bq_code, bq_genomics, bq_hpo, bq_organization, bq_participant_summary, bq_pdr_participant_summary,
# bq_questionnaires <-- to do: add tests for schemas in these files?
bq_site, bq_workbench_workspace, bq_workbench_researcher
)
from rdr_service.resource import schemas as rschemas
# Common fields from all the BQ schemas which should be excluded from comparison. If the resource schema contains these
# field names, the names will be translated automatically by the pipeline to add an 'orig_' prefix for the
# BigQuery or PostgreSQL tables
_default_exclusions = ['id', 'created', 'modified']
# Any additional "per schema" exclusions that may not be present in the corresponding resource schema
bq_field_exclusions = {
'CodeSchema': ['bq_field_name'],
# Genomic schemas that have orig_* fields already and also id/created/modified
'GenomicManifestFileSchema': ['orig_id'],
'GenomicManifestFeedbackSchema': ['orig_id'],
'GenomicGCValidationMetricsSchema': ['orig_id', 'orig_created', 'orig_modified'],
'ParticipantSchema': ['addr_state', 'addr_zip', 'biospec', 'consents']
}
# Fields from the resource schemas that do not exist in the BQ schema
rsc_field_exclusions = {
'HPOSchema': list(rschemas.HPOSchema.Meta.pii_fields),
'PatientStatusSchema': list(rschemas.participant.PatientStatusSchema.Meta.pii_fields),
'GenomicManifestFileSchema': _default_exclusions,
'GenomicManifestFeedbackSchema': _default_exclusions,
'GenomicGCValidationMetricsSchema': _default_exclusions,
'WorkbenchResearcherSchema': list(rschemas.WorkbenchResearcherSchema.Meta.pii_fields),
'ParticipantSchema': list(rschemas.ParticipantSchema.Meta.pii_fields) +
['addresses']
}
# For field name translations that have been vetted after verifying the differences between BQ schemas
# and the resource schemas
bq_field_name_mappings = {
# Each item is a dict where key is the bq field name and value is the related resource schema field name
'ModuleStatusSchema': {
'mod_created': 'module_created',
'mod_authored': 'module_authored',
},
}
class ResourceSchemaTest(BaseTestCase):
"""
Test that the resource schema definitions/fields align with the BigQuery schemas
NOTE: These tests may be deprecated if use of BigQuery PDR is discontinued
TODO: Add more detail about implementing test cases that include handling field name prefixes, exclusions, etc.
"""
@staticmethod
def _get_bq_field_list(bq_schema, rsc_name, bq_prefix='', exclusions=[]):
""" Return a filtered BQ schema column/field name list, with any specified prefix stripped """
fields = []
# If the schemas have pre-identified diffs in their field naming conventions, automatically translate the
# bq schema field name to its resource schema field name
mapped_fields = bq_field_name_mappings[rsc_name] if rsc_name in bq_field_name_mappings.keys() else None
for field in json.loads(bq_schema.to_json()):
name = field['name']
if name not in exclusions:
if mapped_fields and name in mapped_fields.keys():
fields.append(mapped_fields.get(name))
else:
fields.append(name[len(bq_prefix):] if name.startswith(bq_prefix) else name)
return fields
# -- Create a test for each schema defined in the rdr_service/resource/schemas directory (including sub-schemas)
# Participant data schemas
# Consent schema is depreciated in Resources, but not BigQuery.
# def test_consent_resource_schema(self):
# self._verify_resource_schema('ConsentSchema',
# rschemas.participant.ConsentSchema(),
# bq_participant_summary.BQConsentSchema())
# TODO: Questionnaire-related schemas
# Genomic-related schemas
# TODO: Confirm relationship of the id and orig_id fields (and/or similar created, modified) in genomic schemas
# Researcher workbench related schemas
@unittest.skip('Confirm if WorkspaceAgeSchema needs a SchemaID defined')
@unittest.skip('Confirm if WorkspaceRaceEthnicitySchema needs a SchemaID defined')
| [
2,
198,
2,
770,
2393,
318,
2426,
284,
262,
2846,
290,
3403,
5447,
287,
262,
198,
2,
2393,
705,
43,
2149,
24290,
3256,
543,
318,
636,
286,
428,
2723,
2438,
5301,
13,
198,
2,
198,
11748,
33918,
198,
11748,
555,
715,
395,
198,
198,
... | 2.804833 | 1,614 |
n = int(input())
result = 0
for num in range(1, n+1) :
result += num
print(result) | [
77,
796,
493,
7,
15414,
28955,
198,
20274,
796,
657,
198,
1640,
997,
287,
2837,
7,
16,
11,
299,
10,
16,
8,
1058,
198,
220,
220,
220,
1255,
15853,
997,
198,
4798,
7,
20274,
8
] | 2.457143 | 35 |
import struct
import socket
import select
## This module provides library to send and receive messages to NOX's messenger
#
# This is a rewrite of noxmsg.py from OpenRoads (OpenFlow Wireless)
#
# @author ykk (Stanford University)
# @date January, 2010
# @see messenger
def stringarray(string):
"""Output array of binary values in string.
"""
arrstr = ""
if (len(string) != 0):
for i in range(0,len(string)):
arrstr += "%x " % struct.unpack("=B",string[i])[0]
return arrstr
def printarray(string):
"""Print array of binary values
"""
print "Array of length "+str(len(string))
print stringarray(string)
class channel:
"""TCP channel to communicate to NOX with.
"""
def __init__(self,ipAddr,portNo=2603,debug=False):
"""Initialize with socket
"""
##Socket reference for channel
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((ipAddr,portNo))
self.debug = debug
##Internal buffer for receiving
self.__buffer = ""
##Internal reference to header
self.__header = messenger_msg()
def baresend(self, msg):
"""Send bare message"""
self.sock.send(msg)
def send(self,msg):
"""Send message
"""
msgh = messenger_msg()
remaining = msgh.unpack(msg)
if (msgh.length != len(msg)):
msgh.length = len(msg)
msg = msgh.pack()+remaining
self.baresend(msg)
if (self.debug):
printarray(msg)
def receive(self, recvLen=0,timeout=0):
"""Receive command
If length == None, nonblocking receive (return None or message)
With nonblocking receive, timeout is used for select statement
If length is zero, return single message
"""
if (recvLen==0):
#Receive full message
msg=""
length=len(self.__header)
while (len(msg) < length):
msg+=self.sock.recv(1)
#Get length
if (len(msg) == length):
self.__header.unpack(msg)
length=self.__header.length
return msg
elif (recvLen==None):
#Non-blocking receive
ready_to_read = select.select([self.sock],[],[],timeout)[0]
if (ready_to_read):
self.__buffer += self.sock.recv(1)
if (len(self.__buffer) >= len(self.__header)):
self.__header.unpack(self.__buffer)
if (self.__header.length == len(self.__buffer)):
msg = self.__buffer
self.__buffer = ""
return msg
return None
else:
#Fixed length blocking receive
return self.sock.recv(recvLen)
def __del__(self):
"""Terminate connection
"""
emsg = messenger_msg()
emsg.type = MSG_DISCONNECT
emsg.length = len(emsg)
self.send(emsg.pack())
self.sock.shutdown(1)
self.sock.close()
class sslChannel(channel):
"""SSL channel to communicate to NOX with.
"""
def __init__(self, ipAddr, portNo=1304,debug=False):
"""Initialize with SSL sock
"""
NOXChannel.__init__(self, ipAddr, portNo,debug)
##Reference to SSL socket for channel
self.sslsock = socket.ssl(self.sock)
def baresend(self, msg):
"""Send bare message"""
self.sslsock.write(msg)
class messenger_msg:
"""Automatically generated Python class for messenger_msg
Date 2010-01-20
Created by lavi.pythonize.msgpythonizer
"""
def __init__(self):
"""Initialize
Declare members and default values
"""
self.length = 0
self.type = 0
self.body= []
def __assert(self):
"""Sanity check
"""
if (not (self.type in msg_type_values)):
return (False, "type must have values from msg_type")
return (True, None)
def pack(self, assertstruct=True):
"""Pack message
Packs empty array used as placeholder
"""
if(assertstruct):
if(not self.__assert()[0]):
return None
packed = ""
packed += struct.pack("!HB", self.length, self.type)
for i in self.body:
packed += struct.pack("!B",i)
return packed
def unpack(self, binaryString):
"""Unpack message
Do not unpack empty array used as placeholder
since they can contain heterogeneous type
"""
if (len(binaryString) < 3):
return binaryString
(self.length, self.type) = struct.unpack_from("!HB", binaryString, 0)
return binaryString[3:]
def __len__(self):
"""Return length of message
"""
l = 3
l += len(self.body)*1
return l
msg_type = ['MSG_DISCONNECT', 'MSG_ECHO', 'MSG_ECHO_RESPONSE', 'MSG_AUTH', 'MSG_AUTH_RESPONSE', 'MSG_AUTH_STATUS', 'MSG_NOX_STR_CMD']
MSG_DISCONNECT = 0
MSG_ECHO = 1
MSG_ECHO_RESPONSE = 2
MSG_AUTH = 3
MSG_AUTH_RESPONSE = 4
MSG_AUTH_STATUS = 5
MSG_NOX_STR_CMD = 10
msg_type_values = [0, 1, 2, 3, 4, 5, 10]
| [
11748,
2878,
198,
198,
11748,
17802,
198,
11748,
2922,
198,
198,
2235,
770,
8265,
3769,
5888,
284,
3758,
290,
3328,
6218,
284,
8005,
55,
338,
31228,
198,
2,
198,
2,
770,
318,
257,
28183,
286,
645,
87,
19662,
13,
9078,
422,
4946,
291... | 2.209947 | 2,272 |
import os
import shutil
import tarfile
import tempfile
import zipfile
import traceback
import requests
import apkutils2
from logzero import logger
from adbutils import device as get_device
device = get_device()
atx_agent_version = "0.10.0"
__all__ = ["get_atx_agent_bundle", "get_whatsinput_apk"]
def get_atx_agent_bundle() -> str:
"""
bundle all platform atx-agent binary into one zip file
"""
version = atx_agent_version
target_zip = f"vendor/atx-agent-{version}.zip"
if not os.path.isfile(target_zip):
os.makedirs("vendor", exist_ok=True)
create_atx_agent_bundle(version, target_zip)
return target_zip
def get_stf_binaries() -> str:
"""
Download from https://github.com/openatx/stf-binaries
Tag 0.2, support to Android P
Tag 0.3.0 use stf/@devicefarmer
"""
version = "0.3.0"
target_path = f"vendor/stf-binaries-{version}.zip"
mirror_download(
f"https://github.com/openatx/stf-binaries/archive/{version}.zip", target_path
)
return target_path
def push_stf(path: str, dest: str, zipfile_path: str, mode=0o755):
"""push minicap and minitouch from zip"""
with zipfile.ZipFile(zipfile_path) as z:
if path not in z.namelist():
logger.warning("stf stuff %s not found", path)
return
src_info = z.getinfo(path)
dest_info = device.sync.stat(dest)
if dest_info.size == src_info.file_size and dest_info.mode & mode == mode:
logger.debug("already pushed %s", path)
return
with z.open(path) as f:
device.sync.push(f, dest, mode)
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
13422,
7753,
198,
11748,
20218,
7753,
198,
11748,
19974,
7753,
198,
11748,
12854,
1891,
198,
11748,
7007,
198,
11748,
2471,
74,
26791,
17,
198,
6738,
2604,
22570,
1330,
49706,
198,
6738,
51... | 2.356936 | 692 |
test = { 'name': 'q1.3',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> assert np.abs(v_direct_forward - 1280) / 1280 < 0.1;\n'
'>>> assert v_head_forward > 7000;\n'
'>>> assert v_direct_reverse < 2000;\n'
'>>> assert v_head_reverse > 2000\n',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
9288,
796,
1391,
220,
220,
705,
3672,
10354,
705,
80,
16,
13,
18,
3256,
198,
220,
220,
220,
705,
13033,
10354,
352,
11,
198,
220,
220,
220,
705,
2385,
2737,
10354,
685,
220,
220,
1391,
220,
220,
705,
33964,
10354,
685,
220,
220,
1... | 1.465368 | 462 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.utils import timezone
ABOUT_PAGE = {
'title': 'About',
'slug': 'about',
'content': 'About placeholder text',
'publish_date': timezone.now(),
'created': timezone.now(),
'modified': timezone.now()
}
FAQ_PAGE = {
'title': 'Frequently Asked Questions',
'slug': 'faq',
'content': 'FAQ placeholder text',
'publish_date': timezone.now(),
'created': timezone.now(),
'modified': timezone.now()
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
628,
198,
6242,
12425,
62,
4537,
8264,
796,
1391,
19... | 2.536585 | 205 |
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Self-extracting binary.
Generate a binary suitable for self-extraction:
self_extract_binary(
name = "install.sh",
launcher = "launcher.sh",
resources = ["path1/file1", "path2/file2"],
flatten_ressources = ["path3/file3"],
)
will generate a file 'install.sh' with a header (launcher.sh)
and a ZIP footer with the following entries:
path1/
path1/file1
path2/
path2/file2
file3
"""
def _self_extract_binary(ctx):
"""Implementation for the self_extract_binary rule."""
# This is a bit complex for stripping out timestamps
zip_artifact = ctx.new_file(ctx.label.name + ".zip")
touch_empty_files = [
"mkdir -p $(dirname ${tmpdir}/%s); touch ${tmpdir}/%s" % (f, f)
for f in ctx.attr.empty_files
]
cp_resources = [
("mkdir -p $(dirname ${tmpdir}/%s)\n" % r.short_path +
"cp %s ${tmpdir}/%s" % (r.path, r.short_path))
for r in ctx.files.resources
]
cp_flatten_resources = [
"cp %s ${tmpdir}/%s" % (r.path, r.basename)
for r in ctx.files.flatten_resources
]
ctx.action(
inputs = ctx.files.resources + ctx.files.flatten_resources,
outputs = [zip_artifact],
command = "\n".join([
"tmpdir=$(mktemp -d ${TMPDIR:-/tmp}/tmp.XXXXXXXX)",
"trap \"rm -fr ${tmpdir}\" EXIT"
] + touch_empty_files + cp_resources + cp_flatten_resources + [
"find ${tmpdir} -exec touch -t 198001010000.00 '{}' ';'",
"(d=${PWD}; cd ${tmpdir}; zip -rq ${d}/%s *)" % zip_artifact.path,
]),
mnemonic = "ZipBin",
)
ctx.action(
inputs = [ctx.file.launcher, zip_artifact],
outputs = [ctx.outputs.executable],
command = "\n".join([
"cat %s %s > %s" % (ctx.file.launcher.path,
zip_artifact.path,
ctx.outputs.executable.path),
"zip -qA %s" % ctx.outputs.executable.path
]),
mnemonic = "BuildSelfExtractable",
)
self_extract_binary = rule(
_self_extract_binary,
executable = True,
attrs = {
"launcher": attr.label(
mandatory=True,
allow_files=True,
single_file=True),
"empty_files": attr.string_list(default=[]),
"resources": attr.label_list(
default=[],
allow_files=True),
"flatten_resources": attr.label_list(
default=[],
allow_files=True),
},
)
| [
2,
15069,
1853,
383,
347,
41319,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.294428 | 1,328 |
#!/usr/local/bin/python3
'''
libradius(3) wrapper
'''
import socket
from ctypes import *
from ctypes import util
from collections import namedtuple
from enum import IntEnum, unique
RadiusAttribute = namedtuple('RadiusAttribute', ['type', 'data', 'datalen', 'vendor'])
# allow unknown attributes as pure integers
@unique
@unique
@unique
@unique
@unique
@unique
@unique
@unique
@unique
radlib = CDLL(util.find_library("radius"))
libc = CDLL(util.find_library("c"))
libc.free.argtypes = [c_void_p]
radlib.rad_acct_open.restype = c_void_p
radlib.rad_auth_open.restype = c_void_p
radlib.rad_close.argtypes = [ c_void_p ]
radlib.rad_add_server.argtypes = [ c_void_p, c_char_p, c_int, c_char_p, c_int, c_int ]
radlib.rad_add_server_ex.argtypes = [ c_void_p, c_char_p, c_int, c_char_p, c_int, c_int, c_int, c_void_p ]
radlib.rad_config.argtypes = [ c_void_p, c_char_p ]
radlib.rad_init_send_request.argtypes = [ c_void_p, POINTER(c_int), POINTER(timeval) ]
radlib.rad_continue_send_request.argtypes = [ c_void_p, c_int, POINTER(c_int), POINTER(timeval) ]
radlib.rad_create_request.argtypes = [ c_void_p, c_int ]
radlib.rad_create_response.argtypes = [ c_void_p, c_int ]
radlib.rad_cvt_addr.restype = c_uint
radlib.rad_cvt_addr.argtypes = [ c_void_p ]
radlib.rad_cvt_int.restype = c_uint
radlib.rad_cvt_int.argtypes = [ c_void_p ]
radlib.rad_cvt_string.restype = POINTER(c_char)
radlib.rad_cvt_string.argtypes = [ c_void_p, c_size_t ]
radlib.rad_get_attr.argtypes = [ c_void_p, c_void_p, c_void_p ]
radlib.rad_get_vendor_attr.artgtypes = [ POINTER(c_uint), c_void_p, POINTER(c_size_t) ]
radlib.rad_put_addr.argtypes = [ c_void_p, c_int, c_uint ]
radlib.rad_put_attr.argtypes = [ c_void_p, c_int, c_void_p, c_size_t ]
radlib.rad_put_int.argtypes = [ c_void_p, c_int, c_uint ]
radlib.rad_put_string.argtypes = [ c_void_p, c_int, c_char_p ]
radlib.rad_put_message_authentic.argtypes = [ c_void_p ]
radlib.rad_put_vendor_addr.argtypes = [ c_void_p, c_int, c_int, c_uint ]
radlib.rad_put_vendor_attr.argtypes = [ c_void_p, c_int, c_int, c_void_p, c_size_t ]
radlib.rad_put_vendor_int.argtypes = [ c_void_p, c_int, c_int, c_uint ]
radlib.rad_put_vendor_string.argtypes = [ c_void_p, c_int, c_int, c_char_p ]
radlib.rad_request_authenticator.restype = c_ssize_t
radlib.rad_request_authenticator.argtypes = [ c_void_p, POINTER(c_char), c_size_t ]
radlib.rad_receive_request.argtypes = [ c_void_p ]
radlib.rad_send_request.argtypes = [ c_void_p ]
radlib.rad_send_response.argtypes = [ c_void_p ]
radlib.rad_server_open.restype = c_void_p
radlib.rad_server_open.argtypes = [ c_int ]
radlib.rad_server_secret.restype = c_char_p
radlib.rad_server_secret.argtypes = [ c_void_p ]
radlib.rad_bind_to.argtypes = [ c_void_p, c_uint ]
radlib.rad_demangle.restype = POINTER(c_char)
radlib.rad_demangle.argtypes = [ c_void_p, c_void_p, c_size_t ]
radlib.rad_demangle_mppe_key.restype = POINTER(c_char)
radlib.rad_demangle_mppe_key.argtypes = [ c_void_p, c_void_p, c_size_t, c_void_p ]
radlib.rad_strerror.restype = c_char_p
radlib.rad_strerror.argtypes = [ c_void_p ]
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
198,
198,
7061,
6,
198,
75,
2889,
324,
3754,
7,
18,
8,
29908,
198,
7061,
6,
198,
198,
11748,
17802,
198,
6738,
269,
19199,
1330,
1635,
198,
6738,
269,
19199,
1330,
7736,
198,
67... | 2.233429 | 1,388 |
import torch
from models.resnet import ResNet
from shapley.dshap import DShap
from utils.dataset import FashionMNISTDataModule
from shapley.knn_shapley import KNNShapley
if __name__ == "__main__":
save_dir = "fmnist_shapley.pt"
scores = main()
print(scores)
torch.save(scores, save_dir) | [
11748,
28034,
220,
198,
198,
6738,
4981,
13,
411,
3262,
1330,
1874,
7934,
220,
198,
6738,
427,
499,
1636,
13,
67,
1477,
499,
1330,
360,
2484,
499,
220,
198,
6738,
3384,
4487,
13,
19608,
292,
316,
1330,
30958,
39764,
8808,
6601,
26796,... | 2.504065 | 123 |
import yaml
from unittest import TestCase, main
from dynamic_yaml import load, dump
if __name__ == '__main__':
import sys
sys.exit(main())
| [
11748,
331,
43695,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
11,
1388,
198,
198,
6738,
8925,
62,
88,
43695,
1330,
3440,
11,
10285,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
... | 2.781818 | 55 |
from flask_cors import CORS
if __name__ == "__main__":
app = Flask(__name__)
app.secret_key = "averysecretkey" # secret key for encoding and decoding session data
#app.permanent_session_lifetime = datetime.timedelta(hours=12)
CORS(app, supports_credentials=True)
app.run() # for development
#app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080))) # for production
| [
6738,
42903,
62,
66,
669,
1330,
327,
20673,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
598,
796,
46947,
7,
834,
3672,
834,
8,
198,
220,
220,
220,
598,
13,
21078,
62,
2539,
796,
366,
1244... | 2.668831 | 154 |
#
# ISC License
#
# Copyright (C) 2021-present DS-Homebrew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import os
import json
IS_DOCKER = os.environ.get('IS_DOCKER', '')
# Load config
settingsf = open('settings.json')
settings = json.load(settingsf)
TOKEN = settings['DEFAULT']['TOKEN']
PREFIX = [x for x in settings['DEFAULT']['PREFIX']]
STATUS = settings['DEFAULT']['STATUS']
staff_roles = [x for x in settings['MODERATOR']]
NINUPDATE = settings['CHANNEL']['NINUPDATES']
GUILD = settings.get('GUILD')
| [
2,
198,
2,
3180,
34,
13789,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
12,
25579,
17400,
12,
16060,
11269,
198,
2,
198,
2,
2448,
3411,
284,
779,
11,
4866,
11,
13096,
11,
290,
14,
273,
14983,
428,
3788,
329,
597,
198,
2,
4007,
3... | 3.115789 | 380 |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from distutils.core import setup
from Cython.Build import cythonize
import numpy
from distutils.extension import Extension
from subprocess import check_output
from distutils import sysconfig
import re
extra_compile_args = ['-std=c++11']
# Super hacky way of determining if clang or gcc is being used
CC = sysconfig.get_config_vars().get('CC', 'gcc').split(' ')[0]
out = check_output([CC, '--version'])
if re.search('apple *llvm', str(out.lower())):
extra_compile_args.append('-stdlib=libc++')
extensions = [
Extension(
"hype.graph_dataset",
["hype/graph_dataset.pyx"],
include_dirs=[numpy.get_include()],
extra_compile_args=extra_compile_args,
language='c++',
),
]
setup(
ext_modules=cythonize(extensions),
)
| [
2,
15069,
357,
66,
8,
2864,
12,
25579,
11,
3203,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
42... | 2.814016 | 371 |
#!/usr/bin/python3
import sys
import argparse
import time
import json
import requests
import socketio
from utils import create_hmac_sig
URL_BASE = "http://localhost:5000/"
WS_URL = "ws://localhost:5000/"
EXIT_NO_COMMAND = 1
if __name__ == "__main__":
# parse arguments
parser = construct_parser()
args = parser.parse_args()
# set appropriate function
function = None
if args.command == "websocket":
function = websocket
elif args.command == "watch":
function = watch
elif args.command == "register":
function = register
elif args.command == "check":
function = check
elif args.command == "claim":
function = claim
elif args.command == "merchanttx":
function = merchanttx
elif args.command == "rates":
function = rates
elif args.command == "wallet_address":
function = wallet_address
elif args.command == "banks":
function = banks
elif args.command == "settlement":
function = settlement
elif args.command == "settlement_set_txid":
function = settlement_set_txid
else:
parser.print_help()
sys.exit(EXIT_NO_COMMAND)
if function:
function(args)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
640,
198,
11748,
33918,
198,
198,
11748,
7007,
198,
11748,
17802,
952,
198,
198,
6738,
3384,
4487,
1330,
2251,
62,
71,
20285,
62,
8... | 2.554865 | 483 |
import pygame
import pymunk
class Bullet:
"""Class for making a bullet
Arguments:
pos {tuple} -- Starting position of the bullet.
vel {tuple} -- Starting velocity of the bullet.
radius {int} -- Radius of bullet (to make relative to tank size).
"""
| [
11748,
12972,
6057,
198,
11748,
279,
4948,
2954,
628,
198,
4871,
18003,
25,
198,
220,
220,
220,
37227,
9487,
329,
1642,
257,
10492,
628,
220,
220,
220,
20559,
2886,
25,
198,
220,
220,
220,
220,
220,
220,
220,
1426,
1391,
83,
29291,
... | 2.87 | 100 |
"""
Lift Curve Widget
-----------------
"""
from collections import namedtuple
import numpy as np
import sklearn.metrics as skl_metrics
from AnyQt import QtWidgets
from AnyQt.QtGui import QColor, QPen, QPalette, QFont
from AnyQt.QtCore import Qt
import pyqtgraph as pg
import Orange
from Orange.widgets import widget, gui, settings
from Orange.widgets.evaluate.utils import check_results_adequacy
from Orange.widgets.utils import colorpalette, colorbrewer
from Orange.widgets.evaluate.owrocanalysis import convex_hull
from Orange.widgets.widget import Input
from Orange.widgets import report
CurvePoints = namedtuple("CurvePoints", ["cases", "tpr", "thresholds"])
CurvePoints.is_valid = property(lambda self: self.cases.size > 0)
LiftCurve = namedtuple("LiftCurve", ["points", "hull"])
LiftCurve.is_valid = property(lambda self: self.points.is_valid)
PlotCurve = namedtuple("PlotCurve", ["curve", "curve_item", "hull_item"])
if __name__ == "__main__":
main()
| [
37811,
198,
43,
2135,
46300,
370,
17484,
198,
1783,
12,
198,
198,
37811,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1341,
35720,
13,
4164,
10466,
355,
1341,
75,
62,
4164,
10466,
198,
... | 2.978723 | 329 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Header, Menu
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
| [
2,
46424,
347,
43312,
38644,
38559,
24290,
9878,
11290,
46424,
198,
2,
198,
2,
220,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
220,
13096,
340,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
1378... | 3.492701 | 274 |
import re
from collections import defaultdict
import discord
from discord import app_commands
from discord.ext import commands
from utils import (GIRContext, cfg, get_ios_cfw, transform_context,
transform_groups)
from utils.framework import (DeviceTransformer, VersionOnDevice,
always_whisper,
ensure_invokee_role_lower_than_bot, whisper)
from utils.views import (Confirm, device_autocomplete,
ios_on_device_autocomplete)
| [
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
36446,
198,
6738,
36446,
1330,
598,
62,
9503,
1746,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
3384,
4487,
1330,
357,
38,
4663,
21947,
11,
30218,
70,
11,
651,
6... | 2.352679 | 224 |
import numpy as np
import pytest
from gcm_filters.kernels import ALL_KERNELS, GridType
@pytest.fixture(scope="module", params=list(GridType))
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
308,
11215,
62,
10379,
1010,
13,
74,
44930,
1330,
11096,
62,
42,
28778,
37142,
11,
24846,
6030,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
2625,
21412,
160... | 2.92 | 50 |
def pressure_ensemble(val):
"""
:param val: string, Name of the ensemble
:return: boolean, returns True if pressure should be specified
"""
if val=='NPE_F' or val=='NPE_I' or val=='NPT_F' or val=='NPT_I' or val=='NPT_GEMC':
return True
else:
return False
def temperature_ensemble(val):
"""
:param val: string, Name of the ensemble
:return: boolean, returns True if temperature should be specified
"""
if val=='MSST' or val=='MSST_DAMPED' or val=='NPT_F' or val=='NPT_I' or val=='NVT' or val=='NVT_ADIABATIC' or val=='NVT_GEMC' or val=='NPT_GEMC':
return True
else:
return False
| [
4299,
3833,
62,
1072,
11306,
7,
2100,
2599,
201,
198,
220,
220,
220,
37227,
201,
198,
201,
198,
220,
220,
220,
1058,
17143,
1188,
25,
4731,
11,
6530,
286,
262,
34549,
201,
198,
220,
220,
220,
1058,
7783,
25,
25131,
11,
5860,
6407,
... | 2.228758 | 306 |
import Tkinter as tk
from Tkinter import *
import BeautifulSoup
from BeautifulSoup import BeautifulSoup
import re
import config
import os
| [
11748,
309,
74,
3849,
355,
256,
74,
201,
198,
6738,
309,
74,
3849,
1330,
1635,
201,
198,
11748,
23762,
50,
10486,
201,
198,
6738,
23762,
50,
10486,
1330,
23762,
50,
10486,
201,
198,
11748,
302,
201,
198,
11748,
4566,
201,
198,
11748,
... | 3.12766 | 47 |
"""Module for generating test messages for the new APEL system.
It will currently generate job messages or summary messages, depending on
command-line arguments, but shouldn't be difficult to extend to other types of
message. Create a subclass of RecordGenerator for each message type.
"""
import getopt
import os
import random
import sys
import datetime
from time import mktime
# Arbitrary strings for filling messages.
sample_strings = '''Site Navigation
Home
Latest Information
Email Hoaxes
Internet Scams
Previous Issues
Site FAQ's
Hoax-Slayer Social
HS About
Privacy Policy
HS Site Map
True Emails
Virus Hoaxes
Giveaway Hoaxes
Charity Hoaxes
Bogus Warnings
Email Petitions
Chain Letters
Unsubstantiated
Missing Child Hoaxes'''.splitlines()
# Some example DNs
dns = ['/C=UK/O=eScience/OU=CLRC/L=RAL/CN=apel-dev.esc.rl.ac.uk/emailAddress=sct-certificates@stfc.ac.uk',
'/C=UK/O=eScience/OU=CLRC/L=RAL/CN=apel-consumer2.esc.rl.ac.uk/emailAddress=sct-certificates@stfc.ac.uk',
'/c=cy/o=cygrid/o=hpcl/cn=mon101.grid.ucy.ac.cy',
'/c=hu/o=niif ca/ou=grid/ou=niif/cn=host/egi1.grid.niif.hu',
'/dc=org/dc=balticgrid/ou=mif.vu.lt/cn=host/grid9.mif.vu.lt',
'/dc=es/dc=irisgrid/o=pic/cn=mon01.pic.es',
'/dc=ro/dc=romaniangrid/o=ifin-hh/cn=tbit03.nipne.ro']
# Possible acceptable values representing null
null_values = ['NULL', 'Null', 'null', 'NONE', 'None', 'none', '']
class RecordGenerator(object):
"""Don't create a RecordGenerator object - create a subclass
which defines the appropriate header, mandatory and optional
fields etc."""
def _get_record(self, keys, job_id):
"""Get a record with all of the keys listed in keys."""
record = {}
for key in keys:
if key in self._int_fields:
record[key] = str(get_random_int())
elif key in self._float_fields:
record[key] = str(get_random_float())
else:
record[key] = get_random_string(sample_strings)
record['job_id'] = job_id
return record
def _get_full_record(self, job_id):
"""Get a record string with all possible fields."""
return self._get_record(self._all_fields, job_id)
def _get_minimal_record(self, job_id):
"""Get a record string with only the necessary fields."""
return self._get_record(self._mandatory_fields, job_id)
def _get_incomplete_record(self, job_id):
"""Get a record without one of the mandatory fields."""
# copy the list
all_keys = [s for s in self._all_fields]
# remove a mandatory item
to_remove = get_random_string(self._mandatory_fields)
all_keys.remove(to_remove)
return self._get_record(all_keys, job_id)
def _get_valid_none_record(self, job_id):
"""Get a record giving one of the optional fields null values."""
rec_dict = self._get_record(self._all_fields, job_id)
to_edit = get_random_string(self._optional_fields)
rec_dict[to_edit] = get_random_string(null_values)
return rec_dict
def get_message(self, prefix):
"""Get a valid message string."""
message = self._header + "\n"
for i in range(self._recs_per_msg):
dict = self._get_valid_none_record(prefix + str(i))
for key in dict.keys():
message += key
message += ": "
message += dict[key]
message += "\n"
message += "%%\n"
return message
def get_message_ordered(self, prefix):
"""Get a valid message string, with its fields in the correct order."""
message = self._header + "\n"
for i in range(self._recs_per_msg):
dict = self._get_valid_none_record(prefix + str(i))
# go through in the order of all_fields
for key in self._all_fields:
if key in dict.keys():
message += key
message += ": "
message += dict[key]
message += "\n"
message += "%%\n"
return message
def get_message_lowercase(self, prefix):
"""Get a message with its keys in lower-case."""
message = self._header + "\n"
for i in range(self._recs_per_msg):
dict = self._get_valid_none_record(prefix + str(i))
for key in dict.keys():
message += key.lower()
message += ": "
message += dict[key].lower()
message += "\n"
message += "%%\n"
return message
def write_messages(self):
"""Write the specified number of messages to the specified directory."""
if not os.path.exists(self._msg_path):
print "Creating directory: " + self._msg_path + "..."
os.makedirs(self._msg_path)
print "Writing to directory " + self._msg_path + "..."
for i in range(self._no_msgs):
prefix = get_prefix(i)
filepath = os.path.join(self._msg_path, str(i).zfill(14))
f = open(filepath, 'w')
f.write(self.get_message_ordered(prefix))
f.close()
print "Done."
class JobRecordGenerator(RecordGenerator):
"""Generates job record messages for testing the new APEL system."""
def __init__(self, recs_per_msg, no_msgs, dir):
"""Call the parent constructor, then set up specific variables."""
super(JobRecordGenerator, self).__init__(recs_per_msg, no_msgs)
# Variables which control the operation of the generator.
if msg_dir is None:
self._msg_path = "job-msgs"
else:
self._msg_path = msg_dir
self._msg_path = os.path.abspath(self._msg_path)
print "Creating " + str(self._no_msgs) + " messages of " + str(self._recs_per_msg) + " records each."
self._header = "APEL-individual-job-message: v0.2"
# Fields which are required by the message format.
self._mandatory_fields = ["Site", "SubmitHost", "LocalJobId", "WallDuration",
"CpuDuration", "StartTime", "EndTime", "ServiceLevelType",
"ServiceLevel"]
# All fields in the standard order
self._all_fields = ["Site", "SubmitHost", "LocalJobId", "LocalUserId",
"GlobalUserName", "FQAN", "WallDuration", "CpuDuration",
"Processors", "NodeCount", "StartTime", "EndTime",
"MemoryReal", "MemoryVirtual", "ServiceLevelType",
"ServiceLevel"]
# Fields whose values should be integers
self._int_fields = ["WallDuration", "CpuDuration",
"Processors", "NodeCount", "StartTime", "EndTime",
"MemoryReal", "MemoryVirtual"]
# Fields whose values should be integers
self._float_fields = ["ServiceLevel"]
# Some example FQANs, some of which aren't actual FQANs.
self._fqans = ['/atlas/higgs/Role=NULL/Capability=NULL',
'/cms/uscms/Role=cmsphedex',
'/cms/uscms',
'/not a real fqan',
'junk']
self._factors = ['HEPSPEC', 'Si2k']
RecordGenerator._get_optional_fields(self)
def _get_record(self, keys, job_id):
"""Add job-specific items to the record after calling the generic get_record() method."""
# Call parent class method
record = RecordGenerator._get_record(self, keys, job_id)
record['GlobalUserName'] = get_random_string(dns)
record['FQAN'] = get_random_string(self._fqans)
record['LocalJobId'] = job_id
record['ServiceLevelType'] = get_random_string(self._factors)
if int(record['StartTime']) > int(record['EndTime']):
record['EndTime'] = record['StartTime'] + str(get_random_int(1, 1000))
return record
class SummaryRecordGenerator(RecordGenerator):
"""Generator for summary messages, defining parts specific to these."""
def __init__(self, recs_per_msg, no_msgs, msg_path):
"""Define constants used by the summary records."""
super(SummaryRecordGenerator, self).__init__(recs_per_msg, no_msgs)
if msg_dir is None:
self._msg_path = "summary-msgs"
else:
self._msg_path = msg_dir
self._msg_path = os.path.abspath(self._msg_path)
self._header = "APEL-summary-job-message: v0.2"
# Fields which are required by the message format.
self._mandatory_fields = ["Site", "Month", "Year", "WallDuration",
"CpuDuration", "NormalisedWallDuration", "NormalisedCpuDuration",
"NumberOfJobs"]
# All fields in the standard order
self._all_fields = ["Site", "Month", "Year", "GlobalUserName", "Group", "VOGroup",
"VORole", "EarliestEndTime", "LatestEndTime", "WallDuration", "CpuDuration", "NormalisedWallDuration",
"NormalisedCpuDuration", "NumberOfJobs"]
# Fields whose values should be integers, except EarliestEndTime and LatestEndTime
self._int_fields = ["Month", "Year", "WallDuration", "CpuDuration",
"NormalisedWallDuration", "NormalisedCpuDuration",
"NumberOfJobs"]
# Fields whose values should be integers
self._float_fields = []
RecordGenerator._get_optional_fields(self)
def _get_record(self, keys, job_id):
"""Get a record, then add summary-specific items."""
record = RecordGenerator._get_record(self, keys, job_id)
record['GlobalUserName'] = get_random_string(dns)
record['Month'] = str(get_random_int(end=12))
record['Year'] = str(get_random_int(2000, 2010))
# The rest of this method is to get EarliestEndTime and
# LatestEndTime to fall within the correct month.
month_start = datetime.datetime(int(record['Year']),
int(record['Month']), 1)
month_end = month_start + datetime.timedelta(28)
start_epoch = mktime(month_start.timetuple())
end_epoch = mktime(month_end.timetuple())
rnd_epoch1 = get_random_int(start_epoch, end_epoch)
rnd_epoch2 = get_random_int(start_epoch, end_epoch)
if rnd_epoch1 > rnd_epoch2:
record['EarliestEndTime'] = str(rnd_epoch2)
record['LatestEndTime'] = str(rnd_epoch1)
else:
record['EarliestEndTime'] = str(rnd_epoch1)
record['LatestEndTime'] = str(rnd_epoch2)
return record
def get_random_int(start=1, end=1000000):
"""Get an random integer between start and end inclusive."""
x = random.random()
i = int(x*(end + 1 - start) + start)
return i
def get_random_float():
"""Get a random float."""
x = random.random()
return x * 1000
def get_random_string(strings):
"""Get one of a list of strings at random."""
x = random.random()
i = int(x * len(strings))
return strings[i]
def usage():
"""Print a usage message."""
print "Usage: " + sys.argv[0] + \
""" [-r <recs-per-msg> -m <no-msgs> -d <directory>] jobs|summaries
Defaults: recs-per-msg: 1000
no-msgs: 100
directory: ./job-msgs | ./sum-msgs
"""
if __name__ == '__main__':
"""Parse the command-line arguments and create the appropriate type of
message."""
recs_per_msg = None
no_msgs = None
msg_dir = None
opts = None
args = None
try:
opts, args = getopt.getopt(sys.argv[1:], "r:m:d:")
except getopt.GetoptError, e:
print "Invalid arguments."
usage()
sys.exit()
try:
for o, a in opts:
if o == "-r":
recs_per_msg = int(a)
elif o == "-m":
no_msgs = int(a)
elif o == "-d":
msg_dir = a
except ValueError:
print "Invalid arguments."
usage()
sys.exit()
if "jobs" in args:
jrg = JobRecordGenerator(recs_per_msg, no_msgs, msg_dir)
jrg.write_messages()
elif "summaries" in args:
srg = SummaryRecordGenerator(recs_per_msg, no_msgs, msg_dir)
srg.write_messages()
else:
print "Neither job nor summary records specified."
usage()
sys.exit()
| [
37811,
26796,
329,
15453,
1332,
6218,
329,
262,
649,
3486,
3698,
1080,
13,
198,
198,
1026,
481,
3058,
7716,
1693,
6218,
393,
10638,
6218,
11,
6906,
319,
198,
21812,
12,
1370,
7159,
11,
475,
6584,
470,
307,
2408,
284,
9117,
284,
584,
... | 2.244385 | 5,565 |
from .defs import *
from .enums import *
from .models import *
HOMEPAGE_URL = "https://warframe.market"
API_BASE_URL = "https://api.warframe.market/v1"
WSS_BASE_URL = "wss://warframe.market/socket"
| [
6738,
764,
4299,
82,
1330,
1635,
198,
6738,
764,
268,
5700,
1330,
1635,
198,
6738,
764,
27530,
1330,
1635,
198,
198,
39069,
4537,
8264,
62,
21886,
796,
366,
5450,
1378,
5767,
14535,
13,
10728,
1,
198,
17614,
62,
33,
11159,
62,
21886,
... | 2.584416 | 77 |
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 Ahmet Bakan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""Extract a selection of atoms from a PDB file."""
__author__ = 'Ahmet Bakan'
__copyright__ = 'Copyright (C) 2010-2012 Ahmet Bakan'
from actions import *
def prody_select(opt):
"""Write selected atoms from a PDB file in PDB format."""
import prody
LOGGER = prody.LOGGER
pdb = prody.parsePDB(opt.pdb)
prefix = opt.output
if not prefix:
prefix = pdb.getTitle() + '_selected'
pdbselect = pdb.select(opt.selstr)
if pdbselect is None:
opt.subparser.error('Selection {0:s} do not match any atoms.'
.format(repr(opt.selstr)))
LOGGER.info('{0:d} atoms are selected.'.format(len(pdbselect)))
LOGGER.info('Writing ' + prefix + '.pdb')
prody.writePDB(prefix + '.pdb', pdbselect)
| [
2,
1041,
35,
88,
25,
317,
11361,
15717,
329,
31702,
33806,
14691,
198,
2,
220,
198,
2,
15069,
357,
34,
8,
3050,
12,
6999,
7900,
4164,
17466,
272,
198,
2,
220,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
2... | 2.933202 | 509 |
import pycryptonight
import ecdsa
from Crypto.Hash import RIPEMD160
import hashlib
from log import Logger
import base58
import os
import time
import string
from merklelib import MerkleTree
string
def fast_hash(data: bytes):
"""
Hashing function used for Corner hashes.
:param data: bytes
:return: str
"""
return pycryptonight.cn_fast_hash(data).hex()
def get_hash(data: bytes, hash_func=hashlib.sha256):
"""
Hashing function used in key pairs.
:param hash_func: function
:param data: bytes
:return: bytes
"""
h = hash_func()
h.update(data)
return h.digest()
class Address:
"""Address object"""
@property
class SK:
"""Signing Key object"""
def __init__(self, sk: ecdsa.SigningKey = None):
"""
Generates a new Signing Key if sk is None.
:type sk: ecdsa.SigningKey
"""
self._sk = sk
if self._sk is None:
self._sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1,
hashfunc=hashlib.sha256)
self._vk = self._sk.verifying_key
self.vk = VK(self._vk)
self.address = self.vk.address
self.b58 = self.vk.b58
def sign(self, data: bytes):
"""
Generates a Signature for the given data.
:param data: bytes
:return: Signature
"""
return Signature(self._sk.sign(pycryptonight.cn_fast_hash(data)),
self.vk)
@property
def raw(self):
"""
Raw representation of SK based on the Discorn Protocol.
:return: bytes
"""
return self._sk.to_string()
def from_raw(raw: bytes):
"""
Decodes a raw Signing Key.
:param raw: bytes
:return: SK
"""
return SK(ecdsa.SigningKey.from_string(raw))
class VK:
"""Verifying Key object"""
def __init__(self, vk: ecdsa.VerifyingKey):
"""
Initialise a Verifying Key instance.
:param vk: ecdsa.VerifyingKey
"""
self._vk = vk
address = get_hash(pycryptonight.cn_fast_hash(self._vk.to_string()),
RIPEMD160.new)
self.address = address + pycryptonight.cn_fast_hash(address)[:8]
self.b58 = base58.b58encode(self.address, base58.BITCOIN_ALPHABET)
def verify(self, signature: bytes, data: bytes):
"""
Verifies raw signature.
:param signature: bytes
:param data: bytes
:return: bool
"""
try:
return self._vk.verify(signature, pycryptonight.cn_fast_hash(data))
except ecdsa.keys.BadSignatureError:
return False
@property
def from_raw(raw):
"""
Decodes a raw Verifying key
:param raw: bytes
:return: VK
"""
return VK(ecdsa.VerifyingKey.from_string(raw,
curve=ecdsa.SECP256k1,
hashfunc=hashlib.sha256))
class Signature:
"""Signature object"""
def __init__(self, signature: bytes, vk: VK):
"""
Initialises a Signature instance.
:param signature: bytes
:param vk: VK
"""
self.signature = signature
self.vk = vk
@property
def raw(self):
"""
Raw signature representation based on the Discorn Protocol.
:return: bytes
"""
return self.vk.raw + self.signature
def verify(self, data: bytes):
"""
Verifies the Signature against data.
:param data: bytes
:return: bool
"""
return self.vk.verify(self.signature, data)
def from_raw(raw):
"""
Decodes a raw Signature.
:param raw: bytes
:return: Signature
"""
return Signature(raw[64:], VK.from_raw(raw[:64]))
def decode_Tx(raw):
"""
Decodes a raw Tx
:param raw: bytes
:returns: Tx
"""
pass
class Block(Logger):
"""Block and header definition."""
NONCE_SIZE = 4
def __init__(self,
blockchain=None,
name: str = 'block',
height: int = 0,
version: int = 0,
coinbase: Corner = None,
corners: list = None,
timestamp=0,
previous_hash=pycryptonight.cn_slow_hash(b''),
nonce=(0).to_bytes(NONCE_SIZE, 'big')):
"""
Initialises a Block instance.
:param blockchain: Blockchain
:param name: str
:param height: int
:param version: int
:param coinbase: Corner
:param corners: Corner list
:param timestamp: int
:param previous_hash: bytes
:param nonce: bytes
"""
super().__init__(f"{name} - {height} :")
self.blockchain = blockchain
self.version = version
self.timestamp = timestamp
self.previous_hash = previous_hash
self.nonce = nonce
self.coinbase = coinbase
self._corners = [] if corners is None else corners
self.merkle_tree = MerkleTree(self.corners, fast_hash)
self.hash = self.get_hash()
self.difficulty = 4
@property
def corners(self):
"""
list of coinbase and other corners.
:return: Corner list
"""
return [self.coinbase] + self._corners
def compute_tree(self, new_data=None):
"""
Computes the Merkle Tree associated with the corners in the block.
:param new_data: Corner list
:return: None
"""
if new_data is None:
self.merkle_tree = MerkleTree(self.corners, fast_hash)
else:
self.merkle_tree.extend(new_data)
@property
def header(self):
"""
Raw representation of block header based on the Discorn Protocol.
:return: bytes
"""
res = self.version.to_bytes(2, 'big')
res += self.timestamp.to_bytes(8, 'big')
res += len(self.corners).to_bytes(3, 'big')
res += bytes.fromhex(self.merkle_tree.merkle_root)
res += self.previous_hash
res += self.nonce
return res
def random_nonce(self):
"""
Generates a random nonce for this block. (Mining OP)
:return: None
"""
self.timestamp = time.time_ns()
self.nonce = os.urandom(self.NONCE_SIZE)
def mine(self, difficulty=None):
"""
Mines the given block for the given difficulty.
:param difficulty: int
:return: None
"""
difficulty = self.difficulty if difficulty is None else difficulty
while int.from_bytes(self.get_hash(), 'big') >= (1 << (256 - difficulty)):
self.log(f"new hash : {self.hash.hex()}")
self.random_nonce()
self.log(f"Mined !! : {self.hash.hex()}")
def get_hash(self):
"""
Calculates the block's hash.
:return: bytes
"""
self.hash = pycryptonight.cn_slow_hash(self.header, 4)
return self.hash
class BlockChain(Logger):
    """BlockChain data model."""

    def __init__(self, name: str = 'Main'):
        """
        Initialises a Blockchain instance.

        :param name: str | Used in logs
        :return: None
        """
        super().__init__(name)
        self.block_hashes = []          # hashes in chain order; last one is the head
        self.blocks = {}                # block hash -> Block
        self.corners = {}               # confirmed corners
        self.unconfirmed_corners = {}   # corners awaiting inclusion in a block

    def new_head(self, block: Block):
        """
        Sets the given block as the new Blockchain head.

        :param block: Block
        :return: None
        """
        self.block_hashes.append(block.hash)
        self.blocks[block.hash] = block
        self.log(f"New head : [{len(self.blocks)}] - {block.hash.hex()}")

    def get_block_template(self):
        """
        Get a Block instance to be mined based on the current chainstate.

        :returns: Block
        """
        # NOTE(review): items() yields (hash, corner) pairs — confirm Block
        # expects tuples here rather than bare corner objects.
        pending = list(self.unconfirmed_corners.items())
        return Block(self,
                     corners=pending,
                     timestamp=time.time_ns(),
                     previous_hash=self.block_hashes[-1])
class Guild(Logger):
    """Guild object: a signing identity bound to a block chain."""
    def __init__(self,
                 vk=None,
                 sk=None,
                 genesis=None,
                 chain=None,
                 name='Main-Guild'):
        """
        Initialises a Guild instance with a chain and private key.(new if None)

        :param vk: verifying (public) key; when given, no signing key is kept
        :param sk: signing (private) key; a fresh one is generated when both
                   vk and sk are None
        :param genesis: optional genesis Block for a newly created chain
        :param chain: existing BlockChain to attach to (new one made if None)
        :param name: str | Used in logs
        """
        super().__init__(name)
        if vk is None:
            if sk is None:
                # No key material supplied: generate a fresh signing key.
                self.sk = SK()
            else:
                # Bug fix: the supplied signing key was never stored, so the
                # self.sk.vk read below raised AttributeError.
                self.sk = sk
            self.vk = self.sk.vk
        else:
            # Watch-only guild: public key only, no signing key retained.
            self.vk = vk
        if chain is None:
            self.chain = BlockChain()
            if genesis is None:
                genesis = Block(self.chain)
            self.chain.new_head(genesis)
        else:
            # NOTE(review): a supplied `genesis` is ignored when `chain` is
            # given — confirm this is intended.
            self.chain = chain
@property
# TODO : Guilds are hosted on chains otherwise chains won't have enough participants to be viable.
| [
11748,
12972,
29609,
261,
432,
198,
11748,
9940,
9310,
64,
198,
6738,
36579,
13,
26257,
1330,
44967,
3620,
35,
14198,
198,
11748,
12234,
8019,
198,
6738,
2604,
1330,
5972,
1362,
198,
11748,
2779,
3365,
198,
11748,
28686,
198,
11748,
640,
... | 2.064105 | 4,477 |
import pathlib
import re
# OCR settings for the "AP" exam: target directory name and the label of
# the exam's final question ('問80' reads "Question 80").
OCR_AP = {
    'dir': 'ap',
    'last': '問80'
}
# OCR settings for the "koudo" exam ('問30' reads "Question 30").
OCR_KOUDO = {
    'dir': 'koudo',
    'last': '問30',
}
# Regular expression used to judge that the final question has ended
# (matches a single digit or a hyphen).
AFTER_LAST_QUESTION = re.compile(r'[0-9]|-')
# Script entry point.
if __name__ == '__main__':
    main()
| [
11748,
3108,
8019,
198,
11748,
302,
198,
198,
4503,
49,
62,
2969,
796,
1391,
198,
220,
220,
220,
705,
15908,
10354,
705,
499,
3256,
198,
220,
220,
220,
705,
12957,
10354,
705,
161,
243,
237,
1795,
6,
198,
92,
198,
4503,
49,
62,
42... | 1.506173 | 162 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 09:15:23 2019
Modified from the cornstover biorefinery constructed in Cortes-Peña et al., 2020,
with modification of fermentation system for organic acids instead of the original ethanol
[1] Cortes-Peña et al., BioSTEAM: A Fast and Flexible Platform for the Design,
Simulation, and Techno-Economic Analysis of Biorefineries under Uncertainty.
ACS Sustainable Chem. Eng. 2020, 8 (8), 3302–3310.
https://doi.org/10.1021/acssuschemeng.9b07040.
Naming conventions:
D = Distillation column
F = Flash tank
H = Heat exchange
M = Mixer
P = Pump (including conveying belt)
R = Reactor
S = Splitter (including solid/liquid separator)
T = Tank or bin for storage
U = Other units
PS = Process specificiation, not physical units, but for adjusting streams
Processes:
100: Feedstock preprocessing
200: Pretreatment
300: Conversion
400: Separation
500: Wastewater treatment
600: Facilities
@author: yalinli_cabbi
"""
# %% Setup
import biosteam as bst
import thermosteam as tmo
import flexsolve as flx
import numpy as np
from biosteam import main_flowsheet as F
from biorefineries import BST222
from biosteam import System
from thermosteam import Stream
from biorefineries.BDO import units, facilities
from biorefineries.BDO._process_specification import ProcessSpecification
from biorefineries.BDO.process_settings import price
from biorefineries.BDO.utils import find_split, splits_df, baseline_feedflow
from biorefineries.BDO.chemicals_data import BDO_chemicals, chemical_groups, \
soluble_organics, combustibles
from biorefineries.BDO.tea import BDOTEA
bst.speed_up()
# Register a dedicated flowsheet so all units/streams below attach to 'BDO'.
flowsheet = bst.Flowsheet('BDO')
bst.main_flowsheet.set_flowsheet(flowsheet)
# Speeds up ShortcutDistillation
bst.units.ShortcutColumn.minimum_guess_distillate_recovery = 0
# Chemical Engineering Plant Cost Index from Chemical Engineering Magazine
# (https://www.chemengonline.com/the-magazine/)
# Year  1997    1998    2009    2010    2016
# CE    386.5   389.5   521.9   550.8   541.7
# Baseline cost year is 2016
bst.CE = 541.7
# Labor-cost escalation factor from 2007 to 2016
# (ratio of hourly wage indices, 22.71/19.55 — confirm source).
_labor_2007to2016 = 22.71 / 19.55
# Set default thermo object for the system
tmo.settings.set_thermo(BDO_chemicals)
# %%
# =============================================================================
# Feedstock
# =============================================================================
# Feedstock stream at the baseline flow rate; price is the delivered cost.
feedstock = Stream('feedstock',
                   baseline_feedflow.copy(),
                   units='kg/hr',
                   price=price['Feedstock'])
U101 = units.FeedstockPreprocessing('U101', ins=feedstock)
# Handling costs/utilities included in feedstock cost thus not considered here
U101.cost_items['System'].cost = 0
U101.cost_items['System'].kW = 0
# %%
# =============================================================================
# Pretreatment streams
# =============================================================================
# To be used for feedstock conditioning, flow is adjusted in PretreatmentMixer
pretreatment_feedstock_water = Stream('pretreatment_feedstock_water',
T=95+273.15, units='kg/hr')
# For pretreatment, baseline is (18+4.1) mg/g dry biomass
# based on P21 in Humbird et al., 93% purity
feedstock_dry_mass = feedstock.F_mass - feedstock.imass['H2O']
pretreatment_sulfuric_acid = Stream('pretreatment_sulfuric_acid',
H2SO4=feedstock_dry_mass*22.1/1000*0.93,
H2O=feedstock_dry_mass*22.1/1000*0.07,
units='kg/hr')
# Flow adjusted in SulfuricAcidMixer, stream 516 in Humbird et al.
pretreatment_acid_water = Stream('pretreatment_acid_water', T=114+273.15)
# To be added to the feedstock/sulfuric acid mixture,
# will be adjusted by the SteamMixer
pretreatment_steam = Stream('pretreatment_steam', phase='g',
T=268+273.15, P=13*101325,
Water=(3490+24534)*U101.feedstock_flow_rate/2205,
units='kg/hr')
# For neutralization of pretreatment hydrolysate
ammonia = Stream('ammonia', units='kg/hr', phase='l')
# To be used for ammonia addition, will be updated by AmmoniaMixer
pretreatment_ammonia_water = Stream('pretreatment_ammonia_water', units='kg/hr')
# =============================================================================
# Pretreatment units
# =============================================================================
# Prepare sulfuric acid
T201 = units.SulfuricAcidAdditionTank('T201', ins=pretreatment_sulfuric_acid)
M201 = units.SulfuricAcidMixer('M201', ins=(T201-0, pretreatment_acid_water))
# Mix sulfuric acid and feedstock, adjust water loading
M202 = units.PretreatmentMixer('M202', ins=(U101-0, M201-0,
pretreatment_feedstock_water))
# Mix feedstock/sulfuric acid mixture and steam
M203 = units.SteamMixer('M203', ins=(M202-0, pretreatment_steam), P=5.5*101325)
R201 = units.PretreatmentReactorSystem('R201', ins=M203-0,
outs=('R201_g', 'R201_l'))
R201_H = bst.units.HXutility('R201_H', ins=R201-0, V=0, rigorous=True)
# Pump bottom of the pretreatment products to the oligomer conversion tank
T202 = units.BlowdownTank('T202', ins=R201-1)
T203 = units.OligomerConversionTank('T203', ins=T202-0)
F201 = units.PretreatmentFlash('F201', ins=T203-0, outs=('F201_g', 'F201_l'),
P=101325, Q=0)
F201_H = bst.units.HXutility('F201_H', ins=F201-0, V=0, rigorous=True)
# Neutralize pretreatment hydrolysate
M204 = units.AmmoniaMixer('M204', ins=(ammonia, pretreatment_ammonia_water))
M204.specification = update_ammonia_and_mix
T204 = units.AmmoniaAdditionTank('T204', ins=(F201-1, M204-0))
T204_P = units.HydrolysatePump('T204_P', ins=T204-0)
M205 = bst.units.Mixer('M205', ins=(R201_H-0, F201_H-0))
M205_P = units.BDOPump('M205_P', ins=M205-0, outs='condensed_pretreatment_waste_vapor')
# %%
# =============================================================================
# Conversion streams
# =============================================================================
# Flow and price will be updated in EnzymeHydrolysateMixer
enzyme = Stream('enzyme', units='kg/hr', price=price['Enzyme'])
# Used to adjust enzymatic hydrolysis solid loading, will be updated in EnzymeHydrolysateMixer
enzyme_water = Stream('enzyme_water', units='kg/hr')
# Corn steep liquor as nitrogen nutrient for microbes, flow updated in R301
CSL = Stream('CSL', units='kg/hr')
# Lime for neutralization of produced acid
# fermentation_lime = Stream('fermentation_lime', units='kg/hr')
# For diluting concentrated, inhibitor-reduced hydrolysate
dilution_water = Stream('dilution_water', units='kg/hr')
# =============================================================================
# Conversion units
# =============================================================================
# Cool hydrolysate down to fermentation temperature at 50°C
H301 = bst.units.HXutility('H301', ins=T204_P-0, T=50+273.15)
# Mix enzyme with the cooled pretreatment hydrolysate
M301 = units.EnzymeHydrolysateMixer('M301', ins=(H301-0, enzyme, enzyme_water))
# Mix pretreatment hydrolysate/enzyme mixture with fermentation seed
M302 = bst.units.Mixer('M302', ins=(M301-0, ''))
# Saccharification and Cofermentation
# R301 = units.SaccharificationAndCoFermentation('R301',
# ins=(M302-0, CSL),
# outs=('fermentation_effluent',
# 'sidedraw'))
# Saccharification
R301 = units.Saccharification('R301',
ins=M302-0,
outs=('saccharification_effluent',
'sidedraw'))
# Remove solids from fermentation broth, modified from the pressure filter in Humbird et al.
S301_index = [splits_df.index[0]] + splits_df.index[2:].to_list()
S301_cell_mass_split = [splits_df['stream_571'][0]] + splits_df['stream_571'][2:].to_list()
S301_filtrate_split = [splits_df['stream_535'][0]] + splits_df['stream_535'][2:].to_list()
S301 = units.CellMassFilter('S301', ins=R301-0, outs=('solids', ''),
moisture_content=0.35,
split=find_split(S301_index,
S301_cell_mass_split,
S301_filtrate_split,
chemical_groups))
# S302 = bst.units.Splitter('S302', ins=S301-1, outs=('to_cofermentation',
# 'to_evaporator'),
# split=0.2)
# def adjust_M303_water():
# M303.ins[1].imol['Water'] = (M303.water_multiplier - 1) * M303.ins[0].imol['Water']
# M303._run()
# M303 = bst.units.Mixer('M303', ins=(S302-1, ''))
# M303.water_multiplier = 6.5
# M303.specification = adjust_M303_water
# F301 = bst.units.MultiEffectEvaporator('F301', ins=M303-0, outs=('F301_l', 'F301_g'),
# P = (101325, 73581, 50892, 32777, 20000), V = 0.9695)
# F301_H = bst.units.HXutility('F301_H', ins=F301-0, T=30+273.15)
# F301_H_P = units.BDOPump('F301_H_P', ins=F301_H-0)
F301 = bst.units.MultiEffectEvaporator('F301', ins=S301-1, outs=('F301_l', 'F301_g'),
P = (101325, 73581, 50892, 32777, 20000), V = 0.797)
# F301.V = 0.797 for sugars concentration of 591.25 g/L (599.73 g/L after cooling to 30 C)
F301_P = units.BDOPump('F301_P', ins=F301-0)
M303 = bst.units.Mixer('M303', ins=(F301_P-0, dilution_water))
M303.water_multiplier = 1.001
M303.specification = adjust_M303_water
M303_H = bst.units.HXutility('M303_H', ins=M303-0, T=30+273.15)
M303_H_P = units.BDOPump('M303_H_P', ins=M303_H-0)
# Cofermentation
R302 = units.CoFermentation('R302',
ins=('', M303_H_P-0, CSL),
outs=('fermentation_effluent', 'CO2'))
# ferm_ratio is the ratio of conversion relative to the fermenter
R303 = units.SeedTrain('R303', ins=R301-1, outs=('seed',), ferm_ratio=0.9)
T301 = units.SeedHoldTank('T301', ins=R303-0, outs=1-M302)
# %%
# =============================================================================
# Separation streams
# =============================================================================
# This flow will be automatically updated in CellMassFilter
# separation_sulfuric_acid = Stream('separation_sulfuric_acid', units='kg/hr')
# # # To be mixed with sulfuric acid, will be updated in SulfuricAdditionTank
# # separation_acid_water = Stream('separation_acid_water', units='kg/hr')
# separation_DPHP = Stream('DPHP', DPHP =feedstock_dry_mass*22.1/1000*0.93,
# H2O=feedstock_dry_mass*22.1/1000*0.07, units='kg/hr')
# # Ethanol for esterification reaction, will be updated in the EsterificationReactor
# separation_ethanol = Stream('separation_ethanol', Ethanol=feedstock_dry_mass*22.1/1000*0.93,
# H2O=feedstock_dry_mass*22.1/1000*0.07, units='kg/hr')
# For ester hydrolysis
# separation_hydrolysis_water = Stream('separation_hydrolysis_water', units='kg/hr')
# =============================================================================
# Separation units
# =============================================================================
# Remove solids from fermentation broth, modified from the pressure filter in Humbird et al.
S401_index = [splits_df.index[0]] + splits_df.index[2:].to_list()
S401_cell_mass_split = [splits_df['stream_571'][0]] + splits_df['stream_571'][2:].to_list()
S401_filtrate_split = [splits_df['stream_535'][0]] + splits_df['stream_535'][2:].to_list()
S401 = bst.units.SolidsCentrifuge('S401', ins=R302-0, outs=('cell_mass', ''),
# moisture_content=0.50,
split=find_split(S401_index,
S401_cell_mass_split,
S401_filtrate_split,
chemical_groups), solids =\
['Xylan', 'Glucan', 'Lignin', 'FermMicrobe',\
'Ash', 'Arabinan', 'Galactan', 'Mannan'])
M401 = bst.units.Mixer('M401', ins=(S401-1, '', '', '', ''))
M401.specification = adjust_M401_ethanol_and_DPHP
M401_P = units.BDOPump('M401_P', ins=M401-0, outs='mixed_stream')
# k_23bdo = 28.34
# k_glucose = 0.046
# k_etoh = 1
# k_h2o = 0
split = [0.001 for i in range(len(BDO_chemicals))]
# split[BDO_chemicals.index('Dipotassium hydrogen phosphate')] = 0
S402 = bst.units.LiquidsSplitSettler('S402', ins = M401_P-0, split=split)
S402.specification = adjust_S402_split
# Separate out the majority of water,
# no need to include agitator thus using biosteam Flash
# F401 = bst.units.Flash('F401', ins=S402-1, outs=('F401_g', 'F401_l'),
# # LHK=('AceticAcid', '2,3-Butanediol'),
# # is_divided=True,
# # product_specification_format='Recovery',
# # Lr=0.8, Hr=0.8, k=1.2,
# T = 379, P = 101325,
# vessel_material = 'Stainless steel 316')
F401 = bst.units.Flash('F401', ins=S402-1, outs=('F401_g', 'F401_l'),
# LHK=('AceticAcid', '2,3-Butanediol'),
# is_divided=True,
# product_specification_format='Recovery',
# Lr=0.8, Hr=0.8, k=1.2,
T = 379, P = 101325,
vessel_material = 'Stainless steel 316')
# def adjust_F401_V():
# F401.V = F401.ins[0].imol['H2O']/F401.ins[0].F_mol
# F401._run()
# F401.specification = adjust_F401_V
# # Condense waste vapor for recycling
F401_H = bst.units.HXutility('F401_H', ins=F401-0, V=0, rigorous=True)
F401_P = units.BDOPump('F401_P', ins=F401-1)
# Placeholder separation operation for DPHP recovery; consider gravimetric separation
# (rho_DPHP ~= 2.5 g/cm3, rho_Glucose ~= 1.5 g/cm3)
S403 = units.DPHP_Separation('S403', ins = F401_P-0, outs = ('separated_DPHP', 'impurities'))
S403_DPHP_recovery = 0.90 # w/w
S403_power_utility = 136850 # kW
S403.specification = adjust_S403_streams
# DPHP recycle
S403-0-4-M401
# D401 = bst.units.BinaryDistillation('D401', ins=F401_H-0, outs=('D401_g', 'D401_l'),
# LHK=('Ethanol', 'Water'),
# is_divided=True,
# product_specification_format='Recovery',
# Lr=0.99, Hr=0.99, k=1.2,
# vessel_material = 'Stainless steel 316')
# D401_H = bst.units.HXutility('D401_H', ins=D401-0, V=0, rigorous=True)
# D401_P = units.BDOPump('D401_P', ins=D401-1)
# Separate out ethanol
# H_D401 = bst.units.HXutility('H_D401', ins=S402-0, V=0.75, rigorous=True)
D401 = bst.units.ShortcutColumn('D401', ins=S402-0,
outs=('D401_g', 'D401_l'),
LHK=('Ethanol', 'BDO'),
is_divided=True,
product_specification_format='Recovery',
Lr=0.999, Hr=0.999, k=1.2,
vessel_material = 'Stainless steel 316')
# def adjust_D401_V():
# D401_ins_0 = D401.ins[0]
# D401.V = D401_ins_0.imol['Ethanol']/ (D401_ins_0.F_mol*.95)
# D401._run()
# D401 = bst.units.Flash('D401', ins=S402-0,
# outs=('D401_g', 'D401_l'),
# V=0.79, P = 101325,
# vessel_material = 'Stainless steel 316')
# D401.process_specification = adjust_D401_V
D401_H = bst.units.HXutility('D401_H', ins=D401-0, V=0, rigorous=True)
D401_P = units.BDOPump('D401_P', ins=D401-1)
# M402 = bst.units.Mixer('M402', ins=(D401_H-0))
M402_P = units.BDOPump('M402_P', ins=D401_H-0, outs='ethanol_recycle')
# Ethanol recycle
M402_P-0-3-M401
# Separate out Acetoin
D402 = bst.units.ShortcutColumn('D402', ins=D401_P-0,
outs=('D402_g', 'D402_l'),
LHK=('Acetoin', 'BDO'),
is_divided=True,
product_specification_format='Recovery',
Lr=0.9995, Hr=0.9995, k=1.2,
vessel_material = 'Stainless steel 316')
D402.specification = adjust_D402_streams
D402_H = bst.units.HXutility('D402_H', ins=D402-0, V=0, rigorous=True)
D402_P = units.BDOPump('D402_P', ins=D402-1)
# %%
# =============================================================================
# Wastewater treatment streams
# =============================================================================
# For aerobic digestion, flow will be updated in AerobicDigestion
air_lagoon = Stream('air_lagoon', phase='g', units='kg/hr')
# To neutralize nitric acid formed by nitrification in aerobic digestion
# flow will be updated in AerobicDigestion
# The active chemical is modeled as NaOH, but the price is cheaper than that of NaOH
aerobic_caustic = Stream('aerobic_caustic', units='kg/hr', T=20+273.15, P=2*101325,
price=price['Caustics'])
# =============================================================================
# Wastewater treatment units
# =============================================================================
# Mix waste liquids for treatment
M501 = bst.units.Mixer('M501', ins=(F401_H-0, F301-1, M205_P-0))
# This represents the total cost of wastewater treatment system
WWT_cost = units.WastewaterSystemCost('WWT_cost', ins=M501-0)
R501 = units.AnaerobicDigestion('R501', ins=WWT_cost-0,
outs=('biogas', 'anaerobic_treated_water',
'anaerobic_sludge'),
reactants=soluble_organics,
split=find_split(splits_df.index,
splits_df['stream_611'],
splits_df['stream_612'],
chemical_groups),
T=35+273.15)
# Mix recycled stream and wastewater after R501
M502 = bst.units.Mixer('M502', ins=(R501-1, ''))
R502 = units.AerobicDigestion('R502', ins=(M502-0, air_lagoon, aerobic_caustic),
outs=('aerobic_vent', 'aerobic_treated_water'),
reactants=soluble_organics,
ratio=U101.feedstock_flow_rate/2205)
# Membrane bioreactor to split treated wastewater from R502
S501 = bst.units.Splitter('S501', ins=R502-1, outs=('membrane_treated_water',
'membrane_sludge'),
split=find_split(splits_df.index,
splits_df['stream_624'],
splits_df['stream_625'],
chemical_groups))
S501.line = 'Membrane bioreactor'
# Recycled sludge stream of memberane bioreactor, the majority of it (96%)
# goes to aerobic digestion and the rest to sludge holding tank then to BT
S502 = bst.units.Splitter('S502', ins=S501-1, outs=('to_aerobic_digestion',
'to_boiler_turbogenerator'),
split=0.96)
M503 = bst.units.Mixer('M503', ins=(S502-0, 'centrate'), outs=1-M502)
# Mix anaerobic and 4% of membrane bioreactor sludge
M504 = bst.units.Mixer('M504', ins=(R501-2, S502-1))
# Sludge centrifuge to separate water (centrate) from sludge
S503 = bst.units.Splitter('S503', ins=M504-0, outs=(1-M503, 'sludge'),
split=find_split(splits_df.index,
splits_df['stream_616'],
splits_df['stream_623'],
chemical_groups))
S503.line = 'Sludge centrifuge'
# Reverse osmosis to treat membrane separated water
S504 = bst.units.Splitter('S504', ins=S501-0, outs=('discharged_water', 'waste_brine'),
split=find_split(splits_df.index,
splits_df['stream_626'],
splits_df['stream_627'],
chemical_groups))
S504.line = 'Reverse osmosis'
# Mix solid wastes to boiler turbogeneration
M505 = bst.units.Mixer('M505', ins=(S503-1, S401-0),
outs='wastes_to_boiler_turbogenerator')
# %%
# =============================================================================
# Facilities streams
# =============================================================================
sulfuric_acid_fresh = Stream('sulfuric_acid_fresh', price=price['Sulfuric acid'])
ammonia_fresh = Stream('ammonia_fresh', price=price['AmmoniumHydroxide'])
CSL_fresh = Stream('CSL_fresh', price=price['CSL'])
# lime_fresh = Stream('lime_fresh', price=price['Lime'])
# S401_out1_F_mass = S401.outs[1].F_mass
# if not (S401_out1_F_mass == 0):
# ethanol_fresh = Stream('ethanol_fresh', Ethanol = 0.24 * S401_out1_F_mass, units='kg/hr', price=price['Ethanol']) - M401.ins[3].imass['Ethanol']
# DPHP_fresh = Stream('DPHP_fresh', DPHP = 0.25 * S401_out1_F_mass, units='kg/hr', price=price['DPHP']) - M401.ins[3].imass['Dipotassium hydrogen phosphate']
# else:
ethanol_fresh = Stream('ethanol_fresh', Ethanol = feedstock_dry_mass*48*22.1/1000*0.93, units='kg/hr', price=price['Ethanol'])
DPHP_fresh = Stream('DPHP_fresh', DPHP = feedstock_dry_mass*50*22.1/1000*0.93, units='kg/hr', price=price['DPHP'])
# Water used to keep system water usage balanced
system_makeup_water = Stream('system_makeup_water', price=price['Makeup water'])
# Final product, not pure acid (which should be the case in reality)
BDO = Stream('BDO', units='kg/hr', price=price['BDO'])
# Acetoin product
Acetoin = Stream('Acetoin', units='kg/hr', price=price['Acetoin'])
# Chemicals used/generated in BT
FGD_lime = Stream('FGD_lime')
ash = Stream('ash', price=price['Ash disposal'])
boiler_chems = Stream('boiler_chems', price=price['Boiler chems'])
baghouse_bag = Stream('baghouse_bag', price=price['Baghouse bag'])
# Supplementary natural gas for BT if produced steam not enough for regenerating
# all steam streams required by the system
natural_gas = Stream('natural_gas', price=price['Natural gas'])
# Cooling tower chemicals
cooling_tower_chems = Stream('cooling_tower_chems', price=price['Cooling tower chems'])
# 145 based on equipment M-910 (clean-in-place system) in Humbird et al.
CIP_chems_in = Stream('CIP_chems_in', Water=145*U101.feedstock_flow_rate/2205,
units='kg/hr')
CIP_chems_out = Stream('CIP_chems_out')
CIP_chems_out.copy_like(CIP_chems_in)
# 1372608 based on stream 950 in Humbird et al.
# Air needed for multiple processes (including enzyme production that was not included here),
# not rigorously modeled, only scaled based on plant size
plant_air_in = Stream('plant_air_in', phase='g', units='kg/hr',
N2=0.79*1372608*U101.feedstock_flow_rate/2205,
O2=0.21*1372608*U101.feedstock_flow_rate/2205)
# 8021 based on stream 713 in Humbird et al.
fire_water_in = Stream('fire_water_in',
Water=8021*U101.feedstock_flow_rate/2205, units='kg/hr')
# =============================================================================
# Facilities units
# =============================================================================
T601 = units.SulfuricAcidStorageTank('T601', ins=sulfuric_acid_fresh)
T601.line = 'Sulfuric acid storage tank'
S601 = bst.units.ReversedSplitter('S601', ins=T601-0,
outs=(pretreatment_sulfuric_acid,
''))
T602 = units.AmmoniaStorageTank('T602', ins=ammonia_fresh, outs=ammonia)
T602.line = 'Ammonia storage tank'
T603 = units.CSLstorageTank('T603', ins=CSL_fresh, outs=CSL)
T603.line = 'CSL storage tank'
# DPHP storage
T604 = units.DPHPStorageTank('T604', ins=DPHP_fresh)
T604.line = 'DPHP storage tank'
T604_P = bst.units.ConveyingBelt('T604_P', ins=T604-0)
# 7-day storage time, similar to ethanol's in Humbird et al.
T605 = units.EthanolStorageTank('T605', ins=ethanol_fresh,
tau=7*24, V_wf=0.9,
vessel_type='Floating roof',
vessel_material='Carbon steel')
T605.line = 'Ethanol storage tank'
T605_P = units.BDOPump('T605_P', ins=T605-0)
# Connections to ATPE Mixer
T604_P-0-1-M401
T605_P-0-2-M401
# 7-day storage time, similar to ethanol's in Humbird et al.
T606 = units.BDOStorageTank('T606', ins=D402_P-0, tau=7*24, V_wf=0.9,
vessel_type='Floating roof',
vessel_material='Stainless steel')
T606.line = 'BDOStorageTank'
T606_P = units.BDOPump('T606_P', ins=T606-0, outs=BDO)
# 7-day storage time, similar to ethanol's in Humbird et al.
T607 = units.BDOStorageTank('T607', ins=D402_H-0, tau=7*24, V_wf=0.9,
vessel_type='Floating roof',
vessel_material='Stainless steel')
T607.line = 'AcetoinStorageTank'
T607_P = units.BDOPump('T607_P', ins=T607-0, outs=Acetoin)
CIP = facilities.OrganicAcidsCIP('CIP', ins=CIP_chems_in, outs=CIP_chems_out)
ADP = facilities.OrganicAcidsADP('ADP', ins=plant_air_in, outs='plant_air_out')
FWT = units.FireWaterTank('FWT', ins=fire_water_in, outs='fire_water_out')
# M505-0 is the liquid/solid mixture, R501-0 is the biogas, blowdown is discharged
BT = facilities.OrganicAcidsBT('BT', ins=(M505-0, R501-0,
FGD_lime, boiler_chems,
baghouse_bag, natural_gas,
'BT_makeup_water'),
B_eff=0.8, TG_eff=0.85,
combustibles=combustibles,
side_streams_to_heat=(pretreatment_feedstock_water,
pretreatment_acid_water,
pretreatment_steam),
outs=('gas_emission', ash, 'boiler_blowdown_water'))
# BT = bst.BDunits.BoilerTurbogenerator('BT',
# ins=(M505-0, R501-0, 'boiler_makeup_water', 'natural_gas', FGD_lime, boiler_chems),
# boiler_efficiency=0.80,
# turbogenerator_efficiency=0.85)
# Blowdown is discharged
CT = facilities.OrganicAcidsCT('CT',
ins=('return_cooling_water',
'CT_makeup_water',
cooling_tower_chems),
outs=('process_cooling_water',
'cooling_tower_blowdown'))
# All water used in the system, here only consider water usage,
# if heating needed, then heeating duty required is considered in BT
process_water_streams = (pretreatment_feedstock_water, pretreatment_acid_water,
pretreatment_steam, pretreatment_ammonia_water,
enzyme_water,
aerobic_caustic,
CIP.ins[-1], BT.ins[-1], CT.ins[-1])
PWC = facilities.OrganicAcidsPWC('PWC', ins=system_makeup_water,
process_water_streams=process_water_streams,
outs='process_water')
# Heat exchange network
HXN = bst.facilities.HeatExchangerNetwork('HXN')
# %%
# =============================================================================
# Complete system
# =============================================================================
# BDO_sys = System('BDO_sys',
# [
# # Feedstock preprocessing
# U101,
# # Pretreatment
# T201, M201, # sulfuric acid mixing and addition
# M202, # feedstock mixing
# M203, R201, R201_H, # pretreatment
# T202, T203,# blowdown and oligomer conversion
# F201, F201_H, # pretreatment flash and waste vapor condensation
# M204, T204, T204_P, # ammonia addition
# M205, M205_P, # waste vapor mixing and pumping
# # Conversion
# H301, # hydrolysate cooler
# M301, # enzyme addition
# System('fermentation_recycle',
# [M302, R301, # simultaneous saccharification and co-fermentation
# R302, T301], # seed train and seed holding tank
# recycle=T301-0), # recycle seed
# # Separation
# S401, # cell mass filter
# R401, R401_P, # acidulation
# T401, T401_P, # sulfuric acid addition
# S402, # gypsum filter
# F401, F401_H, F401_P, # separate water
# D401, D401_H, D401_P, # separate other volatiles
# System('esterification_recycle',
# [System('outer_loop_acid_and_ester_recycle',
# [System('inner_loop_ethanol_cycle',
# [R402, R402_P, # esterification of lactic acid
# D401, D401_H, D401_P], # separate out ethanol
# recycle=D401_H-0), # recycle ethanol
# D401, D401_H, D401_P, S403], # separate out acid and ester
# recycle=S403-0), # recycle acid and ester
# System('hydrolysis_recycle',
# [R403, R403_P, # hydrolysis of ester
# D404, D404_H, D404_P, # separate out ethanol for recylcing
# F402, F402_H, F402_P], # separate out volatiles
# recycle=F402_H-0), # recycle ester
# ],
# recycle=D404_H-0), # recycle ethanol
# D405, D405_H1, D405_H2, D405_P, # final purification of the acid product
# # Wastewater treatment
# M501, # mix all wastewater streams
# WWT_cost, # total cost of wastewater treatment process
# R501, # anaerobic digestion
# System('wastewater_treatment_recycle',
# [M502, R502, # aerobic digestion
# S501, # membrane bioreactor
# S502, M503], # sludge centrifuge
# recycle=M503-0), # recycle sludge
# M504, S503, # sludge centrifuge
# S504, # reverse osmosis
# M505, # sludge mixer
# # Facilities
# S601, T601, # sulfuric acid storage
# T602, # ammonia storage
# T603, # CSL storage
# T604, T604_P, # lime storage
# T605, T605_P, # ethanol storage
# T606, T606_P], # lactic acid product storage
# # facilities=(BT, CT, PWC, CIP, ADP, FWT))
# facilities=(HXN, BT, CT, PWC, CIP, ADP, FWT))
BDO_sys = bst.main_flowsheet.create_system(
'BDO_sys', feeds=[i for i in bst.main_flowsheet.stream
if i.sink and not i.source])
BT_sys = System('BT_sys', path=(BT,))
# =============================================================================
# TEA
# =============================================================================
BDO_no_BT_tea = BDOTEA(
system=BDO_sys, IRR=0.10, duration=(2016, 2046),
depreciation='MACRS7', income_tax=0.35, operating_days=350.4,
lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
startup_VOCfrac=0.75, WC_over_FCI=0.05,
finance_interest=0.08, finance_years=10, finance_fraction=0.4,
# biosteam Splitters and Mixers have no cost,
# cost of all wastewater treatment units are included in WWT_cost,
# BT is not included in this TEA
OSBL_units=(U101, WWT_cost,
T601, T602, T603, T604, T604_P, T605, T605_P, T606, T606_P,
CT, PWC, CIP, ADP, FWT),
warehouse=0.04, site_development=0.09, additional_piping=0.045,
proratable_costs=0.10, field_expenses=0.10, construction=0.20,
contingency=0.10, other_indirect_costs=0.10,
labor_cost=2.5e6*_labor_2007to2016*U101.feedstock_flow_rate/2205,
labor_burden=0.90, property_insurance=0.007, maintenance=0.03)
BDO_no_BT_tea.units.remove(BT)
# # Removed because there is not double counting anyways.
# # Removes feeds/products of BT_sys from BDO_sys to avoid double-counting
# for i in BT_sys.feeds:
# BDO_sys.feeds.remove(i)
# for i in BT_sys.products:
# BDO_sys.products.remove(i)
# Boiler turbogenerator potentially has different depreciation schedule
BT_tea = bst.TEA.like(BT_sys, BDO_no_BT_tea)
BT_tea.labor_cost = 0
# Changed to MACRS 20 to be consistent with Humbird
BT_tea.depreciation = 'MACRS20'
BT_tea.OSBL_units = (BT,)
BDO_tea = bst.CombinedTEA([BDO_no_BT_tea, BT_tea], IRR=0.10)
BDO_sys._TEA = BDO_tea
# =============================================================================
# Simulate system and get results
# =============================================================================
get_BDO_MPSP()
# R301 = F('R301') # Fermentor
yearly_production = 125000 # ton/yr
# Fermentation performance targets tied to evaporator F301, dilution mixer
# M303, and fermenter R302. Presumed units: yield_ in g product / g
# substrate, titer in g/L, productivity in g/L/h — confirm against the
# ProcessSpecification definition.
spec = ProcessSpecification(
    evaporator = F301,
    mixer = M303,
    reactor=R302,
    reaction_name='fermentation_reaction',
    substrates=('Xylose', 'Glucose'),
    products=('BDO',),
    yield_=0.909,
    titer=100,
    productivity=18.5,
    path = (M303_H, M303_H_P),
    xylose_utilization_fraction = 0.80)
path = (F301, R302)
@np.vectorize
@np.vectorize
# vapor_fractions = np.linspace(0.20, 0.80)
# titers = calculate_titer(vapor_fractions)
# MPSPs = calculate_MPSP(vapor_fractions)
# import matplotlib.pyplot as plt
# plt.plot(vapor_fractions, titers)
# plt.show()
# plt.plot(titers, MPSPs)
# plt.show()
# %%
# =============================================================================
# For Monte Carlo and analyses
# =============================================================================
BDO_sub_sys = {
# 'feedstock_sys': (U101,),
# 'pretreatment_sys': (T201, M201, M202, M203,
# R201, R201_H, T202, T203,
# F201, F201_H,
# M204, T204, T204_P,
# M205, M205_P),
# 'conversion_sys': (H301, M301, M302, R301, R302, T301),
'separation_sys': (S401, M401, M401_P,
S402,
F401, F401_H, F401_P,
D401, D401_H, D401_P, S403,
M402_P, S403,
D402, D402_H, D402_P,
M501,
T606, T606_P, T607, T607_P)
# F402, F402_H, F402_P,
# D405, D405_H1, D405_H2, D405_P,
# M401, M401_P)
# 'wastewater_sys': (M501, WWT_cost, R501,
# M502, R502, S501, S502, M503,
# M504, S503, S504, M505),
# 'HXN': (HXN,),
# 'BT': (BT,),
# 'CT': (CT,),
# 'other_facilities': (T601, S601,
# T602, T603,
# T604, T604_P,
# T605, T605_P,
# T606, T606_P,
# PWC, CIP, ADP, FWT)
}
# for unit in sum(BDO_sub_sys.values(), ()):
# if not unit in BDO_sys.units:
# print(f'{unit.ID} not in BDO_sys.units')
# for unit in BDO_sys.units:
# if not unit in sum(BDO_sub_sys.values(), ()):
# print(f'{unit.ID} not in BDO_sub_sys')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
4280,
1542,
7769,
25,
1314,
25,
1954,
13130,
198,
198,
5841,
1431,
422,
262,
11676,
... | 2.110083 | 17,187 |
import pytest
from ithkuil.morphology.words import remove_stress
# (input, expected) pairs for remove_stress.
# NOTE(review): 'aú' -> 'aù' swaps the acute for a grave accent while
# 'ái' -> 'ai' drops the accent entirely — confirm this asymmetry is
# intended by the Ithkuil orthography rules.
txts_to_test = [
    ('a', 'a'),
    ('o', 'o'),
    ('áu', 'au'),
    ('ái', 'ai'),
    ('aú', 'aù'),
    ('aé', 'ae'),
    ('á', 'a')
]
@pytest.mark.parametrize('txt, expected', txts_to_test) | [
11748,
12972,
9288,
198,
6738,
340,
71,
23063,
346,
13,
24503,
1435,
13,
10879,
1330,
4781,
62,
41494,
198,
198,
14116,
82,
62,
1462,
62,
9288,
796,
685,
198,
220,
220,
220,
19203,
64,
3256,
705,
64,
33809,
198,
220,
220,
220,
19203... | 1.984733 | 131 |
#-------------------------------------------------------------------------------
# Name: Vdc Source
# Author: D.Fathi
# Created: 20/03/2015
# Modified: 01/04/2020
# Copyright: (c) PyAMS
# Licence: CC-BY-SA
#-------------------------------------------------------------------------------
from PyAMS import signal,param,model
from signalType import voltage
#Source for constant voltage
| [
171,
119,
123,
2,
10097,
24305,
198,
2,
6530,
25,
220,
220,
220,
220,
220,
220,
220,
569,
17896,
8090,
198,
2,
6434,
25,
220,
220,
220,
220,
220,
360,
13,
37,
44202,
198,
2,
15622,
25,
220,
220,
220,
220,
1160,
14,
3070,
14,
4... | 3.643478 | 115 |
# Copyright © 2018. All rights reserved.
# Author: German Yakimov
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import os
import inspect
import platform
import sys
from Services.Singleton import Singleton
| [
2,
15069,
10673,
2864,
13,
1439,
2489,
10395,
13,
198,
2,
6434,
25,
2679,
30254,
44273,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.920213 | 188 |
import pytest
@pytest.fixture(scope='function')
@pytest.fixture(scope='module')
@pytest.mark.website
| [
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
8818,
11537,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
21412,
11537,
628,
198,
31,
9078,
9288,
13,
4102,
13,
732,
12485,
628,
198
] | 2.658537 | 41 |
from setuptools import setup, find_packages
from PublisherJiraTestsResults.version import VERSION
classifiers = [
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
]
setup(
name='robotframework-publisher-results-jira',
version=VERSION,
description='Library to publish robot framework automation results on jira',
author='Ismail Ktami',
author_email='ktamiismail@hotmail.com',
license='MIT',
classifiers=classifiers,
keywords='robotframework jira xray testplans results outcomes',
packages=find_packages(),
install_requires=[
'robotframework>=3.2.2',
'requests',
'utils'
]
) | [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28045,
41,
8704,
51,
3558,
25468,
13,
9641,
1330,
44156,
2849,
198,
198,
4871,
13350,
796,
685,
198,
220,
220,
220,
705,
34156,
7904,
7294,
40,
20010,
1079,
7904,
17... | 3.088889 | 225 |
import importlib
_DEVICE = 'frontend'
required_entries = [
'reset',
'init',
'run',
'add_node',
]
| [
11748,
1330,
8019,
198,
198,
62,
7206,
27389,
796,
705,
8534,
437,
6,
198,
35827,
62,
298,
1678,
796,
685,
198,
220,
220,
220,
705,
42503,
3256,
198,
220,
220,
220,
705,
15003,
3256,
198,
220,
220,
220,
705,
5143,
3256,
198,
220,
... | 2.148148 | 54 |
import pdb
import argparse
import matplotlib
import numpy as np
import gym, sys, time, os
import torch
import numpy as np
import datetime
sys.path.insert(0, '..') # NOQA: E402
from logger.logger import Logger
import utils
from mountain_car import MCFeatures, MCFeaturesOnehot
parser = argparse.ArgumentParser()
parser.add_argument('--policy-path', type=str, nargs='?', default=None)
parser.add_argument('--play', action='store_true',
help='play given or latest stored policy.')
parser.add_argument('--dont-save', action='store_true',
help="don't save the policy network weights.")
parser.add_argument('--render', action='store_true', help="show the env.")
parser.add_argument('--on-server', action='store_true',
help="True if the code is being run on a server.")
parser.add_argument('--store-train-results', action='store_true',
help='True if you want to store intermediate results')
parser.add_argument('--store-interval', action='store_true',
help='Interval of storing the results.')
parser.add_argument('--rl-episodes', type=int, default=50)
parser.add_argument('--rl-ep-length', type=int, default=30)
parser.add_argument('--irl-iterations', type=int, default=100)
parser.add_argument('--rl-log-intervals', type=int, default=10)
parser.add_argument('--regularizer', type=float, default=0, help='The regularizer to use.')
parser.add_argument('--seed', type=int, default=7, help='The seed for the run')
parser.add_argument('--save-folder', type=str, default=None,
help='The name of the directory to store the results in. The name will be used to \
save the plots, the policy and the reward networks.(Relative path)')
parser.add_argument('--exp-trajectory-path', type=str, default=None, help='The name of the directory in which \
the expert trajectories are stored.(Relative path)')
parser.add_argument('--reward-net-hidden-dims', nargs="*", type=int , default=[128], help='The dimensions of the \
hidden layers of the reward network.')
parser.add_argument('--policy-net-hidden-dims', nargs="*", type=int , default=[128], help='The dimensions of the \
hidden layers of the policy network.')
parser.add_argument('--lr', type=float, default=1e-3, help='The learning rate for the reward network.')
parser.add_argument('--feat-extractor', type=str, default='MCFeatures', help='The feature extractor \
to be used in the experiment')
parser.add_argument('--scale-svf', action='store_true', default=None, help='If true, will scale the states \
based on the reward the trajectory got.')
parser.add_argument('--clipping-value', type=float, default=None, help='For gradient clipping of the \
reward network.')
#IMPORTANT*** search for 'CHANGE HERE' to find that most probably need changing
#before running on different settings
'''
python run_experiment.py --on-server --rl-episodes 1000 --rl-ep-length 200 --irl-itreations 200 --rl-log-intervals 100
--seed 100 --exp-trajectory-path './exp_traj_mountain_car/' --reward-net-hidden-dims 256
'''
if __name__ == '__main__':
main()
| [
11748,
279,
9945,
198,
11748,
1822,
29572,
198,
11748,
2603,
29487,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11550,
11,
25064,
11,
640,
11,
28686,
198,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
220,
198,
198,
1... | 2.742475 | 1,196 |
import contextlib
import typing as t
from dataclasses import dataclass
from ..scanner import Scanner
if t.TYPE_CHECKING:
from .extractor import TokenExtractor
T = t.TypeVar('T')
U = t.TypeVar('U')
V = t.TypeVar('V')
R = t.TypeVar('R')
@dataclass
@dataclass
class RuleSet(t.Generic[T, U]):
"""
A ordered list of parsing rules that is used a the #Tokenizer.
"""
@t.overload
def __init__(self: 'RuleSet[str, str]') -> None:
""" The default constructor for a #RuleSet uses string for both the token and value type. """
@t.overload
def __init__(self: 'RuleSet[T, U]', sentinel: t.Union[t.Tuple[T, U], Sentinel[T, U]]) -> None:
""" Initialize the #RuleSet with a sentinel configuration. """
@property
@property
def rule(self, type_: T, extractor: 'TokenExtractor[U]', skip: bool = False) -> 'RuleSet[T, U]':
""" Add a rule and return self. """
self._rules.append(Rule(type_, extractor, skip))
self._token_types.add(type_)
return self
class RuleConfigSet(t.Generic[T, U, V]):
""" Helper class to manage values associated with token types. """
def set(self, token_types: t.Union[T, t.Collection[T]], value: V) -> t.ContextManager[None]:
"""
Set the value of one or more token types. The returned context manager _may_ be used, but
does not _have_ to be used, to revert to the previous state.
Implementation detail: strings are not considered collections when identifying the type
of the *token_types* argument.
"""
if isinstance(token_types, str) or not isinstance(token_types, t.Collection):
token_types_set = frozenset([t.cast(T, token_types)])
else:
token_types_set = frozenset(token_types)
current_values = {k: v for k, v in self._values.items() if k in token_types_set}
for token_type in token_types_set:
if not self._rules.has_token_type(token_type):
raise ValueError(f'not a possible token type: {token_type!r}')
for token_type in token_types_set:
self._values[token_type] = value
@contextlib.contextmanager
return _revert()
| [
198,
11748,
4732,
8019,
198,
11748,
19720,
355,
256,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
11485,
35836,
1008,
1330,
20937,
1008,
198,
198,
361,
256,
13,
25216,
62,
50084,
2751,
25,
198,
220,
422,
764,
2... | 2.813765 | 741 |
import cv2
import numpy as np
import random
from augraphy.base.augmentation import Augmentation
from augraphy.base.augmentationresult import AugmentationResult
class BrightnessTexturizeAugmentation(Augmentation):
"""Creates a random noise in the brightness channel to emulate paper
textures.
:param range: Pair of ints determining the range from which to sample values
for the brightness matrix.
:type range: tuple, optional
:param deviation: Additional variation for the uniform sample.
:type deviation: float, optional
:param layer: The image layer to apply the brightness texturization to
:type layer: string, optional
:param p: The probability that this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self, range=(0.9, 0.99), deviation=0.03, layer="paper", p=0.5
):
"""Constructor method"""
super().__init__(p=p)
self.low = range[0]
self.high = range[1]
self.deviation = deviation
self.layer = layer
self.range = range
# Constructs a string representation of this Augmentation.
# Applies the Augmentation to input data.
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
198,
6738,
16339,
1470,
88,
13,
8692,
13,
559,
5154,
341,
1330,
2447,
14374,
198,
6738,
16339,
1470,
88,
13,
8692,
13,
559,
5154,
341,
20274,
1330,
2447,
... | 2.9775 | 400 |
from __future__ import division
from utils import *
def test_state_gen_and_count():
"""Test generation of binary states using bin_states() and xbin_states()."""
assert np.array_equal( bin_states(5),np.vstack([i for i in xbin_states(5)]) )
states = bin_states(5)
p,s = state_probs(states)
assert (p==1/32).all()
assert np.array_equal(s,states)
states = bin_states(5,sym=True)
p,s = state_probs(states)
assert (p==1/32).all()
assert np.array_equal(s,states)
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
3384,
4487,
1330,
1635,
198,
198,
4299,
1332,
62,
5219,
62,
5235,
62,
392,
62,
9127,
33529,
198,
220,
220,
220,
37227,
14402,
5270,
286,
13934,
2585,
1262,
9874,
62,
27219,
3419,
290,
21... | 2.5 | 200 |
soma = 0
cont = 0
maior = 0
mulher = 0
nomemaisvelho = ''
for p in range(1, 5):
print(f'----- {p}ª pessoa -----')
nome = str(input('nome: ')).strip()
idade = int(input('idade: '))
sexo = str(input('sexo [M/F]: ')).strip().lower()
soma = idade + soma
cont = cont + 1
if p == 1 and sexo == 'm':
nomemaisvelho = nome
maior = idade
if sexo == 'm' and idade > maior:
maior = idade
nomemaisvelho = nome
if sexo == 'f' or sexo == 'm':
if sexo == 'f':
if idade < 20:
mulher = mulher + 1
else:
print('sexo invalido')
print(f'a media de idade do grupo è de {soma / cont}')
print(f'o homem mais velho do grupo e o {nomemaisvelho} que tem {maior} anos')
print(f'no registro de pessoas existem {mulher} mulheres com menos de 20 anos')
| [
82,
6086,
796,
657,
198,
3642,
796,
657,
198,
2611,
1504,
796,
657,
198,
76,
377,
372,
796,
657,
198,
26601,
368,
15152,
626,
8873,
796,
10148,
198,
1640,
279,
287,
2837,
7,
16,
11,
642,
2599,
198,
220,
220,
220,
3601,
7,
69,
6,... | 2.053922 | 408 |
import unittest
import numpy as np
from gcode_gen import number
| [
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
308,
8189,
62,
5235,
1330,
1271,
628
] | 3.421053 | 19 |
import datetime
import typing
from ..transform import Transform, TransformContext, Transformer
from .common import create_random
| [
11748,
4818,
8079,
198,
11748,
19720,
198,
198,
6738,
11485,
35636,
1330,
26981,
11,
26981,
21947,
11,
3602,
16354,
198,
6738,
764,
11321,
1330,
2251,
62,
25120,
628,
198
] | 4.551724 | 29 |
import sys
sys.path.append('../G26/Instrucciones')
from instruccion import *
| [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
38,
2075,
14,
6310,
622,
535,
295,
274,
11537,
198,
198,
6738,
916,
622,
535,
295,
1330,
1635,
198
] | 2.689655 | 29 |
#---------------
#MIT License
#Copyright (c) 2019 Simon Liljestrand
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#----------------
# If this file is run, an overview of the stage will be displayed
import pygame
import math
from character import Character
from solid import Solid
from stage import Stage
from solid import Goal
WIDTH = 800
HEIGHT = 600
BROWN = (int(255/2),0,0)
GREEN = (0,int(255/3*2),0)
WHITE = (255,255,255)
STAGE_WIDTH = WIDTH
STAGE_HEIGHT = HEIGHT + 300
pygame.init()
screen = pygame.display.set_mode((WIDTH,HEIGHT))
character = Character(screen, int(WIDTH/2), int(HEIGHT/2), (255,215,0), 50)
solids = []
ground = Solid(BROWN,
[(0,200,math.floor(WIDTH/3),200),
(math.floor(WIDTH/3),100,math.floor(WIDTH/3),100),
(math.floor(WIDTH/3)*2,200,(WIDTH-1-math.floor(WIDTH/3)*2),200)])
platform =Solid(GREEN,[(0,int(HEIGHT*2/3),250,50)])
goalWidth = 100
goalHeight = 150
goal = Goal(WHITE, [(STAGE_WIDTH - goalWidth - 50, 700+goalHeight, goalWidth, goalHeight)])
solids.append(ground)
solids.append(platform)
solids.append(goal)
isPeriodic = True
stage = Stage(screen, character, isPeriodic, STAGE_WIDTH, STAGE_HEIGHT, solids)
if __name__ == '__main__':
stage.DrawStage(800)
input('press anything to exit')
| [
2,
24305,
198,
2,
36393,
13789,
198,
198,
2,
15269,
357,
66,
8,
13130,
11288,
16342,
73,
395,
25192,
198,
198,
2,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
1659,
428,
3788,
29... | 2.959371 | 763 |
import argparse
import sys
import yaml
sys.path.insert(0, "../../../util/python")
import Cons
_conf = None
# Implement when needed
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
331,
43695,
198,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
366,
40720,
40720,
40720,
22602,
14,
29412,
4943,
198,
11748,
3515,
198,
198,
62,
10414,
796,
6045,
628,
628,
198,
2,
4828... | 3.044444 | 45 |
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
if __name__ == '__main__':
main()
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
3... | 3.162791 | 86 |
import gym
import random
import torch
import numpy as np
from collections import deque
# imort matplotlib.pyplot as plt
from MyAgent import Agent
env = gym.make('Pendulum-v0')
env.seed(2)
agent = Agent(state_size=3, action_size=1)
scores = ddpg()
# fig = plt.figure()
# ax = fig.add_subplot(111)
# plt.plot(np.arange(1, len(scores)+1), scores)
# plt.ylabel('Score')
# plt.xlabel('Episode #')
# plt.show() | [
11748,
11550,
198,
11748,
4738,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
390,
4188,
198,
2,
545,
419,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2011,
36772,
1330,
15906,
198,
198,... | 2.54375 | 160 |
from voxel_globe.common_tasks import shared_task, VipTask
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task(base=VipTask, bind=True)
def cleanup(self, upload_session_id):
''' Clean up after successful ingest
Currently this only entails removing the upload session information '''
from voxel_globe.ingest.models import UploadSession
upload_session = UploadSession.objects.get(id=upload_session_id)
upload_session.delete()
| [
6738,
410,
1140,
417,
62,
4743,
5910,
13,
11321,
62,
83,
6791,
1330,
4888,
62,
35943,
11,
569,
541,
25714,
198,
198,
6738,
18725,
1924,
13,
26791,
13,
6404,
1330,
651,
62,
35943,
62,
6404,
1362,
198,
6404,
1362,
796,
651,
62,
35943,... | 3.149351 | 154 |
import json
import boto3
import re
import datetime
s3 = boto3.client('s3')
GET_RAW_PATH = '/getLogCount'
POST_RAW_PATH = '/postLogCount'
GRPC_RAW_PATH = '/GRPC'
TEST_RAW_PATH = '/test'
regexPattern = re.compile('([a-c][e-g][0-3]|[A-Z][5-9][f-w]){5,15}')
## Method to parse input for GET, POST, GRPC requests
## Method to parse input for Test cases
## Binary Search program to get indices of start and end time of the required interval from log file
## Method to extract start and end time of log file
## Driver method
| [
11748,
33918,
198,
11748,
275,
2069,
18,
198,
11748,
302,
198,
11748,
4818,
8079,
198,
198,
82,
18,
796,
275,
2069,
18,
13,
16366,
10786,
82,
18,
11537,
198,
198,
18851,
62,
20530,
62,
34219,
796,
31051,
1136,
11187,
12332,
6,
198,
... | 2.854054 | 185 |
"""
Author: shikechen
Function: Write image to sheet
Version: 1.0
Date: 2019/3/6
"""
from openpyxl import Workbook
from openpyxl.drawing.image import Image
if __name__ == '__main__':
main()
| [
37811,
198,
220,
220,
220,
6434,
25,
427,
522,
6607,
198,
220,
220,
220,
15553,
25,
19430,
2939,
284,
9629,
198,
220,
220,
220,
10628,
25,
352,
13,
15,
198,
220,
220,
220,
7536,
25,
13130,
14,
18,
14,
21,
198,
37811,
198,
6738,
... | 2.535714 | 84 |
import numpy as np
import pickle
# Load in color lookup table data
with open('colors.pkl', 'rb') as f:
LERPED = pickle.load(f)
LUT = np.load('LUT.npy')
def set_color(bg, fg):
'''
Generates a character sequence to set the foreground and background colors
'''
return f'\u001b[48;5;{bg};38;5;{fg}m'
def convert_img(img, charset=' ,(S#g@@g#S(, ', width=80, height=1):
'''
Convert an RGB image to a stream of text with ANSI color codes
'''
line = ''
for row in img:
for color in row:
color = np.round(color).astype(int)
b, g, r = color[0], color[1], color[2]
# Lookup the color index in the RGB lookup table
idx = LUT[b, g, r]
# Get the ANSI color codes and lerp character
bg, fg, lerp, rgb = LERPED[idx]
char = charset[lerp]
line += set_color(bg, fg) + char
# End each line with a black background to avoid color fringe
line += '\u001b[48;5;16;38;5;16m\n'
# Move the cursor back to the top of the frame to prevent rolling
line += f'\u001b[{width}D\u001b[{height + 1}A'
return line
if __name__ == '__main__':
import cv2
import sys
# Width of the output in terminal characters
WIDTH = 80
HEIGHT = 1
if len(sys.argv) == 2:
img = cv2.imread(sys.argv[1])
# Match the aspect ratio to that of the provided image
src_height, src_width, _ = img.shape
aspect_ratio = src_width / src_height
HEIGHT = int(WIDTH / (2 * aspect_ratio))
img = cv2.resize(img, (WIDTH, HEIGHT))
print(convert_img(img, width=WIDTH, height=HEIGHT))
else:
print('Expected image file as argument.')
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2298,
293,
201,
198,
201,
198,
2,
8778,
287,
3124,
35847,
3084,
1366,
201,
198,
4480,
1280,
10786,
4033,
669,
13,
79,
41582,
3256,
705,
26145,
11537,
355,
277,
25,
201,
198,
220,
220,
... | 2.036585 | 902 |